From 1c29bc5a3dd18b048e6f33cdedea8221bd73699b Mon Sep 17 00:00:00 2001 From: William Villeneuve Date: Tue, 23 Jan 2018 23:31:23 -0500 Subject: [PATCH 001/223] fixed quoting of script path in cmd.script --- salt/modules/cmdmod.py | 6 ++- salt/utils/win_functions.py | 49 ++++++++++++++++++++++++ tests/unit/utils/test_win_functions.py | 52 ++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 tests/unit/utils/test_win_functions.py diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index cd23383654..ca3a516155 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -47,6 +47,7 @@ except ImportError: if salt.utils.is_windows(): from salt.utils.win_runas import runas as win_runas + from salt.utils.win_functions import escape_argument as win_cmd_quote HAS_WIN_RUNAS = True else: HAS_WIN_RUNAS = False @@ -2147,7 +2148,10 @@ def script(source, os.chmod(path, 320) os.chown(path, __salt__['file.user_to_uid'](runas), -1) - path = _cmd_quote(path) + if salt.utils.is_windows(): + path = win_cmd_quote(path) + else: + path = _cmd_quote(path) ret = _run(path + ' ' + str(args) if args else path, cwd=cwd, diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 9f9f1d7a10..67718d0184 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -5,6 +5,7 @@ missing functions in other modules ''' from __future__ import absolute_import import platform +import re # Import Salt Libs from salt.exceptions import CommandExecutionError @@ -159,3 +160,51 @@ def get_sam_name(username): return '\\'.join([platform.node()[:15].upper(), username]) username, domain, _ = win32security.LookupAccountSid(None, sid_obj) return '\\'.join([domain, username]) + +def escape_argument(arg): + ''' + Escape the argument for the cmd.exe shell. 
+ See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx + + First we escape the quote chars to produce a argument suitable for + CommandLineToArgvW. We don't need to do this for simple arguments. + + Args: + arg (str): a single command line argument to escape for the cmd.exe shell + + Returns: + str: an escaped string suitable to be passed as a program argument to the cmd.exe shell + ''' + if not arg or re.search(r'(["\s])', arg): + arg = '"' + arg.replace('"', r'\"') + '"' + + return escape_for_cmd_exe(arg) + +def escape_for_cmd_exe(arg): + ''' + Escape an argument string to be suitable to be passed to + cmd.exe on Windows + + This method takes an argument that is expected to already be properly + escaped for the receiving program to be properly parsed. This argument + will be further escaped to pass the interpolation performed by cmd.exe + unchanged. + + Any meta-characters will be escaped, removing the ability to e.g. use + redirects or variables. 
+ + Args: + arg (str): a single command line argument to escape for cmd.exe + + Returns: + str: an escaped string suitable to be passed as a program argument to cmd.exe + ''' + meta_chars = '()%!^"<>&|' + meta_re = re.compile('(' + '|'.join(re.escape(char) for char in list(meta_chars)) + ')') + meta_map = { char: "^%s" % char for char in meta_chars } + + def escape_meta_chars(m): + char = m.group(1) + return meta_map[char] + + return meta_re.sub(escape_meta_chars, arg) diff --git a/tests/unit/utils/test_win_functions.py b/tests/unit/utils/test_win_functions.py new file mode 100644 index 0000000000..10d19a5215 --- /dev/null +++ b/tests/unit/utils/test_win_functions.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- + +# Import Python Libs +from __future__ import absolute_import, unicode_literals, print_function + +# Import Salt Testing Libs +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt Libs +import salt.utils.win_functions as win_functions + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class WinFunctionsTestCase(TestCase): + ''' + Test cases for salt.utils.win_functions + ''' + + def test_escape_argument_simple(self): + ''' + Test to make sure we encode simple arguments correctly + ''' + encoded = win_functions.escape_argument('simple') + + self.assertEqual(encoded, 'simple') + + def test_escape_argument_with_space(self): + ''' + Test to make sure we encode arguments containing spaces correctly + ''' + encoded = win_functions.escape_argument('with space') + + self.assertEqual(encoded, '^"with space^"') + + def test_escape_argument_simple_path(self): + ''' + Test to make sure we encode simple path arguments correctly + ''' + encoded = win_functions.escape_argument('C:\\some\\path') + + self.assertEqual(encoded, 'C:\\some\\path') + + def test_escape_argument_path_with_space(self): + ''' + Test to make sure we encode path arguments containing spaces correctly + ''' + encoded = 
win_functions.escape_argument('C:\\Some Path\\With Spaces') + + self.assertEqual(encoded, '^"C:\\Some Path\\With Spaces^"') From 912347abc3c7260ed5cb6a51df210427243f8d5b Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 26 Jan 2018 10:05:06 -0800 Subject: [PATCH 002/223] Include the duration when a state does not run, for example when the `onchanges` requisite is not met. --- salt/state.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/state.py b/salt/state.py index 17a57a51a7..a3b238ecfd 100644 --- a/salt/state.py +++ b/salt/state.py @@ -162,6 +162,23 @@ def _l_tag(name, id_): return _gen_tag(low) +def _calculate_fake_duration(): + ''' + Generate a NULL duration for when states do not run + but we want the results to be consistent. + ''' + utc_start_time = datetime.datetime.utcnow() + local_start_time = utc_start_time - \ + (datetime.datetime.utcnow() - datetime.datetime.now()) + utc_finish_time = datetime.datetime.utcnow() + start_time = local_start_time.time().isoformat() + delta = (utc_finish_time - utc_start_time) + # duration in milliseconds.microseconds + duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0 + + return start_time, duration + + def trim_req(req): ''' Trim any function off of a requisite @@ -2401,9 +2418,12 @@ class State(object): _cmt = 'One or more requisite failed: {0}'.format( ', '.join(str(i) for i in failed_requisites) ) + start_time, duration = _calculate_fake_duration() running[tag] = { 'changes': {}, 'result': False, + 'duration': duration, + 'start_time': start_time, 'comment': _cmt, '__run_num__': self.__run_num, '__sls__': low['__sls__'] @@ -2419,8 +2439,11 @@ class State(object): ret = self.call(low, chunks, running) running[tag] = ret elif status == 'pre': + start_time, duration = _calculate_fake_duration() pre_ret = {'changes': {}, 'result': True, + 'duration': duration, + 'start_time': start_time, 'comment': 'No changes detected', '__run_num__': self.__run_num, 
'__sls__': low['__sls__']} @@ -2428,15 +2451,21 @@ class State(object): self.pre[tag] = pre_ret self.__run_num += 1 elif status == 'onfail': + start_time, duration = _calculate_fake_duration() running[tag] = {'changes': {}, 'result': True, + 'duration': duration, + 'start_time': start_time, 'comment': 'State was not run because onfail req did not change', '__run_num__': self.__run_num, '__sls__': low['__sls__']} self.__run_num += 1 elif status == 'onchanges': + start_time, duration = _calculate_fake_duration() running[tag] = {'changes': {}, 'result': True, + 'duration': duration, + 'start_time': start_time, 'comment': 'State was not run because none of the onchanges reqs changed', '__run_num__': self.__run_num, '__sls__': low['__sls__']} From 359265869fa0b9ab6b5d1aa8b0d62890adf69564 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 26 Jan 2018 12:04:04 -0800 Subject: [PATCH 003/223] Adding a couple tests to ensure that duration is included in state run results even when states do not run. 
--- salt/state.py | 3 +++ tests/integration/modules/test_state.py | 26 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/salt/state.py b/salt/state.py index a3b238ecfd..9ba668fd3b 100644 --- a/salt/state.py +++ b/salt/state.py @@ -2334,8 +2334,11 @@ class State(object): run_dict = self.pre else: run_dict = running + start_time, duration = _calculate_fake_duration() run_dict[tag] = {'changes': {}, 'result': False, + 'duration': duration, + 'start_time': start_time, 'comment': comment, '__run_num__': self.__run_num, '__sls__': low['__sls__']} diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index 591a946315..2d8a494d8b 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -1016,6 +1016,20 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): expected_result = 'State was not run because none of the onchanges reqs changed' self.assertIn(expected_result, test_data) + def test_onchanges_requisite_with_duration(self): + ''' + Tests a simple state using the onchanges requisite + the state will not run but results will include duration + ''' + + # Only run the state once and keep the return data + state_run = self.run_function('state.sls', mods='requisites.onchanges_simple') + + # Then, test the result of the state run when changes are not expected to happen + # and ensure duration is included in the results + test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run'] + self.assertIn('duration', test_data) + # onfail tests def test_onfail_requisite(self): @@ -1069,6 +1083,18 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): expected_result = 'State was not run because onfail req did not change' self.assertIn(expected_result, test_data) + def test_onfail_requisite_with_duration(self): + ''' + Tests a simple state using the onfail requisite + ''' + + # Only run the state once and keep the return data + state_run = 
self.run_function('state.sls', mods='requisites.onfail_simple') + + # Then, test the result of the state run when a failure is not expected to happen + test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'] + self.assertIn('duration', test_data) + # listen tests def test_listen_requisite(self): From 217791079bab810a0eae8f742404ec897ff5f0ef Mon Sep 17 00:00:00 2001 From: William Villeneuve Date: Sun, 28 Jan 2018 12:42:33 -0500 Subject: [PATCH 004/223] some code cleanup (lint errors and escape_argument as _cmd_quote) --- salt/modules/cmdmod.py | 9 +++------ salt/utils/win_functions.py | 12 +++++++----- tests/unit/utils/test_win_functions.py | 1 + 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index ca3a516155..ba9b421531 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -36,7 +36,6 @@ from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \ SaltInvocationError from salt.log import LOG_LEVELS from salt.ext.six.moves import range, zip -from salt.ext.six.moves import shlex_quote as _cmd_quote from salt.utils.locales import sdecode # Only available on POSIX systems, nonfatal on windows @@ -47,9 +46,10 @@ except ImportError: if salt.utils.is_windows(): from salt.utils.win_runas import runas as win_runas - from salt.utils.win_functions import escape_argument as win_cmd_quote + from salt.utils.win_functions import escape_argument as _cmd_quote HAS_WIN_RUNAS = True else: + from salt.ext.six.moves import shlex_quote as _cmd_quote HAS_WIN_RUNAS = False __proxyenabled__ = ['*'] @@ -2148,10 +2148,7 @@ def script(source, os.chmod(path, 320) os.chown(path, __salt__['file.user_to_uid'](runas), -1) - if salt.utils.is_windows(): - path = win_cmd_quote(path) - else: - path = _cmd_quote(path) + path = _cmd_quote(path) ret = _run(path + ' ' + str(args) if args else path, cwd=cwd, diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 
67718d0184..6c7ff4040b 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -161,11 +161,12 @@ def get_sam_name(username): username, domain, _ = win32security.LookupAccountSid(None, sid_obj) return '\\'.join([domain, username]) + def escape_argument(arg): ''' Escape the argument for the cmd.exe shell. See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx - + First we escape the quote chars to produce a argument suitable for CommandLineToArgvW. We don't need to do this for simple arguments. @@ -180,19 +181,20 @@ def escape_argument(arg): return escape_for_cmd_exe(arg) + def escape_for_cmd_exe(arg): ''' Escape an argument string to be suitable to be passed to cmd.exe on Windows - + This method takes an argument that is expected to already be properly escaped for the receiving program to be properly parsed. This argument will be further escaped to pass the interpolation performed by cmd.exe unchanged. - + Any meta-characters will be escaped, removing the ability to e.g. use redirects or variables. 
- + Args: arg (str): a single command line argument to escape for cmd.exe @@ -201,7 +203,7 @@ def escape_for_cmd_exe(arg): ''' meta_chars = '()%!^"<>&|' meta_re = re.compile('(' + '|'.join(re.escape(char) for char in list(meta_chars)) + ')') - meta_map = { char: "^%s" % char for char in meta_chars } + meta_map = {char: "^{0}".format(char) for char in meta_chars} def escape_meta_chars(m): char = m.group(1) diff --git a/tests/unit/utils/test_win_functions.py b/tests/unit/utils/test_win_functions.py index 10d19a5215..bbd0ef86fd 100644 --- a/tests/unit/utils/test_win_functions.py +++ b/tests/unit/utils/test_win_functions.py @@ -13,6 +13,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils.win_functions as win_functions + @skipIf(NO_MOCK, NO_MOCK_REASON) class WinFunctionsTestCase(TestCase): ''' From 37e067c7b5989e2ea7eba091cd4b297c00332f74 Mon Sep 17 00:00:00 2001 From: Christian McHugh Date: Mon, 29 Jan 2018 20:13:55 +0000 Subject: [PATCH 005/223] support amazon linux 2 for service module --- salt/modules/rh_service.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/modules/rh_service.py b/salt/modules/rh_service.py index c7ce0d51f7..3c0c1c3441 100644 --- a/salt/modules/rh_service.py +++ b/salt/modules/rh_service.py @@ -97,6 +97,15 @@ def __virtual__(): 'RedHat-based distros >= version 7 use systemd, will not ' 'load rh_service.py as virtual \'service\'' ) + if __grains__['os'] == 'Amazon': + if int(osrelease_major) in (2016, 2017): + return __virtualname__ + else: + return ( + False, + 'Amazon Linux >= version 2 use systemd, will not ' + 'load rh_service.py as virtual \'service\'' + ) return __virtualname__ return (False, 'Cannot load rh_service module: OS not in {0}'.format(enable)) From 47cf00d88eb6ec3dfdf30829fb7bbb79786403bc Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Tue, 30 Jan 2018 14:44:54 +0100 Subject: [PATCH 006/223] SSH shell shim: Don't use $() for optimal support Some shells don't support the `$()` 
syntax, so use the backticks instead. --- salt/client/ssh/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index f0dc2b3b1b..86ce24be17 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -157,17 +157,17 @@ do py_cmd_path=`"$py_cmd" -c \ 'from __future__ import print_function; import sys; print(sys.executable);'` - cmdpath=$(command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null) + cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null` if file $cmdpath | grep "shell script" > /dev/null then ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \ 'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'" - export $($py_cmd -c \ + export `$py_cmd -c \ "from __future__ import print_function; import sys; import os; map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \ - .format(x, os.environ[x]) for x in [$ex_vars]])") + .format(x, os.environ[x]) for x in [$ex_vars]])"` exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \ MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \ PKG_CONFIG_PATH=$PKG_CONFIG_PATH \ From 0a7f1a4d75d13cee1d5ffd72ff5008435269de45 Mon Sep 17 00:00:00 2001 From: Christian McHugh Date: Tue, 30 Jan 2018 17:15:41 +0000 Subject: [PATCH 007/223] English better --- salt/modules/rh_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/rh_service.py b/salt/modules/rh_service.py index 3c0c1c3441..e4f6e57bc1 100644 --- a/salt/modules/rh_service.py +++ b/salt/modules/rh_service.py @@ -103,7 +103,7 @@ def __virtual__(): else: return ( False, - 'Amazon Linux >= version 2 use systemd, will not ' + 'Amazon Linux >= version 2 uses systemd. 
Will not ' 'load rh_service.py as virtual \'service\'' ) return __virtualname__ From a38d4d44faa7eb406c78ca24a717eed0d22cf427 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 30 Jan 2018 15:35:55 -0500 Subject: [PATCH 008/223] [2016.11] Bump latest and previous versions --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index adc0ff0234..2dda8a2abf 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -240,8 +240,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2017.7.2' # latest release -previous_release = '2016.11.8' # latest release from previous branch +latest_release = '2017.7.3' # latest release +previous_release = '2016.11.9' # latest release from previous branch previous_release_dir = '2016.11' # path on web server for previous branch next_release = '' # next release next_release_dir = '' # path on web server for next release branch From d5faf6126b8a4701fd82f0b8c3b022ff9fafce8d Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 30 Jan 2018 15:35:55 -0500 Subject: [PATCH 009/223] [2017.7] Bump latest and previous versions --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 2c1dfd75e6..bf00f06cc1 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -248,8 +248,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2017.7.2' # latest release -previous_release = '2016.11.8' # latest release from previous branch +latest_release = '2017.7.3' # latest release +previous_release = '2016.11.9' # latest release from previous branch previous_release_dir = '2016.11' # path on web server for previous branch next_release = '' # next release next_release_dir = '' # path on web server for next release branch From 16136d4b02e4271c92a87f01d9f5ca4dbf99137c Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 23 Jan 
2018 16:11:30 +0300 Subject: [PATCH 010/223] Fixed RAET tests --- salt/daemons/test/minion.flo | 9 +++++++-- salt/daemons/test/test_master.py | 6 ++++++ salt/daemons/test/test_presence.py | 4 ++++ salt/daemons/test/test_stats.py | 4 ++++ 4 files changed, 21 insertions(+), 2 deletions(-) diff --git a/salt/daemons/test/minion.flo b/salt/daemons/test/minion.flo index b88d381f9e..02a4e465dd 100644 --- a/salt/daemons/test/minion.flo +++ b/salt/daemons/test/minion.flo @@ -11,7 +11,12 @@ framer minionudpstack be active first start exit do salt raet road stack closer per inode ".salt.road.manor." -framer bootstrap be active first join +framer bootstrap be active first setup + frame setup + enter + do salt raet road usher minion setup per inode ".salt.road.manor." + go join + frame join print Joining... enter @@ -44,7 +49,7 @@ framer bootstrap be active first join frame message print Messaging... enter - do raet road stack messenger to contents "Minion 1 Hello" code 15 \ + do raet road stack messenger with contents "Minion 1 Hello" code 15 \ per inode ".salt.road.manor." 
go next diff --git a/salt/daemons/test/test_master.py b/salt/daemons/test/test_master.py index ab1e5996bb..d99b9e3000 100644 --- a/salt/daemons/test/test_master.py +++ b/salt/daemons/test/test_master.py @@ -22,6 +22,10 @@ def test(): if not os.path.exists(pkiDirpath): os.makedirs(pkiDirpath) + keyDirpath = os.path.join('/tmp', 'raet', 'testo', 'key') + if not os.path.exists(keyDirpath): + os.makedirs(keyDirpath) + acceptedDirpath = os.path.join(pkiDirpath, 'accepted') if not os.path.exists(acceptedDirpath): os.makedirs(acceptedDirpath) @@ -64,10 +68,12 @@ def test(): client_acl=dict(), publisher_acl=dict(), pki_dir=pkiDirpath, + key_dir=keyDirpath, sock_dir=sockDirpath, cachedir=cacheDirpath, open_mode=True, auto_accept=True, + client_acl_verify=True, ) master = salt.daemons.flo.IofloMaster(opts=opts) diff --git a/salt/daemons/test/test_presence.py b/salt/daemons/test/test_presence.py index 975546726b..e98e8eb698 100644 --- a/salt/daemons/test/test_presence.py +++ b/salt/daemons/test/test_presence.py @@ -5,6 +5,7 @@ Raet Ioflo Behavior Unittests from __future__ import absolute_import, print_function, unicode_literals import sys from salt.ext.six.moves import map +import importlib # pylint: disable=blacklisted-import if sys.version_info < (2, 7): import unittest2 as unittest @@ -40,6 +41,9 @@ class PresenterTestCase(testing.FrameIofloTestCase): ''' Call super if override so House Framer and Frame are setup correctly ''' + behaviors = ['salt.daemons.flo', 'salt.daemons.test.plan'] + for behavior in behaviors: + mod = importlib.import_module(behavior) super(PresenterTestCase, self).setUp() def tearDown(self): diff --git a/salt/daemons/test/test_stats.py b/salt/daemons/test/test_stats.py index 9289fd0955..f0cfd2e4a3 100644 --- a/salt/daemons/test/test_stats.py +++ b/salt/daemons/test/test_stats.py @@ -5,6 +5,7 @@ Raet Ioflo Behavior Unittests from __future__ import absolute_import, print_function, unicode_literals import sys from salt.ext.six.moves import map +import 
importlib # pylint: disable=blacklisted-import if sys.version_info < (2, 7): import unittest2 as unittest @@ -43,6 +44,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase): ''' Call super if override so House Framer and Frame are setup correctly ''' + behaviors = ['salt.daemons.flo', 'salt.daemons.test.plan'] + for behavior in behaviors: + mod = importlib.import_module(behavior) super(StatsEventerTestCase, self).setUp() def tearDown(self): From 0fa6e89024f2fb4f002dc3447237f220fad80c2c Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 23 Jan 2018 21:03:48 +0300 Subject: [PATCH 011/223] Python3 RAET fixes. --- salt/daemons/flo/__init__.py | 1 + salt/daemons/test/test_multimaster.py | 4 +- salt/daemons/test/test_raetkey.py | 253 +++++++++++++----------- salt/daemons/test/test_saltkeep.py | 275 +++++++++++++------------- salt/daemons/test/test_stats.py | 4 +- salt/key.py | 8 +- salt/utils/stringutils.py | 3 +- 7 files changed, 285 insertions(+), 263 deletions(-) diff --git a/salt/daemons/flo/__init__.py b/salt/daemons/flo/__init__.py index 07d7e512d4..80c66f6860 100644 --- a/salt/daemons/flo/__init__.py +++ b/salt/daemons/flo/__init__.py @@ -121,6 +121,7 @@ class IofloMinion(object): ''' warn_deprecated() self.opts = opts + self.restart = False def tune_in(self, behaviors=None): ''' diff --git a/salt/daemons/test/test_multimaster.py b/salt/daemons/test/test_multimaster.py index 45c1eb6ac2..aa8b3f7405 100644 --- a/salt/daemons/test/test_multimaster.py +++ b/salt/daemons/test/test_multimaster.py @@ -348,8 +348,8 @@ if __name__ == '__main__' and __package__ is None: #console.reinit(verbosity=console.Wordage.concise) - #runAll() # run all unittests + runAll() # run all unittests - runSome() # only run some + #runSome() # only run some #runOne('testParseHostname') diff --git a/salt/daemons/test/test_raetkey.py b/salt/daemons/test/test_raetkey.py index 173de3d614..b4a0218881 100644 --- a/salt/daemons/test/test_raetkey.py +++ b/salt/daemons/test/test_raetkey.py 
@@ -7,6 +7,7 @@ from __future__ import absolute_import, print_function, unicode_literals # pylint: skip-file # pylint: disable=C0103 import sys +import salt.utils.stringutils from salt.ext.six.moves import map if sys.version_info < (2, 7): import unittest2 as unittest @@ -30,12 +31,15 @@ from raet.road import estating, keeping, stacking from salt.key import RaetKey + def setUpModule(): console.reinit(verbosity=console.Wordage.concise) + def tearDownModule(): pass + class BasicTestCase(unittest.TestCase): """""" @@ -47,7 +51,7 @@ class BasicTestCase(unittest.TestCase): pkiDirpath = os.path.join(self.saltDirpath, 'pki') if not os.path.exists(pkiDirpath): - os.makedirs(pkiDirpath) + os.makedirs(pkiDirpath) acceptedDirpath = os.path.join(pkiDirpath, 'accepted') if not os.path.exists(acceptedDirpath): @@ -81,7 +85,7 @@ class BasicTestCase(unittest.TestCase): ) self.mainKeeper = RaetKey(opts=self.opts) - self.baseDirpath = tempfile.mkdtemp(prefix="salt", suffix="base", dir='/tmp') + self.baseDirpath = tempfile.mkdtemp(prefix="salt", suffix="base", dir='/tmp') def tearDown(self): if os.path.exists(self.saltDirpath): @@ -119,9 +123,9 @@ class BasicTestCase(unittest.TestCase): self.opts['auto_accept'] = True self.assertTrue(self.opts['auto_accept']) self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [], - 'local': [], - 'rejected': [], - 'pending': []}) + 'local': [], + 'rejected': [], + 'pending': []}) localkeys = self.mainKeeper.read_local() self.assertDictEqual(localkeys, {}) @@ -129,8 +133,9 @@ class BasicTestCase(unittest.TestCase): main = self.createRoadData(name='main', base=self.baseDirpath) self.mainKeeper.write_local(main['prihex'], main['sighex']) localkeys = self.mainKeeper.read_local() - self.assertDictEqual(localkeys, {'priv': main['prihex'], - 'sign': main['sighex']}) + self.assertDictEqual(localkeys, + {'priv': salt.utils.stringutils.to_str(main['prihex']), + 'sign': salt.utils.stringutils.to_str(main['sighex'])}) allkeys = 
self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': [], 'local': [self.localFilepath], @@ -147,39 +152,38 @@ class BasicTestCase(unittest.TestCase): allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'], - 'local': [self.localFilepath], - 'pending': [], - 'rejected': []} ) + 'local': [self.localFilepath], + 'pending': [], + 'rejected': []}) remotekeys = self.mainKeeper.read_remote(other1['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other1', - 'pub': other1['pubhex'], - 'verify': other1['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other1', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']), + 'verify': salt.utils.stringutils.to_str(other1['verhex'])}) remotekeys = self.mainKeeper.read_remote(other2['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other2', - 'pub': other2['pubhex'], - 'verify': other2['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other2', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + 'verify': salt.utils.stringutils.to_str(other2['verhex'])}) listkeys = self.mainKeeper.list_keys() self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'], 'rejected': [], 'pending': []}) - allremotekeys = self.mainKeeper.read_all_remote() - self.assertDictEqual(allremotekeys, {'other1': - {'verify': other1['verhex'], - 'minion_id': 'other1', - 'acceptance': 'accepted', - 'pub': other1['pubhex'],}, - 'other2': - {'verify': other2['verhex'], - 'minion_id': 'other2', - 'acceptance': 'accepted', - 'pub': other2['pubhex'],} - }) - + self.assertDictEqual(allremotekeys, + {'other1': + {'verify': salt.utils.stringutils.to_str(other1['verhex']), + 'minion_id': 'other1', + 'acceptance': 'accepted', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']), }, + 'other2': + {'verify': salt.utils.stringutils.to_str(other2['verhex']), + 'minion_id': 'other2', + 'acceptance': 'accepted', + 'pub': 
salt.utils.stringutils.to_str(other2['pubhex']), } + }) def testManualAccept(self): ''' @@ -189,9 +193,9 @@ class BasicTestCase(unittest.TestCase): self.opts['auto_accept'] = False self.assertFalse(self.opts['auto_accept']) self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [], - 'local': [], - 'rejected': [], - 'pending': []}) + 'local': [], + 'rejected': [], + 'pending': []}) localkeys = self.mainKeeper.read_local() self.assertDictEqual(localkeys, {}) @@ -199,8 +203,9 @@ class BasicTestCase(unittest.TestCase): main = self.createRoadData(name='main', base=self.baseDirpath) self.mainKeeper.write_local(main['prihex'], main['sighex']) localkeys = self.mainKeeper.read_local() - self.assertDictEqual(localkeys, {'priv': main['prihex'], - 'sign': main['sighex']}) + self.assertDictEqual(localkeys, + {'priv': salt.utils.stringutils.to_str(main['prihex']), + 'sign': salt.utils.stringutils.to_str(main['sighex'])}) allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': [], 'local': [self.localFilepath], @@ -217,9 +222,9 @@ class BasicTestCase(unittest.TestCase): allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': [], - 'local': [self.localFilepath], - 'pending': ['other1', 'other2'], - 'rejected': []} ) + 'local': [self.localFilepath], + 'pending': ['other1', 'other2'], + 'rejected': []}) remotekeys = self.mainKeeper.read_remote(other1['name']) self.assertDictEqual(remotekeys, {}) @@ -232,56 +237,60 @@ class BasicTestCase(unittest.TestCase): 'rejected': [], 'pending': ['other1', 'other2']}) - allremotekeys = self.mainKeeper.read_all_remote() - self.assertDictEqual(allremotekeys, {'other1': - {'verify': other1['verhex'], - 'minion_id': 'other1', - 'acceptance': 'pending', - 'pub': other1['pubhex'],}, - 'other2': - {'verify': other2['verhex'], - 'minion_id': 'other2', - 'acceptance': 'pending', - 'pub': other2['pubhex'],} - }) + self.assertDictEqual(allremotekeys, + {'other1': + {'verify': 
salt.utils.stringutils.to_str(other1['verhex']), + 'minion_id': 'other1', + 'acceptance': 'pending', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']), + }, + 'other2': + {'verify': salt.utils.stringutils.to_str(other2['verhex']), + 'minion_id': 'other2', + 'acceptance': 'pending', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + } + }) self.mainKeeper.accept_all() allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'], - 'local': [self.localFilepath], - 'pending': [], - 'rejected': []} ) + 'local': [self.localFilepath], + 'pending': [], + 'rejected': []}) remotekeys = self.mainKeeper.read_remote(other1['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other1', - 'pub': other1['pubhex'], - 'verify': other1['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other1', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']), + 'verify': salt.utils.stringutils.to_str(other1['verhex'])}) remotekeys = self.mainKeeper.read_remote(other2['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other2', - 'pub': other2['pubhex'], - 'verify': other2['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other2', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + 'verify': salt.utils.stringutils.to_str(other2['verhex'])}) listkeys = self.mainKeeper.list_keys() self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'], 'rejected': [], 'pending': []}) - allremotekeys = self.mainKeeper.read_all_remote() - self.assertDictEqual(allremotekeys, {'other1': - {'verify': other1['verhex'], - 'minion_id': 'other1', - 'acceptance': 'accepted', - 'pub': other1['pubhex'],}, - 'other2': - {'verify': other2['verhex'], - 'minion_id': 'other2', - 'acceptance': 'accepted', - 'pub': other2['pubhex'],} - }) + self.assertDictEqual(allremotekeys, + {'other1': + {'verify': salt.utils.stringutils.to_str(other1['verhex']), + 'minion_id': 'other1', + 'acceptance': 'accepted', + 'pub': 
salt.utils.stringutils.to_str(other1['pubhex']), + }, + 'other2': + {'verify': salt.utils.stringutils.to_str(other2['verhex']), + 'minion_id': 'other2', + 'acceptance': 'accepted', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + } + }) def testDelete(self): ''' @@ -291,9 +300,9 @@ class BasicTestCase(unittest.TestCase): self.opts['auto_accept'] = True self.assertTrue(self.opts['auto_accept']) self.assertDictEqual(self.mainKeeper.all_keys(), {'accepted': [], - 'local': [], - 'rejected': [], - 'pending': []}) + 'local': [], + 'rejected': [], + 'pending': []}) localkeys = self.mainKeeper.read_local() self.assertDictEqual(localkeys, {}) @@ -301,8 +310,9 @@ class BasicTestCase(unittest.TestCase): main = self.createRoadData(name='main', base=self.baseDirpath) self.mainKeeper.write_local(main['prihex'], main['sighex']) localkeys = self.mainKeeper.read_local() - self.assertDictEqual(localkeys, {'priv': main['prihex'], - 'sign': main['sighex']}) + self.assertDictEqual(localkeys, + {'priv': salt.utils.stringutils.to_str(main['prihex']), + 'sign': salt.utils.stringutils.to_str(main['sighex'])}) allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': [], 'local': [self.localFilepath], @@ -319,70 +329,73 @@ class BasicTestCase(unittest.TestCase): allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'], - 'local': [self.localFilepath], - 'pending': [], - 'rejected': []} ) + 'local': [self.localFilepath], + 'pending': [], + 'rejected': []}) remotekeys = self.mainKeeper.read_remote(other1['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other1', - 'pub': other1['pubhex'], - 'verify': other1['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other1', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']), + 'verify': salt.utils.stringutils.to_str(other1['verhex']), + }) remotekeys = self.mainKeeper.read_remote(other2['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 
'other2', - 'pub': other2['pubhex'], - 'verify': other2['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other2', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + 'verify': salt.utils.stringutils.to_str(other2['verhex']), + }) listkeys = self.mainKeeper.list_keys() self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'], 'rejected': [], 'pending': []}) - allremotekeys = self.mainKeeper.read_all_remote() - self.assertDictEqual(allremotekeys, {'other1': - {'verify': other1['verhex'], - 'minion_id': 'other1', - 'acceptance': 'accepted', - 'pub': other1['pubhex']}, - 'other2': - {'verify': other2['verhex'], - 'minion_id': 'other2', - 'acceptance': 'accepted', - 'pub': other2['pubhex'],} - }) + self.assertDictEqual(allremotekeys, + {'other1': + {'verify': salt.utils.stringutils.to_str(other1['verhex']), + 'minion_id': 'other1', + 'acceptance': 'accepted', + 'pub': salt.utils.stringutils.to_str(other1['pubhex']) + }, + 'other2': + {'verify': salt.utils.stringutils.to_str(other2['verhex']), + 'minion_id': 'other2', + 'acceptance': 'accepted', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + } + }) self.mainKeeper.delete_key(match=other1['name']) allkeys = self.mainKeeper.all_keys() self.assertDictEqual(allkeys, {'accepted': ['other2'], - 'local': [self.localFilepath], - 'pending': [], - 'rejected': []} ) + 'local': [self.localFilepath], + 'pending': [], + 'rejected': []}) remotekeys = self.mainKeeper.read_remote(other1['name']) - self.assertDictEqual(remotekeys, {} ) + self.assertDictEqual(remotekeys, {}) remotekeys = self.mainKeeper.read_remote(other2['name']) - self.assertDictEqual(remotekeys, { 'minion_id': 'other2', - 'pub': other2['pubhex'], - 'verify': other2['verhex']} ) + self.assertDictEqual(remotekeys, {'minion_id': 'other2', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + 'verify': salt.utils.stringutils.to_str(other2['verhex'])}) listkeys = self.mainKeeper.list_keys() - self.assertDictEqual(listkeys, 
{'accepted': [ 'other2'], + self.assertDictEqual(listkeys, {'accepted': ['other2'], 'rejected': [], 'pending': []}) - allremotekeys = self.mainKeeper.read_all_remote() - self.assertDictEqual(allremotekeys, { - 'other2': - {'verify': other2['verhex'], - 'minion_id': 'other2', - 'acceptance': 'accepted', - 'pub': other2['pubhex'],} - }) - + self.assertDictEqual(allremotekeys, + {'other2': + {'verify': salt.utils.stringutils.to_str(other2['verhex']), + 'minion_id': 'other2', + 'acceptance': 'accepted', + 'pub': salt.utils.stringutils.to_str(other2['pubhex']), + } + }) def runOne(test): @@ -393,11 +406,12 @@ def runOne(test): suite = unittest.TestSuite([test]) unittest.TextTestRunner(verbosity=2).run(suite) + def runSome(): ''' Unittest runner ''' - tests = [] + tests = [] names = ['testAutoAccept', 'testManualAccept', 'testDelete'] @@ -407,6 +421,7 @@ def runSome(): suite = unittest.TestSuite(tests) unittest.TextTestRunner(verbosity=2).run(suite) + def runAll(): ''' Unittest runner @@ -416,12 +431,12 @@ def runAll(): unittest.TextTestRunner(verbosity=2).run(suite) + if __name__ == '__main__' and __package__ is None: + # console.reinit(verbosity=console.Wordage.concise) - #console.reinit(verbosity=console.Wordage.concise) + runAll() # run all unittests - runAll() #run all unittests + # runSome() #only run some - #runSome()#only run some - - #runOne('testDelete') + # runOne('testDelete') diff --git a/salt/daemons/test/test_saltkeep.py b/salt/daemons/test/test_saltkeep.py index 8176af1ce6..e548eed93e 100644 --- a/salt/daemons/test/test_saltkeep.py +++ b/salt/daemons/test/test_saltkeep.py @@ -13,10 +13,11 @@ else: # pylint: enable=blacklisted-import import os -import stat -import time -import tempfile import shutil +import socket +import stat +import tempfile +import time from ioflo.aid.odicting import odict from ioflo.aid.timing import StoreTimer @@ -29,6 +30,7 @@ from raet.road import estating, stacking from salt.daemons import salting import salt.utils.kinds as kinds 
+import salt.utils.stringutils def setUpModule(): @@ -232,20 +234,21 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class os.path.join('main', 'raet', 'main_master'))) self.assertTrue(main.ha, ("0.0.0.0", raeting.RAET_PORT)) self.assertIs(main.keep.auto, raeting.AutoMode.never.value) - self.assertDictEqual(main.keep.loadLocalData(), {'name': mainData['name'], - 'uid': 1, - 'ha': ['127.0.0.1', 7530], - 'iha': None, - 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', - 'dyned': None, - 'sid': 0, - 'puid': 1, - 'aha': ['0.0.0.0', 7530], - 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], - }) + self.assertDictEqual(main.keep.loadLocalData(), + {'name': mainData['name'], + 'uid': 1, + 'ha': ['127.0.0.1', 7530], + 'iha': None, + 'natted': None, + 'fqdn': socket.getfqdn('127.0.0.1'), + 'dyned': None, + 'sid': 0, + 'puid': 1, + 'aha': ['0.0.0.0', 7530], + 'role': mainData['role'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), + }) data1 = self.createRoadData(role='remote1', kind=kinds.APPL_KIND_NAMES[kinds.applKinds.minion], @@ -282,7 +285,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -290,8 +293,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 0, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), }, 'remote2_minion': {'name': data2['name'], @@ -300,7 +303,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': 
None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -308,8 +311,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data2['role'], 'acceptance': 0, - 'verhex': data2['verhex'], - 'pubhex': data2['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data2['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data2['pubhex']), } }) @@ -362,14 +365,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': otherData['role'], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), }) data3 = self.createRoadData(role='remote3', @@ -405,7 +408,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7534], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data3['kind'], @@ -413,8 +416,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data3['role'], 'acceptance': 0, - 'verhex': data3['verhex'], - 'pubhex': data3['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data3['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data3['pubhex']), }, 'remote4_minion': { @@ -424,7 +427,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7535], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data4['kind'], @@ -432,8 +435,8 @@ class 
BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data4['role'], 'acceptance': 0, - 'verhex': data4['verhex'], - 'pubhex': data4['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data4['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data4['pubhex']), } }) @@ -477,14 +480,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) data1 = self.createRoadData(role='remote1', @@ -520,7 +523,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -528,8 +531,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 1, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), }, 'remote2_minion': {'name': data2['name'], @@ -538,7 +541,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -546,8 +549,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data2['role'], 'acceptance': 1, - 'verhex': 
data2['verhex'], - 'pubhex': data2['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data2['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data2['pubhex']), } }) @@ -600,14 +603,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': otherData['role'], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), }) data3 = self.createRoadData(role='remote3', @@ -643,7 +646,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7534], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data3['kind'], @@ -651,8 +654,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data3['role'], 'acceptance': 1, - 'verhex': data3['verhex'], - 'pubhex': data3['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data3['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data3['pubhex']), }, 'remote4_minion': { @@ -662,7 +665,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7535], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data4['kind'], @@ -670,8 +673,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data4['role'], 'acceptance': 1, - 'verhex': data4['verhex'], - 'pubhex': data4['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data4['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data4['pubhex']), } }) 
@@ -715,13 +718,13 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), 'role': mainData['role'], }) @@ -759,7 +762,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -767,8 +770,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 1, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), }, 'remote2_minion': { @@ -778,7 +781,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -786,8 +789,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data2['role'], 'acceptance': 1, - 'verhex': data2['verhex'], - 'pubhex': data2['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data2['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data2['pubhex']), } }) @@ -840,14 +843,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': 
socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': otherData['role'], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), }) data3 = self.createRoadData(role='remote3', @@ -883,7 +886,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7534], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data3['kind'], @@ -891,8 +894,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data3['role'], 'acceptance': 1, - 'verhex': data3['verhex'], - 'pubhex': data3['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data3['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data3['pubhex']), }, 'remote4_minion': { @@ -902,7 +905,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7535], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data4['kind'], @@ -910,8 +913,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data4['role'], 'acceptance': 1, - 'verhex': data4['verhex'], - 'pubhex': data4['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data4['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data4['pubhex']), } }) @@ -955,14 +958,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': 
mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) # add multiple remotes all with same role @@ -1006,7 +1009,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -1014,8 +1017,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 0, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), }, 'primary_caller': { @@ -1025,7 +1028,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -1033,8 +1036,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 0, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), } }) @@ -1104,14 +1107,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) # add multiple 
remotes all with same role @@ -1149,7 +1152,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -1157,8 +1160,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 1, - 'verhex': data2['verhex'], - 'pubhex': data2['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data2['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data2['pubhex']), }, 'primary_syndic': { @@ -1168,7 +1171,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -1176,8 +1179,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data2['role'], 'acceptance': 1, - 'verhex': data2['verhex'], - 'pubhex': data2['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data2['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data2['pubhex']), } }) @@ -1248,14 +1251,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) # add multiple remotes all with same role but different keys @@ -1300,7 +1303,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 
7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data1['kind'], @@ -1308,8 +1311,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data1['role'], 'acceptance': 1, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), }, 'primary_syndic': { @@ -1319,7 +1322,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7533], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'main': False, 'kind': data2['kind'], @@ -1327,8 +1330,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'joined': None, 'role': data2['role'], 'acceptance': 1, - 'verhex': data1['verhex'], - 'pubhex': data1['pubhex'], + 'verhex': salt.utils.stringutils.to_str(data1['verhex']), + 'pubhex': salt.utils.stringutils.to_str(data1['pubhex']), } }) @@ -1399,14 +1402,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) opts = self.createOpts(role='other', @@ -1441,14 +1444,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 
'role': otherData['role'], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), }) self.join(other, main) @@ -1524,14 +1527,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) opts = self.createOpts(role='other', @@ -1566,14 +1569,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': otherData['role'], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), }) self.join(other, main) @@ -1645,14 +1648,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) opts = self.createOpts(role='other', @@ -1687,13 +1690,13 @@ class BasicTestCase(unittest.TestCase): # pylint: 
disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], - 'sighex': otherData['sighex'], - 'prihex': otherData['prihex'], + 'sighex': salt.utils.stringutils.to_str(otherData['sighex']), + 'prihex': salt.utils.stringutils.to_str(otherData['prihex']), 'role': otherData['role'], }) @@ -1766,14 +1769,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) opts = self.createOpts(role='primary', @@ -1808,14 +1811,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': other1Data['role'], - 'sighex': other1Data['sighex'], - 'prihex': other1Data['prihex'], + 'sighex': salt.utils.stringutils.to_str(other1Data['sighex']), + 'prihex': salt.utils.stringutils.to_str(other1Data['prihex']), }) self.join(other1, main) @@ -1876,13 +1879,13 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7532], - 'sighex': other2Data['sighex'], - 'prihex': other2Data['prihex'], + 'sighex': salt.utils.stringutils.to_str(other2Data['sighex']), + 'prihex': 
salt.utils.stringutils.to_str(other2Data['prihex']), 'role': other2Data['role'], }) @@ -1936,14 +1939,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7532], 'role': other2Data['role'], - 'sighex': other1Data['sighex'], - 'prihex': other1Data['prihex'], + 'sighex': salt.utils.stringutils.to_str(other1Data['sighex']), + 'prihex': salt.utils.stringutils.to_str(other1Data['prihex']), }) # should join since same role and keys @@ -2021,14 +2024,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7530], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7530], 'role': mainData['role'], - 'sighex': mainData['sighex'], - 'prihex': mainData['prihex'], + 'sighex': salt.utils.stringutils.to_str(mainData['sighex']), + 'prihex': salt.utils.stringutils.to_str(mainData['prihex']), }) opts = self.createOpts(role='primary', @@ -2063,14 +2066,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7531], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7531], 'role': other1Data['role'], - 'sighex': other1Data['sighex'], - 'prihex': other1Data['prihex'], + 'sighex': salt.utils.stringutils.to_str(other1Data['sighex']), + 'prihex': salt.utils.stringutils.to_str(other1Data['prihex']), }) self.join(other1, main) @@ -2130,14 +2133,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class 'ha': ['127.0.0.1', 7532], 'iha': None, 'natted': None, - 'fqdn': '1.0.0.127.in-addr.arpa', + 'fqdn': socket.getfqdn('127.0.0.1'), 'dyned': 
None, 'sid': 0, 'puid': 1, 'aha': ['0.0.0.0', 7532], 'role': other2Data['role'], - 'sighex': other2Data['sighex'], - 'prihex': other2Data['prihex'], + 'sighex': salt.utils.stringutils.to_str(other2Data['sighex']), + 'prihex': salt.utils.stringutils.to_str(other2Data['prihex']), }) # should join since open mode @@ -2225,8 +2228,8 @@ if __name__ == '__main__' and __package__ is None: #console.reinit(verbosity=console.Wordage.concise) - #runAll() # run all unittests + runAll() # run all unittests - runSome() # only run some + #runSome() # only run some #runOne('testBootstrapRoleAuto') diff --git a/salt/daemons/test/test_stats.py b/salt/daemons/test/test_stats.py index f0cfd2e4a3..6e23d0fdfb 100644 --- a/salt/daemons/test/test_stats.py +++ b/salt/daemons/test/test_stats.py @@ -727,8 +727,8 @@ if __name__ == '__main__' and __package__ is None: # console.reinit(verbosity=console.Wordage.concise) - #runAll() # run all unittests + runAll() # run all unittests - runSome() # only run some + #runSome() # only run some #runOne('testMasterLaneStats') diff --git a/salt/key.py b/salt/key.py index 19fa60d09f..f93ccf8c11 100644 --- a/salt/key.py +++ b/salt/key.py @@ -1082,6 +1082,8 @@ class RaetKey(Key): pre_path = os.path.join(pre, minion_id) rej_path = os.path.join(rej, minion_id) # open mode is turned on, force accept the key + pub = salt.utils.stringutils.to_str(pub) + verify = salt.utils.stringutils.to_str(verify) keydata = { 'minion_id': minion_id, 'pub': pub, @@ -1148,7 +1150,7 @@ class RaetKey(Key): verify: ''' path = os.path.join(self.opts['pki_dir'], status, minion_id) - with salt.utils.files.fopen(path, 'r') as fp_: + with salt.utils.files.fopen(path, 'rb') as fp_: keydata = self.serial.loads(fp_.read()) return 'pub: {0}\nverify: {1}'.format( keydata['pub'], @@ -1158,7 +1160,7 @@ class RaetKey(Key): ''' Return a sha256 kingerprint for the key ''' - with salt.utils.files.fopen(path, 'r') as fp_: + with salt.utils.files.fopen(path, 'rb') as fp_: keydata = 
self.serial.loads(fp_.read()) key = 'pub: {0}\nverify: {1}'.format( keydata['pub'], @@ -1442,7 +1444,7 @@ class RaetKey(Key): if os.path.exists(path): #mode = os.stat(path).st_mode os.chmod(path, stat.S_IWUSR | stat.S_IRUSR) - with salt.utils.files.fopen(path, 'w+') as fp_: + with salt.utils.files.fopen(path, 'w+b') as fp_: fp_.write(self.serial.dumps(keydata)) os.chmod(path, stat.S_IRUSR) os.umask(c_umask) diff --git a/salt/utils/stringutils.py b/salt/utils/stringutils.py index ba2dbb7893..1b8dd9d2af 100644 --- a/salt/utils/stringutils.py +++ b/salt/utils/stringutils.py @@ -5,6 +5,7 @@ Functions for manipulating or otherwise processing strings # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import base64 import errno import fnmatch import logging @@ -203,7 +204,7 @@ def is_binary(data): @jinja_filter('random_str') def random(size=32): key = os.urandom(size) - return key.encode('base64').replace('\n', '')[:size] + return to_unicode(base64.b64encode(key).replace(b'\n', b'')[:size]) @jinja_filter('contains_whitespace') From c64ad435e7aab820d6b0ab236e697f08a8142fd2 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 31 Jan 2018 17:37:55 +0300 Subject: [PATCH 012/223] Fixed raet master and minion shutdown --- salt/cli/daemons.py | 16 +++++++++------- salt/log/setup.py | 13 +++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/salt/cli/daemons.py b/salt/cli/daemons.py index ac1e22dd7a..d53c7e1ace 100644 --- a/salt/cli/daemons.py +++ b/salt/cli/daemons.py @@ -119,11 +119,12 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di Creates a master server ''' def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument - # escalate signal to the process manager processes - self.master.process_manager.stop_restarting() - self.master.process_manager.send_signal_to_processes(signum) - # kill any remaining processes - 
self.master.process_manager.kill_children() + if hasattr(self.master, 'process_manager'): # IofloMaster has no process manager + # escalate signal to the process manager processes + self.master.process_manager.stop_restarting() + self.master.process_manager.send_signal_to_processes(signum) + # kill any remaining processes + self.master.process_manager.kill_children() super(Master, self)._handle_signals(signum, sigframe) def prepare(self): @@ -234,7 +235,8 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument # escalate signal to the process manager processes - self.minion.stop(signum) + if hasattr(self.minion, 'stop'): + self.minion.stop(signum) super(Minion, self)._handle_signals(signum, sigframe) # pylint: disable=no-member @@ -392,7 +394,7 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di :param exitmsg ''' self.action_log_info('Shutting down') - if hasattr(self, 'minion'): + if hasattr(self, 'minion') and hasattr(self.minion, 'destroy'): self.minion.destroy() super(Minion, self).shutdown( exitcode, ('The Salt {0} is shutdown. {1}'.format( diff --git a/salt/log/setup.py b/salt/log/setup.py index e14064e1d0..66d2c28a67 100644 --- a/salt/log/setup.py +++ b/salt/log/setup.py @@ -120,6 +120,7 @@ __MP_LOGGING_QUEUE = None __MP_LOGGING_QUEUE_PROCESS = None __MP_LOGGING_QUEUE_HANDLER = None __MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess' +__MP_MAINPROCESS_ID = None class __NullLoggingHandler(TemporaryLoggingHandler): @@ -822,6 +823,7 @@ def set_multiprocessing_logging_queue(queue): def setup_multiprocessing_logging_listener(opts, queue=None): global __MP_LOGGING_QUEUE_PROCESS global __MP_LOGGING_LISTENER_CONFIGURED + global __MP_MAINPROCESS_ID if __MP_IN_MAINPROCESS is False: # We're not in the MainProcess, return! 
No logging listener setup shall happen @@ -830,6 +832,11 @@ def setup_multiprocessing_logging_listener(opts, queue=None): if __MP_LOGGING_LISTENER_CONFIGURED is True: return + if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid(): + # We're not in the MainProcess, return! No logging listener setup shall happen + return + + __MP_MAINPROCESS_ID = os.getpid() __MP_LOGGING_QUEUE_PROCESS = multiprocessing.Process( target=__process_multiprocessing_logging_queue, args=(opts, queue or get_multiprocessing_logging_queue(),) @@ -954,6 +961,7 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False): global __MP_LOGGING_QUEUE global __MP_LOGGING_QUEUE_PROCESS global __MP_LOGGING_LISTENER_CONFIGURED + global __MP_MAINPROCESS_ID if daemonizing is False and __MP_IN_MAINPROCESS is True: # We're in the MainProcess and we're not daemonizing, return! @@ -967,6 +975,11 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False): if __MP_LOGGING_QUEUE_PROCESS is None: return + + if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid(): + # We're not in the MainProcess, return! 
No logging listener setup shall happen + return + if __MP_LOGGING_QUEUE_PROCESS.is_alive(): logging.getLogger(__name__).debug('Stopping the multiprocessing logging queue listener') try: From 0b7fb33b0395cf03fe208368b84456fa155ae3b0 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 31 Jan 2018 19:15:46 +0300 Subject: [PATCH 013/223] Fixed lint errors --- salt/daemons/test/test_multimaster.py | 2 +- salt/log/setup.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/daemons/test/test_multimaster.py b/salt/daemons/test/test_multimaster.py index aa8b3f7405..88114a9d4c 100644 --- a/salt/daemons/test/test_multimaster.py +++ b/salt/daemons/test/test_multimaster.py @@ -348,7 +348,7 @@ if __name__ == '__main__' and __package__ is None: #console.reinit(verbosity=console.Wordage.concise) - runAll() # run all unittests + runAll() # run all unittests #runSome() # only run some diff --git a/salt/log/setup.py b/salt/log/setup.py index 66d2c28a67..c06dffa7bf 100644 --- a/salt/log/setup.py +++ b/salt/log/setup.py @@ -961,7 +961,6 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False): global __MP_LOGGING_QUEUE global __MP_LOGGING_QUEUE_PROCESS global __MP_LOGGING_LISTENER_CONFIGURED - global __MP_MAINPROCESS_ID if daemonizing is False and __MP_IN_MAINPROCESS is True: # We're in the MainProcess and we're not daemonizing, return! 
From d8eec9aa9726b7acb8d7d620eb8a1f1b14dad9f8 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 31 Jan 2018 17:41:18 -0700 Subject: [PATCH 014/223] fix cookies dict size changing in http.query --- salt/utils/http.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/salt/utils/http.py b/salt/utils/http.py index 70a1c812ba..acf794dda2 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -532,7 +532,7 @@ def query(url, not isinstance(result_text, six.text_type): result_text = result_text.decode(res_params['charset']) ret['body'] = result_text - if 'Set-Cookie' in result_headers.keys() and cookies is not None: + if 'Set-Cookie' in result_headers and cookies is not None: result_cookies = parse_cookie_header(result_headers['Set-Cookie']) for item in result_cookies: sess_cookies.set_cookie(item) @@ -857,12 +857,10 @@ def parse_cookie_header(header): for cookie in cookies: name = None value = None - for item in cookie: + for item in list(cookie): if item in attribs: continue - name = item - value = cookie[item] - del cookie[name] + value = cookie.pop(item) # cookielib.Cookie() requires an epoch if 'expires' in cookie: @@ -870,7 +868,7 @@ def parse_cookie_header(header): # Fill in missing required fields for req in reqd: - if req not in cookie.keys(): + if req not in cookie: cookie[req] = '' if cookie['version'] == '': cookie['version'] = 0 From 971e59ebe27bef1e437808c6eb1b2d0e51671a40 Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Fri, 26 Jan 2018 15:38:43 +0100 Subject: [PATCH 015/223] Drop enforcing new-style object for SaltYamlSafeLoader Building the documentation with the Python 3 version of sphinx fails: $ make -C doc html SPHINXBUILD=/usr/share/sphinx/scripts/python3/sphinx-build /usr/share/sphinx/scripts/python3/sphinx-build -b html -d _build/doctrees . _build/html Running Sphinx v1.5.6 making output directory... loading translations [en]... done loading pickled environment... 
not yet created [autosummary] generating autosummary for: contents.rst, faq.rst, glossary.rst, ref/auth/all/index.rst, ref/auth/all/salt.auth.auto.rst, ref/auth/all/salt.auth.django.rst, ref/auth/all/salt.auth.keystone.rst, ref/auth/all/salt.auth.ldap.rst, ref/auth/all/salt.auth.mysql.rst, ref/auth/all/salt.auth.pam.rst, ..., topics/using_salt.rst, topics/utils/index.rst, topics/venafi/index.rst, topics/virt/disk.rst, topics/virt/index.rst, topics/virt/nic.rst, topics/windows/index.rst, topics/windows/windows-package-manager.rst, topics/windows/windows-specific-behavior.rst, topics/yaml/index.rst Exception occurred: File "salt/utils/yamlloader.py", line 33, in class SaltYamlSafeLoader(yaml.SafeLoader, object): TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases The full traceback has been saved in /tmp/sphinx-err-hzfmdxlm.log, if you want to report the issue to the developers. Please also report this if it was a user error, so that a better error message can be provided next time. A bug report can be filed in the tracker at . Thanks! SaltYamlSafeLoader uses multiple inheritance to convert this object into a new-style object. At least since pyyaml 3.11, yaml.SafeLoader is already a new-style object. Therefore the multiple inheritance can be dropped. This fixes #45684. --- salt/utils/yamlloader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py index da0fd8d466..f6932f6109 100644 --- a/salt/utils/yamlloader.py +++ b/salt/utils/yamlloader.py @@ -30,7 +30,7 @@ warnings.simplefilter('always', category=DuplicateKeyWarning) # with code integrated from https://gist.github.com/844388 -class SaltYamlSafeLoader(yaml.SafeLoader, object): +class SaltYamlSafeLoader(yaml.SafeLoader): ''' Create a custom YAML loader that uses the custom constructor. 
This allows for the YAML loading defaults to be manipulated based on needs within salt From 179e8fbe73f064166b5e75a2aa2a572a83705939 Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Fri, 26 Jan 2018 17:13:57 +0100 Subject: [PATCH 016/223] doc: Do not mock non-existing __qualname__ attribute Building the documentation with the Python 3 version of sphinx fails: $ make -C doc html SPHINXBUILD=/usr/share/sphinx/scripts/python3/sphinx-build [...] Traceback (most recent call last): File "/usr/lib/python3/dist-packages/sphinx/cmdline.py", line 296, in main app.build(opts.force_all, filenames) File "/usr/lib/python3/dist-packages/sphinx/application.py", line 333, in build self.builder.build_update() File "/usr/lib/python3/dist-packages/sphinx/builders/__init__.py", line 251, in build_update 'out of date' % len(to_build)) File "/usr/lib/python3/dist-packages/sphinx/builders/__init__.py", line 265, in build self.doctreedir, self.app)) File "/usr/lib/python3/dist-packages/sphinx/environment/__init__.py", line 556, in update self._read_serial(docnames, app) File "/usr/lib/python3/dist-packages/sphinx/environment/__init__.py", line 576, in _read_serial self.read_doc(docname, app) File "/usr/lib/python3/dist-packages/sphinx/environment/__init__.py", line 684, in read_doc pub.publish() File "/usr/lib/python3/dist-packages/docutils/core.py", line 217, in publish self.settings) File "/usr/lib/python3/dist-packages/sphinx/io.py", line 55, in read self.parse() File "/usr/lib/python3/dist-packages/docutils/readers/__init__.py", line 78, in parse self.parser.parse(self.input, document) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/__init__.py", line 191, in parse self.statemachine.run(inputlines, document, inliner=self.inliner) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 172, in run input_source=document['source']) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File 
"/usr/lib/python3/dist-packages/docutils/statemachine.py", line 460, in check_line return method(match, context, next_state) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2989, in text self.section(title.lstrip(), source, style, lineno + 1, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 328, in section self.new_subsection(title, lineno, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 396, in new_subsection node=section_node, match_titles=True) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 283, in nested_parse node=node, match_titles=match_titles) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 197, in run results = StateMachineWS.run(self, input_lines, input_offset) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 460, in check_line return method(match, context, next_state) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2754, in underline self.section(title, source, style, lineno - 1, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 328, in section self.new_subsection(title, lineno, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 396, in new_subsection node=section_node, match_titles=True) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 283, in nested_parse node=node, match_titles=match_titles) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 197, in run results = StateMachineWS.run(self, input_lines, input_offset) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 460, in check_line return method(match, 
context, next_state) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2754, in underline self.section(title, source, style, lineno - 1, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 328, in section self.new_subsection(title, lineno, messages) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 396, in new_subsection node=section_node, match_titles=True) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 283, in nested_parse node=node, match_titles=match_titles) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 197, in run results = StateMachineWS.run(self, input_lines, input_offset) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File "/usr/lib/python3/dist-packages/docutils/statemachine.py", line 460, in check_line return method(match, context, next_state) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2327, in explicit_markup nodelist, blank_finish = self.explicit_construct(match) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2339, in explicit_construct return method(self, expmatch) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2082, in directive directive_class, match, type_name, option_presets) File "/usr/lib/python3/dist-packages/docutils/parsers/rst/states.py", line 2131, in run_directive result = directive_instance.run() File "/usr/lib/python3/dist-packages/sphinx/ext/autodoc.py", line 1668, in run documenter.generate(more_content=self.content) File "/usr/lib/python3/dist-packages/sphinx/ext/autodoc.py", line 1013, in generate self.document_members(all_members) File "/usr/lib/python3/dist-packages/sphinx/ext/autodoc.py", line 1388, in document_members ModuleLevelDocumenter.document_members(self, all_members) File "/usr/lib/python3/dist-packages/sphinx/ext/autodoc.py", line 903, 
in document_members for (mname, member, isattr) in self.filter_members(members, want_all): File "/usr/lib/python3/dist-packages/sphinx/ext/autodoc.py", line 871, in filter_members not keep, self.options) File "/usr/lib/python3/dist-packages/sphinx/application.py", line 593, in emit_firstresult for result in self.emit(event, *args): File "/usr/lib/python3/dist-packages/sphinx/application.py", line 589, in emit results.append(callback(self, *args)) File "/usr/lib/python3/dist-packages/sphinx/ext/napoleon/__init__.py", line 426, in _skip_member cls_path, _, _ = qualname.rpartition('.') ValueError: not enough values to unpack (expected 3, got 0) The napoleon sphinx extensions queries the attribute __qualname__ of the given obj and expect a string as result. It uses a default value if this attribute does not exist. The real Python modules do not have a __qualname__ attribute, but the doc.conf.Mock object returns a Mock object when queried for the __qualname__ attribute. Change the Mock object to raise an AttributeError instead. This fixes #45684. --- doc/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index 2c1dfd75e6..e946e78044 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -46,6 +46,8 @@ class Mock(object): data = self.__mapping.get(name) elif name in ('__file__', '__path__'): data = '/dev/null' + elif name == '__qualname__': + raise AttributeError("'Mock' object has no attribute '__qualname__'") else: data = Mock(mapping=self.__mapping) return data From b6181b5ed6e2880747d9e6f8c8f573dbc13d2a03 Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Fri, 26 Jan 2018 13:00:13 +0100 Subject: [PATCH 017/223] Fix Unicode tests when run with LC_ALL=POSIX When running the unit tests with the locale set to POSIX, some Unicode tests fail: $ LC_ALL=POSIX python3 ./tests/runtests.py --unit [...] 
====================================================================== ERROR: test_list_products (unit.modules.test_zypper.ZypperTestCase) [CPU:0.0%|MEM:73.2%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/modules/test_zypper.py", line 236, in test_list_products 'stdout': get_test_data(filename) File "tests/unit/modules/test_zypper.py", line 53, in get_test_data return rfh.read() File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode return codecs.ascii_decode(input, self.errors)[0] UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 828: ordinal not in range(128) ====================================================================== ERROR: test_non_ascii (unit.templates.test_jinja.TestGetTemplate) [CPU:0.0%|MEM:73.2%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/templates/test_jinja.py", line 341, in test_non_ascii result = fp.read() File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode return codecs.ascii_decode(input, self.errors)[0] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 5: ordinal not in range(128) ====================================================================== ERROR: test_non_ascii_encoding (unit.templates.test_jinja.TestGetTemplate) [CPU:0.0%|MEM:73.2%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/templates/test_jinja.py", line 303, in test_non_ascii_encoding fp_.read(), File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode return codecs.ascii_decode(input, self.errors)[0] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 5: ordinal not in range(128) ---------------------------------------------------------------------- Therefore open files in binary mode and explicitly decode them with utf-8 instead of their default locale. 
--- tests/unit/modules/test_zypper.py | 4 ++-- tests/unit/templates/test_jinja.py | 14 ++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/unit/modules/test_zypper.py b/tests/unit/modules/test_zypper.py index ac8621307a..8d5e52aa69 100644 --- a/tests/unit/modules/test_zypper.py +++ b/tests/unit/modules/test_zypper.py @@ -49,8 +49,8 @@ def get_test_data(filename): with salt.utils.fopen( os.path.join( os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'zypp'), filename)) as rfh: - return rfh.read() + os.path.dirname(os.path.abspath(__file__)), 'zypp'), filename), mode='rb') as rfh: + return salt.utils.to_str(rfh.read(), 'utf-8') @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/templates/test_jinja.py b/tests/unit/templates/test_jinja.py index 8516e974ec..0e69ea80b3 100644 --- a/tests/unit/templates/test_jinja.py +++ b/tests/unit/templates/test_jinja.py @@ -294,13 +294,13 @@ class TestGetTemplate(TestCase): 'file_roots': self.local_opts['file_roots'], 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt)) - self.assertEqual(out, salt.utils.to_unicode('Hey world !Hi Sàlt !' + os.linesep)) + self.assertEqual(out, u'Hey world !Hi Sàlt !' 
+ os.linesep) self.assertEqual(fc.requests[0]['path'], 'salt://macro') filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii') - with salt.utils.fopen(filename) as fp_: + with salt.utils.fopen(filename, mode='rb') as fp_: out = render_jinja_tmpl( - fp_.read(), + salt.utils.to_unicode(fp_.read(), 'utf-8'), dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote', 'file_roots': self.local_opts['file_roots'], 'pillar_roots': self.local_opts['pillar_roots']}, @@ -337,11 +337,9 @@ class TestGetTemplate(TestCase): def test_non_ascii(self): fn = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii') out = JINJA(fn, opts=self.local_opts, saltenv='test') - with salt.utils.fopen(out['data']) as fp: - result = fp.read() - if six.PY2: - result = salt.utils.to_unicode(result) - self.assertEqual(salt.utils.to_unicode('Assunção' + os.linesep), result) + with salt.utils.fopen(out['data'], mode='rb') as fp: + result = salt.utils.to_unicode(fp.read(), 'utf-8') + self.assertEqual(u'Assunção' + os.linesep, result) def test_get_context_has_enough_context(self): template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' From 24d41f245154d6a9f61e65a14afba05d4c885463 Mon Sep 17 00:00:00 2001 From: kstreee Date: Fri, 2 Feb 2018 09:55:08 +0900 Subject: [PATCH 018/223] Fixes base dir making logic to ensure not raising the exception when base directory already exists. --- salt/cache/localfs.py | 14 ++++++++------ salt/utils/gitfs.py | 30 +++++++++++++++++++++--------- tests/unit/cache/test_localfs.py | 23 ++++++++++++++++------- 3 files changed, 45 insertions(+), 22 deletions(-) diff --git a/salt/cache/localfs.py b/salt/cache/localfs.py index 45dfa6d086..22c030599c 100644 --- a/salt/cache/localfs.py +++ b/salt/cache/localfs.py @@ -14,6 +14,7 @@ from __future__ import absolute_import import logging import os import os.path +import errno import shutil import tempfile @@ -45,13 +46,14 @@ def store(bank, key, data, cachedir): Store information in a file. 
''' base = os.path.join(cachedir, os.path.normpath(bank)) - if not os.path.isdir(base): - try: - os.makedirs(base) - except OSError as exc: + try: + os.makedirs(base) + except OSError as exc: + if exc.errno != errno.EEXIST: raise SaltCacheError( - 'The cache directory, {0}, does not exist and could not be ' - 'created: {1}'.format(base, exc) + 'The cache directory, {0}, could not be created: {1}'.format( + base, exc + ) ) outfile = os.path.join(base, '{0}.p'.format(key)) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 35501ca2ba..fb8143a409 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -2531,13 +2531,17 @@ class GitFS(GitBase): return fnd salt.fileserver.wait_lock(lk_fn, dest) - if os.path.isfile(blobshadest) and os.path.isfile(dest): + try: with salt.utils.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == blob_hexsha: fnd['rel'] = path fnd['path'] = dest return _add_file_stat(fnd, blob_mode) + except IOError as exc: + if exc.errno != errno.ENOENT: + raise exc + with salt.utils.fopen(lk_fn, 'w+') as fp_: fp_.write('') for filename in glob.glob(hashes_glob): @@ -2623,17 +2627,25 @@ class GitFS(GitBase): load['saltenv'], '{0}.hash.{1}'.format(relpath, self.opts['hash_type'])) - if not os.path.isfile(hashdest): - if not os.path.exists(os.path.dirname(hashdest)): - os.makedirs(os.path.dirname(hashdest)) - ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type']) - with salt.utils.fopen(hashdest, 'w+') as fp_: - fp_.write(ret['hsum']) - return ret - else: + + try: with salt.utils.fopen(hashdest, 'rb') as fp_: ret['hsum'] = fp_.read() return ret + except IOError as exc: + if exc.errno != errno.ENOENT: + raise exc + + try: + os.makedirs(os.path.dirname(hashdest)) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise exc + + ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type']) + with salt.utils.fopen(hashdest, 'w+') as fp_: + fp_.write(ret['hsum']) + return ret def _file_lists(self, load, form): ''' diff 
--git a/tests/unit/cache/test_localfs.py b/tests/unit/cache/test_localfs.py index e1aa8ecc32..19d082821c 100644 --- a/tests/unit/cache/test_localfs.py +++ b/tests/unit/cache/test_localfs.py @@ -5,6 +5,7 @@ unit tests for the localfs cache # Import Python libs from __future__ import absolute_import +import os import shutil import tempfile @@ -48,16 +49,24 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin): with patch.dict(localfs.__context__, {'serial': serializer}): localfs.store(bank='bank', key='key', data='payload data', cachedir=tmp_dir) - # 'store' function tests: 4 + # 'store' function tests: 5 - def test_store_no_base_cache_dir(self): + def test_handled_exception_cache_dir(self): ''' Tests that a SaltCacheError is raised when the base directory doesn't exist and cannot be created. ''' - with patch('os.path.isdir', MagicMock(return_value=None)): - with patch('os.makedirs', MagicMock(side_effect=OSError)): - self.assertRaises(SaltCacheError, localfs.store, bank='', key='', data='', cachedir='') + with patch('os.makedirs', MagicMock(side_effect=OSError(os.errno.EEXIST, ''))): + with patch('tempfile.mkstemp', MagicMock(side_effect=Exception)): + self.assertRaises(Exception, localfs.store, bank='', key='', data='', cachedir='') + + def test_unhandled_exception_cache_dir(self): + ''' + Tests that a SaltCacheError is raised when the base directory doesn't exist and + cannot be created. + ''' + with patch('os.makedirs', MagicMock(side_effect=OSError(1, ''))): + self.assertRaises(SaltCacheError, localfs.store, bank='', key='', data='', cachedir='') def test_store_close_mkstemp_file_handle(self): ''' @@ -67,7 +76,7 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin): This test mocks the call to mkstemp, but forces an OSError to be raised when the close() function is called on a file descriptor that doesn't exist. 
''' - with patch('os.path.isdir', MagicMock(return_value=True)): + with patch('os.makedirs', MagicMock(side_effect=OSError(os.errno.EEXIST, ''))): with patch('tempfile.mkstemp', MagicMock(return_value=(12345, 'foo'))): self.assertRaises(OSError, localfs.store, bank='', key='', data='', cachedir='') @@ -76,7 +85,7 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin): Tests that a SaltCacheError is raised when there is a problem writing to the cache file. ''' - with patch('os.path.isdir', MagicMock(return_value=True)): + with patch('os.makedirs', MagicMock(side_effect=OSError(os.errno.EEXIST, ''))): with patch('tempfile.mkstemp', MagicMock(return_value=('one', 'two'))): with patch('os.close', MagicMock(return_value=None)): with patch('salt.utils.fopen', MagicMock(side_effect=IOError)): From 68c7f3dcba478545de2d761b4024e17a3fb12402 Mon Sep 17 00:00:00 2001 From: kstreee Date: Fri, 2 Feb 2018 14:22:49 +0900 Subject: [PATCH 019/223] Adds a missing return statement. --- salt/fileclient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/fileclient.py b/salt/fileclient.py index 7b4e2235df..1f557815cc 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -1241,7 +1241,7 @@ class RemoteClient(Client): load = {'saltenv': saltenv, 'prefix': prefix, 'cmd': '_file_list_emptydirs'} - self.channel.send(load) + return self.channel.send(load) def dir_list(self, saltenv='base', prefix=''): ''' From 96533631311216820099207460b90f16bd2fb7a7 Mon Sep 17 00:00:00 2001 From: Andreas Putzo Date: Mon, 22 Jan 2018 12:57:12 +0100 Subject: [PATCH 020/223] Fix for duplicate entries with pkrepo.managed pkgrepo.managed creates duplicate entries every time salt runs when the same repository is set up twice, with different file attributes. This change simply removes the file attributes before comparing if the repo already exists. This might be related to #22412. 
--- salt/states/pkgrepo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py index 8a5775b19a..e38d44dd30 100644 --- a/salt/states/pkgrepo.py +++ b/salt/states/pkgrepo.py @@ -377,6 +377,9 @@ def managed(name, ppa=None, **kwargs): repo = salt.utils.pkg.deb.strip_uri(repo) if pre: + #22412: Remove file attribute in case same repo is set up multiple times but with different files + pre.pop('file', None) + sanitizedkwargs.pop('file', None) for kwarg in sanitizedkwargs: if kwarg not in pre: if kwarg == 'enabled': From c631598a877535a8397d87c8f7dae2299b6cc0d4 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 1 Feb 2018 12:39:38 -0600 Subject: [PATCH 021/223] Fix traceback in disks grains when /sys/block not available This prevents a traceback when salt is run in a container where /sys/block is not present. --- salt/grains/disks.py | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/salt/grains/disks.py b/salt/grains/disks.py index ce40682842..d19b96ef9a 100644 --- a/salt/grains/disks.py +++ b/salt/grains/disks.py @@ -127,16 +127,21 @@ def _linux_disks(): ret = {'disks': [], 'SSDs': []} for entry in glob.glob('/sys/block/*/queue/rotational'): - with salt.utils.fopen(entry) as entry_fp: - device = entry.split('/')[3] - flag = entry_fp.read(1) - if flag == '0': - ret['SSDs'].append(device) - log.trace('Device {0} reports itself as an SSD'.format(device)) - elif flag == '1': - ret['disks'].append(device) - log.trace('Device {0} reports itself as an HDD'.format(device)) - else: - log.trace('Unable to identify device {0} as an SSD or HDD.' 
- ' It does not report 0 or 1'.format(device)) + try: + with salt.utils.files.fopen(entry) as entry_fp: + device = entry.split('/')[3] + flag = entry_fp.read(1) + if flag == '0': + ret['SSDs'].append(device) + log.trace('Device %s reports itself as an SSD', device) + elif flag == '1': + ret['disks'].append(device) + log.trace('Device %s reports itself as an HDD', device) + else: + log.trace( + 'Unable to identify device %s as an SSD or HDD. It does ' + 'not report 0 or 1', device + ) + except IOError: + pass return ret From 6ae6c635d2fc7f89e148f95164d1e025c7e92321 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 5 Feb 2018 07:40:39 -0700 Subject: [PATCH 022/223] fix building with rackspace on openstack driver --- salt/cloud/clouds/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py index 4fd63643bf..62ef280410 100644 --- a/salt/cloud/clouds/openstack.py +++ b/salt/cloud/clouds/openstack.py @@ -355,7 +355,7 @@ def _get_ips(node, addr_type='public'): ret = [] for _, interface in node.addresses.items(): for addr in interface: - if addr_type in ('floating', 'fixed') and addr_type == addr['OS-EXT-IPS:type']: + if addr_type in ('floating', 'fixed') and addr_type == addr.get('OS-EXT-IPS:type'): ret.append(addr['addr']) elif addr_type == 'public' and __utils__['cloud.is_public_ip'](addr['addr']): ret.append(addr['addr']) From 1024856f9a1ca226c20981661c1de4fe24fb2504 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 1 Feb 2018 14:02:14 -0800 Subject: [PATCH 023/223] Wrapping the put_nowait in a try...except and catching the exception when the multiprocessing queue is full. This situation is happening when running the full testing suite on MacOS where the queue limit is 32767 vs on Linux where the queue limit is unlimited. 
--- salt/log/handlers/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index ceb8f50232..ae02e364fa 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -174,7 +174,12 @@ if sys.version_info < (3, 2): this method if you want to use blocking, timeouts or custom queue implementations. ''' - self.queue.put_nowait(record) + try: + self.queue.put_nowait(record) + except self.queue.Full: + sys.stderr.write('[WARNING ] Message queue is full, ' + 'unable to write "{0}" to log', record + ) def prepare(self, record): ''' From 104a24f24465974998052b1b485de7c3ba6d661d Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 5 Feb 2018 16:34:19 -0500 Subject: [PATCH 024/223] Remove extraneous ] in release notes for 2016.11.9 --- doc/topics/releases/2016.11.9.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/releases/2016.11.9.rst b/doc/topics/releases/2016.11.9.rst index f330b9ae2b..69c261c437 100644 --- a/doc/topics/releases/2016.11.9.rst +++ b/doc/topics/releases/2016.11.9.rst @@ -2,7 +2,7 @@ Salt 2016.11.9 Release Notes ============================ -Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 `.] +Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 `. 
Changes for v2016.11.8..v2016.11.9 ---------------------------------------------------------------- From f937e8ba81f45c7f341174f179cd18e2a08e4efb Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 6 Feb 2018 09:57:22 -0500 Subject: [PATCH 025/223] Add release notes file for 2017.7.4 release --- doc/topics/releases/2017.7.4.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/topics/releases/2017.7.4.rst diff --git a/doc/topics/releases/2017.7.4.rst b/doc/topics/releases/2017.7.4.rst new file mode 100644 index 0000000000..129c80e6f9 --- /dev/null +++ b/doc/topics/releases/2017.7.4.rst @@ -0,0 +1,5 @@ +=========================== +Salt 2017.7.4 Release Notes +=========================== + +Version 2017.7.4 is a bugfix release for :ref:`2017.7.0 `. From afb3968aa7cc8eca9ce6e91410212870bc07c39c Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 7 Feb 2018 02:03:15 +1100 Subject: [PATCH 026/223] ec2_pillar could not find instance-id, resolved. add support to use any tag to compare minion id against. --- salt/pillar/ec2_pillar.py | 173 ++++++++++++++++++++++++++------------ 1 file changed, 118 insertions(+), 55 deletions(-) diff --git a/salt/pillar/ec2_pillar.py b/salt/pillar/ec2_pillar.py index 2fc014ae6e..8399f8f1e9 100644 --- a/salt/pillar/ec2_pillar.py +++ b/salt/pillar/ec2_pillar.py @@ -1,19 +1,25 @@ -# -*- coding: utf-8 -*- +#-*- coding: utf-8 -*- ''' Retrieve EC2 instance data for minions. -The minion id must be the instance-id retrieved from AWS. As an -option, use_grain can be set to True. This allows the use of an +The minion id must be the AWS instance-id or value in tag_key (default Name). +To use tag_key, need to set what standard is used for setting the tag. +The value of tag_value can be 'uqdn' or 'asis'. if uqdn strips any domain before +comparision. +The option use_grain can be set to True. This allows the use of an instance-id grain instead of the minion-id. 
Since this is a potential security risk, the configuration can be further expanded to include a list of minions that are trusted to only allow the alternate id of the instances to specific hosts. There is no glob matching at -this time. +this time. Note: restart the salt-master for changes to take effect. + .. code-block:: yaml ext_pillar: - ec2_pillar: + tag_key: 'Name' + tag_value: 'asis' use_grain: True minion_ids: - trusted-minion-1 @@ -31,6 +37,7 @@ the instance. from __future__ import absolute_import import re import logging +import salt.ext.six as six # Import salt libs from salt.utils.versions import StrictVersion as _StrictVersion @@ -46,6 +53,8 @@ except ImportError: # Set up logging log = logging.getLogger(__name__) +# DEBUG boto is far too verbose +logging.getLogger('boto').setLevel(logging.WARNING) def __virtual__(): @@ -59,7 +68,7 @@ def __virtual__(): required_boto_version = _StrictVersion('2.8.0') if boto_version < required_boto_version: log.error("%s: installed boto version %s < %s, can't retrieve instance data", - __name__, boto_version, required_boto_version) + __name__, boto_version, required_boto_version) return False return True @@ -76,68 +85,122 @@ def _get_instance_info(): def ext_pillar(minion_id, pillar, # pylint: disable=W0613 use_grain=False, - minion_ids=None): + minion_ids=None, + tag_key=None, + tag_value='asis'): ''' Execute a command and read the output as YAML ''' + valid_tag_value = ['uqdn', 'asis'] - log.debug("Querying EC2 tags for minion id {0}".format(minion_id)) + # meta-data:instance-id + grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None) + if not grain_instance_id: + # dynamic:instance-identity:document:instanceId + grain_instance_id = \ + __grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None) + if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None: + log.error('External pillar {0}, instance-id \'{1}\' is 
not valid for ' + '\'{2}\''.format(__name__, grain_instance_id, minion_id)) + grain_instance_id = None # invalid instance id found, remove it from use. - # If minion_id is not in the format of an AWS EC2 instance, check to see - # if there is a grain named 'instance-id' use that. Because this is a - # security risk, the master config must contain a use_grain: True option - # for this external pillar, which defaults to no - if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is None: - if 'instance-id' not in __grains__: - log.debug("Minion-id is not in AWS instance-id formation, and there " - "is no instance-id grain for minion {0}".format(minion_id)) - return {} - if not use_grain: - log.debug("Minion-id is not in AWS instance-id formation, and option " - "not set to use instance-id grain, for minion {0}, use_grain " - " is {1}".format( - minion_id, - use_grain)) - return {} - log.debug("use_grain set to {0}".format(use_grain)) - if minion_ids is not None and minion_id not in minion_ids: - log.debug("Minion-id is not in AWS instance ID format, and minion_ids " - "is set in the ec2_pillar configuration, but minion {0} is " - "not in the list of allowed minions {1}".format(minion_id, - minion_ids)) - return {} - if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', __grains__['instance-id']) is not None: - minion_id = __grains__['instance-id'] - log.debug("Minion-id is not in AWS instance ID format, but a grain" - " is, so using {0} as the minion ID".format(minion_id)) + # Check AWS Tag restrictions .i.e. letters, spaces, and numbers and + - = . 
_ : / @ + if tag_key and re.match(r'[\w=.:/@-]+$', tag_key) is None: + log.error('External pillar %s, tag_key \'%s\' is not valid ', + __name__, tag_key if isinstance(tag_key, six.text_type) else 'non-string') + return {} + + if tag_key and tag_value not in valid_tag_value: + log.error('External pillar {0}, tag_value \'{1}\' is not valid must be one ' + 'of {2}'.format(__name__, tag_value, ' '.join(valid_tag_value))) + return {} + + if not tag_key: + base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' ' + 'against instance-id'.format(__name__, minion_id)) + else: + base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' ' + 'against instance-id or \'{2}\' against \'{3}\''.format(__name__, minion_id, tag_key, tag_value)) + + log.debug(base_msg) + find_filter = None + find_id = None + + if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None: + find_filter = None + find_id = minion_id + elif tag_key: + if tag_value == 'uqdn': + find_filter = {'tag:{0}'.format(tag_key): minion_id.split('.', 1)[0]} else: - log.debug("Nether minion id nor a grain named instance-id is in " - "AWS format, can't query EC2 tags for minion {0}".format( - minion_id)) - return {} + find_filter = {'tag:{0}'.format(tag_key): minion_id} + if grain_instance_id: + # we have an untrusted grain_instance_id, use it to narrow the search + # even more. Combination will be unique even if uqdn is set. 
+ find_filter.update({'instance-id': grain_instance_id}) + # Add this if running state is not dependant on EC2Config + # find_filter.update('instance-state-name': 'running') - m = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1) - if len(m.keys()) < 1: - log.info("%s: not an EC2 instance, skipping", __name__) - return None + # no minion-id is instance-id and no suitable filter, try use_grain if enabled + if not find_filter and not find_id and use_grain: + if not grain_instance_id: + log.debug('Minion-id is not in AWS instance-id formation, and there ' + 'is no instance-id grain for minion %s', minion_id) + return {} + if minion_ids is not None and minion_id not in minion_ids: + log.debug('Minion-id is not in AWS instance ID format, and minion_ids ' + 'is set in the ec2_pillar configuration, but minion {0} is ' + 'not in the list of allowed minions {1}'.format(minion_id, minion_ids)) + return {} + find_id = grain_instance_id + + if not (find_filter or find_id): + log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against ' + 'instance-id or \'%s\' against \'%s\' noughthing to match against', + __name__, minion_id, tag_key, tag_value) + return {} + + myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1) + if len(myself.keys()) < 1: + log.info("%s: salt master not an EC2 instance, skipping", __name__) + return {} # Get the Master's instance info, primarily the region - (instance_id, region) = _get_instance_info() + (_, region) = _get_instance_info() try: conn = boto.ec2.connect_to_region(region) - except boto.exception as e: # pylint: disable=E0712 - log.error("%s: invalid AWS credentials.", __name__) - return None + except boto.exception.AWSConnectionError as exc: + log.error('%s: invalid AWS credentials, %s', __name__, exc) + return {} + except: + raise + + if conn is None: + log.error('%s: Could not connect to region %s', __name__, region) + return {} - tags = {} try: - _tags = 
conn.get_all_tags(filters={'resource-type': 'instance', - 'resource-id': minion_id}) - for tag in _tags: - tags[tag.name] = tag.value - except IndexError as e: - log.error("Couldn't retrieve instance information: %s", e) - return None + if find_id: + instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False) + else: + # filters and max_results can not be used togther. + instance_data = conn.get_only_instances(filters=find_filter, dry_run=False) - return {'ec2_tags': tags} + if instance_data: + if len(instance_data) == 1: + instance = instance_data[0] + else: + log.error('%s multiple matches using \'%s\'', base_msg, find_id if find_id else find_filter) + return {} + else: + log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter) + return {} + except boto.exception.EC2ResponseError as exc: + log.error('{0} failed with \'{1}\''.format(base_msg, exc)) + return {} + + if instance.tags: + log.debug('External pillar {0}, for minion id \'{1}\', tags: {2}'.format(__name__, minion_id, instance.tags)) + return {'ec2_tags': instance.tags} + return {} From c8a9df0179158183a2b52b52e9934bfdc557501d Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 6 Feb 2018 13:02:07 -0700 Subject: [PATCH 027/223] ec2 can have m2crypto or pycrypto --- salt/cloud/clouds/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index 1fcefb7c8d..0cb5e234ad 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -200,8 +200,7 @@ def get_dependencies(): ''' deps = { 'requests': HAS_REQUESTS, - 'm2crypto': HAS_M2, - 'pycrypto': HAS_PYCRYPTO + 'pycrypto or m2crypto': HAS_M2 or HAS_PYCRYPTO } return config.check_driver_dependencies( __virtualname__, From f88ff75c19cedacd30d1e16f44fa75c164343c25 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 6 Feb 2018 13:27:11 -0700 Subject: [PATCH 028/223] fix gce tests --- tests/integration/cloud/providers/test_gce.py | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cloud/providers/test_gce.py b/tests/integration/cloud/providers/test_gce.py index 7a5720b661..902002ffc0 100644 --- a/tests/integration/cloud/providers/test_gce.py +++ b/tests/integration/cloud/providers/test_gce.py @@ -36,7 +36,7 @@ class GCETest(ShellCase): provider = 'gce' providers = self.run_cloud('--list-providers') # Create the cloud instance name to be used throughout the tests - self.INSTANCE_NAME = generate_random_name('cloud-test-') + self.INSTANCE_NAME = generate_random_name('cloud-test-').lower() if profile_str not in providers: self.skipTest( From 57ab20308e4d44f5c1111f7e2b208678ebbd35e8 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 6 Feb 2018 14:50:20 -0700 Subject: [PATCH 029/223] fix msazure cloud test --- tests/integration/cloud/providers/test_msazure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cloud/providers/test_msazure.py b/tests/integration/cloud/providers/test_msazure.py index fcecb43718..c02cabf5d2 100644 --- a/tests/integration/cloud/providers/test_msazure.py +++ b/tests/integration/cloud/providers/test_msazure.py @@ -45,7 +45,7 @@ def __has_required_azure(): else: version = LooseVersion(azure.common.__version__) - if REQUIRED_AZURE <= version: + if LooseVersion(REQUIRED_AZURE) <= version: return True return False From 6cef37f310b7c129b1a7c9d78a0296e665a802a8 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 1 Feb 2018 22:24:28 -0600 Subject: [PATCH 030/223] 2 unicode-compatibility fixes for cmdmod.py First, `shlex.split()` will raise an exception when passed a unicode type with unicode characters in the string. This modifies our `shlex.split()` helper to first convert the passed string to a `str` type, and then return a decoded copy of the result of the split. Second, this uses our `to_unicode` helper to more gracefully decode the stdout and stderr from the command. 
Unit tests have been added to confirm that the output is properly decoded, including instances where decoding fails because the return from the command contains binary data. --- salt/modules/cmdmod.py | 80 +++++++------- salt/utils/args.py | 8 +- .../integration/files/file/base/random_bytes | 1 + tests/integration/modules/test_cmdmod.py | 2 +- tests/unit/modules/test_cmdmod.py | 103 ++++++++++++++++++ 5 files changed, 155 insertions(+), 39 deletions(-) create mode 100644 tests/integration/files/file/base/random_bytes diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 09e71dc90e..f8041310f0 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -281,8 +281,9 @@ def _run(cmd, if _is_valid_shell(shell) is False: log.warning( 'Attempt to run a shell command with what may be an invalid shell! ' - 'Check to ensure that the shell <{0}> is valid for this user.' - .format(shell)) + 'Check to ensure that the shell <%s> is valid for this user.', + shell + ) log_callback = _check_cb(log_callback) @@ -347,14 +348,15 @@ def _run(cmd, # checked if blacklisted if '__pub_jid' in kwargs: if not _check_avail(cmd): - msg = 'This shell command is not permitted: "{0}"'.format(cmd) - raise CommandExecutionError(msg) + raise CommandExecutionError( + 'The shell command "{0}" is not permitted'.format(cmd) + ) env = _parse_env(env) for bad_env_key in (x for x, y in six.iteritems(env) if y is None): - log.error('Environment variable \'{0}\' passed without a value. ' - 'Setting value to an empty string'.format(bad_env_key)) + log.error('Environment variable \'%s\' passed without a value. 
' + 'Setting value to an empty string', bad_env_key) env[bad_env_key] = '' def _get_stripped(cmd): @@ -504,8 +506,7 @@ def _run(cmd, try: _umask = int(_umask, 8) except ValueError: - msg = 'Invalid umask: \'{0}\''.format(umask) - raise CommandExecutionError(msg) + raise CommandExecutionError("Invalid umask: '{0}'".format(umask)) else: _umask = None @@ -570,20 +571,28 @@ def _run(cmd, return ret try: - out = proc.stdout.decode(__salt_system_encoding__) - except AttributeError: + out = salt.utils.stringutils.to_unicode(proc.stdout) + except TypeError: + # stdout is None out = '' except UnicodeDecodeError: - log.error('UnicodeDecodeError while decoding output of cmd {0}'.format(cmd)) - out = proc.stdout.decode(__salt_system_encoding__, 'replace') + log.error( + 'Failed to decode stdout from command %s, non-decodable ' + 'characters have been replaced', cmd + ) + out = salt.utils.stringutils.to_unicode(proc.stdout, errors='replace') try: - err = proc.stderr.decode(__salt_system_encoding__) - except AttributeError: + err = salt.utils.stringutils.to_unicode(proc.stderr) + except TypeError: + # stderr is None err = '' except UnicodeDecodeError: - log.error('UnicodeDecodeError while decoding error of cmd {0}'.format(cmd)) - err = proc.stderr.decode(__salt_system_encoding__, 'replace') + log.error( + 'Failed to decode stderr from command %s, non-decodable ' + 'characters have been replaced', cmd + ) + err = salt.utils.stringutils.to_unicode(proc.stderr, errors='replace') if rstrip: if out is not None: @@ -648,9 +657,8 @@ def _run(cmd, ret['retcode'] = 1 break except salt.utils.vt.TerminalException as exc: - log.error( - 'VT: {0}'.format(exc), - exc_info_on_loglevel=logging.DEBUG) + log.error('VT: %s', exc, + exc_info_on_loglevel=logging.DEBUG) ret = {'retcode': 1, 'pid': '2'} break # only set stdout on success as we already mangled in other @@ -1401,11 +1409,11 @@ def run_stdout(cmd, ) log.error(log_callback(msg)) if ret['stdout']: - log.log(lvl, 'stdout: 
{0}'.format(log_callback(ret['stdout']))) + log.log(lvl, 'stdout: %s', log_callback(ret['stdout'])) if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) + log.log(lvl, 'stderr: %s', log_callback(ret['stderr'])) if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) + log.log(lvl, 'retcode: %s', ret['retcode']) return ret['stdout'] if not hide_output else '' @@ -1603,11 +1611,11 @@ def run_stderr(cmd, ) log.error(log_callback(msg)) if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) + log.log(lvl, 'stdout: %s', log_callback(ret['stdout'])) if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) + log.log(lvl, 'stderr: %s', log_callback(ret['stderr'])) if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) + log.log(lvl, 'retcode: %s', ret['retcode']) return ret['stderr'] if not hide_output else '' @@ -1832,11 +1840,11 @@ def run_all(cmd, ) log.error(log_callback(msg)) if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) + log.log(lvl, 'stdout: %s', log_callback(ret['stdout'])) if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) + log.log(lvl, 'stderr: %s', log_callback(ret['stderr'])) if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) + log.log(lvl, 'retcode: %s', ret['retcode']) if hide_output: ret['stdout'] = ret['stderr'] = '' @@ -2017,7 +2025,7 @@ def retcode(cmd, ) ) log.error(log_callback(msg)) - log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout']))) + log.log(lvl, 'output: %s', log_callback(ret['stdout'])) return ret['retcode'] @@ -2217,10 +2225,8 @@ def script(source, __salt__['file.remove'](path) except (SaltInvocationError, CommandExecutionError) as exc: log.error( - 'cmd.script: Unable to clean tempfile \'{0}\': {1}'.format( - path, - exc - ) + 'cmd.script: Unable to clean tempfile \'%s\': %s', + path, exc, 
exc_info_on_loglevel=logging.DEBUG ) if '__env__' in kwargs: @@ -2853,7 +2859,7 @@ def shells(): else: ret.append(line) except OSError: - log.error("File '{0}' was not found".format(shells_fn)) + log.error("File '%s' was not found", shells_fn) return ret @@ -2980,7 +2986,7 @@ def shell_info(shell, list_modules=False): newenv = os.environ if ('HOME' not in newenv) and (not salt.utils.platform.is_windows()): newenv['HOME'] = os.path.expanduser('~') - log.debug('HOME environment set to {0}'.format(newenv['HOME'])) + log.debug('HOME environment set to %s', newenv['HOME']) try: proc = salt.utils.timed_subprocess.TimedProc( shell_data, @@ -3231,7 +3237,7 @@ def powershell(cmd, if encode_cmd: # Convert the cmd to UTF-16LE without a BOM and base64 encode. # Just base64 encoding UTF-8 or including a BOM is not valid. - log.debug('Encoding PowerShell command \'{0}\''.format(cmd)) + log.debug('Encoding PowerShell command \'%s\'', cmd) cmd_utf16 = cmd.decode('utf-8').encode('utf-16le') cmd = base64.standard_b64encode(cmd_utf16) encoded_cmd = True @@ -3534,7 +3540,7 @@ def powershell_all(cmd, if encode_cmd: # Convert the cmd to UTF-16LE without a BOM and base64 encode. # Just base64 encoding UTF-8 or including a BOM is not valid. - log.debug('Encoding PowerShell command \'{0}\''.format(cmd)) + log.debug('Encoding PowerShell command \'%s\'', cmd) cmd_utf16 = cmd.decode('utf-8').encode('utf-16le') cmd = base64.standard_b64encode(cmd_utf16) encoded_cmd = True diff --git a/salt/utils/args.py b/salt/utils/args.py index 29ba3d8211..660810703b 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py @@ -269,7 +269,13 @@ def shlex_split(s, **kwargs): Only split if variable is a string ''' if isinstance(s, six.string_types): - return shlex.split(s, **kwargs) + # On PY2, shlex.split will fail with unicode types if there are + # non-ascii characters in the string. 
So, we need to make sure we + # invoke it with a str type, and then decode the resulting string back + # to unicode to return it. + return salt.utils.data.decode( + shlex.split(salt.utils.stringutils.to_str(s), **kwargs) + ) else: return s diff --git a/tests/integration/files/file/base/random_bytes b/tests/integration/files/file/base/random_bytes new file mode 100644 index 0000000000..bac5364ab8 --- /dev/null +++ b/tests/integration/files/file/base/random_bytes @@ -0,0 +1 @@ +©ˆÛ diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py index 18f08744ff..ec00a83d80 100644 --- a/tests/integration/modules/test_cmdmod.py +++ b/tests/integration/modules/test_cmdmod.py @@ -121,7 +121,7 @@ class CMDModuleTest(ModuleCase): ''' self.assertEqual(self.run_function('cmd.run', ['bad_command --foo']).rstrip(), - 'ERROR: This shell command is not permitted: "bad_command --foo"') + 'ERROR: The shell command "bad_command --foo" is not permitted') def test_script(self): ''' diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py index fff5732baa..82a739c24b 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py @@ -10,14 +10,17 @@ import sys import tempfile # Import Salt Libs +import salt.utils.files import salt.utils.platform import salt.modules.cmdmod as cmdmod from salt.exceptions import CommandExecutionError from salt.log import LOG_LEVELS +from salt.ext.six.moves import builtins # pylint: disable=import-error # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf +from tests.support.paths import FILES from tests.support.mock import ( mock_open, Mock, @@ -33,6 +36,39 @@ MOCK_SHELL_FILE = '# List of acceptable shells\n' \ '/bin/bash\n' +class MockTimedProc(object): + ''' + Class used as a stand-in for salt.utils.timed_subprocess.TimedProc + ''' + class _Process(object): + ''' + Used to provide a dummy 
"process" attribute + ''' + def __init__(self, returncode=0, pid=12345): + self.returncode = returncode + self.pid = pid + + def __init__(self, stdout=None, stderr=None, returncode=0, pid=12345): + if stdout is not None and not isinstance(stdout, bytes): + raise TypeError('Must pass stdout to MockTimedProc as bytes') + if stderr is not None and not isinstance(stderr, bytes): + raise TypeError('Must pass stderr to MockTimedProc as bytes') + self._stdout = stdout + self._stderr = stderr + self.process = self._Process(returncode=returncode, pid=pid) + + def run(self): + pass + + @property + def stdout(self): + return self._stdout + + @property + def stderr(self): + return self._stderr + + @skipIf(NO_MOCK, NO_MOCK_REASON) class CMDMODTestCase(TestCase, LoaderModuleMockMixin): ''' @@ -303,3 +339,70 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): pass else: raise RuntimeError + + def test_run_all_binary_replace(self): + ''' + Test for failed decoding of binary data, for instance when doing + something silly like using dd to read from /dev/urandom and write to + /dev/stdout. + ''' + # Since we're using unicode_literals, read the random bytes from a file + rand_bytes_file = os.path.join(FILES, 'file', 'base', 'random_bytes') + with salt.utils.files.fopen(rand_bytes_file, 'rb') as fp_: + stdout_bytes = fp_.read() + + # stdout with the non-decodable bits replaced with the unicode + # replacement character U+FFFD. 
+ stdout_unicode = '\ufffd\x1b\ufffd\ufffd\n' + stderr_bytes = b'1+0 records in\n1+0 records out\n' \ + b'4 bytes copied, 9.1522e-05 s, 43.7 kB/s\n' + stderr_unicode = stderr_bytes.decode() + + proc = MagicMock( + return_value=MockTimedProc( + stdout=stdout_bytes, + stderr=stderr_bytes + ) + ) + with patch('salt.utils.timed_subprocess.TimedProc', proc): + ret = cmdmod.run_all( + 'dd if=/dev/urandom of=/dev/stdout bs=4 count=1', + rstrip=False) + + self.assertEqual(ret['stdout'], stdout_unicode) + self.assertEqual(ret['stderr'], stderr_unicode) + + def test_run_all_none(self): + ''' + Tests cases when proc.stdout or proc.stderr are None. These should be + caught and replaced with empty strings. + ''' + proc = MagicMock(return_value=MockTimedProc(stdout=None, stderr=None)) + with patch('salt.utils.timed_subprocess.TimedProc', proc): + ret = cmdmod.run_all('some command', rstrip=False) + + self.assertEqual(ret['stdout'], '') + self.assertEqual(ret['stderr'], '') + + def test_run_all_unicode(self): + ''' + Ensure that unicode stdout and stderr are decoded properly + ''' + stdout_unicode = 'Here is some unicode: Ñпам' + stderr_unicode = 'Here is some unicode: Ñйца' + stdout_bytes = stdout_unicode.encode('utf-8') + stderr_bytes = stderr_unicode.encode('utf-8') + + proc = MagicMock( + return_value=MockTimedProc( + stdout=stdout_bytes, + stderr=stderr_bytes + ) + ) + + with patch('salt.utils.timed_subprocess.TimedProc', proc), \ + patch.object(builtins, '__salt_system_encoding__', 'utf-8'): + ret = cmdmod.run_all('some command', rstrip=False) + + self.assertEqual(ret['stdout'], stdout_unicode) + self.assertEqual(ret['stderr'], stderr_unicode) From 22c676ce57ca5e59162e46ef7758802fd228383a Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 2 Feb 2018 14:24:58 -0600 Subject: [PATCH 031/223] Add option to cmdmod funcs to specify the output encoding --- salt/modules/cmdmod.py | 1782 ++++++++++++++--------------- salt/modules/dracr.py | 23 +- 
tests/unit/modules/test_cmdmod.py | 15 + 3 files changed, 870 insertions(+), 950 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index f8041310f0..516773e0e6 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -66,11 +66,9 @@ log = logging.getLogger(__name__) DEFAULT_SHELL = salt.grains.extra.shell()['shell'] +# Overwriting the cmd python module makes debugging modules with pdb a bit +# harder so lets do it this way instead. def __virtual__(): - ''' - Overwriting the cmd python module makes debugging modules - with pdb a bit harder so lets do it this way instead. - ''' return __virtualname__ @@ -250,6 +248,7 @@ def _run(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + output_encoding=None, output_loglevel='debug', log_callback=None, runas=None, @@ -570,29 +569,43 @@ def _run(cmd, ret['retcode'] = 1 return ret + if output_encoding is not None: + log.debug('Decoding output from command %s using %s encoding', + cmd, output_encoding) + try: - out = salt.utils.stringutils.to_unicode(proc.stdout) + out = salt.utils.stringutils.to_unicode( + proc.stdout, + encoding=output_encoding) except TypeError: # stdout is None out = '' except UnicodeDecodeError: + out = salt.utils.stringutils.to_unicode( + proc.stdout, + encoding=output_encoding, + errors='replace') log.error( 'Failed to decode stdout from command %s, non-decodable ' 'characters have been replaced', cmd ) - out = salt.utils.stringutils.to_unicode(proc.stdout, errors='replace') try: - err = salt.utils.stringutils.to_unicode(proc.stderr) + err = salt.utils.stringutils.to_unicode( + proc.stderr, + encoding=output_encoding) except TypeError: # stderr is None err = '' except UnicodeDecodeError: + err = salt.utils.stringutils.to_unicode( + proc.stderr, + encoding=output_encoding, + errors='replace') log.error( 'Failed to decode stderr from command %s, non-decodable ' 'characters have been replaced', cmd ) - err = salt.utils.stringutils.to_unicode(proc.stderr, 
errors='replace') if rstrip: if out is not None: @@ -686,6 +699,7 @@ def _run(cmd, def _run_quiet(cmd, cwd=None, stdin=None, + output_encoding=None, runas=None, shell=DEFAULT_SHELL, python_shell=False, @@ -705,6 +719,7 @@ def _run_quiet(cmd, cwd=cwd, stdin=stdin, stderr=subprocess.STDOUT, + output_encoding=output_encoding, output_loglevel='quiet', log_callback=None, shell=shell, @@ -733,13 +748,13 @@ def _run_all_quiet(cmd, saltenv='base', pillarenv=None, pillar_override=None, - output_loglevel=None): + output_encoding=None): ''' Helper for running commands quietly for minion startup. Returns a dict of return data. - output_loglevel argument is ignored. This is here for when we alias + output_loglevel argument is ignored. This is here for when we alias cmd.run_all directly to _run_all_quiet in certain chicken-and-egg situations where modules need to work both before and after the __salt__ dictionary is populated (cf dracr.py) @@ -751,6 +766,7 @@ def _run_all_quiet(cmd, shell=shell, python_shell=python_shell, env=env, + output_encoding=output_encoding, output_loglevel='quiet', log_callback=None, template=template, @@ -773,6 +789,7 @@ def run(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -790,124 +807,111 @@ def run(cmd, r''' Execute the passed command and return the output as a string - Note that ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. - :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). 
:param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input. + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If ``False``, let python handle the positional - arguments. Set to ``True`` to use shell features, such as pipes or - redirection. + arguments. Set to ``True`` to use shell features, such as pipes or + redirection. :param bool bg: If ``True``, run command in background and do not await or - deliver it's results + deliver it's results - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. 
code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.run 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. - :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + :param str prepend_path: $PATH segment to prepend (trailing ':' not + necessary) to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. 
+ :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param int timeout: A timeout in seconds for the executed process to return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. 
:param bool encoded_cmd: Specify if the supplied command is encoded. - Only applies to shell 'powershell'. + Only applies to shell 'powershell'. :param bool raise_err: If ``True`` and the command has a nonzero exit code, - a CommandExecutionError exception will be raised. + a CommandExecutionError exception will be raised. .. warning:: This function does not process commands through a shell @@ -943,7 +947,7 @@ def run(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. code-block:: bash @@ -973,6 +977,7 @@ def run(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -1017,6 +1022,7 @@ def shell(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -1034,121 +1040,106 @@ def shell(cmd, .. versionadded:: 2015.5.0 - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input. + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. 
If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 :param int shell: Shell to execute under. Defaults to the system default - shell. + shell. :param bool bg: If True, run command in background and do not await or - deliver its results + deliver its results - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. 
code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.shell 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. 
note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen - :param int timeout: A timeout in seconds for the executed process to return. + :param int timeout: A timeout in seconds for the executed process to + return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. .. warning:: - This passes the cmd argument directly to the shell - without any further processing! Be absolutely sure that you - have properly sanitized the command passed to this function - and do not use untrusted inputs. - - .. note:: - - ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. + This passes the cmd argument directly to the shell without any further + processing! Be absolutely sure that you have properly sanitized the + command passed to this function and do not use untrusted inputs. 
CLI Example: @@ -1172,7 +1163,7 @@ def shell(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. code-block:: bash @@ -1202,6 +1193,7 @@ def shell(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, hide_output=hide_output, @@ -1227,6 +1219,7 @@ def run_stdout(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -1241,112 +1234,101 @@ def run_stdout(cmd, ''' Execute a command, and only return the standard out - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. 
If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. 
code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.run_stdout 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. 
note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen - :param int timeout: A timeout in seconds for the executed process to return. + :param int timeout: A timeout in seconds for the executed process to + return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. - - .. note:: - ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. + more interactively to the console and the logs. This is experimental. CLI Example: @@ -1364,7 +1346,7 @@ def run_stdout(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. 
code-block:: bash @@ -1384,6 +1366,7 @@ def run_stdout(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -1428,6 +1411,7 @@ def run_stderr(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -1442,113 +1426,101 @@ def run_stderr(cmd, ''' Execute a command and only return the standard error - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. 
versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.run_stderr 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. 
- :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + :param str prepend_path: $PATH segment to prepend (trailing ':' not + necessary) to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. 
Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen - :param int timeout: A timeout in seconds for the executed process to return. + :param int timeout: A timeout in seconds for the executed process to + return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. - - .. note:: - ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. + more interactively to the console and the logs. This is experimental. CLI Example: @@ -1566,7 +1538,7 @@ def run_stderr(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. code-block:: bash @@ -1586,6 +1558,7 @@ def run_stderr(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -1630,6 +1603,7 @@ def run_all(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -1646,135 +1620,123 @@ def run_all(cmd, ''' Execute the passed command and return a dict of return data - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. 
ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. 
- Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.run_all 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. - :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + :param str prepend_path: $PATH segment to prepend (trailing ':' not + necessary) to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. 
:param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen - :param int timeout: A timeout in seconds for the executed process to return. 
+ :param int timeout: A timeout in seconds for the executed process to + return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. - - .. note:: - ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. + more interactively to the console and the logs. This is experimental. :param bool encoded_cmd: Specify if the supplied command is encoded. - Only applies to shell 'powershell'. + Only applies to shell 'powershell'. - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param bool redirect_stderr: If set to ``True``, then stderr will be - redirected to stdout. This is helpful for cases where obtaining both the - retcode and output is desired, but it is not desired to have the output - separated into both stdout and stderr. + redirected to stdout. This is helpful for cases where obtaining both + the retcode and output is desired, but it is not desired to have the + output separated into both stdout and stderr. .. versionadded:: 2015.8.2 :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 :param bool bg: If ``True``, run command in background and do not await or - deliver it's results + deliver its results - .. versionadded:: 2016.3.6 + .. versionadded:: 2016.3.6 CLI Example: @@ -1792,7 +1754,7 @@ def run_all(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. 
code-block:: bash @@ -1814,6 +1776,7 @@ def run_all(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -1861,6 +1824,7 @@ def retcode(cmd, clean_env=False, template=None, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, timeout=None, @@ -1873,97 +1837,87 @@ def retcode(cmd, ''' Execute a shell command and return the command's return code. - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. 
versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.retcode 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. 
:param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. + :param str output_encoding: Control the encoding used to decode the + command's output. + + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + :param str output_loglevel: Control the loglevel at which the output from - the command is logged. Note that the command being run will still be logged - (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value. + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param int timeout: A timeout in seconds for the executed process to return. :param bool use_vt: Use VT utils (saltstack) to stream the command output more interactively to the console and the logs. This is experimental. - .. 
note:: - ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. - :rtype: int :rtype: None :returns: Return Code as an int or None if there was an exception. @@ -1984,7 +1938,7 @@ def retcode(cmd, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. code-block:: bash @@ -2001,6 +1955,7 @@ def retcode(cmd, clean_env=clean_env, template=template, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -2039,7 +1994,7 @@ def _retcode_quiet(cmd, clean_env=False, template=None, umask=None, - output_loglevel='quiet', + output_encoding=None, log_callback=None, timeout=None, reset_system_locale=True, @@ -2049,8 +2004,8 @@ def _retcode_quiet(cmd, password=None, **kwargs): ''' - Helper for running commands quietly for minion startup. - Returns same as retcode + Helper for running commands quietly for minion startup. Returns same as + the retcode() function. ''' return retcode(cmd, cwd=cwd, @@ -2062,7 +2017,8 @@ def _retcode_quiet(cmd, clean_env=clean_env, template=template, umask=umask, - output_loglevel=output_loglevel, + output_encoding=output_encoding, + output_loglevel='quiet', log_callback=log_callback, timeout=timeout, reset_system_locale=reset_system_locale, @@ -2083,6 +2039,7 @@ def script(source, env=None, template=None, umask=None, + output_encoding=None, output_loglevel='debug', log_callback=None, hide_output=False, @@ -2102,107 +2059,102 @@ def script(source, programming language. :param str source: The location of the script to download. 
If the file is - located on the master in the directory named spam, and is called eggs, the - source string is salt://spam/eggs + located on the master in the directory named spam, and is called eggs, + the source string is salt://spam/eggs - :param str args: String of command line args to pass to the script. Only - used if no args are specified as part of the `name` argument. To pass a - string containing spaces in YAML, you will need to doubly-quote it: - "arg1 'arg two' arg3" + :param str args: String of command line args to pass to the script. Only + used if no args are specified as part of the `name` argument. To pass a + string containing spaces in YAML, you will need to doubly-quote it: - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + .. code-block:: bash + + salt myminion cmd.script salt://foo.sh "arg1 'arg two' arg3" + + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run script as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. 
Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param bool bg: If True, run script in background and do not await or deliver it's results + :param bool bg: If True, run script in background and do not await or + deliver it's results - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. 
code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.script 'some command' env='{"FOO": "bar"}' :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. 
Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen - :param int timeout: If the command has not terminated after timeout seconds, - send the subprocess sigterm, and if sigterm is ignored, follow up with - sigkill + :param int timeout: If the command has not terminated after timeout + seconds, send the subprocess sigterm, and if sigterm is ignored, follow + up with sigkill :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. CLI Example: @@ -2282,6 +2234,7 @@ def script(source, ret = _run(path + ' ' + six.text_type(args) if args else path, cwd=cwd, stdin=stdin, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, runas=runas, @@ -2319,6 +2272,7 @@ def script_retcode(source, timeout=None, reset_system_locale=True, saltenv='base', + output_encoding=None, output_loglevel='debug', log_callback=None, use_vt=False, @@ -2337,101 +2291,87 @@ def script_retcode(source, Only evaluate the script return code and do not block for terminal output :param str source: The location of the script to download. If the file is - located on the master in the directory named spam, and is called eggs, the - source string is salt://spam/eggs + located on the master in the directory named spam, and is called eggs, + the source string is salt://spam/eggs :param str args: String of command line args to pass to the script. 
Only - used if no args are specified as part of the `name` argument. To pass a - string containing spaces in YAML, you will need to doubly-quote it: "arg1 - 'arg two' arg3" + used if no args are specified as part of the `name` argument. To pass a + string containing spaces in YAML, you will need to doubly-quote it: + "arg1 'arg two' arg3" - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run script as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. 
:param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.script_retcode 'some command' env='{"FOO": "bar"}' :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param str umask: The umask (in octal) to use when running the command. + :param str output_encoding: Control the encoding used to decode the + command's output. + + .. 
note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + the command is logged to the minion log. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. - :param bool quiet: The command will be executed quietly, meaning no log - entries of the actual command or its return data. This is deprecated as of - the **2014.1.0** release, and is being replaced with ``output_loglevel: - quiet``. + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. - :param int timeout: If the command has not terminated after timeout seconds, - send the subprocess sigterm, and if sigterm is ignored, follow up with - sigkill + :param int timeout: If the command has not terminated after timeout + seconds, send the subprocess sigterm, and if sigterm is ignored, follow + up with sigkill :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. 
CLI Example: @@ -2443,7 +2383,7 @@ def script_retcode(source, A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + information must be read from standard input. .. code-block:: bash @@ -2466,6 +2406,7 @@ def script_retcode(source, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, use_vt=use_vt, @@ -2612,6 +2553,7 @@ def run_chroot(root, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='quiet', log_callback=None, hide_output=False, @@ -2628,96 +2570,82 @@ def run_chroot(root, This function runs :mod:`cmd.run_all ` wrapped within a chroot, with dev and proc mounted in the chroot - root - Path to the root of the jail to use. + :param str root: Path to the root of the jail to use. - cmd - The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - cwd - The current working directory to execute the command in. defaults to - /root + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). - stdin - A string of standard input can be specified for the command to be run using - the ``stdin`` parameter. This can be useful in cases where sensitive - information must be read from standard input.: + :param str stdin: A string of standard input can be specified for the + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - runas - User to run script as. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. 
If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. - shell - Shell to execute under. Defaults to the system default shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. - python_shell - If False, let python handle the positional arguments. Set to True - to use shell features, such as pipes or redirection + :param bool python_shell: If False, let python handle the positional + arguments. Set to True to use shell features, such as pipes or + redirection. - env - A list of environment variables to be set prior to execution. - Example: + :param dict env: Environment variables to be set prior to execution. - .. code-block:: yaml + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' + .. code-block:: bash - .. warning:: + salt myminion cmd.run_chroot 'some command' env='{"FOO": "bar"}' - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. + :param dict clean_env: Attempt to clean out all other shell environment + variables and set only those provided in the 'env' argument to this + function. - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': + :param str template: If this setting is applied then the named templating + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. 
code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} - - clean_env: - Attempt to clean out all other shell environment variables and set - only those provided in the 'env' argument to this function. - - template - If this setting is applied then the named templating engine will be - used to render the downloaded file. Currently jinja, mako, and wempy - are supported - - rstrip + :param bool rstrip: Strip all whitespace off the end of output before it is returned. - umask + :param str umask: The umask (in octal) to use when running the command. - output_loglevel : quiet - Control the loglevel at which the output from the command is logged to - the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. + + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. .. note:: The command being run will still be logged at the ``debug`` loglevel regardless, unless ``quiet`` is used for this value. - hide_output : False - If ``True``, suppress stdout and stderr in the return data. + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. 
Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. + + :param bool hide_output: If ``True``, suppress stdout and stderr in the + return data. .. note:: This is separate from ``output_loglevel``, which only handles how @@ -2725,14 +2653,12 @@ def run_chroot(root, .. versionadded:: Oxygen - timeout + :param int timeout: A timeout in seconds for the executed process to return. - use_vt + :param bool use_vt: Use VT utils (saltstack) to stream the command output more - interactively to the console and the logs. - This is experimental. - + interactively to the console and the logs. This is experimental. CLI Example: @@ -2771,6 +2697,7 @@ def run_chroot(root, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, log_callback=log_callback, timeout=timeout, @@ -3050,6 +2977,7 @@ def powershell(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', hide_output=False, timeout=None, @@ -3087,9 +3015,6 @@ def powershell(cmd, have properly sanitized the command passed to this function and do not use untrusted inputs. - Note that ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. - In addition to the normal ``cmd.run`` parameters, this command offers the ``depth`` parameter to change the Windows default depth for the ``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need @@ -3102,107 +3027,96 @@ def powershell(cmd, :param str cmd: The powershell command to run. - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). 
:param str stdin: A string of standard input can be specified for the command to be run using the ``stdin`` parameter. This can be useful in cases - where sensitive information must be read from standard input.: + where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. 
More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.powershell 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged to the minion log. + :param str output_encoding: Control the encoding used to decode the + command's output. - .. note:: - The command being run will still be logged at the ``debug`` - loglevel regardless, unless ``quiet`` is used for this value. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. 
versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param bool hide_output: If ``True``, suppress stdout and stderr in the - return data. + return data. - .. note:: - This is separate from ``output_loglevel``, which only handles how - Salt logs to the minion log. + .. note:: + This is separate from ``output_loglevel``, which only handles how + Salt logs to the minion log. - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param int timeout: A timeout in seconds for the executed process to return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. :param bool reset_system_locale: Resets the system locale - :param bool ignore_retcode: Ignore the return code - :param str saltenv: The salt environment to use. Default is 'base' :param int depth: The number of levels of contained objects to be included. @@ -3212,8 +3126,8 @@ def powershell(cmd, .. versionadded:: 2016.3.4 :param bool encode_cmd: Encode the command before executing. Use in cases - where characters may be dropped or incorrectly converted when executed. - Default is False. + where characters may be dropped or incorrectly converted when executed. + Default is False. 
:returns: :dict: A dictionary of data returned by the powershell command. @@ -3262,6 +3176,7 @@ def powershell(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, hide_output=hide_output, timeout=timeout, @@ -3291,6 +3206,7 @@ def powershell_all(cmd, template=None, rstrip=True, umask=None, + output_encoding=None, output_loglevel='debug', quiet=False, timeout=None, @@ -3304,62 +3220,64 @@ def powershell_all(cmd, force_list=False, **kwargs): ''' - Execute the passed PowerShell command and return a dictionary with a result field - representing the output of the command, as well as other fields - showing us what the PowerShell invocation wrote to ``stderr``, the process id, - and the exit code of the invocation. + Execute the passed PowerShell command and return a dictionary with a result + field representing the output of the command, as well as other fields + showing us what the PowerShell invocation wrote to ``stderr``, the process + id, and the exit code of the invocation. - This function appends ``| ConvertTo-JSON`` to the command before actually invoking powershell. + This function appends ``| ConvertTo-JSON`` to the command before actually + invoking powershell. - An unquoted empty string is not valid JSON, but it's very normal for the Powershell - output to be exactly that. Therefore, we do not attempt to - parse empty Powershell output (which would - result in an exception). Instead we treat this as a special case and one of two things - will happen: - * If the value of the ``force_list`` paramater - is ``True`` then the ``result`` field of the return dictionary will be an empty list. - * If the value of the ``force_list`` paramater is ``False``, then the return dictionary - **will not have a result key added to it**. We aren't setting ``result`` to ``None`` in this - case, because ``None`` is the Python representation of "null" in JSON. 
(We likewise can't use - ``False`` for the equivalent reason.) + An unquoted empty string is not valid JSON, but it's very normal for the + Powershell output to be exactly that. Therefore, we do not attempt to parse + empty Powershell output (which would result in an exception). Instead we + treat this as a special case and one of two things will happen: - If Powershell's output is not an empty string and Python cannot parse its content, - then a ``CommandExecutionError`` exception will be raised. + - If the value of the ``force_list`` paramater is ``True``, then the + ``result`` field of the return dictionary will be an empty list. - If Powershell's output is not an empty string, Python is able to parse its content, - and the type of the resulting Python object is other than ``list`` then one of two things - will happen: - * If the value of the ``force_list`` paramater is ``True``, then the ``result`` field - will be a singleton list - with the Python object as its sole member. - * If the value of the ``force_list`` paramater is ``False``, then the value of - ``result`` will be - the unmodified Python object. + - If the value of the ``force_list`` paramater is ``False``, then the + return dictionary **will not have a result key added to it**. We aren't + setting ``result`` to ``None`` in this case, because ``None`` is the + Python representation of "null" in JSON. (We likewise can't use ``False`` + for the equivalent reason.) - If Powershell's output is not an empty string, Python is able to parse its content, - and the type of the resulting Python object is ``list``, then the value of ``result`` - will be the unmodified Python object. The ``force_list`` paramater has no effect in this case. + If Powershell's output is not an empty string and Python cannot parse its + content, then a ``CommandExecutionError`` exception will be raised. - .. 
Note:: - An example of why the ``force_list`` paramater is useful is as follows: The - Powershell command - ``dir x | Convert-ToJson`` results in + If Powershell's output is not an empty string, Python is able to parse its + content, and the type of the resulting Python object is other than ``list`` + then one of two things will happen: - * no output when x is an empty directory. - * a dictionary object when x contains just one item. - * a list of dictionary objects when x contains multiple items. + - If the value of the ``force_list`` paramater is ``True``, then the + ``result`` field will be a singleton list with the Python object as its + sole member. - By setting ``force_list`` to ``True`` we will always end up with a list of dictionary items, - representing files, - no matter how many files x contains. - Conversely, if ``force_list`` is ``False``, we will end up with no ``result`` key in our - return dictionary - when x is an - empty directory, and a dictionary object when x contains just one file. + - If the value of the ``force_list`` paramater is ``False``, then the value + of ``result`` will be the unmodified Python object. - If you want a similar function but with a raw - textual result instead of a Python dictionary, - you should use ``cmd.run_all`` in combination with ``shell=powershell``. + If Powershell's output is not an empty string, Python is able to parse its + content, and the type of the resulting Python object is ``list``, then the + value of ``result`` will be the unmodified Python object. The + ``force_list`` paramater has no effect in this case. + + .. note:: + An example of why the ``force_list`` paramater is useful is as + follows: The Powershell command ``dir x | Convert-ToJson`` results in + + - no output when x is an empty directory. + - a dictionary object when x contains just one item. + - a list of dictionary objects when x contains multiple items. 
+ + By setting ``force_list`` to ``True`` we will always end up with a + list of dictionary items, representing files, no matter how many files + x contains. Conversely, if ``force_list`` is ``False``, we will end + up with no ``result`` key in our return dictionary when x is an empty + directory, and a dictionary object when x contains just one file. + + If you want a similar function but with a raw textual result instead of a + Python dictionary, you should use ``cmd.run_all`` in combination with + ``shell=powershell``. The remaining fields in the return dictionary are described in more detail in the ``Returns`` section. @@ -3375,13 +3293,9 @@ def powershell_all(cmd, .. warning:: - This passes the cmd argument directly to PowerShell - without any further processing! Be absolutely sure that you - have properly sanitized the command passed to this function - and do not use untrusted inputs. - - Note that ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. + This passes the cmd argument directly to PowerShell without any further + processing! Be absolutely sure that you have properly sanitized the + command passed to this function and do not use untrusted inputs. In addition to the normal ``cmd.run`` parameters, this command offers the ``depth`` parameter to change the Windows default depth for the @@ -3395,92 +3309,92 @@ def powershell_all(cmd, :param str cmd: The powershell command to run. - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). :param str stdin: A string of standard input can be specified for the - command to be run using the ``stdin`` parameter. 
This can be useful in cases - where sensitive information must be read from standard input.: + command to be run using the ``stdin`` parameter. This can be useful in + cases where sensitive information must be read from standard input. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. 
More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.powershell_all 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param bool rstrip: Strip all whitespace off the end of output before it is - returned. + returned. :param str umask: The umask (in octal) to use when running the command. - :param str output_loglevel: Control the loglevel at which the output from - the command is logged. Note that the command being run will still be logged - (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value. + :param str output_encoding: Control the encoding used to decode the + command's output. - :param int timeout: A timeout in seconds for the executed process to return. + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. 
This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen + + :param str output_loglevel: Control the loglevel at which the output from + the command is logged to the minion log. + + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. + + :param int timeout: A timeout in seconds for the executed process to + return. :param bool use_vt: Use VT utils (saltstack) to stream the command output - more interactively to the console and the logs. This is experimental. + more interactively to the console and the logs. This is experimental. :param bool reset_system_locale: Resets the system locale - :param bool ignore_retcode: Ignore the return code + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. :param str saltenv: The salt environment to use. Default is 'base' @@ -3489,19 +3403,19 @@ def powershell_all(cmd, it takes for the command to complete for some commands. eg: ``dir`` :param bool encode_cmd: Encode the command before executing. 
Use in cases - where characters may be dropped or incorrectly converted when executed. - Default is False. + where characters may be dropped or incorrectly converted when executed. + Default is False. - :param bool force_list: The purpose of this paramater is described in the preamble - of this function's documentation. Default value is False. + :param bool force_list: The purpose of this paramater is described in the + preamble of this function's documentation. Default value is False. :return: A dictionary with the following entries: result For a complete description of this field, please refer to this function's preamble. **This key will not be added to the dictionary - when force_list is False and Powershell's output - is the empty string.** + when force_list is False and Powershell's output is the empty + string.** stderr What the PowerShell invocation wrote to ``stderr``. pid @@ -3558,6 +3472,7 @@ def powershell_all(cmd, template=template, rstrip=rstrip, umask=umask, + output_encoding=output_encoding, output_loglevel=output_loglevel, quiet=quiet, timeout=timeout, @@ -3614,6 +3529,7 @@ def run_bg(cmd, template=None, umask=None, timeout=None, + output_encoding=None, output_loglevel='debug', log_callback=None, reset_system_locale=True, @@ -3627,92 +3543,85 @@ def run_bg(cmd, Execute the passed command in the background and return it's PID - Note that ``env`` represents the environment variables for the command, and - should be formatted as a dict, or a YAML string which resolves to a dict. - .. note:: - If the init system is systemd and the backgrounded task should run even if the salt-minion process - is restarted, prepend ``systemd-run --scope`` to the command. This will reparent the process in its - own scope separate from salt-minion, and will not be affected by restarting the minion service. + If the init system is systemd and the backgrounded task should run even + if the salt-minion process is restarted, prepend ``systemd-run + --scope`` to the command. 
This will reparent the process in its own + scope separate from salt-minion, and will not be affected by restarting + the minion service. - :param str cmd: The command to run. ex: 'ls -lart /home' + :param str cmd: The command to run. ex: ``ls -lart /home`` - :param str cwd: The current working directory to execute the command in. - Defaults to the home directory of the user specified by ``runas``. + :param str cwd: The directory from which to execute the command. Defaults + to the home directory of the user specified by ``runas`` (or the user + under which Salt is running if ``runas`` is not specified). + + :param str output_encoding: Control the encoding used to decode the + command's output. + + .. note:: + This should not need to be used in most cases. By default, Salt + will try to use the encoding detected from the system locale, and + will fall back to UTF-8 if this fails. This should only need to be + used in cases where the output of the command is encoded in + something other than the system locale or UTF-8. + + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from - the command is logged. Note that the command being run will still be logged - (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value. + the command is logged to the minion log. - :param str runas: User to run command as. If running on a Windows minion you - must also pass a password. The target user account must be in the - Administrators group. + .. note:: + The command being run will still be logged at the ``debug`` + loglevel regardless, unless ``quiet`` is used for this value. + + :param bool ignore_retcode: If the exit code of the command is nonzero, + this is treated as an error condition, and the output from the command + will be logged to the minion log. However, there are some cases where + programs use the return code for signaling and a nonzero exit code + doesn't necessarily mean failure. 
Pass this argument as ``True`` to + skip logging the output if the command has a nonzero exit code. + + :param str runas: Specify an alternate user to run the command. The default + behavior is to run as the user under which Salt is running. If running + on a Windows minion you must also use the ``password`` argument, and + the target user account must be in the Administrators group. :param str password: Windows only. Required when specifying ``runas``. This - parameter will be ignored on non-Windows platforms. + parameter will be ignored on non-Windows platforms. - .. versionadded:: 2016.3.0 + .. versionadded:: 2016.3.0 - :param str shell: Shell to execute under. Defaults to the system default - shell. + :param str shell: Specify an alternate shell. Defaults to the system's + default shell. :param bool python_shell: If False, let python handle the positional - arguments. Set to True to use shell features, such as pipes or redirection + arguments. Set to True to use shell features, such as pipes or + redirection. - :param list env: A list of environment variables to be set prior to - execution. + :param dict env: Environment variables to be set prior to execution. - Example: + .. note:: + When passing environment variables on the CLI, they should be + passed as the string representation of a dictionary. - .. code-block:: yaml + .. code-block:: bash - salt://scripts/foo.sh: - cmd.script: - - env: - - BATCH: 'yes' - - .. warning:: - - The above illustrates a common PyYAML pitfall, that **yes**, - **no**, **on**, **off**, **true**, and **false** are all loaded as - boolean ``True`` and ``False`` values, and must be enclosed in - quotes to be used as strings. More info on this (and other) PyYAML - idiosyncrasies can be found :ref:`here `. - - Variables as values are not evaluated. So $PATH in the following - example is a literal '$PATH': - - .. 
code-block:: yaml - - salt://scripts/bar.sh: - cmd.script: - - env: "PATH=/some/path:$PATH" - - One can still use the existing $PATH by using a bit of Jinja: - - .. code-block:: jinja - - {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %} - - mycommand: - cmd.run: - - name: ls -l / - - env: - - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + salt myminion cmd.run_bg 'some command' env='{"FOO": "bar"}' :param bool clean_env: Attempt to clean out all other shell environment - variables and set only those provided in the 'env' argument to this - function. + variables and set only those provided in the 'env' argument to this + function. - :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) - to $PATH + :param str prepend_path: $PATH segment to prepend (trailing ':' not + necessary) to $PATH - .. versionadded:: Oxygen + .. versionadded:: Oxygen :param str template: If this setting is applied then the named templating - engine will be used to render the downloaded file. Currently jinja, mako, - and wempy are supported + engine will be used to render the downloaded file. Currently jinja, + mako, and wempy are supported. :param str umask: The umask (in octal) to use when running the command. @@ -3720,16 +3629,16 @@ def run_bg(cmd, .. warning:: - This function does not process commands through a shell - unless the python_shell flag is set to True. This means that any + This function does not process commands through a shell unless the + ``python_shell`` argument is set to ``True``. This means that any shell-specific functionality such as 'echo' or the use of pipes, - redirection or &&, should either be migrated to cmd.shell or - have the python_shell=True flag set here. + redirection or &&, should either be migrated to cmd.shell or have the + python_shell=True flag set here. 
- The use of python_shell=True means that the shell will accept _any_ input - including potentially malicious commands such as 'good_command;rm -rf /'. - Be absolutely certain that you have sanitized your input prior to using - python_shell=True + The use of ``python_shell=True`` means that the shell will accept _any_ + input including potentially malicious commands such as 'good_command;rm + -rf /'. Be absolutely certain that you have sanitized your input prior + to using ``python_shell=True``. CLI Example: @@ -3767,6 +3676,7 @@ def run_bg(cmd, stdin=None, stderr=None, stdout=None, + output_encoding=output_encoding, output_loglevel=output_loglevel, use_vt=None, bg=True, diff --git a/salt/modules/dracr.py b/salt/modules/dracr.py index c8122c7756..d5b278d201 100644 --- a/salt/modules/dracr.py +++ b/salt/modules/dracr.py @@ -29,7 +29,7 @@ try: except (NameError, KeyError): import salt.modules.cmdmod __salt__ = { - 'cmd.run_all': salt.modules.cmdmod._run_all_quiet + 'cmd.run_all': salt.modules.cmdmod.run_all } @@ -95,8 +95,7 @@ def __execute_cmd(command, host=None, output_loglevel='quiet') if cmd['retcode'] != 0: - log.warning('racadm return an exit code \'{0}\'.' - .format(cmd['retcode'])) + log.warning('racadm returned an exit code of %s', cmd['retcode']) return False return True @@ -129,8 +128,7 @@ def __execute_ret(command, host=None, output_loglevel='quiet') if cmd['retcode'] != 0: - log.warning('racadm return an exit code \'{0}\'.' - .format(cmd['retcode'])) + log.warning('racadm returned an exit code of %s', cmd['retcode']) else: fmtlines = [] for l in cmd['stdout'].splitlines(): @@ -193,8 +191,7 @@ def system_info(host=None, module=module) if cmd['retcode'] != 0: - log.warning('racadm return an exit code \'{0}\'.' 
- .format(cmd['retcode'])) + log.warning('racadm returned an exit code of %s', cmd['retcode']) return cmd return __parse_drac(cmd['stdout']) @@ -272,8 +269,7 @@ def network_info(host=None, module=module) if cmd['retcode'] != 0: - log.warning('racadm return an exit code \'{0}\'.' - .format(cmd['retcode'])) + log.warning('racadm returned an exit code of %s', cmd['retcode']) cmd['stdout'] = 'Network:\n' + 'Device = ' + module + '\n' + \ cmd['stdout'] @@ -395,8 +391,7 @@ def list_users(host=None, admin_password=admin_password) if cmd['retcode'] != 0: - log.warning('racadm return an exit code \'{0}\'.' - .format(cmd['retcode'])) + log.warning('racadm returned an exit code of %s', cmd['retcode']) for user in cmd['stdout'].splitlines(): if not user.startswith('cfg'): @@ -444,7 +439,7 @@ def delete_user(username, admin_password=admin_password) else: - log.warning('\'{0}\' does not exist'.format(username)) + log.warning('User \'%s\' does not exist', username) return False @@ -485,7 +480,7 @@ def change_password(username, password, uid=None, host=None, host=host, admin_username=admin_username, admin_password=admin_password, module=module) else: - log.warning('\'{0}\' does not exist'.format(username)) + log.warning('racadm: user \'%s\' does not exist', username) return False @@ -567,7 +562,7 @@ def create_user(username, password, permissions, users = list_users() if username in users: - log.warning('\'{0}\' already exists'.format(username)) + log.warning('racadm: user \'%s\' already exists', username) return False for idx in six.iterkeys(users): diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py index 82a739c24b..89acfb81e5 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py @@ -406,3 +406,18 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret['stdout'], stdout_unicode) self.assertEqual(ret['stderr'], stderr_unicode) + + def test_run_all_output_encoding(self): + ''' + Test that 
specifying the output encoding works as expected + ''' + stdout = 'Æ' + stdout_latin1_enc = stdout.encode('latin1') + + proc = MagicMock(return_value=MockTimedProc(stdout=stdout_latin1_enc)) + + with patch('salt.utils.timed_subprocess.TimedProc', proc), \ + patch.object(builtins, '__salt_system_encoding__', 'utf-8'): + ret = cmdmod.run_all('some command', output_encoding='latin1') + + self.assertEqual(ret['stdout'], stdout) From c5c03f4c3ef9d831b3a2d771a0719c3cd26e4d90 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 2 Feb 2018 14:28:16 -0600 Subject: [PATCH 032/223] Add link to test.versions_report --- salt/modules/cmdmod.py | 48 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 516773e0e6..69f571a46a 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -877,6 +877,10 @@ def run(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -1104,6 +1108,10 @@ def shell(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -1299,6 +1307,10 @@ def run_stdout(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. 
versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -1491,6 +1503,10 @@ def run_stderr(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -1685,6 +1701,10 @@ def run_all(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -1897,6 +1917,10 @@ def retcode(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -2124,6 +2148,10 @@ def script(source, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -2350,6 +2378,10 @@ def script_retcode(source, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. 
versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -2628,6 +2660,10 @@ def run_chroot(root, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -3085,6 +3121,10 @@ def powershell(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -3365,6 +3405,10 @@ def powershell_all(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from @@ -3567,6 +3611,10 @@ def run_bg(cmd, used in cases where the output of the command is encoded in something other than the system locale or UTF-8. + To see the encoding Salt has detected from the system locale, check + the `locale` line in the output of :py:func:`test.versions_report + `. + .. 
versionadded:: Oxygen :param str output_loglevel: Control the loglevel at which the output from From b4e24a69e6fde31d2c2cdc6ac8f747cef47a9091 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 6 Feb 2018 13:23:23 -0600 Subject: [PATCH 033/223] Move get_context to salt.utils.stringutils This avoids circular dependencies when salt.utils.jinja (which is imported by salt.utils.templates) needs to use get_context() --- doc/topics/releases/oxygen.rst | 72 ++++++++++++++-------------- salt/exceptions.py | 2 +- salt/utils/__init__.py | 6 +-- salt/utils/stringutils.py | 35 ++++++++++++++ salt/utils/templates.py | 37 +------------- tests/unit/templates/test_jinja.py | 37 +------------- tests/unit/utils/test_stringutils.py | 41 ++++++++++++++++ tests/unit/utils/test_templates.py | 25 ---------- 8 files changed, 119 insertions(+), 136 deletions(-) delete mode 100644 tests/unit/utils/test_templates.py diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 99742e8eb8..389f066967 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -79,7 +79,7 @@ with release Neon. The functions have been moved as follows: -- ``salt.utils.appendproctitle``: use ``salt.utils.process.appendproctitle`` +- ``salt.utils.appendproctitle``: use ``salt.utils.process.appendproctitle`` instead. - ``salt.utils.daemonize``: use ``salt.utils.process.daemonize`` instead. - ``salt.utils.daemonize_if``: use ``salt.utils.process.daemonize_if`` instead. @@ -94,22 +94,22 @@ The functions have been moved as follows: - ``salt.utils.is_hex``: use ``salt.utils.stringutils.is_hex`` instead. - ``salt.utils.is_bin_str``: use ``salt.utils.stringutils.is_bin_str`` instead. - ``salt.utils.rand_string``: use ``salt.utils.stringutils.random`` instead. -- ``salt.utils.contains_whitespace``: use +- ``salt.utils.contains_whitespace``: use ``salt.utils.stringutils.contains_whitespace`` instead. 
-- ``salt.utils.build_whitespace_split_regex``: use +- ``salt.utils.build_whitespace_split_regex``: use ``salt.utils.stringutils.build_whitespace_split_regex`` instead. - ``salt.utils.expr_match``: use ``salt.utils.stringutils.expr_match`` instead. -- ``salt.utils.check_whitelist_blacklist``: use +- ``salt.utils.check_whitelist_blacklist``: use ``salt.utils.stringutils.check_whitelist_blacklist`` instead. -- ``salt.utils.check_include_exclude``: use +- ``salt.utils.check_include_exclude``: use ``salt.utils.stringutils.check_include_exclude`` instead. - ``salt.utils.print_cli``: use ``salt.utils.stringutils.print_cli`` instead. - ``salt.utils.clean_kwargs``: use ``salt.utils.args.clean_kwargs`` instead. -- ``salt.utils.invalid_kwargs``: use ``salt.utils.args.invalid_kwargs`` +- ``salt.utils.invalid_kwargs``: use ``salt.utils.args.invalid_kwargs`` instead. - ``salt.utils.shlex_split``: use ``salt.utils.args.shlex_split`` instead. - ``salt.utils.arg_lookup``: use ``salt.utils.args.arg_lookup`` instead. -- ``salt.utils.argspec_report``: use ``salt.utils.args.argspec_report`` +- ``salt.utils.argspec_report``: use ``salt.utils.args.argspec_report`` instead. - ``salt.utils.split_input``: use ``salt.utils.args.split_input`` instead. - ``salt.utils.test_mode``: use ``salt.utils.args.test_mode`` instead. @@ -118,7 +118,7 @@ The functions have been moved as follows: - ``salt.utils.which_bin``: use ``salt.utils.path.which_bin`` instead. - ``salt.utils.path_join``: use ``salt.utils.path.join`` instead. - ``salt.utils.check_or_die``: use ``salt.utils.path.check_or_die`` instead. -- ``salt.utils.sanitize_win_path_string``: use +- ``salt.utils.sanitize_win_path_string``: use ``salt.utils.path.sanitize_win_path`` instead. - ``salt.utils.rand_str``: use ``salt.utils.hashutils.random_hash`` instead. - ``salt.utils.get_hash``: use ``salt.utils.hashutils.get_hash`` instead. 
@@ -128,9 +128,9 @@ The functions have been moved as follows: - ``salt.utils.is_darwin``: use ``salt.utils.platform.is_darwin`` instead. - ``salt.utils.is_sunos``: use ``salt.utils.platform.is_sunos`` instead. - ``salt.utils.is_smartos``: use ``salt.utils.platform.is_smartos`` instead. -- ``salt.utils.is_smartos_globalzone``: use +- ``salt.utils.is_smartos_globalzone``: use ``salt.utils.platform.is_smartos_globalzone`` instead. -- ``salt.utils.is_smartos_zone``: use ``salt.utils.platform.is_smartos_zone`` +- ``salt.utils.is_smartos_zone``: use ``salt.utils.platform.is_smartos_zone`` instead. - ``salt.utils.is_freebsd``: use ``salt.utils.platform.is_freebsd`` instead. - ``salt.utils.is_netbsd``: use ``salt.utils.platform.is_netbsd`` instead. @@ -147,55 +147,55 @@ The functions have been moved as follows: - ``salt.utils.is_bin_file``: use ``salt.utils.files.is_binary`` instead. - ``salt.utils.list_files``: use ``salt.utils.files.list_files`` instead. - ``salt.utils.safe_walk``: use ``salt.utils.files.safe_walk`` instead. -- ``salt.utils.st_mode_to_octal``: use ``salt.utils.files.st_mode_to_octal`` +- ``salt.utils.st_mode_to_octal``: use ``salt.utils.files.st_mode_to_octal`` instead. -- ``salt.utils.normalize_mode``: use ``salt.utils.files.normalize_mode`` +- ``salt.utils.normalize_mode``: use ``salt.utils.files.normalize_mode`` instead. -- ``salt.utils.human_size_to_bytes``: use +- ``salt.utils.human_size_to_bytes``: use ``salt.utils.files.human_size_to_bytes`` instead. - ``salt.utils.backup_minion``: use ``salt.utils.files.backup_minion`` instead. - ``salt.utils.str_version_to_evr``: use ``salt.utils.pkg.rpm.version_to_evr`` instead. -- ``salt.utils.parse_docstring``: use ``salt.utils.doc.parse_docstring`` +- ``salt.utils.parse_docstring``: use ``salt.utils.doc.parse_docstring`` instead. - ``salt.utils.compare_versions``: use ``salt.utils.versions.compare`` instead. - ``salt.utils.version_cmp``: use ``salt.utils.versions.version_cmp`` instead. 
- ``salt.utils.warn_until``: use ``salt.utils.versions.warn_until`` instead. -- ``salt.utils.kwargs_warn_until``: use +- ``salt.utils.kwargs_warn_until``: use ``salt.utils.versions.kwargs_warn_until`` instead. -- ``salt.utils.get_color_theme``: use ``salt.utils.color.get_color_theme`` +- ``salt.utils.get_color_theme``: use ``salt.utils.color.get_color_theme`` instead. - ``salt.utils.get_colors``: use ``salt.utils.color.get_colors`` instead. - ``salt.utils.gen_state_tag``: use ``salt.utils.state.gen_tag`` instead. -- ``salt.utils.search_onfail_requisites``: use +- ``salt.utils.search_onfail_requisites``: use ``salt.utils.state.search_onfail_requisites`` instead. -- ``salt.utils.check_state_result``: use ``salt.utils.state.check_result`` +- ``salt.utils.check_state_result``: use ``salt.utils.state.check_result`` instead. - ``salt.utils.get_user``: use ``salt.utils.user.get_user`` instead. - ``salt.utils.get_uid``: use ``salt.utils.user.get_uid`` instead. -- ``salt.utils.get_specific_user``: use ``salt.utils.user.get_specific_user`` +- ``salt.utils.get_specific_user``: use ``salt.utils.user.get_specific_user`` instead. - ``salt.utils.chugid``: use ``salt.utils.user.chugid`` instead. -- ``salt.utils.chugid_and_umask``: use ``salt.utils.user.chugid_and_umask`` +- ``salt.utils.chugid_and_umask``: use ``salt.utils.user.chugid_and_umask`` instead. -- ``salt.utils.get_default_group``: use ``salt.utils.user.get_default_group`` +- ``salt.utils.get_default_group``: use ``salt.utils.user.get_default_group`` instead. -- ``salt.utils.get_group_list``: use ``salt.utils.user.get_group_list`` +- ``salt.utils.get_group_list``: use ``salt.utils.user.get_group_list`` instead. -- ``salt.utils.get_group_dict``: use ``salt.utils.user.get_group_dict`` +- ``salt.utils.get_group_dict``: use ``salt.utils.user.get_group_dict`` instead. - ``salt.utils.get_gid_list``: use ``salt.utils.user.get_gid_list`` instead. - ``salt.utils.get_gid``: use ``salt.utils.user.get_gid`` instead. 
-- ``salt.utils.enable_ctrl_logoff_handler``: use +- ``salt.utils.enable_ctrl_logoff_handler``: use ``salt.utils.win_functions.enable_ctrl_logoff_handler`` instead. - ``salt.utils.traverse_dict``: use ``salt.utils.data.traverse_dict`` instead. -- ``salt.utils.traverse_dict_and_list``: use +- ``salt.utils.traverse_dict_and_list``: use ``salt.utils.data.traverse_dict_and_list`` instead. - ``salt.utils.filter_by``: use ``salt.utils.data.filter_by`` instead. - ``salt.utils.subdict_match``: use ``salt.utils.data.subdict_match`` instead. - ``salt.utils.substr_in_list``: use ``salt.utils.data.substr_in_list`` instead. - ``salt.utils.is_dictlist``: use ``salt.utils.data.is_dictlist``. -- ``salt.utils.repack_dictlist``: use ``salt.utils.data.repack_dictlist`` +- ``salt.utils.repack_dictlist``: use ``salt.utils.data.repack_dictlist`` instead. - ``salt.utils.compare_dicts``: use ``salt.utils.data.compare_dicts`` instead. - ``salt.utils.compare_lists``: use ``salt.utils.data.compare_lists`` instead. @@ -208,33 +208,33 @@ The functions have been moved as follows: - ``salt.utils.isorted``: use ``salt.utils.data.sorted_ignorecase`` instead. - ``salt.utils.is_true``: use ``salt.utils.data.is_true`` instead. - ``salt.utils.mysql_to_dict``: use ``salt.utils.data.mysql_to_dict`` instead. -- ``salt.utils.simple_types_filter``: use +- ``salt.utils.simple_types_filter``: use ``salt.utils.data.simple_types_filter`` instead. - ``salt.utils.ip_bracket``: use ``salt.utils.zeromq.ip_bracket`` instead. - ``salt.utils.gen_mac``: use ``salt.utils.network.gen_mac`` instead. -- ``salt.utils.mac_str_to_bytes``: use ``salt.utils.network.mac_str_to_bytes`` +- ``salt.utils.mac_str_to_bytes``: use ``salt.utils.network.mac_str_to_bytes`` instead. - ``salt.utils.refresh_dns``: use ``salt.utils.network.refresh_dns`` instead. - ``salt.utils.dns_check``: use ``salt.utils.network.dns_check`` instead. -- ``salt.utils.get_context``: use ``salt.utils.templates.get_context`` instead. 
-- ``salt.utils.get_master_key``: use ``salt.utils.master.get_master_key`` +- ``salt.utils.get_context``: use ``salt.utils.stringutils.get_context`` instead. +- ``salt.utils.get_master_key``: use ``salt.utils.master.get_master_key`` instead. -- ``salt.utils.get_values_of_matching_keys``: use +- ``salt.utils.get_values_of_matching_keys``: use ``salt.utils.master.get_values_of_matching_keys`` instead. - ``salt.utils.date_cast``: use ``salt.utils.dateutils.date_cast`` instead. - ``salt.utils.date_format``: use ``salt.utils.dateutils.strftime`` instead. -- ``salt.utils.total_seconds``: use ``salt.utils.dateutils.total_seconds`` +- ``salt.utils.total_seconds``: use ``salt.utils.dateutils.total_seconds`` instead. - ``salt.utils.find_json``: use ``salt.utils.json.find_json`` instead. - ``salt.utils.import_json``: use ``salt.utils.json.import_json`` instead. -- ``salt.utils.namespaced_function``: use +- ``salt.utils.namespaced_function``: use ``salt.utils.functools.namespaced_function`` instead. -- ``salt.utils.alias_function``: use ``salt.utils.functools.alias_function`` +- ``salt.utils.alias_function``: use ``salt.utils.functools.alias_function`` instead. - ``salt.utils.profile_func``: use ``salt.utils.profile.profile_func`` instead. -- ``salt.utils.activate_profile``: use ``salt.utils.profile.activate_profile`` +- ``salt.utils.activate_profile``: use ``salt.utils.profile.activate_profile`` instead. -- ``salt.utils.output_profile``: use ``salt.utils.profile.output_profile`` +- ``salt.utils.output_profile``: use ``salt.utils.profile.output_profile`` instead. 
State and Execution Module Support for ``docker run`` Functionality diff --git a/salt/exceptions.py b/salt/exceptions.py index 5abfef1dfa..63774c640b 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -285,7 +285,7 @@ class SaltRenderError(SaltException): if self.line_num and self.buffer: # Avoid circular import import salt.utils.templates - self.context = salt.utils.templates.get_context( + self.context = salt.utils.stringutils.get_context( self.buffer, self.line_num, marker=marker diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index d9bd472d7e..f1d42fe0c1 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -1740,15 +1740,15 @@ def dns_check(addr, port, safe=False, ipv6=None): def get_context(template, line, num_lines=5, marker=None): # Late import to avoid circular import. import salt.utils.versions - import salt.utils.templates + import salt.utils.stringutils salt.utils.versions.warn_until( 'Neon', 'Use of \'salt.utils.get_context\' detected. This function ' - 'has been moved to \'salt.utils.templates.get_context\' as of ' + 'has been moved to \'salt.utils.stringutils.get_context\' as of ' 'Salt Oxygen. 
This warning will be removed in Salt Neon.', stacklevel=3 ) - return salt.utils.templates.get_context(template, line, num_lines, marker) + return salt.utils.stringutils.get_context(template, line, num_lines, marker) def get_master_key(key_user, opts, skip_perm_errors=False): diff --git a/salt/utils/stringutils.py b/salt/utils/stringutils.py index 1b8dd9d2af..cd7860b841 100644 --- a/salt/utils/stringutils.py +++ b/salt/utils/stringutils.py @@ -430,3 +430,38 @@ def print_cli(msg, retries=10, step=0.01): else: raise break + + +def get_context(template, line, num_lines=5, marker=None): + ''' + Returns debugging context around a line in a given string + + Returns:: string + ''' + template_lines = template.splitlines() + num_template_lines = len(template_lines) + + # In test mode, a single line template would return a crazy line number like, + # 357. Do this sanity check and if the given line is obviously wrong, just + # return the entire template + if line > num_template_lines: + return template + + context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing + context_end = min(num_template_lines, line + num_lines) + error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx + + buf = [] + if context_start > 0: + buf.append('[...]') + error_line_in_context += 1 + + buf.extend(template_lines[context_start:context_end]) + + if context_end < num_template_lines: + buf.append('[...]') + + if marker: + buf[error_line_in_context] += marker + + return '---\n{0}\n---'.format('\n'.join(buf)) diff --git a/salt/utils/templates.py b/salt/utils/templates.py index 8e69a81502..fb520ee8d9 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -95,41 +95,6 @@ class AliasedModule(object): return getattr(self.wrapped, name) -def get_context(template, line, num_lines=5, marker=None): - ''' - Returns debugging context around a line in a given string - - Returns:: string - ''' - template_lines = template.splitlines() - num_template_lines = 
len(template_lines) - - # in test, a single line template would return a crazy line number like, - # 357. do this sanity check and if the given line is obviously wrong, just - # return the entire template - if line > num_template_lines: - return template - - context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing - context_end = min(num_template_lines, line + num_lines) - error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx - - buf = [] - if context_start > 0: - buf.append('[...]') - error_line_in_context += 1 - - buf.extend(template_lines[context_start:context_end]) - - if context_end < num_template_lines: - buf.append('[...]') - - if marker: - buf[error_line_in_context] += marker - - return '---\n{0}\n---'.format('\n'.join(buf)) - - def wrap_tmpl_func(render_str): def render_tmpl(tmplsrc, @@ -315,7 +280,7 @@ def _get_jinja_error(trace, context=None): out = '\n{0}\n'.format(msg.splitlines()[0]) with salt.utils.files.fopen(template_path) as fp_: template_contents = salt.utils.stringutils.to_unicode(fp_.read()) - out += get_context( + out += salt.utils.stringutils.get_context( template_contents, line, marker=' <======================') diff --git a/tests/unit/templates/test_jinja.py b/tests/unit/templates/test_jinja.py index 6fc083be02..186acf778d 100644 --- a/tests/unit/templates/test_jinja.py +++ b/tests/unit/templates/test_jinja.py @@ -34,11 +34,8 @@ from salt.utils.jinja import ( ensure_sequence_filter ) from salt.utils.odict import OrderedDict -from salt.utils.templates import ( - get_context, - JINJA, - render_jinja_tmpl -) +from salt.utils.templates import JINJA, render_jinja_tmpl + # dateutils is needed so that the strftime jinja filter is loaded import salt.utils.dateutils # pylint: disable=unused-import import salt.utils.files @@ -379,36 +376,6 @@ class TestGetTemplate(TestCase): result = salt.utils.stringutils.to_unicode(fp.read(), 'utf-8') self.assertEqual(salt.utils.stringutils.to_unicode('Assunção' + 
os.linesep), result) - def test_get_context_has_enough_context(self): - template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' - context = get_context(template, 8) - expected = '---\n[...]\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\n[...]\n---' - self.assertEqual(expected, context) - - def test_get_context_at_top_of_file(self): - template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' - context = get_context(template, 1) - expected = '---\n1\n2\n3\n4\n5\n6\n[...]\n---' - self.assertEqual(expected, context) - - def test_get_context_at_bottom_of_file(self): - template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' - context = get_context(template, 15) - expected = '---\n[...]\na\nb\nc\nd\ne\nf\n---' - self.assertEqual(expected, context) - - def test_get_context_2_context_lines(self): - template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' - context = get_context(template, 8, num_lines=2) - expected = '---\n[...]\n6\n7\n8\n9\na\n[...]\n---' - self.assertEqual(expected, context) - - def test_get_context_with_marker(self): - template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' - context = get_context(template, 8, num_lines=2, marker=' <---') - expected = '---\n[...]\n6\n7\n8 <---\n9\na\n[...]\n---' - self.assertEqual(expected, context) - def test_render_with_syntax_error(self): template = 'hello\n\n{{ bad\n\nfoo' expected = r'.*---\nhello\n\n{{ bad\n\nfoo <======================\n---' diff --git a/tests/unit/utils/test_stringutils.py b/tests/unit/utils/test_stringutils.py index 55e794d6e9..cec2953913 100644 --- a/tests/unit/utils/test_stringutils.py +++ b/tests/unit/utils/test_stringutils.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals +import textwrap # Import Salt libs from tests.support.mock import patch @@ -108,3 +109,43 @@ class StringutilsTestCase(TestCase): '(?:[\\s]+)?$' ret = salt.utils.stringutils.build_whitespace_split_regex(' '.join(LOREM_IPSUM.split()[:5])) self.assertEqual(ret, 
expected_regex) + + def test_get_context(self): + expected_context = textwrap.dedent('''\ + --- + Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis. + Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor + [...] + ---''') + ret = salt.utils.stringutils.get_context(LOREM_IPSUM, 1, num_lines=1) + self.assertEqual(ret, expected_context) + + def test_get_context_has_enough_context(self): + template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' + context = salt.utils.stringutils.get_context(template, 8) + expected = '---\n[...]\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\n[...]\n---' + self.assertEqual(expected, context) + + def test_get_context_at_top_of_file(self): + template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' + context = salt.utils.stringutils.get_context(template, 1) + expected = '---\n1\n2\n3\n4\n5\n6\n[...]\n---' + self.assertEqual(expected, context) + + def test_get_context_at_bottom_of_file(self): + template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' + context = salt.utils.stringutils.get_context(template, 15) + expected = '---\n[...]\na\nb\nc\nd\ne\nf\n---' + self.assertEqual(expected, context) + + def test_get_context_2_context_lines(self): + template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' + context = salt.utils.stringutils.get_context(template, 8, num_lines=2) + expected = '---\n[...]\n6\n7\n8\n9\na\n[...]\n---' + self.assertEqual(expected, context) + + def test_get_context_with_marker(self): + template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' + context = salt.utils.stringutils.get_context(template, 8, num_lines=2, marker=' <---') + expected = '---\n[...]\n6\n7\n8 <---\n9\na\n[...]\n---' + self.assertEqual(expected, context) diff --git a/tests/unit/utils/test_templates.py b/tests/unit/utils/test_templates.py deleted file mode 100644 index 5da6dd78dd..0000000000 --- a/tests/unit/utils/test_templates.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- 
coding: utf-8 -*- -''' -Tests for salt.utils.data -''' - -# Import Python libs -from __future__ import absolute_import, print_function, unicode_literals -import textwrap - -# Import Salt libs -import salt.utils.templates -from tests.support.unit import TestCase, LOREM_IPSUM - - -class TemplatesTestCase(TestCase): - - def test_get_context(self): - expected_context = textwrap.dedent('''\ - --- - Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis. - Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor - [...] - ---''') - ret = salt.utils.templates.get_context(LOREM_IPSUM, 1, num_lines=1) - self.assertEqual(ret, expected_context) From 3688ae04404e0985a0ba6b1d2acc8e2bab3d83c9 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 6 Feb 2018 13:27:08 -0600 Subject: [PATCH 034/223] Properly raise ConstructorError in construct_mapping We were passing the wrong arguments to this exception's constructor. This commit changes the exceptions so that they end up with the proper attributes, allowing them to be successfully interpreted when caught. 
--- salt/utils/yamlloader.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py index 9342818d02..f0910e443a 100644 --- a/salt/utils/yamlloader.py +++ b/salt/utils/yamlloader.py @@ -76,18 +76,25 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): self.flatten_mapping(node) + context = 'while constructing a mapping' mapping = self.dictclass() for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) try: hash(key) except TypeError: - err = ('While constructing a mapping {0} found unacceptable ' - 'key {1}').format(node.start_mark, key_node.start_mark) - raise ConstructorError(err) + raise ConstructorError( + context, + node.start_mark, + "found unacceptable key {0}".format(key_node.value), + key_node.start_mark) value = self.construct_object(value_node, deep=deep) if key in mapping: - raise ConstructorError('Conflicting ID \'{0}\''.format(key)) + raise ConstructorError( + context, + node.start_mark, + "found conflicting ID '{0}'".format(key), + key_node.start_mark) mapping[key] = value return mapping From fa72e16575e7d0794330c86c08089ff56604d95f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 6 Feb 2018 13:41:00 -0600 Subject: [PATCH 035/223] Catch YAML errors when executing a load_yaml from jinja Use the context information from these exceptions to provide a meaningful error message. 
--- salt/utils/jinja.py | 18 ++++++++++++++++++ salt/utils/templates.py | 3 ++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py index 5a0bacfcc5..27e1894fd7 100644 --- a/salt/utils/jinja.py +++ b/salt/utils/jinja.py @@ -850,6 +850,24 @@ class SerializerExtension(Extension, object): value = six.text_type(value) try: return salt.utils.data.decode(salt.utils.yaml.safe_load(value)) + except salt.utils.yaml.YAMLError as exc: + msg = 'Encountered error loading yaml: ' + try: + # Reported line is off by one, add 1 to correct it + line = exc.problem_mark.line + 1 + buf = exc.problem_mark.buffer + problem = exc.problem + except AttributeError: + # No context information available in the exception, fall back + # to the stringified version of the exception. + msg += six.text_type(exc) + else: + msg += '{0}\n'.format(problem) + msg += salt.utils.stringutils.get_context( + buf, + line, + marker=' <======================') + raise TemplateRuntimeError(msg) except AttributeError: raise TemplateRuntimeError( 'Unable to load yaml from {0}'.format(value)) diff --git a/salt/utils/templates.py b/salt/utils/templates.py index fb520ee8d9..ae2578ffcf 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -382,7 +382,8 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): template = jinja_env.from_string(tmplstr) template.globals.update(decoded_context) output = template.render(**decoded_context) - except jinja2.exceptions.TemplateSyntaxError as exc: + except (jinja2.exceptions.TemplateRuntimeError, + jinja2.exceptions.TemplateSyntaxError) as exc: trace = traceback.extract_tb(sys.exc_info()[2]) line, out = _get_jinja_error(trace, context=decoded_context) if not line: From b45f5720db1074f584d20ea336fe7b5323d3ed83 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 6 Feb 2018 14:20:25 -0600 Subject: [PATCH 036/223] Move exception catch This was caught by pylint. 
Adding the TemplateRuntimeError should have been done below UndefinedError because UndefinedError is a child class of the TemplateRuntimeError. --- salt/utils/templates.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/salt/utils/templates.py b/salt/utils/templates.py index ae2578ffcf..cf5f0d6439 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -382,16 +382,6 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): template = jinja_env.from_string(tmplstr) template.globals.update(decoded_context) output = template.render(**decoded_context) - except (jinja2.exceptions.TemplateRuntimeError, - jinja2.exceptions.TemplateSyntaxError) as exc: - trace = traceback.extract_tb(sys.exc_info()[2]) - line, out = _get_jinja_error(trace, context=decoded_context) - if not line: - tmplstr = '' - raise SaltRenderError( - 'Jinja syntax error: {0}{1}'.format(exc, out), - line, - tmplstr) except jinja2.exceptions.UndefinedError as exc: trace = traceback.extract_tb(sys.exc_info()[2]) out = _get_jinja_error(trace, context=decoded_context)[1] @@ -402,6 +392,16 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): 'Jinja variable {0}{1}'.format( exc, out), buf=tmplstr) + except (jinja2.exceptions.TemplateRuntimeError, + jinja2.exceptions.TemplateSyntaxError) as exc: + trace = traceback.extract_tb(sys.exc_info()[2]) + line, out = _get_jinja_error(trace, context=decoded_context) + if not line: + tmplstr = '' + raise SaltRenderError( + 'Jinja syntax error: {0}{1}'.format(exc, out), + line, + tmplstr) except (SaltInvocationError, CommandExecutionError) as exc: trace = traceback.extract_tb(sys.exc_info()[2]) line, out = _get_jinja_error(trace, context=decoded_context) From 0093472a37e4183631aabef14b05d9c380f8219b Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Thu, 8 Feb 2018 01:24:42 +1100 Subject: [PATCH 037/223] added tag_list_key and tag_list_sep to create ec2_tags_list --- salt/pillar/ec2_pillar.py | 40
+++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/salt/pillar/ec2_pillar.py b/salt/pillar/ec2_pillar.py index 8399f8f1e9..08f894cbce 100644 --- a/salt/pillar/ec2_pillar.py +++ b/salt/pillar/ec2_pillar.py @@ -1,17 +1,26 @@ #-*- coding: utf-8 -*- ''' -Retrieve EC2 instance data for minions. +Retrieve EC2 instance data for minions for ec2_tags and ec2_tags_list + +The minion id must be the AWS instance-id or value in 'tag_key'. +For example set 'tag_key' to 'Name', to have the minion-id matched against the +tag 'Name'. The tag contents must be unique. The value of tag_value can +be 'uqdn' or 'asis'. if 'uqdn' strips any domain before comparision. -The minion id must be the AWS instance-id or value in tag_key (default Name). -To use tag_key, need to set what standard is used for setting the tag. -The value of tag_value can be 'uqdn' or 'asis'. if uqdn strips any domain before -comparision. The option use_grain can be set to True. This allows the use of an instance-id grain instead of the minion-id. Since this is a potential security risk, the configuration can be further expanded to include a list of minions that are trusted to only allow the alternate id of the instances to specific hosts. There is no glob matching at -this time. Note: restart the salt-master for changes to take effect. +this time. + +The optional 'tag_list_key' indicates which keys should be added to +'ec2_tags_list' and be split by tag_list_sep (default `;`). If a tag key is +included in 'tag_list_key' it is removed from ec2_tags. If a tag does not +exist it is still included as an empty list. + + + Note: restart the salt-master for changes to take effect. .. code-block:: yaml @@ -20,6 +29,9 @@ this time. Note: restart the salt-master for changes to take effect. 
- ec2_pillar: tag_key: 'Name' tag_value: 'asis' + tag_list_key: + - Role + tag_list_sep: ';' use_grain: True minion_ids: - trusted-minion-1 @@ -87,7 +99,9 @@ def ext_pillar(minion_id, use_grain=False, minion_ids=None, tag_key=None, - tag_value='asis'): + tag_value='asis', + tag_list_key=None, + tag_list_sep=';'): ''' Execute a command and read the output as YAML ''' @@ -201,6 +215,16 @@ def ext_pillar(minion_id, return {} if instance.tags: + ec2_tags = instance.tags + ec2_tags_list = {} log.debug('External pillar {0}, for minion id \'{1}\', tags: {2}'.format(__name__, minion_id, instance.tags)) - return {'ec2_tags': instance.tags} + if tag_list_key and isinstance(tag_list_key, list): + for item in tag_list_key: + if item in ec2_tags: + ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep) + del ec2_tags[item] # make sure its only in ec2_tags_list + else: + ec2_tags_list[item] = [] # always return a result + + return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list} return {} From a0f7eec7e5949ed9de9764268990e5a9b2deb671 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 01:22:27 -0600 Subject: [PATCH 038/223] Work around upstream pygit2 bug affecting unicode refs This adds a workaround for a bug fixed upstream on 2 Feb 2018, which caused a branch or tag containing a unicode character to raise a UnicodeDecodeError. Additionally, it changes how we handle version analysis in salt.utils.gitfs. We should be using the LooseVersion from salt.utils.versions instead of distutils.version. 
--- salt/utils/gitfs.py | 79 ++++++++++++--------- tests/integration/pillar/test_git_pillar.py | 44 ++++-------- 2 files changed, 59 insertions(+), 64 deletions(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 55413ecb0b..4fd7d71179 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -7,7 +7,6 @@ Classes which provide the shared base for GitFS, git_pillar, and winrepo from __future__ import absolute_import, print_function, unicode_literals import copy import contextlib -import distutils import errno import fnmatch import glob @@ -90,9 +89,9 @@ log = logging.getLogger(__name__) try: import git import gitdb - HAS_GITPYTHON = True + GITPYTHON_VERSION = _LooseVersion(git.__version__) except ImportError: - HAS_GITPYTHON = False + GITPYTHON_VERSION = None try: # Squelch warning on cent7 due to them upgrading cffi @@ -100,7 +99,31 @@ try: with warnings.catch_warnings(): warnings.simplefilter('ignore') import pygit2 - HAS_PYGIT2 = True + PYGIT2_VERSION = _LooseVersion(pygit2.__version__) + LIBGIT2_VERSION = _LooseVersion(pygit2.LIBGIT2_VERSION) + + # Work around upstream bug where bytestrings were being decoded using the + # default encoding (which is usually ascii on Python 2). This was fixed + # on 2 Feb 2018, so releases prior to 0.26.2 will need a workaround. + if PYGIT2_VERSION <= _LooseVersion('0.26.2'): + try: + import pygit2.ffi + import pygit2.remote + except ImportError: + # If we couldn't import these, then we're using an old enough + # version where ffi isn't in use and this workaround would be + # useless. + pass + else: + def __maybe_string(ptr): + if not ptr: + return None + return pygit2.ffi.string(ptr).decode('utf-8') + + pygit2.remote.maybe_string = __maybe_string + + # Older pygit2 releases did not raise a specific exception class, this + # try/except makes Salt's exception catching work on any supported release. 
try: GitError = pygit2.errors.GitError except AttributeError: @@ -111,16 +134,17 @@ except Exception as exc: # to rebuild itself against the newer cffi). Therefore, we simply will # catch a generic exception, and log the exception if it is anything other # than an ImportError. - HAS_PYGIT2 = False + PYGIT2_VERSION = None + LIBGIT2_VERSION = None if not isinstance(exc, ImportError): log.exception('Failed to import pygit2') # pylint: enable=import-error # Minimum versions for backend providers -GITPYTHON_MINVER = '0.3' -PYGIT2_MINVER = '0.20.3' -LIBGIT2_MINVER = '0.20.0' +GITPYTHON_MINVER = _LooseVersion('0.3') +PYGIT2_MINVER = _LooseVersion('0.20.3') +LIBGIT2_MINVER = _LooseVersion('0.20.0') def enforce_types(key, val): @@ -1841,10 +1865,7 @@ class Pygit2(GitProvider): ''' Assign attributes for pygit2 callbacks ''' - # pygit2 radically changed fetching in 0.23.2 - pygit2_version = pygit2.__version__ - if distutils.version.LooseVersion(pygit2_version) >= \ - distutils.version.LooseVersion('0.23.2'): + if PYGIT2_VERSION >= _LooseVersion('0.23.2'): self.remotecallbacks = pygit2.RemoteCallbacks( credentials=self.credentials) if not self.ssl_verify: @@ -1859,7 +1880,7 @@ class Pygit2(GitProvider): 'pygit2 does not support disabling the SSL certificate ' 'check in versions prior to 0.23.2 (installed: {0}). 
' 'Fetches for self-signed certificates will fail.'.format( - pygit2_version + PYGIT2_VERSION ) ) @@ -2435,10 +2456,10 @@ class GitBase(object): Check if GitPython is available and at a compatible version (>= 0.3.0) ''' def _recommend(): - if HAS_PYGIT2 and 'pygit2' in self.git_providers: + if PYGIT2_VERSION and 'pygit2' in self.git_providers: log.error(_RECOMMEND_PYGIT2, self.role, self.role) - if not HAS_GITPYTHON: + if not GITPYTHON_VERSION: if not quiet: log.error( '%s is configured but could not be loaded, is GitPython ' @@ -2449,18 +2470,14 @@ class GitBase(object): elif 'gitpython' not in self.git_providers: return False - # pylint: disable=no-member - gitver = _LooseVersion(git.__version__) - minver = _LooseVersion(GITPYTHON_MINVER) - # pylint: enable=no-member errors = [] - if gitver < minver: + if GITPYTHON_VERSION < GITPYTHON_MINVER: errors.append( '{0} is configured, but the GitPython version is earlier than ' '{1}. Version {2} detected.'.format( self.role, GITPYTHON_MINVER, - git.__version__ + GITPYTHON_VERSION ) ) if not salt.utils.path.which('git'): @@ -2486,10 +2503,10 @@ class GitBase(object): Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0. 
''' def _recommend(): - if HAS_GITPYTHON and 'gitpython' in self.git_providers: + if GITPYTHON_VERSION and 'gitpython' in self.git_providers: log.error(_RECOMMEND_GITPYTHON, self.role, self.role) - if not HAS_PYGIT2: + if not PYGIT2_VERSION: if not quiet: log.error( '%s is configured but could not be loaded, are pygit2 ' @@ -2500,31 +2517,23 @@ class GitBase(object): elif 'pygit2' not in self.git_providers: return False - # pylint: disable=no-member - pygit2ver = _LooseVersion(pygit2.__version__) - pygit2_minver = _LooseVersion(PYGIT2_MINVER) - - libgit2ver = _LooseVersion(pygit2.LIBGIT2_VERSION) - libgit2_minver = _LooseVersion(LIBGIT2_MINVER) - # pylint: enable=no-member - errors = [] - if pygit2ver < pygit2_minver: + if PYGIT2_VERSION < PYGIT2_MINVER: errors.append( '{0} is configured, but the pygit2 version is earlier than ' '{1}. Version {2} detected.'.format( self.role, PYGIT2_MINVER, - pygit2.__version__ + PYGIT2_VERSION ) ) - if libgit2ver < libgit2_minver: + if LIBGIT2_VERSION < LIBGIT2_MINVER: errors.append( '{0} is configured, but the libgit2 version is earlier than ' '{1}. 
Version {2} detected.'.format( self.role, LIBGIT2_MINVER, - pygit2.LIBGIT2_VERSION + LIBGIT2_VERSION ) ) if not salt.utils.path.which('git'): diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py index 8dc6ce01b0..b0030471a1 100644 --- a/tests/integration/pillar/test_git_pillar.py +++ b/tests/integration/pillar/test_git_pillar.py @@ -87,24 +87,28 @@ from tests.support.unit import skipIf # Import Salt libs import salt.utils.path import salt.utils.platform -from salt.utils.gitfs import GITPYTHON_MINVER, PYGIT2_MINVER from salt.utils.versions import LooseVersion from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES as VIRTUALENV_NAMES from salt.ext.six.moves import range # pylint: disable=redefined-builtin +from salt.utils.gitfs import ( + GITPYTHON_VERSION, + GITPYTHON_MINVER, + PYGIT2_VERSION, + PYGIT2_MINVER, + LIBGIT2_VERSION, + LIBGIT2_MINVER +) # Check for requisite components try: - import git - HAS_GITPYTHON = \ - LooseVersion(git.__version__) >= LooseVersion(GITPYTHON_MINVER) + HAS_GITPYTHON = GITPYTHON_VERSION >= GITPYTHON_MINVER except ImportError: HAS_GITPYTHON = False try: - import pygit2 - HAS_PYGIT2 = \ - LooseVersion(pygit2.__version__) >= LooseVersion(PYGIT2_MINVER) -except ImportError: + HAS_PYGIT2 = PYGIT2_VERSION >= PYGIT2_MINVER \ + and LIBGIT2_VERSION >= LIBGIT2_MINVER +except AttributeError: HAS_PYGIT2 = False HAS_SSHD = bool(salt.utils.path.which('sshd')) @@ -419,7 +423,7 @@ class TestGitPythonAuthenticatedHTTP(TestGitPythonHTTP, GitPythonMixin): @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root -@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER)) +@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER)) @skipIf(not HAS_SSHD, 'sshd not present') class TestPygit2SSH(GitPillarSSHTestBase): ''' @@ -433,12 +437,6 @@ class TestPygit2SSH(GitPillarSSHTestBase): username = 
USERNAME passphrase = PASSWORD - def setUp(self): - super(TestPygit2SSH, self).setUp() - if self.is_el7(): # pylint: disable=E1120 - self.skipTest( - 'skipped until EPEL7 fixes pygit2/libgit2 version mismatch') - @requires_system_grains def test_single_source(self, grains): ''' @@ -1199,19 +1197,13 @@ class TestPygit2SSH(GitPillarSSHTestBase): @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root -@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER)) +@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER)) @skipIf(not HAS_NGINX, 'nginx not present') @skipIf(not HAS_VIRTUALENV, 'virtualenv not present') class TestPygit2HTTP(GitPillarHTTPTestBase): ''' Test git_pillar with pygit2 using SSH authentication ''' - def setUp(self): - super(TestPygit2HTTP, self).setUp() - if self.is_el7(): # pylint: disable=E1120 - self.skipTest( - 'skipped until EPEL7 fixes pygit2/libgit2 version mismatch') - def test_single_source(self): ''' Test using a single ext_pillar repo @@ -1452,7 +1444,7 @@ class TestPygit2HTTP(GitPillarHTTPTestBase): @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root -@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER)) +@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER)) @skipIf(not HAS_NGINX, 'nginx not present') @skipIf(not HAS_VIRTUALENV, 'virtualenv not present') class TestPygit2AuthenticatedHTTP(GitPillarHTTPTestBase): @@ -1465,12 +1457,6 @@ class TestPygit2AuthenticatedHTTP(GitPillarHTTPTestBase): user = USERNAME password = PASSWORD - def setUp(self): - super(TestPygit2AuthenticatedHTTP, self).setUp() - if self.is_el7(): # pylint: disable=E1120 - self.skipTest( - 'skipped until EPEL7 fixes pygit2/libgit2 version mismatch') - def test_single_source(self): ''' Test using a single ext_pillar repo From 
fb05654b712a8d487082f4a66686e7afa4ce706f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 01:31:28 -0600 Subject: [PATCH 039/223] Add unit tests for new Oxygen gitfs features Also expand unit tests to test both GitPython and pygit2 --- tests/unit/fileserver/test_gitfs.py | 487 +++++++++++++++++++--------- 1 file changed, 338 insertions(+), 149 deletions(-) diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index cc8bdbac3b..b93071daab 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- ''' - :codeauthor: :email:`Erik Johnson ` +:codeauthor: :email:`Erik Johnson ` ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import copy import errno import os import shutil @@ -18,15 +19,6 @@ try: except ImportError: pass -# Import 3rd-party libs -try: - import git # pylint: disable=unused-import - HAS_GITPYTHON = True - GITFS_AVAILABLE = True -except ImportError: - HAS_GITPYTHON = False - GITFS_AVAILABLE = False - # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf @@ -36,10 +28,36 @@ from tests.support.paths import TMP, FILES # Import salt libs import salt.fileserver.gitfs as gitfs import salt.utils.files -import salt.utils.gitfs import salt.utils.platform import salt.utils.win_functions import salt.utils.yaml +from salt.utils.versions import LooseVersion as _LooseVersion + +import salt.utils.gitfs +from salt.utils.gitfs import ( + GITPYTHON_VERSION, + GITPYTHON_MINVER, + PYGIT2_VERSION, + PYGIT2_MINVER, + LIBGIT2_VERSION, + LIBGIT2_MINVER +) + +try: + import git + # We still need to use GitPython here for temp repo setup, so we do need to + # actually import it. 
But we don't need import pygit2 in this module, we + # can just use the LooseVersion instances imported along with + # salt.utils.gitfs to check if we have a compatible version. + HAS_GITPYTHON = GITPYTHON_VERSION >= GITPYTHON_MINVER +except (ImportError, AttributeError): + HAS_GITPYTHON = False + +try: + HAS_PYGIT2 = PYGIT2_VERSION >= PYGIT2_MINVER \ + and LIBGIT2_VERSION >= LIBGIT2_MINVER +except AttributeError: + HAS_PYGIT2 = False log = logging.getLogger(__name__) @@ -48,6 +66,38 @@ TMP_REPO_DIR = os.path.join(TMP, 'gitfs_root') INTEGRATION_BASE_FILES = os.path.join(FILES, 'file', 'base') UNICODE_FILENAME = 'питон.txt' UNICODE_DIRNAME = UNICODE_ENVNAME = 'Ñоль' +TAG_NAME = 'mytag' + +OPTS = { + 'sock_dir': TMP_SOCK_DIR, + 'gitfs_remotes': ['file://' + TMP_REPO_DIR], + 'gitfs_root': '', + 'fileserver_backend': ['gitfs'], + 'gitfs_base': 'master', + 'fileserver_events': True, + 'transport': 'zeromq', + 'gitfs_mountpoint': '', + 'gitfs_saltenv': [], + 'gitfs_env_whitelist': [], + 'gitfs_env_blacklist': [], + 'gitfs_saltenv_whitelist': [], + 'gitfs_saltenv_blacklist': [], + 'gitfs_user': '', + 'gitfs_password': '', + 'gitfs_insecure_auth': False, + 'gitfs_privkey': '', + 'gitfs_pubkey': '', + 'gitfs_passphrase': '', + 'gitfs_refspecs': [ + '+refs/heads/*:refs/remotes/origin/*', + '+refs/tags/*:refs/tags/*' + ], + 'gitfs_ssl_verify': True, + 'gitfs_disable_saltenv_mapping': False, + 'gitfs_ref_types': ['branch', 'tag', 'sha'], + 'gitfs_update_interval': 60, + '__role': 'master', +} def _rmtree_error(func, path, excinfo): @@ -55,43 +105,23 @@ def _rmtree_error(func, path, excinfo): func(path) -@skipIf(not HAS_GITPYTHON, 'GitPython is not installed') +def _clear_instance_map(): + try: + del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()] + except KeyError: + pass + + +@skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER)) class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - 
self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) + opts = copy.deepcopy(OPTS) + opts['cachedir'] = self.tmp_cachedir + opts['sock_dir'] = self.tmp_sock_dir return { gitfs: { - '__opts__': { - 'cachedir': self.tmp_cachedir, - 'sock_dir': TMP_SOCK_DIR, - 'gitfs_root': 'salt', - 'fileserver_backend': ['gitfs'], - 'gitfs_base': 'master', - 'fileserver_events': True, - 'transport': 'zeromq', - 'gitfs_mountpoint': '', - 'gitfs_saltenv': [], - 'gitfs_env_whitelist': [], - 'gitfs_env_blacklist': [], - 'gitfs_saltenv_whitelist': [], - 'gitfs_saltenv_blacklist': [], - 'gitfs_user': '', - 'gitfs_password': '', - 'gitfs_insecure_auth': False, - 'gitfs_privkey': '', - 'gitfs_pubkey': '', - 'gitfs_passphrase': '', - 'gitfs_refspecs': [ - '+refs/heads/*:refs/remotes/origin/*', - '+refs/tags/*:refs/tags/*' - ], - 'gitfs_ssl_verify': True, - 'gitfs_disable_saltenv_mapping': False, - 'gitfs_ref_types': ['branch', 'tag', 'sha'], - 'gitfs_update_interval': 60, - '__role': 'master', - } + '__opts__': opts, } } @@ -99,16 +129,27 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): def setUpClass(cls): # Clear the instance map so that we make sure to create a new instance # for this test class. - try: - del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()] - except KeyError: - pass + _clear_instance_map() + cls.tmp_cachedir = tempfile.mkdtemp(dir=TMP) + cls.tmp_sock_dir = tempfile.mkdtemp(dir=TMP) - def tearDown(self): - shutil.rmtree(self.tmp_cachedir) + @classmethod + def tearDownClass(cls): + ''' + Remove the temporary git repository and gitfs cache directory to ensure + a clean environment for the other test class(es). 
+ ''' + for path in (cls.tmp_cachedir, cls.tmp_sock_dir): + try: + shutil.rmtree(path, onerror=_rmtree_error) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise def test_per_saltenv_config(self): opts_override = textwrap.dedent(''' + gitfs_root: salt + gitfs_saltenv: - baz: # when loaded, the "salt://" prefix will be removed @@ -186,110 +227,27 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): LOAD = {'saltenv': 'base'} -@skipIf(not GITFS_AVAILABLE, "GitFS could not be loaded. Skipping GitFS tests!") -@skipIf(NO_MOCK, NO_MOCK_REASON) -class GitFSTest(TestCase, LoaderModuleMockMixin): +class GitFSTestFuncs(object): + ''' + These are where the tests go, so that they can be run using both GitPython + and pygit2. - def setup_loader_modules(self): - self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) - return { - gitfs: { - '__opts__': { - 'cachedir': self.tmp_cachedir, - 'sock_dir': TMP_SOCK_DIR, - 'gitfs_remotes': ['file://' + TMP_REPO_DIR], - 'gitfs_root': '', - 'fileserver_backend': ['gitfs'], - 'gitfs_base': 'master', - 'fileserver_events': True, - 'transport': 'zeromq', - 'gitfs_mountpoint': '', - 'gitfs_saltenv': [], - 'gitfs_env_whitelist': [], - 'gitfs_env_blacklist': [], - 'gitfs_saltenv_whitelist': [], - 'gitfs_saltenv_blacklist': [], - 'gitfs_user': '', - 'gitfs_password': '', - 'gitfs_insecure_auth': False, - 'gitfs_privkey': '', - 'gitfs_pubkey': '', - 'gitfs_passphrase': '', - 'gitfs_refspecs': [ - '+refs/heads/*:refs/remotes/origin/*', - '+refs/tags/*:refs/tags/*' - ], - 'gitfs_ssl_verify': True, - 'gitfs_disable_saltenv_mapping': False, - 'gitfs_ref_types': ['branch', 'tag', 'sha'], - 'gitfs_update_interval': 60, - '__role': 'master', - } - } - } + NOTE: The gitfs.update() has to happen AFTER the setUp is called. 
This is + because running it inside the setUp will spawn a new singleton, which means + that tests which need to mock the __opts__ will be too late; the setUp will + have created a new singleton that will bypass our mocking. To ensure that + our tests are reliable and correct, we want to make sure that each test + uses a new gitfs object, allowing different manipulations of the opts to be + tested. - @classmethod - def setUpClass(cls): - # Clear the instance map so that we make sure to create a new instance - # for this test class. - try: - del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()] - except KeyError: - pass - - # Create the dir if it doesn't already exist - try: - shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/') - except OSError: - # We probably caught an error because files already exist. Ignore - pass - - try: - repo = git.Repo(TMP_REPO_DIR) - except git.exc.InvalidGitRepositoryError: - repo = git.Repo.init(TMP_REPO_DIR) - - if 'USERNAME' not in os.environ: - try: - if salt.utils.platform.is_windows(): - os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() - else: - os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name - except AttributeError: - log.error('Unable to get effective username, falling back to ' - '\'root\'.') - os.environ['USERNAME'] = 'root' - - repo.index.add([x for x in os.listdir(TMP_REPO_DIR) - if x != '.git']) - repo.index.commit('Test') - - # Add another branch with unicode characters in the name - repo.create_head(UNICODE_ENVNAME, 'HEAD') - - def setUp(self): - ''' - We don't want to check in another .git dir into GH because that just - gets messy. Instead, we'll create a temporary repo on the fly for the - tests to examine. - ''' - if not gitfs.__virtual__(): - self.skipTest("GitFS could not be loaded. 
Skipping GitFS tests!") - self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) - gitfs.update() - - def tearDown(self): - ''' - Remove the temporary git repository and gitfs cache directory to ensure - a clean environment for each test. - ''' - try: - shutil.rmtree(self.tmp_cachedir, onerror=_rmtree_error) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise + Therefore, keep the following in mind: + 1. Each test needs to call gitfs.update() *after* any patching, and + *before* calling the function being tested. + 2. Do *NOT* move the gitfs.update() into the setUp. + ''' def test_file_list(self): + gitfs.update() ret = gitfs.file_list(LOAD) self.assertIn('testfile', ret) self.assertIn(UNICODE_FILENAME, ret) @@ -298,11 +256,242 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): self.assertIn('/'.join((UNICODE_DIRNAME, 'foo.txt')), ret) def test_dir_list(self): + gitfs.update() ret = gitfs.dir_list(LOAD) self.assertIn('grail', ret) self.assertIn(UNICODE_DIRNAME, ret) def test_envs(self): + gitfs.update() ret = gitfs.envs(ignore_cache=True) self.assertIn('base', ret) self.assertIn(UNICODE_ENVNAME, ret) + self.assertIn(TAG_NAME, ret) + + def test_ref_types_global(self): + ''' + Test the global gitfs_ref_types config option + ''' + with patch.dict(gitfs.__opts__, {'gitfs_ref_types': ['branch']}): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to branches only, the tag should not + # appear in the envs list. + self.assertIn('base', ret) + self.assertIn(UNICODE_ENVNAME, ret) + self.assertNotIn(TAG_NAME, ret) + + def test_ref_types_per_remote(self): + ''' + Test the per_remote ref_types config option, using a different + ref_types setting than the global test. 
+ ''' + remotes = [{'file://' + TMP_REPO_DIR: [{'ref_types': ['tag']}]}] + with patch.dict(gitfs.__opts__, {'gitfs_remotes': remotes}): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to tags only, the tag should appear in + # the envs list, but the branches should not. + self.assertNotIn('base', ret) + self.assertNotIn(UNICODE_ENVNAME, ret) + self.assertIn(TAG_NAME, ret) + + def test_disable_saltenv_mapping_global_with_mapping_defined_globally(self): + ''' + Test the global gitfs_disable_saltenv_mapping config option, combined + with the per-saltenv mapping being defined in the global gitfs_saltenv + option. + ''' + opts = salt.utils.yaml.safe_load(textwrap.dedent('''\ + gitfs_disable_saltenv_mapping: True + gitfs_saltenv: + - foo: + - ref: base + ''')) + with patch.dict(gitfs.__opts__, opts): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to tags only, the tag should appear in + # the envs list, but the branches should not. + self.assertEqual(ret, ['foo']) + + def test_disable_saltenv_mapping_global_with_mapping_defined_per_remote(self): + ''' + Test the global gitfs_disable_saltenv_mapping config option, combined + with the per-saltenv mapping being defined in the remote itself via the + "saltenv" per-remote option. + ''' + opts = salt.utils.yaml.safe_load(textwrap.dedent('''\ + gitfs_disable_saltenv_mapping: True + gitfs_remotes: + - file://{0}: + - saltenv: + - bar: + - ref: base + '''.format(TMP_REPO_DIR))) + with patch.dict(gitfs.__opts__, opts): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to tags only, the tag should appear in + # the envs list, but the branches should not. + self.assertEqual(ret, ['bar']) + + def test_disable_saltenv_mapping_per_remote_with_mapping_defined_globally(self): + ''' + Test the per-remote disable_saltenv_mapping config option, combined + with the per-saltenv mapping being defined in the global gitfs_saltenv + option. 
+ ''' + opts = salt.utils.yaml.safe_load(textwrap.dedent('''\ + gitfs_remotes: + - file://{0}: + - disable_saltenv_mapping: True + + gitfs_saltenv: + - hello: + - ref: base + ''')) + with patch.dict(gitfs.__opts__, opts): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to tags only, the tag should appear in + # the envs list, but the branches should not. + self.assertEqual(ret, ['hello']) + + def test_disable_saltenv_mapping_per_remote_with_mapping_defined_per_remote(self): + ''' + Test the per-remote disable_saltenv_mapping config option, combined + with the per-saltenv mapping being defined in the remote itself via the + "saltenv" per-remote option. + ''' + opts = salt.utils.yaml.safe_load(textwrap.dedent('''\ + gitfs_remotes: + - file://{0}: + - disable_saltenv_mapping: True + - saltenv: + - world: + - ref: base + '''.format(TMP_REPO_DIR))) + with patch.dict(gitfs.__opts__, opts): + gitfs.update() + ret = gitfs.envs(ignore_cache=True) + # Since we are restricting to tags only, the tag should appear in + # the envs list, but the branches should not. + self.assertEqual(ret, ['world']) + + +class GitFSTestBase(object): + + @classmethod + def setUpClass(cls): + cls.tmp_cachedir = tempfile.mkdtemp(dir=TMP) + cls.tmp_sock_dir = tempfile.mkdtemp(dir=TMP) + + try: + shutil.rmtree(TMP_REPO_DIR) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/') + + repo = git.Repo.init(TMP_REPO_DIR) + + username_key = str('USERNAME') + orig_username = os.environ.get(username_key) + try: + if username_key not in os.environ: + try: + if salt.utils.platform.is_windows(): + os.environ[username_key] = \ + salt.utils.win_functions.get_current_user() + else: + os.environ[username_key] = \ + pwd.getpwuid(os.geteuid()).pw_name + except AttributeError: + log.error( + 'Unable to get effective username, falling back to ' + '\'root\'.' 
+ ) + os.environ[username_key] = str('root') + + repo.index.add([x for x in os.listdir(TMP_REPO_DIR) + if x != '.git']) + repo.index.commit('Test') + + # Add another branch with unicode characters in the name + repo.create_head(UNICODE_ENVNAME, 'HEAD') + + # Add a tag + repo.create_tag(TAG_NAME, 'HEAD') + finally: + if orig_username is not None: + os.environ[username_key] = orig_username + else: + os.environ.pop(username_key, None) + + @classmethod + def tearDownClass(cls): + ''' + Remove the temporary git repository and gitfs cache directory to ensure + a clean environment for the other test class(es). + ''' + for path in (cls.tmp_cachedir, cls.tmp_sock_dir, TMP_REPO_DIR): + try: + shutil.rmtree(path, onerror=_rmtree_error) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + def setUp(self): + ''' + We don't want to check in another .git dir into GH because that just + gets messy. Instead, we'll create a temporary repo on the fly for the + tests to examine. + + Also ensure we A) don't re-use the singleton, and B) that the cachedirs + are cleared. This keeps these performance enhancements from affecting + the results of subsequent tests. + ''' + if not gitfs.__virtual__(): + self.skipTest("GitFS could not be loaded. 
Skipping GitFS tests!") + + _clear_instance_map() + for subdir in ('gitfs', 'file_lists'): + try: + shutil.rmtree(os.path.join(self.tmp_cachedir, subdir)) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + +@skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER)) +@skipIf(NO_MOCK, NO_MOCK_REASON) +class GitPythonTest(GitFSTestBase, GitFSTestFuncs, TestCase, LoaderModuleMockMixin): + + def setup_loader_modules(self): + opts = copy.deepcopy(OPTS) + opts['cachedir'] = self.tmp_cachedir + opts['sock_dir'] = self.tmp_sock_dir + opts['gitfs_provider'] = 'gitpython' + return { + gitfs: { + '__opts__': opts, + } + } + + +@skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required for temp repo setup'.format(GITPYTHON_MINVER)) +@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER)) +@skipIf(NO_MOCK, NO_MOCK_REASON) +class Pygit2Test(GitFSTestBase, GitFSTestFuncs, TestCase, LoaderModuleMockMixin): + + def setup_loader_modules(self): + opts = copy.deepcopy(OPTS) + opts['cachedir'] = self.tmp_cachedir + opts['sock_dir'] = self.tmp_sock_dir + opts['gitfs_provider'] = 'pygit2' + return { + gitfs: { + '__opts__': opts, + } + } From 6cc5cd9b8a7b573a50bd550e05c5c87fe62b8501 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 12:46:26 -0600 Subject: [PATCH 040/223] Check the effective saltenv for cached archive This fixes a regression caused by using a saltenv other than `base`. 
--- salt/states/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index 025ccf43bf..ba8c94031c 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -975,7 +975,7 @@ def extracted(name, if result['result']: # Get the path of the file in the minion cache - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) else: log.debug( 'failed to download %s', From 1468f1d0ff31649a585cf34fae259f3b37e5d04f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 13:12:30 -0600 Subject: [PATCH 041/223] Remove duplicated section in docstring and fix example --- salt/states/file.py | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 04893a716d..6c11dc3b0a 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6535,37 +6535,7 @@ def cached(name, .. code-block:: python - cached = __salt__['cp.is_cached'](source_match) - - This function will return the cached path of the file, or an empty string - if the file is not present in the minion cache. - - This state will in most cases not be useful in SLS files, but it is useful - when writing a state or remote-execution module that needs to make sure - that a file at a given URL has been downloaded to the cachedir. One example - of this is in the :py:func:`archive.extracted ` - state: - - .. code-block:: python - - result = __states__['file.cached'](source_match, - source_hash=source_hash, - source_hash_name=source_hash_name, - skip_verify=skip_verify, - saltenv=__env__) - - This will return a dictionary containing the state's return data, including - a ``result`` key which will state whether or not the state was successful. - Note that this will not catch exceptions, so it is best used within a - try/except. 
- - Once this state has been run from within another state or remote-execution - module, the actual location of the cached file can be obtained using - :py:func:`cp.is_cached `: - - .. code-block:: python - - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) This function will return the cached path of the file, or an empty string if the file is not present in the minion cache. From 80c56cdceaa55671b7704e1e2c2ff245d3dbb1b9 Mon Sep 17 00:00:00 2001 From: Carsten Brandt Date: Tue, 2 May 2017 23:37:31 +0200 Subject: [PATCH 042/223] Fixed typo in pkg state documentation A `:` is missing for the yaml to be valid. This was introduced by f4abf7f03a64c30fc179957f74d5308d1b342dda, which added sub items but missed changing this line too. --- salt/states/pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/pkg.py b/salt/states/pkg.py index afe0774a82..60eec6b55f 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -39,7 +39,7 @@ A more involved example involves pulling from a custom repository. - keyserver: keyserver.ubuntu.com logstash: - pkg.installed + pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs From 9bd94e0d17a2bc5857b8e17a2e8791063f5c0a16 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 2 Feb 2018 09:27:55 -0800 Subject: [PATCH 043/223] Track the next scheduled fire time and when jobs are skipped. 
--- salt/utils/schedule.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index c8b1924ab2..faa43cb176 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -922,6 +922,8 @@ class Schedule(object): if interval < self.loop_interval: self.loop_interval = interval + data['_next_scheduled_fire_time'] = now + data['_seconds'] + elif 'once' in data: if data['_next_fire_time'] and \ data['_next_fire_time'] < now - self.opts['loop_interval'] and \ @@ -937,6 +939,8 @@ class Schedule(object): once_fmt) data['_next_fire_time'] = int( time.mktime(once.timetuple())) + data['_next_scheduled_fire_time'] = int( + time.mktime(once.timetuple())) except (TypeError, ValueError): log.error('Date string could not be parsed: %s, %s', data['once'], once_fmt) @@ -1018,6 +1022,8 @@ class Schedule(object): if not data['_next_fire_time']: data['_next_fire_time'] = when + data['_next_scheduled_fire_time'] = when + if data['_next_fire_time'] < when and \ not run and \ not data['_run']: @@ -1073,6 +1079,8 @@ class Schedule(object): if not data['_next_fire_time']: data['_next_fire_time'] = when + data['_next_scheduled_fire_time'] = when + if data['_next_fire_time'] < when and \ not data['_run']: data['_next_fire_time'] = when @@ -1089,6 +1097,8 @@ class Schedule(object): try: data['_next_fire_time'] = int( croniter.croniter(data['cron'], now).get_next()) + data['_next_scheduled_fire_time'] = int( + croniter.croniter(data['cron'], now).get_next()) except (ValueError, KeyError): log.error('Invalid cron string. 
Ignoring') continue @@ -1247,6 +1257,8 @@ class Schedule(object): func = self.skip_function else: run = False + data['_skipped_time'] = now + data['_skipped'] = True else: run = True else: @@ -1282,6 +1294,8 @@ class Schedule(object): func = self.skip_function else: run = False + data['_skipped_time'] = now + data['_skipped'] = True else: run = True From b23e5a7db22da482ee7b67d937a1874279a7ee83 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 7 Feb 2018 09:47:08 -0800 Subject: [PATCH 044/223] Updating utils/schedule.py to be able to track when jobs are skipped because of the max_running parameter. This change moves that check out of handle_func and moves it to it's own function. Also updated skip tests to include a test that _skip_reason is included in the results when a job is skipped. --- salt/utils/schedule.py | 111 +++++++++++++---------- tests/integration/scheduler/test_skip.py | 3 + 2 files changed, 68 insertions(+), 46 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index faa43cb176..6e2ec37e43 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -151,6 +151,44 @@ class Schedule(object): schedule.update(opts_schedule) return schedule + def _check_max_running(self, func, data, opts): + ''' + Return the schedule data structure + ''' + # Check to see if there are other jobs with this + # signature running. If there are more than maxrunning + # jobs present then don't start another. 
+ # If jid_include is False for this job we can ignore all this + # NOTE--jid_include defaults to True, thus if it is missing from the data + # dict we treat it like it was there and is True + data['run'] = True + if 'jid_include' not in data or data['jid_include']: + jobcount = 0 + for job in salt.utils.minion.running(self.opts): + if 'schedule' in job: + log.debug( + 'schedule.handle_func: Checking job against fun ' + '%s: %s', func, job + ) + if data['name'] == job['schedule'] \ + and salt.utils.process.os_is_running(job['pid']): + jobcount += 1 + log.debug( + 'schedule.handle_func: Incrementing jobcount, ' + 'now %s, maxrunning is %s', + jobcount, data['maxrunning'] + ) + if jobcount >= data['maxrunning']: + log.debug( + 'schedule.handle_func: The scheduled job ' + '%s was not started, %s already running', + data['name'], data['maxrunning'] + ) + data['_skip_reason'] = 'maxrunning' + data['run'] = False + return data + return data + def persist(self): ''' Persist the modified schedule into <>/<>/_schedule.conf @@ -350,22 +388,27 @@ class Schedule(object): data['name'] = name log.info('Running Job: %s', name) - multiprocessing_enabled = self.opts.get('multiprocessing', True) - if multiprocessing_enabled: - thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess - else: - thread_cls = threading.Thread + if not self.standalone: + data = self._check_max_running(func, data, self.opts) - if multiprocessing_enabled: - with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): + run = data['run'] + if run: + multiprocessing_enabled = self.opts.get('multiprocessing', True) + if multiprocessing_enabled: + thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess + else: + thread_cls = threading.Thread + + if multiprocessing_enabled: + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): + proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) + # Reset current signals before starting 
the process in + # order not to inherit the current signal handlers + proc.start() + proc.join() + else: proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) - # Reset current signals before starting the process in - # order not to inherit the current signal handlers proc.start() - proc.join() - else: - proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) - proc.start() def enable_schedule(self): ''' @@ -538,36 +581,6 @@ class Schedule(object): ret['jid'] ) - # Check to see if there are other jobs with this - # signature running. If there are more than maxrunning - # jobs present then don't start another. - # If jid_include is False for this job we can ignore all this - # NOTE--jid_include defaults to True, thus if it is missing from the data - # dict we treat it like it was there and is True - if 'jid_include' not in data or data['jid_include']: - jobcount = 0 - for job in salt.utils.minion.running(self.opts): - if 'schedule' in job: - log.debug( - 'schedule.handle_func: Checking job against fun ' - '%s: %s', ret['fun'], job - ) - if ret['schedule'] == job['schedule'] \ - and salt.utils.process.os_is_running(job['pid']): - jobcount += 1 - log.debug( - 'schedule.handle_func: Incrementing jobcount, ' - 'now %s, maxrunning is %s', - jobcount, data['maxrunning'] - ) - if jobcount >= data['maxrunning']: - log.debug( - 'schedule.handle_func: The scheduled job ' - '%s was not started, %s already running', - ret['schedule'], data['maxrunning'] - ) - return False - if multiprocessing_enabled and not salt.utils.platform.is_windows(): # Reconfigure multiprocessing logging after daemonizing log_setup.setup_multiprocessing_logging() @@ -786,6 +799,11 @@ class Schedule(object): 'skip_function', 'skip_during_range'] for job, data in six.iteritems(schedule): + + # Clear out _skip_reason from previous runs + if '_skip_reason' in data: + del data['_skip_reason'] + run = False if job in _hidden and not data: @@ 
-797,9 +815,6 @@ class Schedule(object): job, type(data) ) continue - # Job is disabled, continue - if 'enabled' in data and not data['enabled']: - continue if 'function' in data: func = data['function'] elif 'func' in data: @@ -1182,6 +1197,7 @@ class Schedule(object): if now <= start or now >= end: run = True else: + data['_skip_reason'] = 'in_skip_range' run = False else: if start <= now <= end: @@ -1191,6 +1207,7 @@ class Schedule(object): run = True func = self.skip_function else: + data['_skip_reason'] = 'not_in_range' run = False else: log.error( @@ -1257,6 +1274,7 @@ class Schedule(object): func = self.skip_function else: run = False + data['_skip_reason'] = 'in_skip_range' data['_skipped_time'] = now data['_skipped'] = True else: @@ -1294,6 +1312,7 @@ class Schedule(object): func = self.skip_function else: run = False + data['_skip_reason'] = 'skip_explicit' data['_skipped_time'] = now data['_skipped'] = True else: diff --git a/tests/integration/scheduler/test_skip.py b/tests/integration/scheduler/test_skip.py index 228b26be17..f26e18651b 100644 --- a/tests/integration/scheduler/test_skip.py +++ b/tests/integration/scheduler/test_skip.py @@ -99,6 +99,7 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) + self.assertEqual(ret['_skip_reason'], 'in_skip_range') # eval at 3:30pm, will run. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:30pm').timetuple())) @@ -131,6 +132,7 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) + self.assertEqual(ret['_skip_reason'], 'in_skip_range') # eval at 3:30pm, will run. 
run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:30pm').timetuple())) @@ -164,6 +166,7 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) + self.assertEqual(ret['_skip_reason'], 'in_skip_range') # eval at 3:00:01pm, will run. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:00:01pm').timetuple())) From 09d960c0cbd6558cfc28078444cf13c0239581dc Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 7 Feb 2018 11:35:30 -0800 Subject: [PATCH 045/223] Updating utils/schedule.py to be able to track when jobs are skipped because of the max_running parameter. This change moves that check out of handle_func and moves it to it's own function. Also updated skip tests to include a test that _skip_reason is included in the results when a job is skipped. --- salt/utils/schedule.py | 39 +++++++++++++------- tests/integration/scheduler/test_eval.py | 3 ++ tests/integration/scheduler/test_postpone.py | 8 +++- tests/integration/scheduler/test_skip.py | 26 +++++++++++-- 4 files changed, 57 insertions(+), 19 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 6e2ec37e43..0df058094a 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -806,7 +806,7 @@ class Schedule(object): run = False - if job in _hidden and not data: + if job in _hidden: continue if not isinstance(data, dict): @@ -1355,22 +1355,33 @@ class Schedule(object): returners = self.returners self.returners = {} try: - if multiprocessing_enabled: - thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess + # Job is disabled, continue + if 'enabled' in data and not data['enabled']: + log.debug('Job: %s is disabled', job) + data['_skip_reason'] = 'disabled' + continue else: - thread_cls = threading.Thread - proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) + if not self.standalone: 
+ data = self._check_max_running(func, data, self.opts) - if multiprocessing_enabled: - with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): - # Reset current signals before starting the process in - # order not to inherit the current signal handlers - proc.start() - else: - proc.start() + run = data['run'] + if run: + if multiprocessing_enabled: + thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess + else: + thread_cls = threading.Thread + proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) - if multiprocessing_enabled: - proc.join() + if multiprocessing_enabled: + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): + # Reset current signals before starting the process in + # order not to inherit the current signal handlers + proc.start() + else: + proc.start() + + if multiprocessing_enabled: + proc.join() finally: if '_seconds' in data: data['_next_fire_time'] = now + data['_seconds'] diff --git a/tests/integration/scheduler/test_eval.py b/tests/integration/scheduler/test_eval.py index e5163d52be..1af001948c 100644 --- a/tests/integration/scheduler/test_eval.py +++ b/tests/integration/scheduler/test_eval.py @@ -52,6 +52,9 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin): self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={}) self.schedule.opts['loop_interval'] = 1 + def tearDown(self): + del self.schedule + def test_eval(self): ''' verify that scheduled job runs diff --git a/tests/integration/scheduler/test_postpone.py b/tests/integration/scheduler/test_postpone.py index 76c59a4944..3191e97770 100644 --- a/tests/integration/scheduler/test_postpone.py +++ b/tests/integration/scheduler/test_postpone.py @@ -5,6 +5,9 @@ from __future__ import absolute_import import copy import logging import os +import time + +import dateutil.parser as dateutil_parser # Import Salt Testing libs from tests.support.case import ModuleCase 
@@ -41,6 +44,9 @@ class SchedulerPostponeTest(ModuleCase, SaltReturnAssertsMixin): self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={}) self.schedule.opts['loop_interval'] = 1 + def tearDown(self): + del self.schedule + def test_postpone(self): ''' verify that scheduled job is postponed until the specified time. @@ -55,7 +61,7 @@ class SchedulerPostponeTest(ModuleCase, SaltReturnAssertsMixin): } # 11/29/2017 4pm - run_time = 1512000000 + run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 4:00pm').timetuple())) # 5 minute delay delay = 300 diff --git a/tests/integration/scheduler/test_skip.py b/tests/integration/scheduler/test_skip.py index f26e18651b..94b044887d 100644 --- a/tests/integration/scheduler/test_skip.py +++ b/tests/integration/scheduler/test_skip.py @@ -44,6 +44,9 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={}) self.schedule.opts['loop_interval'] = 1 + def tearDown(self): + del self.schedule + def test_skip(self): ''' verify that scheduled job is skipped at the specified time @@ -67,6 +70,8 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) + self.assertEqual(ret['_skip_reason'], 'skip_explicit') + self.assertEqual(ret['_skipped_time'], run_time) # Run 11/29/2017 at 5pm run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 5:00pm').timetuple())) @@ -84,8 +89,8 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): 'function': 'test.ping', 'hours': '1', 'skip_during_range': { - 'start': '2pm', - 'end': '3pm' + 'start': '11/29/2017 2pm', + 'end': '11/29/2017 3pm' } } } @@ -94,12 +99,18 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): # Add job to schedule self.schedule.opts.update(job) + # eval at 1:30pm to prime. 
+ run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 1:30pm').timetuple())) + self.schedule.eval(now=run_time) + ret = self.schedule.job_status('job1') + # eval at 2:30pm, will not run during range. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 2:30pm').timetuple())) self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) self.assertEqual(ret['_skip_reason'], 'in_skip_range') + self.assertEqual(ret['_skipped_time'], run_time) # eval at 3:30pm, will run. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:30pm').timetuple())) @@ -114,8 +125,8 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): job = { 'schedule': { 'skip_during_range': { - 'start': '2pm', - 'end': '3pm' + 'start': '11/29/2017 2pm', + 'end': '11/29/2017 3pm' }, 'job1': { 'function': 'test.ping', @@ -127,12 +138,18 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): # Add job to schedule self.schedule.opts.update(job) + # eval at 1:30pm to prime. + run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 1:30pm').timetuple())) + self.schedule.eval(now=run_time) + ret = self.schedule.job_status('job1') + # eval at 2:30pm, will not run during range. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 2:30pm').timetuple())) self.schedule.eval(now=run_time) ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) self.assertEqual(ret['_skip_reason'], 'in_skip_range') + self.assertEqual(ret['_skipped_time'], run_time) # eval at 3:30pm, will run. run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:30pm').timetuple())) @@ -167,6 +184,7 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin): ret = self.schedule.job_status('job1') self.assertNotIn('_last_run', ret) self.assertEqual(ret['_skip_reason'], 'in_skip_range') + self.assertEqual(ret['_skipped_time'], run_time) # eval at 3:00:01pm, will run. 
run_time = int(time.mktime(dateutil_parser.parse('11/29/2017 3:00:01pm').timetuple())) From 1befa7386c022f44f6f37001cf8907feedb3dc1a Mon Sep 17 00:00:00 2001 From: Viktor Daniel <32159337+viktordaniel@users.noreply.github.com> Date: Fri, 19 Jan 2018 15:01:24 +0100 Subject: [PATCH 046/223] Update x509.py Fixed typo in docu string --- salt/modules/x509.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/x509.py b/salt/modules/x509.py index 3626bd42d8..5cead96e1d 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -508,7 +508,7 @@ def get_pem_entries(glob_path): .. code-block:: bash - salt '*' x509.read_pem_entries "/etc/pki/*.crt" + salt '*' x509.get_pem_entries "/etc/pki/*.crt" ''' ret = {} From 6cf7e50cc475d5afe0616fc6e65c0f8e15f3d301 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 13:59:01 -0600 Subject: [PATCH 047/223] Fix backport of grains fix A fix recently made to the salt/grains/disks.py was backported to 2017.7, but the fopen function was moved in oxygen and so the function does not exist in 2017.7. This did not make it into 2017.7.3 so there is no need to add this fix to any hotfixes. 
--- salt/grains/disks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/grains/disks.py b/salt/grains/disks.py index d19b96ef9a..6a27f765b6 100644 --- a/salt/grains/disks.py +++ b/salt/grains/disks.py @@ -128,7 +128,7 @@ def _linux_disks(): for entry in glob.glob('/sys/block/*/queue/rotational'): try: - with salt.utils.files.fopen(entry) as entry_fp: + with salt.utils.fopen(entry) as entry_fp: device = entry.split('/')[3] flag = entry_fp.read(1) if flag == '0': From 9d200efc267607988391369594b0fdeae7ec7c3d Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 14:32:59 -0600 Subject: [PATCH 048/223] Add regression test for issue 45893 --- .../files/file/prod/issue45893/custom.tar.gz | Bin 0 -> 152 bytes .../files/file/prod/issue45893/init.sls | 5 +++ tests/integration/states/test_archive.py | 33 ++++++++++++------ 3 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 tests/integration/files/file/prod/issue45893/custom.tar.gz create mode 100644 tests/integration/files/file/prod/issue45893/init.sls diff --git a/tests/integration/files/file/prod/issue45893/custom.tar.gz b/tests/integration/files/file/prod/issue45893/custom.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..584852716c8ead74cc1a9a88fed42eb2c7e7d22e GIT binary patch literal 152 zcmb2|=3tn;W>Po<^V^Gixtt7n+8)Muy+5F~?5$3PoA`T&T^2Hee|FYa2RMo!U3mE5 z_eiEpMb7xW^Yo7&cB<;~)y!NKHEG{9^+4@+CpXRX4%vE*Yvt=O{Tc!({ij{R|om3;<=J BMb-cS literal 0 HcmV?d00001 diff --git a/tests/integration/files/file/prod/issue45893/init.sls b/tests/integration/files/file/prod/issue45893/init.sls new file mode 100644 index 0000000000..28e4ff0fe2 --- /dev/null +++ b/tests/integration/files/file/prod/issue45893/init.sls @@ -0,0 +1,5 @@ +test_non_base_env: + archive.extracted: + - name: {{ pillar['issue45893.name'] }} + - source: salt://issue45893/custom.tar.gz + - keep: False diff --git a/tests/integration/states/test_archive.py 
b/tests/integration/states/test_archive.py index 3722c948fb..c1b9ca8f5b 100644 --- a/tests/integration/states/test_archive.py +++ b/tests/integration/states/test_archive.py @@ -68,6 +68,16 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): log.debug('Checking for extracted file: %s', path) self.assertTrue(os.path.isfile(path)) + def run_function(self, *args, **kwargs): + ret = super(ArchiveTest, self).run_function(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + + def run_state(self, *args, **kwargs): + ret = super(ArchiveTest, self).run_state(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + def test_archive_extracted_skip_verify(self): ''' test archive.extracted with skip_verify @@ -75,7 +85,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', skip_verify=True) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -91,7 +100,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -111,7 +119,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH, user='root', group=r_group) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -128,7 +135,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -145,7 +151,6 @@ class ArchiveTest(ModuleCase, 
SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip-components=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -160,7 +165,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -177,7 +181,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, use_cmd_unzip=False, archive_format='tar') - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -190,7 +193,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ''' ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar') - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -203,7 +205,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -216,7 +217,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -229,6 +229,17 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH) - log.debug('ret = %s', ret) self.assertSaltFalseReturn(ret) + + def 
test_archive_extracted_with_non_base_saltenv(self): + ''' + test archive.extracted with a saltenv other than `base` + ''' + ret = self.run_function( + 'state.sls', + ['issue45893'], + pillar={'issue45893.name': ARCHIVE_DIR}, + saltenv='prod') + self.assertSaltTrueReturn(ret) + self._check_extracted(os.path.join(ARCHIVE_DIR, UNTAR_FILE)) From a0586748b65225a752218dae76a10201621d8240 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 14:44:15 -0600 Subject: [PATCH 049/223] Remove unused imports --- tests/integration/pillar/test_git_pillar.py | 1 - tests/unit/fileserver/test_gitfs.py | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py index b0030471a1..e97e720bab 100644 --- a/tests/integration/pillar/test_git_pillar.py +++ b/tests/integration/pillar/test_git_pillar.py @@ -87,7 +87,6 @@ from tests.support.unit import skipIf # Import Salt libs import salt.utils.path import salt.utils.platform -from salt.utils.versions import LooseVersion from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES as VIRTUALENV_NAMES from salt.ext.six.moves import range # pylint: disable=redefined-builtin from salt.utils.gitfs import ( diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index b93071daab..2d89b86bb5 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -31,7 +31,6 @@ import salt.utils.files import salt.utils.platform import salt.utils.win_functions import salt.utils.yaml -from salt.utils.versions import LooseVersion as _LooseVersion import salt.utils.gitfs from salt.utils.gitfs import ( From e892f756600900f95b83267d2ecc407dc0d6b5d8 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 7 Feb 2018 14:03:45 -0700 Subject: [PATCH 050/223] fix digital ocean tests --- .../cloud.profiles.d/{digital_ocean.conf => digitalocean.conf} | 0 .../cloud.providers.d/{digital_ocean.conf => digitalocean.conf} | 0 
2 files changed, 0 insertions(+), 0 deletions(-) rename tests/integration/files/conf/cloud.profiles.d/{digital_ocean.conf => digitalocean.conf} (100%) rename tests/integration/files/conf/cloud.providers.d/{digital_ocean.conf => digitalocean.conf} (100%) diff --git a/tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf b/tests/integration/files/conf/cloud.profiles.d/digitalocean.conf similarity index 100% rename from tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf rename to tests/integration/files/conf/cloud.profiles.d/digitalocean.conf diff --git a/tests/integration/files/conf/cloud.providers.d/digital_ocean.conf b/tests/integration/files/conf/cloud.providers.d/digitalocean.conf similarity index 100% rename from tests/integration/files/conf/cloud.providers.d/digital_ocean.conf rename to tests/integration/files/conf/cloud.providers.d/digitalocean.conf From f6ea9fed7df2229f4980c72f76a4002fe7165fef Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Wed, 7 Feb 2018 09:09:59 +1100 Subject: [PATCH 051/223] Ensure that event data provided by the dimensiondata driver is serialisable. 
saltstack/salt#45884 --- salt/cloud/clouds/dimensiondata.py | 54 +++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index d022c5719f..26ef2b99c1 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -32,7 +32,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion # Import libcloud try: import libcloud - from libcloud.compute.base import NodeState + from libcloud.compute.base import NodeDriver, NodeState from libcloud.compute.base import NodeAuthPassword from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver @@ -258,7 +258,7 @@ def create(vm_): 'ex_is_started': vm_['is_started'] } - event_data = kwargs.copy() + event_data = _to_event_data(kwargs) del event_data['auth'] __utils__['cloud.fire_event']( @@ -418,11 +418,13 @@ def create_lb(kwargs=None, call=None): log.debug('Network Domain: %s', network_domain.id) lb_conn.ex_set_current_network_domain(network_domain.id) + event_data = _to_event_data(kwargs) + __utils__['cloud.fire_event']( 'event', 'create load_balancer', 'salt/cloud/loadbalancer/creating', - args=kwargs, + args=event_data, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) @@ -431,11 +433,13 @@ def create_lb(kwargs=None, call=None): name, port, protocol, algorithm, members ) + event_data = _to_event_data(kwargs) + __utils__['cloud.fire_event']( 'event', 'created load_balancer', 'salt/cloud/loadbalancer/created', - args=kwargs, + args=event_data, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) @@ -577,3 +581,45 @@ def get_lb_conn(dd_driver=None): 'Missing dimensiondata_driver for get_lb_conn method.' ) return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region) + +def _to_event_data(obj): + ''' + Convert the specified object into a form that can be serialised by msgpack as event data. 
+ + :param obj: The object to convert. + ''' + + if obj is None: + return None + if isinstance(obj, bool): + return obj + if isinstance(obj, int): + return obj + if isinstance(obj, float): + return obj + if isinstance(obj, str): + return obj + if isinstance(obj, bytes): + return obj + if isinstance(obj, dict): + return obj + + if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references) + return obj.name + + if isinstance(obj, list): + return [_to_event_data(item) for item in obj] + + event_data = {} + for attribute_name in dir(obj): + if attribute_name.startswith('_'): + continue + + attribute_value = getattr(obj, attribute_name) + + if callable(attribute_value): # Strip out methods + continue + + event_data[attribute_name] = _to_event_data(attribute_value) + + return event_data From 6b1b6be42731c36caad25c55350c9bba0e6b3123 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Wed, 7 Feb 2018 11:51:45 +1100 Subject: [PATCH 052/223] Add integration tests for dimensiondata cloud provider. 
saltstack/salt#45884 --- .../cloud/providers/test_dimensiondata.py | 124 ++++++++++++++++++ .../conf/cloud.profiles.d/dimensiondata.conf | 10 ++ .../conf/cloud.providers.d/dimensiondata.conf | 5 + 3 files changed, 139 insertions(+) create mode 100644 tests/integration/cloud/providers/test_dimensiondata.py create mode 100644 tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf create mode 100644 tests/integration/files/conf/cloud.providers.d/dimensiondata.conf diff --git a/tests/integration/cloud/providers/test_dimensiondata.py b/tests/integration/cloud/providers/test_dimensiondata.py new file mode 100644 index 0000000000..32f3e06d27 --- /dev/null +++ b/tests/integration/cloud/providers/test_dimensiondata.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +''' +Integration tests for the Dimension Data cloud provider +''' + +# Import Python Libs +from __future__ import absolute_import, print_function, unicode_literals +import os + +# Import Salt Testing Libs +from tests.support.case import ShellCase +from tests.support.paths import FILES +from tests.support.helpers import expensiveTest, generate_random_name + +# Import Salt Libs +from salt.config import cloud_providers_config + +# Create the cloud instance name to be used throughout the tests +INSTANCE_NAME = generate_random_name('CLOUD-TEST-') +PROVIDER_NAME = 'dimensiondata' + + +class DimensionDataTest(ShellCase): + ''' + Integration tests for the Dimension Data cloud provider in Salt-Cloud + ''' + + @expensiveTest + def setUp(self): + ''' + Sets up the test requirements + ''' + super(DimensionDataTest, self).setUp() + + # check if appropriate cloud provider and profile files are present + profile_str = 'dimensiondata-config' + providers = self.run_cloud('--list-providers') + if profile_str + ':' not in providers: + self.skipTest( + 'Configuration file for {0} was not found. Check {0}.conf files ' + 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' 
+ .format(PROVIDER_NAME) + ) + + # check if user_id, key, and region are present + config = cloud_providers_config( + os.path.join( + FILES, + 'conf', + 'cloud.providers.d', + PROVIDER_NAME + '.conf' + ) + ) + + user_id = config[profile_str][PROVIDER_NAME]['user_id'] + key = config[profile_str][PROVIDER_NAME]['key'] + region = config[profile_str][PROVIDER_NAME]['region'] + + if personal_token == '' or ssh_file == '' or ssh_name == '': + self.skipTest( + 'A user Id, password, and a region ' + 'must be provided to run these tests. Check ' + 'tests/integration/files/conf/cloud.providers.d/{0}.conf' + .format(PROVIDER_NAME) + ) + + def test_list_images(self): + ''' + Tests the return of running the --list-images command for the dimensiondata cloud provider + ''' + image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + self.assertIn( + 'Ubuntu 14.04 2 CPU', + [i.strip() for i in image_list] + ) + + def test_list_locations(self): + ''' + Tests the return of running the --list-locations command for the dimensiondata cloud provider + ''' + _list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME)) + self.assertIn( + 'Australia - Melbourne MCP2', + [i.strip() for i in _list_locations] + ) + + def test_list_sizes(self): + ''' + Tests the return of running the --list-sizes command for the dimensiondata cloud provider + ''' + _list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) + self.assertIn( + 'default', + [i.strip() for i in _list_sizes] + ) + + def test_instance(self): + ''' + Test creating an instance on Dimension Data's cloud + ''' + # check if instance with salt installed returned + try: + self.assertIn( + INSTANCE_NAME, + [i.strip() for i in self.run_cloud('-p dimensiondata-test {0}'.format(INSTANCE_NAME), timeout=500)] + ) + except AssertionError: + self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + raise + + # delete the instance + try: + self.assertIn( + 'True', + [i.strip() for i in 
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] + ) + except AssertionError: + raise + + # Final clean-up of created instance, in case something went wrong. + # This was originally in a tearDown function, but that didn't make sense + # To run this for each test when not all tests create instances. + if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]: + self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) diff --git a/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf b/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf new file mode 100644 index 0000000000..fc64754025 --- /dev/null +++ b/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf @@ -0,0 +1,10 @@ +dimensiondata-test: + provider: dimensiondata-config + image: 42816eb2-9846-4483-95c3-7d7fbddebf2c + size: default + location: AU10 + is_started: yes + description: 'Salt Ubuntu test' + network_domain: '' + vlan: '' + auth: '' diff --git a/tests/integration/files/conf/cloud.providers.d/dimensiondata.conf b/tests/integration/files/conf/cloud.providers.d/dimensiondata.conf new file mode 100644 index 0000000000..f54d77f326 --- /dev/null +++ b/tests/integration/files/conf/cloud.providers.d/dimensiondata.conf @@ -0,0 +1,5 @@ +dimensiondata-config: + driver: dimensiondata + user_id: '' + key: '' + region: 'dd-au' From de26b03e2ce53d9499a25af4c11045fbde800a69 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Wed, 7 Feb 2018 13:39:34 +1100 Subject: [PATCH 053/223] Fix copy/paste bug in dimensiondata provider integration test. 
saltstack/salt#45884 --- tests/integration/cloud/providers/test_dimensiondata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/cloud/providers/test_dimensiondata.py b/tests/integration/cloud/providers/test_dimensiondata.py index 32f3e06d27..11786b2951 100644 --- a/tests/integration/cloud/providers/test_dimensiondata.py +++ b/tests/integration/cloud/providers/test_dimensiondata.py @@ -56,7 +56,7 @@ class DimensionDataTest(ShellCase): key = config[profile_str][PROVIDER_NAME]['key'] region = config[profile_str][PROVIDER_NAME]['region'] - if personal_token == '' or ssh_file == '' or ssh_name == '': + if user_id == '' or key == '' or region == '': self.skipTest( 'A user Id, password, and a region ' 'must be provided to run these tests. Check ' From 98907a32cba4ee3d704a21e500d886e2626c6f21 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Wed, 7 Feb 2018 13:59:18 +1100 Subject: [PATCH 054/223] Ensure 'auth' parameter is correctly passed to dimensiondata driver. Also ensure that it is nevertheless excluded from the event payload. 
saltstack/salt#45884 --- salt/cloud/clouds/dimensiondata.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index 26ef2b99c1..092dfdb86a 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -220,7 +220,6 @@ def create(vm_): log.info('Creating Cloud VM %s', vm_['name']) conn = get_conn() - rootPw = NodeAuthPassword(vm_['auth']) location = conn.ex_get_location_by_id(vm_['location']) images = conn.list_images(location=location) @@ -251,7 +250,6 @@ def create(vm_): kwargs = { 'name': vm_['name'], 'image': image, - 'auth': rootPw, 'ex_description': vm_['description'], 'ex_network_domain': network_domain, 'ex_vlan': vlan, @@ -259,7 +257,6 @@ def create(vm_): } event_data = _to_event_data(kwargs) - del event_data['auth'] __utils__['cloud.fire_event']( 'event', @@ -270,6 +267,10 @@ def create(vm_): transport=__opts__['transport'] ) + # Initial password (excluded from event payload) + rootPw = NodeAuthPassword(vm_['auth']) + kwargs['auth'] = rootPw + try: data = conn.create_node(**kwargs) except Exception as exc: From a2bc155c73d8384a20aaec7beadb757da2f021c3 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Wed, 7 Feb 2018 14:46:23 +1100 Subject: [PATCH 055/223] Use __utils__['cloud.'] instead of salt.cloud.utils. 
saltstack/salt#45884 --- salt/cloud/clouds/dimensiondata.py | 14 ++++---------- .../files/conf/cloud.profiles.d/dimensiondata.conf | 1 + 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index 092dfdb86a..e4fc49a5e9 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -52,12 +52,6 @@ try: except ImportError: HAS_LIBCLOUD = False -# Import generic libcloud functions -# from salt.cloud.libcloudfuncs import * - -# Import salt libs -import salt.utils - # Import salt.cloud libs from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import from salt.utils import namespaced_function @@ -170,7 +164,7 @@ def _query_node_data(vm_, data): private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue - if salt.utils.cloud.is_public_ip(private_ip): + if __utils__['cloud.is_public_ip'](private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) else: @@ -284,7 +278,7 @@ def create(vm_): return False try: - data = salt.utils.cloud.wait_for_ip( + data = __utils__['cloud.wait_for_ip']( _query_node_data, update_args=(vm_, data), timeout=config.get_cloud_config_value( @@ -310,7 +304,7 @@ def create(vm_): ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address %s', ip_address) - if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips': + if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips': salt_ip_address = preferred_ip(vm_, data.private_ips) log.info('Salt interface set to: %s', salt_ip_address) else: @@ -326,7 +320,7 @@ def create(vm_): vm_['ssh_host'] = ip_address vm_['password'] = vm_['auth'] - ret = salt.utils.cloud.bootstrap(vm_, __opts__) + ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(data.__dict__) diff --git a/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf 
b/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf index fc64754025..a698e7a5cc 100644 --- a/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf +++ b/tests/integration/files/conf/cloud.profiles.d/dimensiondata.conf @@ -7,4 +7,5 @@ dimensiondata-test: description: 'Salt Ubuntu test' network_domain: '' vlan: '' + ssh_interface: private_ips auth: '' From 9b6b01873b6365303c62f41e784f25372545d795 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Thu, 8 Feb 2018 07:50:02 +1100 Subject: [PATCH 056/223] Fix violations reported by flake8. saltstack/salt#45884 --- salt/cloud/clouds/dimensiondata.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index e4fc49a5e9..0778fea12a 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -262,8 +262,8 @@ def create(vm_): ) # Initial password (excluded from event payload) - rootPw = NodeAuthPassword(vm_['auth']) - kwargs['auth'] = rootPw + initial_password = NodeAuthPassword(vm_['auth']) + kwargs['auth'] = initial_password try: data = conn.create_node(**kwargs) @@ -577,6 +577,7 @@ def get_lb_conn(dd_driver=None): ) return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region) + def _to_event_data(obj): ''' Convert the specified object into a form that can be serialised by msgpack as event data. 
@@ -599,7 +600,7 @@ def _to_event_data(obj): if isinstance(obj, dict): return obj - if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references) + if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references) return obj.name if isinstance(obj, list): @@ -612,7 +613,7 @@ def _to_event_data(obj): attribute_value = getattr(obj, attribute_name) - if callable(attribute_value): # Strip out methods + if callable(attribute_value): # Strip out methods continue event_data[attribute_name] = _to_event_data(attribute_value) From 82ec0b589cc524cea8cdb9fe14acd44724816fc0 Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Thu, 8 Feb 2018 10:14:52 +1100 Subject: [PATCH 057/223] Revert to using salt.utils.cloud.is_public_ip. saltstack/salt#45884 --- salt/cloud/clouds/dimensiondata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index 0778fea12a..017832f6be 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -164,7 +164,7 @@ def _query_node_data(vm_, data): private_ip = preferred_ip(vm_, [private_ip]) if private_ip is False: continue - if __utils__['cloud.is_public_ip'](private_ip): + if salt.utils.cloud.is_public_ip(private_ip): log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) else: From 56c5a333cd6ac09f2112c259145d485fe3731f37 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 7 Feb 2018 17:00:03 -0700 Subject: [PATCH 058/223] add test to verify clusters This test just double checks that the get_mor_containers is called twice when trying to use a Cluster, and once if a datastore is specified. 
--- tests/unit/cloud/clouds/test_vmware.py | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/unit/cloud/clouds/test_vmware.py b/tests/unit/cloud/clouds/test_vmware.py index 447e7dcdca..da8a2a68e4 100644 --- a/tests/unit/cloud/clouds/test_vmware.py +++ b/tests/unit/cloud/clouds/test_vmware.py @@ -1239,6 +1239,37 @@ class VMwareTestCase(ExtendedTestCase): kwargs={'name': 'cCD2GgJGPG1DUnPeFBoPeqtdmUxIWxDoVFbA14vIG0BPoUECkgbRMnnY6gaUPBvIDCcsZ5HU48ubgQu5c'}, call='function') + def test__add_new_hard_disk_helper(self): + with patch('salt.cloud.clouds.vmware._get_si', MagicMock(return_value=None)): + with patch('salt.utils.vmware.get_mor_using_container_view', side_effect=[None, None]): + self.assertRaises( + SaltCloudSystemExit, + vmware._add_new_hard_disk_helper, + disk_label='test', + size_gb=100, + unit_number=0, + datastore='whatever' + ) + with patch('salt.utils.vmware.get_mor_using_container_view', side_effect=['Datastore', None]): + self.assertRaises( + AttributeError, + vmware._add_new_hard_disk_helper, + disk_label='test', + size_gb=100, + unit_number=0, + datastore='whatever' + ) + vmware.salt.utils.vmware.get_mor_using_container_view.assert_called_with(None, vim.Datastore, 'whatever') + with patch('salt.utils.vmware.get_mor_using_container_view', side_effect=[None, 'Cluster']): + self.assertRaises( + AttributeError, + vmware._add_new_hard_disk_helper, + disk_label='test', + size_gb=100, + unit_number=0, + datastore='whatever' + ) + vmware.salt.utils.vmware.get_mor_using_container_view.assert_called_with(None, vim.StoragePod, 'whatever') class CloneFromSnapshotTest(TestCase): ''' From 75d9e20d8a9b61547fe60cf2b0f04b4d7af3a878 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Thu, 8 Feb 2018 23:06:10 +1100 Subject: [PATCH 059/223] Add ignoring 'terminated', 'stopped' instances, to improve changes of a single match --- salt/pillar/ec2_pillar.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) 
diff --git a/salt/pillar/ec2_pillar.py b/salt/pillar/ec2_pillar.py index 08f894cbce..4111f1cc1a 100644 --- a/salt/pillar/ec2_pillar.py +++ b/salt/pillar/ec2_pillar.py @@ -5,7 +5,7 @@ Retrieve EC2 instance data for minions for ec2_tags and ec2_tags_list The minion id must be the AWS instance-id or value in 'tag_key'. For example set 'tag_key' to 'Name', to have the minion-id matched against the tag 'Name'. The tag contents must be unique. The value of tag_value can -be 'uqdn' or 'asis'. if 'uqdn' strips any domain before comparision. +be 'uqdn' or 'asis'. if 'uqdn' strips any domain before comparison. The option use_grain can be set to True. This allows the use of an instance-id grain instead of the minion-id. Since this is a potential @@ -201,19 +201,30 @@ def ext_pillar(minion_id, # filters and max_results can not be used togther. instance_data = conn.get_only_instances(filters=find_filter, dry_run=False) - if instance_data: - if len(instance_data) == 1: - instance = instance_data[0] - else: - log.error('%s multiple matches using \'%s\'', base_msg, find_id if find_id else find_filter) - return {} - else: - log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter) - return {} except boto.exception.EC2ResponseError as exc: log.error('{0} failed with \'{1}\''.format(base_msg, exc)) return {} + if not instance_data: + log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter) + return {} + + # Find a active instance, i.e. 
ignore terminated and stopped instances + active_inst = [] + for inst in range(0, len(instance_data)): + if instance_data[inst].state not in ['terminated', 'stopped']: + active_inst.append(inst) + + valid_inst = len(active_inst) + if not valid_inst: + log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter) + return {} + + if valid_inst > 1: + log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter) + return {} + + instance = instance_data[active_inst[0]] if instance.tags: ec2_tags = instance.tags ec2_tags_list = {} From b4d0b23891a59527f44bf5c2bc21810a3c76afb1 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Thu, 8 Feb 2018 23:51:30 +1100 Subject: [PATCH 060/223] py3 fix --- salt/pillar/ec2_pillar.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/pillar/ec2_pillar.py b/salt/pillar/ec2_pillar.py index 4111f1cc1a..b13c9d0483 100644 --- a/salt/pillar/ec2_pillar.py +++ b/salt/pillar/ec2_pillar.py @@ -50,6 +50,7 @@ from __future__ import absolute_import import re import logging import salt.ext.six as six +from salt.ext.six.moves import range # Import salt libs from salt.utils.versions import StrictVersion as _StrictVersion From 6260cddb11b301a38509be12b0201b86f39f816d Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 8 Feb 2018 17:40:00 +0300 Subject: [PATCH 061/223] Don't use unicode for WinDLL and ctypes.LoadLibrary on Windows --- salt/utils/rsax931.py | 3 ++- salt/utils/win_osinfo.py | 3 ++- salt/utils/win_runas.py | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/utils/rsax931.py b/salt/utils/rsax931.py index 55be7dc804..168c02734b 100644 --- a/salt/utils/rsax931.py +++ b/salt/utils/rsax931.py @@ -28,7 +28,8 @@ def _load_libcrypto(): Load OpenSSL libcrypto ''' if sys.platform.startswith('win'): - return cdll.LoadLibrary('libeay32') + # cdll.LoadLibrary on windows requires an 'str' argument + return cdll.LoadLibrary(str('libeay32')) # 
future lint: disable=blacklisted-function elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos(): return cdll.LoadLibrary(glob.glob(os.path.join( os.path.dirname(sys.executable), diff --git a/salt/utils/win_osinfo.py b/salt/utils/win_osinfo.py index 98d8fc9c5b..ff409cdd1e 100644 --- a/salt/utils/win_osinfo.py +++ b/salt/utils/win_osinfo.py @@ -14,7 +14,8 @@ except (ImportError, ValueError): HAS_WIN32 = False if HAS_WIN32: - kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) + kernel32 = ctypes.WinDLL(str('kernel32'), # future lint: disable=blacklisted-function + use_last_error=True) # Although utils are often directly imported, it is also possible to use the diff --git a/salt/utils/win_runas.py b/salt/utils/win_runas.py index 66a22b3f1e..6dfa3008e2 100644 --- a/salt/utils/win_runas.py +++ b/salt/utils/win_runas.py @@ -46,8 +46,8 @@ def __virtual__(): if HAS_WIN32: # ctypes definitions - kernel32 = ctypes.WinDLL('kernel32') - advapi32 = ctypes.WinDLL('advapi32') + kernel32 = ctypes.WinDLL(str('kernel32')) # future lint: disable=blacklisted-function + advapi32 = ctypes.WinDLL(str('advapi32')) # future lint: disable=blacklisted-function INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value INVALID_DWORD_VALUE = wintypes.DWORD(-1).value # ~WinAPI From 3f42a5836385dfed9cda22a19d324a6bbed1f50b Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 8 Dec 2017 15:49:21 -0600 Subject: [PATCH 062/223] Force Salt's YAML loader to load all strings as unicode types This reduces the likelihood that non-unicode strings make their way into Salt and cause str/unicode mismatches resulting in UnicodeDecodeErrors. 
--- salt/utils/yamlloader.py | 12 +++++-- tests/unit/utils/test_yamlloader.py | 55 ++++++++++++++++++++++------- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py index f0910e443a..8f922f877f 100644 --- a/salt/utils/yamlloader.py +++ b/salt/utils/yamlloader.py @@ -5,10 +5,9 @@ Custom YAML loading in Salt # Import python libs from __future__ import absolute_import, print_function, unicode_literals +import re import warnings -# Import third party libs -import re import yaml # pylint: disable=blacklisted-import from yaml.nodes import MappingNode, SequenceNode from yaml.constructor import ConstructorError @@ -18,6 +17,8 @@ try: except Exception: pass +import salt.utils.stringutils + __all__ = ['SaltYamlSafeLoader', 'load', 'safe_load'] @@ -46,6 +47,9 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): self.add_constructor( 'tag:yaml.org,2002:omap', type(self).construct_yaml_map) + self.add_constructor( + 'tag:yaml.org,2002:str', + type(self).construct_yaml_str) self.add_constructor( 'tag:yaml.org,2002:python/unicode', type(self).construct_unicode) @@ -119,6 +123,10 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): node.value = eval(node.value, {}, {}) # pylint: disable=W0123 return super(SaltYamlSafeLoader, self).construct_scalar(node) + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + return salt.utils.stringutils.to_unicode(value) + def flatten_mapping(self, node): merge = [] index = 0 diff --git a/tests/unit/utils/test_yamlloader.py b/tests/unit/utils/test_yamlloader.py index bf39c32d02..48f81b462e 100644 --- a/tests/unit/utils/test_yamlloader.py +++ b/tests/unit/utils/test_yamlloader.py @@ -5,6 +5,7 @@ # Import python libs from __future__ import absolute_import, print_function, unicode_literals +import collections import textwrap # Import Salt Libs @@ -17,6 +18,9 @@ from salt.ext import six from tests.support.unit import TestCase, skipIf from tests.support.mock import 
patch, NO_MOCK, NO_MOCK_REASON, mock_open +# Import 3rd-party libs +from salt.ext import six + @skipIf(NO_MOCK, NO_MOCK_REASON) class YamlLoaderTestCase(TestCase): @@ -25,7 +29,7 @@ class YamlLoaderTestCase(TestCase): ''' @staticmethod - def _render_yaml(data): + def render_yaml(data): ''' Takes a YAML string, puts it into a mock file, passes that to the YAML SaltYamlSafeLoader and then returns the rendered/parsed YAML data @@ -41,12 +45,37 @@ class YamlLoaderTestCase(TestCase): with salt.utils.files.fopen(mocked_file) as mocked_stream: return SaltYamlSafeLoader(mocked_stream).get_data() + @staticmethod + def raise_error(value): + raise TypeError('{0!r} is not a unicode string'.format(value)) # pylint: disable=repr-flag-used-in-string + + def assert_unicode(self, value): + ''' + Make sure the entire data structure is unicode + ''' + if six.PY3: + return + if isinstance(value, six.string_types): + if not isinstance(value, six.text_type): + self.raise_error(value) + elif isinstance(value, collections.Mapping): + for k, v in six.iteritems(value): + self.assert_unicode(k) + self.assert_unicode(v) + elif isinstance(value, collections.Iterable): + for item in value: + self.assert_unicode(item) + + def assert_matches(self, ret, expected): + self.assertEqual(ret, expected) + self.assert_unicode(ret) + def test_yaml_basics(self): ''' Test parsing an ordinary path ''' - self.assertEqual( - self._render_yaml(textwrap.dedent('''\ + self.assert_matches( + self.render_yaml(textwrap.dedent('''\ p1: - alpha - beta''')), @@ -58,8 +87,8 @@ class YamlLoaderTestCase(TestCase): Test YAML anchors ''' # Simple merge test - self.assertEqual( - self._render_yaml(textwrap.dedent('''\ + self.assert_matches( + self.render_yaml(textwrap.dedent('''\ p1: &p1 v1: alpha p2: @@ -69,8 +98,8 @@ class YamlLoaderTestCase(TestCase): ) # Test that keys/nodes are overwritten - self.assertEqual( - self._render_yaml(textwrap.dedent('''\ + self.assert_matches( + self.render_yaml(textwrap.dedent('''\ p1: &p1 
v1: alpha p2: @@ -80,8 +109,8 @@ class YamlLoaderTestCase(TestCase): ) # Test merging of lists - self.assertEqual( - self._render_yaml(textwrap.dedent('''\ + self.assert_matches( + self.render_yaml(textwrap.dedent('''\ p1: &p1 v1: &v1 - t1 @@ -96,12 +125,12 @@ class YamlLoaderTestCase(TestCase): Test that duplicates still throw an error ''' with self.assertRaises(ConstructorError): - self._render_yaml(textwrap.dedent('''\ + self.render_yaml(textwrap.dedent('''\ p1: alpha p1: beta''')) with self.assertRaises(ConstructorError): - self._render_yaml(textwrap.dedent('''\ + self.render_yaml(textwrap.dedent('''\ p1: &p1 v1: alpha p2: @@ -113,8 +142,8 @@ class YamlLoaderTestCase(TestCase): ''' Test proper loading of unicode literals ''' - self.assertEqual( - self._render_yaml(textwrap.dedent('''\ + self.assert_matches( + self.render_yaml(textwrap.dedent('''\ foo: a: Д b: {'a': u'\\u0414'}''')), From 3e093ba0227ecf6460c08072691ab2082f5e2b07 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Thu, 8 Feb 2018 09:37:11 -0700 Subject: [PATCH 063/223] fix pylint --- tests/unit/cloud/clouds/test_vmware.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/cloud/clouds/test_vmware.py b/tests/unit/cloud/clouds/test_vmware.py index da8a2a68e4..3887ee0982 100644 --- a/tests/unit/cloud/clouds/test_vmware.py +++ b/tests/unit/cloud/clouds/test_vmware.py @@ -1271,6 +1271,7 @@ class VMwareTestCase(ExtendedTestCase): ) vmware.salt.utils.vmware.get_mor_using_container_view.assert_called_with(None, vim.StoragePod, 'whatever') + class CloneFromSnapshotTest(TestCase): ''' Test functionality to clone from snapshot From 1a75786b5a677c76d342385c05e0c540688ee57e Mon Sep 17 00:00:00 2001 From: Adam Friedman Date: Fri, 9 Feb 2018 05:16:59 +1100 Subject: [PATCH 064/223] Fix linter warnings. 
saltstack/salt#45884 --- .../cloud/providers/test_dimensiondata.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tests/integration/cloud/providers/test_dimensiondata.py b/tests/integration/cloud/providers/test_dimensiondata.py index 11786b2951..bd8425f528 100644 --- a/tests/integration/cloud/providers/test_dimensiondata.py +++ b/tests/integration/cloud/providers/test_dimensiondata.py @@ -6,20 +6,33 @@ Integration tests for the Dimension Data cloud provider # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import os +import random +import string # Import Salt Testing Libs from tests.support.case import ShellCase from tests.support.paths import FILES -from tests.support.helpers import expensiveTest, generate_random_name +from tests.support.helpers import expensiveTest # Import Salt Libs from salt.config import cloud_providers_config +from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') +INSTANCE_NAME = _random_name('CLOUD-TEST-') PROVIDER_NAME = 'dimensiondata' +def _random_name(size=6): + ''' + Generates a random cloud instance name + ''' + return 'cloud-test-' + ''.join( + random.choice(string.ascii_lowercase + string.digits) + for x in range(size) + ) + + class DimensionDataTest(ShellCase): ''' Integration tests for the Dimension Data cloud provider in Salt-Cloud From 34ecdffa716ea9713f9a676b1582fef18b970233 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 8 Feb 2018 14:23:22 -0500 Subject: [PATCH 065/223] Replace old utils paths with new paths --- salt/modules/localemod.py | 2 +- salt/utils/gitfs.py | 4 +-- tests/unit/modules/test_localemod.py | 46 ++++++++++++++-------------- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py index 13abd74caf..e97af32647 100644 
--- a/salt/modules/localemod.py +++ b/salt/modules/localemod.py @@ -67,7 +67,7 @@ def _localectl_status(): Parse localectl status into a dict. :return: dict ''' - if salt.utils.which('localectl') is None: + if salt.utils.path.which('localectl') is None: raise CommandExecutionError('Unable to find "localectl"') ret = {} diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 3b90272b38..0991b7a3a9 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -2848,8 +2848,8 @@ class GitFS(GitBase): if exc.errno != errno.EEXIST: raise exc - ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type']) - with salt.utils.fopen(hashdest, 'w+') as fp_: + ret['hsum'] = salt.utils.hashutils.get_hash(path, self.opts['hash_type']) + with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret diff --git a/tests/unit/modules/test_localemod.py b/tests/unit/modules/test_localemod.py index 2d6665a385..5332367031 100644 --- a/tests/unit/modules/test_localemod.py +++ b/tests/unit/modules/test_localemod.py @@ -64,7 +64,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): {'cmd.run': MagicMock(return_value='A\nB')}): assert localemod.list_avail() == ['A', 'B'] - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out)}) def test_localectl_status_parser(self): ''' @@ -119,7 +119,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert msg == ('Odd locale parameter "Fatal error right in front of screen" detected in dbus locale output.' ' This should not happen. 
You should probably investigate what caused this.') - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.log', MagicMock()) def test_localectl_status_parser_no_systemd(self): ''' @@ -131,21 +131,21 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert 'Unable to find "localectl"' in six.text_type(err) assert not localemod.log.debug.called - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_empty)}) def test_localectl_status_parser_empty(self): with pytest.raises(CommandExecutionError) as err: localemod._localectl_status() assert 'Unable to parse result of "localectl"' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_broken)}) def test_localectl_status_parser_broken(self): with pytest.raises(CommandExecutionError) as err: localemod._localectl_status() assert 'Unable to parse result of "localectl"' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_structure)}) def test_localectl_status_parser_structure(self): out = localemod._localectl_status() @@ -156,7 +156,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert isinstance(out[key][in_key], six.text_type) assert isinstance(out['reason'], six.text_type) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', 
MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -169,7 +169,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' assert localemod.get_locale() == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -182,7 +182,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' assert localemod.get_locale() == 'en_US.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -197,7 +197,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^RC_LANG" /etc/sysconfig/language' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -210,7 +210,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep 
"^LANG=" /etc/sysconfig/i18n' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -223,7 +223,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/locale' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -236,7 +236,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'eselect --brief locale show' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -249,7 +249,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/init' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'BSD', 'osmajorrelease': 8, 'oscodename': 'DrunkDragon'}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -263,7 +263,7 @@ class LocalemodTestCase(TestCase, 
LoaderModuleMockMixin): localemod.get_locale() assert '"DrunkDragon" is unsupported' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.utils.systemd.booted', MagicMock(return_value=True)) @@ -277,7 +277,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.set_locale(loc) assert localemod._localectl_set.call_args[0][0] == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', True) @patch('salt.utils.systemd.booted', MagicMock(return_value=True)) @@ -291,7 +291,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.set_locale(loc) assert localemod._localectl_set.call_args[0][0] == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod.__salt__', MagicMock()) @@ -310,7 +310,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^RC_LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'RC_LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) 
@patch('salt.modules.localemod.__salt__', MagicMock()) @@ -329,7 +329,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value='/usr/sbin/update-locale')) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @@ -349,7 +349,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @@ -367,7 +367,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert 'Cannot set locale: "update-locale" was not found.' 
in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', MagicMock()) @@ -383,7 +383,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert localemod.__salt__['cmd.retcode'].call_args[0][0] == 'eselect --brief locale set de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'locale.list_avail': MagicMock(return_value=['de_DE.utf8']), @@ -404,7 +404,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'locale.list_avail': MagicMock(return_value=['en_GB.utf8']), @@ -422,7 +422,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert not localemod.__salt__['file.replace'].called - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'BSD', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', 
{'locale.list_avail': MagicMock(return_value=['en_GB.utf8']), From b96f4cf8ada3a6c2410c54c69489792b4252a9ea Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 8 Feb 2018 14:24:48 -0500 Subject: [PATCH 066/223] Remove duplicate import in cmdmod.py --- salt/modules/cmdmod.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 15b02a3662..a509208593 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -42,7 +42,6 @@ from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \ SaltInvocationError from salt.log import LOG_LEVELS from salt.ext.six.moves import range, zip -from salt.ext.six.moves import shlex_quote as _cmd_quote # Only available on POSIX systems, nonfatal on windows try: From ebb1f81a9b7585279dd6b70b1ab0a1af7042578a Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 8 Feb 2018 20:33:31 +0100 Subject: [PATCH 067/223] cmd run: when running in bg, force ignore_retcode=True --- salt/modules/cmdmod.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index ba9b421531..fa169abf5d 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -317,6 +317,9 @@ def _run(cmd, # yaml-ified into non-string types cwd = str(cwd) + if bg: + ignore_retcode = True + if not salt.utils.is_windows(): if not os.path.isfile(shell) or not os.access(shell, os.X_OK): msg = 'The shell {0} is not available'.format(shell) @@ -3118,7 +3121,6 @@ def run_bg(cmd, output_loglevel='debug', log_callback=None, reset_system_locale=True, - ignore_retcode=False, saltenv='base', password=None, **kwargs): @@ -3278,7 +3280,6 @@ def run_bg(cmd, log_callback=log_callback, timeout=timeout, reset_system_locale=reset_system_locale, - ignore_retcode=ignore_retcode, saltenv=saltenv, password=password, **kwargs From 259e60e5d4998e4004a16ccc39db28763063fd18 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Thu, 8 Feb 2018 11:34:10 -0800 Subject: [PATCH 068/223] Fixing vault when used with pillar over salt-ssh --- salt/utils/vault.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/utils/vault.py b/salt/utils/vault.py index 0a96fbc1a1..98f71a107e 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -98,7 +98,7 @@ def _get_vault_connection(): Get the connection details for calling Vault, from local configuration if it exists, or from the master otherwise ''' - if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + def _use_local_config(): log.debug('Using Vault connection details from local config') try: return { @@ -108,6 +108,11 @@ def _get_vault_connection(): except KeyError as err: errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message) raise salt.exceptions.CommandExecutionError(errmsg) + + if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + return _use_local_config() + elif '_ssh_version' in __opts__: + return _use_local_config() else: log.debug('Contacting master for Vault connection details') return _get_token_and_url_from_master() From a16ea53430385539032a7b1fa7a46cbab3ca8951 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 8 Feb 2018 14:38:25 -0500 Subject: [PATCH 069/223] Update old utils path to new path for which() function --- salt/modules/localemod.py | 2 +- tests/unit/modules/test_localemod.py | 46 ++++++++++++++-------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py index 13abd74caf..e97af32647 100644 --- a/salt/modules/localemod.py +++ b/salt/modules/localemod.py @@ -67,7 +67,7 @@ def _localectl_status(): Parse localectl status into a dict. 
:return: dict ''' - if salt.utils.which('localectl') is None: + if salt.utils.path.which('localectl') is None: raise CommandExecutionError('Unable to find "localectl"') ret = {} diff --git a/tests/unit/modules/test_localemod.py b/tests/unit/modules/test_localemod.py index 2d6665a385..5332367031 100644 --- a/tests/unit/modules/test_localemod.py +++ b/tests/unit/modules/test_localemod.py @@ -64,7 +64,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): {'cmd.run': MagicMock(return_value='A\nB')}): assert localemod.list_avail() == ['A', 'B'] - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out)}) def test_localectl_status_parser(self): ''' @@ -119,7 +119,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert msg == ('Odd locale parameter "Fatal error right in front of screen" detected in dbus locale output.' ' This should not happen. 
You should probably investigate what caused this.') - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.log', MagicMock()) def test_localectl_status_parser_no_systemd(self): ''' @@ -131,21 +131,21 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert 'Unable to find "localectl"' in six.text_type(err) assert not localemod.log.debug.called - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_empty)}) def test_localectl_status_parser_empty(self): with pytest.raises(CommandExecutionError) as err: localemod._localectl_status() assert 'Unable to parse result of "localectl"' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_broken)}) def test_localectl_status_parser_broken(self): with pytest.raises(CommandExecutionError) as err: localemod._localectl_status() assert 'Unable to parse result of "localectl"' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock(return_value=locale_ctl_out_structure)}) def test_localectl_status_parser_structure(self): out = localemod._localectl_status() @@ -156,7 +156,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert isinstance(out[key][in_key], six.text_type) assert isinstance(out['reason'], six.text_type) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', 
MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -169,7 +169,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' assert localemod.get_locale() == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -182,7 +182,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' assert localemod.get_locale() == 'en_US.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod._parse_dbus_locale', MagicMock(return_value={'LANG': 'en_US.utf8'})) @@ -197,7 +197,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^RC_LANG" /etc/sysconfig/language' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -210,7 +210,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep 
"^LANG=" /etc/sysconfig/i18n' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -223,7 +223,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/locale' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -236,7 +236,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'eselect --brief locale show' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -249,7 +249,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.get_locale() assert localemod.__salt__['cmd.run'].call_args[0][0] == 'grep "^LANG=" /etc/default/init' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'BSD', 'osmajorrelease': 8, 'oscodename': 'DrunkDragon'}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @@ -263,7 +263,7 @@ class LocalemodTestCase(TestCase, 
LoaderModuleMockMixin): localemod.get_locale() assert '"DrunkDragon" is unsupported' in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.utils.systemd.booted', MagicMock(return_value=True)) @@ -277,7 +277,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.set_locale(loc) assert localemod._localectl_set.call_args[0][0] == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Ubuntu', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', True) @patch('salt.utils.systemd.booted', MagicMock(return_value=True)) @@ -291,7 +291,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): localemod.set_locale(loc) assert localemod._localectl_set.call_args[0][0] == 'de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value="/usr/bin/localctl")) + @patch('salt.utils.path.which', MagicMock(return_value="/usr/bin/localctl")) @patch('salt.modules.localemod.__grains__', {'os_family': 'Suse', 'osmajorrelease': 12}) @patch('salt.modules.localemod.dbus', True) @patch('salt.modules.localemod.__salt__', MagicMock()) @@ -310,7 +310,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^RC_LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'RC_LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'RedHat', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) 
@patch('salt.modules.localemod.__salt__', MagicMock()) @@ -329,7 +329,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value='/usr/sbin/update-locale')) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @@ -349,7 +349,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @@ -367,7 +367,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert 'Cannot set locale: "update-locale" was not found.' 
in six.text_type(err) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Gentoo', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', MagicMock()) @@ -383,7 +383,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert localemod.__salt__['cmd.retcode'].call_args[0][0] == 'eselect --brief locale set de_DE.utf8' - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'locale.list_avail': MagicMock(return_value=['de_DE.utf8']), @@ -404,7 +404,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'Solaris', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'locale.list_avail': MagicMock(return_value=['en_GB.utf8']), @@ -422,7 +422,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert not localemod._localectl_set.called assert not localemod.__salt__['file.replace'].called - @patch('salt.utils.which', MagicMock(return_value=None)) + @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.modules.localemod.__grains__', {'os_family': 'BSD', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', 
{'locale.list_avail': MagicMock(return_value=['en_GB.utf8']), From 7a143fe4546a34a9d1b63aafb605c44fb9e67592 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 8 Feb 2018 10:50:21 -0600 Subject: [PATCH 070/223] Fix spelling error in docstring --- salt/modules/state.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/state.py b/salt/modules/state.py index 5232ded592..39fcdd6388 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -777,8 +777,8 @@ def highstate(test=None, queue=False, **kwargs): .. code-block:: bash - salt '*' state.higstate exclude=bar,baz - salt '*' state.higstate exclude=foo* + salt '*' state.highstate exclude=bar,baz + salt '*' state.highstate exclude=foo* salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" saltenv From 9edc0baf0cbc3387d03ed28f6c62562dd580aa0b Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Thu, 8 Feb 2018 11:34:28 -0700 Subject: [PATCH 071/223] dont touch log_file if log_level is quiet --- salt/cli/salt.py | 8 +++++--- salt/utils/parsers.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/cli/salt.py b/salt/cli/salt.py index fb59b7e0d0..a380ccbfdb 100644 --- a/salt/cli/salt.py +++ b/salt/cli/salt.py @@ -10,6 +10,7 @@ import os import salt.utils.job import salt.utils.parsers import salt.utils.stringutils +import salt.log from salt.utils.args import yamlify_arg from salt.utils.verify import verify_log from salt.exceptions import ( @@ -38,9 +39,10 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser): import salt.client self.parse_args() - # Setup file logging! - self.setup_logfile_logger() - verify_log(self.config) + if self.config['log_level'] not in ('quiet', ): + # Setup file logging! 
+ self.setup_logfile_logger() + verify_log(self.config) try: # We don't need to bail on config file permission errors diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 09fe7d880e..b19cf068ce 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -717,7 +717,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): # Remove it from config so it inherits from log_file self.config.pop(self._logfile_config_setting_name_) - if self.config['verify_env']: + if self.config['verify_env'] and self.config['log_level'] not in ('quiet', ): # Verify the logfile if it was explicitly set but do not try to # verify the default if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')): From dc910550596e6bf90c09d565eea27e013bbcc8cf Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Thu, 8 Feb 2018 12:48:18 -0700 Subject: [PATCH 072/223] Revert "Separates key_dir from cache_dir, The key files (i.e. '.root_key', '.sudo_...') must not be shared with other masters." This reverts commit 20bf4eed1d34cf8edfa41a73a9769ffa7a977449. This change breaks publisher_acls. 1) The key_dir's permissions are controlled by `permissive_pki_access` which is not required by publisher_acls. By default, it is also changed back to 700 each time that the salt-master restarts, so it will have to be chmodded each time. 2) The default directory for these keys is changed, which will break a lot of users publisher_acls setups on an upgrade to Oxygen, and require them to go back in to chmod new directories. I was going through and switching out the key dir to default back to /var/cache/salt/master, and allow it to be changed, and also be able to specify that it is a sensitive dir, but once I ran across the `permissive_pki_access` stuff, I thought it was better to just revert this change and try again against Fluorine, since we do not have a lot of tests in this area around publisher_acl. 
--- salt/cli/daemons.py | 9 ++++----- salt/cli/salt.py | 2 +- salt/client/__init__.py | 4 ++-- salt/config/__init__.py | 8 ++------ salt/daemons/masterapi.py | 4 ++-- salt/key.py | 2 +- salt/runner.py | 2 +- salt/utils/verify.py | 23 +++++++++-------------- tests/unit/utils/test_verify.py | 19 ++++++------------- 9 files changed, 28 insertions(+), 45 deletions(-) diff --git a/salt/cli/daemons.py b/salt/cli/daemons.py index d53c7e1ace..d90286c1ea 100644 --- a/salt/cli/daemons.py +++ b/salt/cli/daemons.py @@ -152,7 +152,6 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di os.path.join(self.config['cachedir'], 'jobs'), os.path.join(self.config['cachedir'], 'proc'), self.config['sock_dir'], - self.config['key_dir'], self.config['token_dir'], self.config['syndic_dir'], self.config['sqlite_queue_dir'], @@ -167,7 +166,7 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], - sensitive_dirs=[self.config['pki_dir'], self.config['key_dir']], + pki_dir=self.config['pki_dir'], ) # Clear out syndics from cachedir for syndic_file in os.listdir(self.config['syndic_dir']): @@ -289,7 +288,7 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], - sensitive_dirs=[self.config['pki_dir']], + pki_dir=self.config['pki_dir'], ) except OSError as error: self.environment_failure(error) @@ -471,7 +470,7 @@ class ProxyMinion(salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin): # self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], - sensitive_dirs=[self.config['pki_dir']], + pki_dir=self.config['pki_dir'], ) except OSError as error: self.environment_failure(error) @@ -580,7 +579,7 @@ class Syndic(salt.utils.parsers.SyndicOptionParser, 
DaemonsMixin): # pylint: di self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], - sensitive_dirs=[self.config['pki_dir']], + pki_dir=self.config['pki_dir'], ) except OSError as error: self.environment_failure(error) diff --git a/salt/cli/salt.py b/salt/cli/salt.py index a380ccbfdb..3e17b8645a 100644 --- a/salt/cli/salt.py +++ b/salt/cli/salt.py @@ -84,7 +84,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser): if 'token' in self.config: import salt.utils.files try: - with salt.utils.files.fopen(os.path.join(self.config['key_dir'], '.root_key'), 'r') as fp_: + with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_: kwargs['key'] = fp_.readline() except IOError: kwargs['token'] = self.config['token'] diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 45ee27a76a..e65a186777 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -194,11 +194,11 @@ class LocalClient(object): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. key_user = key_user.replace('\\', '_') - keyfile = os.path.join(self.opts['key_dir'], + keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible - salt.utils.verify.check_path_traversal(self.opts['key_dir'], + salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: diff --git a/salt/config/__init__.py b/salt/config/__init__.py index f42cf9f5e4..6090be4cc7 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -197,9 +197,6 @@ VALID_OPTS = { # The directory used to store public key data 'pki_dir': six.string_types, - # The directory to store authentication keys of a master's local environment. 
- 'key_dir': six.string_types, - # A unique identifier for this daemon 'id': six.string_types, @@ -1495,7 +1492,6 @@ DEFAULT_MASTER_OPTS = { 'archive_jobs': False, 'root_dir': salt.syspaths.ROOT_DIR, 'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'), - 'key_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'key'), 'key_cache': '', 'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'), 'file_roots': { @@ -2497,7 +2493,7 @@ def syndic_config(master_config_path, opts.update(syndic_opts) # Prepend root_dir to other paths prepend_root_dirs = [ - 'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', + 'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', 'autosign_file', 'autoreject_file', 'token_dir', 'autosign_grains_dir' ] for config_key in ('log_file', 'key_logfile', 'syndic_log_file'): @@ -3934,7 +3930,7 @@ def apply_master_config(overrides=None, defaults=None): # Prepend root_dir to other paths prepend_root_dirs = [ - 'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', + 'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', 'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir', 'sqlite_queue_dir', 'autosign_grains_dir' ] diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index b4c7526ff8..4fe00934cd 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -186,11 +186,11 @@ def mk_key(opts, user): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
keyfile = os.path.join( - opts['key_dir'], '.{0}_key'.format(user.replace('\\', '_')) + opts['cachedir'], '.{0}_key'.format(user.replace('\\', '_')) ) else: keyfile = os.path.join( - opts['key_dir'], '.{0}_key'.format(user) + opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile): diff --git a/salt/key.py b/salt/key.py index f93ccf8c11..3b936d2e42 100644 --- a/salt/key.py +++ b/salt/key.py @@ -125,7 +125,7 @@ class KeyCLI(object): if self.opts['eauth']: if 'token' in self.opts: try: - with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_: + with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_: low['key'] = \ salt.utils.stringutils.to_unicode(fp_.readline()) except IOError: diff --git a/salt/runner.py b/salt/runner.py index e7bd4cd2c3..37072bf113 100644 --- a/salt/runner.py +++ b/salt/runner.py @@ -205,7 +205,7 @@ class Runner(RunnerClient): if self.opts.get('eauth'): if 'token' in self.opts: try: - with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_: + with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_: low['key'] = salt.utils.stringutils.to_unicode(fp_.readline()) except IOError: low['token'] = self.opts['token'] diff --git a/salt/utils/verify.py b/salt/utils/verify.py index 4bcf4689d4..1d00257115 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -204,8 +204,7 @@ def verify_env( permissive=False, pki_dir='', skip_extra=False, - root_dir=ROOT_DIR, - sensitive_dirs=None): + root_dir=ROOT_DIR): ''' Verify that the named directories are in place and that the environment can shake the salt @@ -224,8 +223,7 @@ def verify_env( return win_verify_env(root_dir, dirs, permissive=permissive, - skip_extra=skip_extra, - sensitive_dirs=sensitive_dirs) + skip_extra=skip_extra) import pwd # after confirming not running Windows try: pwnam = pwd.getpwnam(user) @@ -300,11 +298,10 @@ def verify_env( # to read 
in what it needs to integrate. # # If the permissions aren't correct, default to the more secure 700. - # If acls are enabled, the sensitive_dirs (i.e. pki_dir, key_dir) needs to - # remain readable, this is still secure because the private keys are still - # only readable by the user running the master - sensitive_dirs = sensitive_dirs or [] - if dir_ in sensitive_dirs: + # If acls are enabled, the pki_dir needs to remain readable, this + # is still secure because the private keys are still only readable + # by the user running the master + if dir_ == pki_dir: smode = stat.S_IMODE(mode.st_mode) if smode != 448 and smode != 488: if os.access(dir_, os.W_OK): @@ -555,8 +552,7 @@ def win_verify_env( dirs, permissive=False, pki_dir='', - skip_extra=False, - sensitive_dirs=None): + skip_extra=False): ''' Verify that the named directories are in place and that the environment can shake the salt @@ -647,9 +643,8 @@ def win_verify_env( sys.stderr.write(msg.format(dir_, err)) sys.exit(err.errno) - # The senitive_dirs (i.e. 
pki_dir, key_dir) gets its own permissions - sensitive_dirs = sensitive_dirs or [] - if dir_ in sensitive_dirs: + # The PKI dir gets its own permissions + if dir_ == pki_dir: try: # Make Administrators group the owner salt.utils.win_dacl.set_owner(path, 'S-1-5-32-544') diff --git a/tests/unit/utils/test_verify.py b/tests/unit/utils/test_verify.py index ab1149a68e..fb801da217 100644 --- a/tests/unit/utils/test_verify.py +++ b/tests/unit/utils/test_verify.py @@ -113,20 +113,13 @@ class TestVerify(TestCase): root_dir = tempfile.mkdtemp(dir=TMP) var_dir = os.path.join(root_dir, 'var', 'log', 'salt') key_dir = os.path.join(root_dir, 'key_dir') - verify_env([var_dir, key_dir], getpass.getuser(), root_dir=root_dir, sensitive_dirs=[key_dir]) + verify_env([var_dir], getpass.getuser(), root_dir=root_dir) self.assertTrue(os.path.exists(var_dir)) - self.assertTrue(os.path.exists(key_dir)) - - var_dir_stat = os.stat(var_dir) - self.assertEqual(var_dir_stat.st_uid, os.getuid()) - self.assertEqual(var_dir_stat.st_mode & stat.S_IRWXU, stat.S_IRWXU) - self.assertEqual(var_dir_stat.st_mode & stat.S_IRWXG, 40) - self.assertEqual(var_dir_stat.st_mode & stat.S_IRWXO, 5) - - key_dir_stat = os.stat(key_dir) - self.assertEqual(key_dir_stat.st_mode & stat.S_IRWXU, stat.S_IRWXU) - self.assertEqual(key_dir_stat.st_mode & stat.S_IRWXG, 0) - self.assertEqual(key_dir_stat.st_mode & stat.S_IRWXO, 0) + dir_stat = os.stat(var_dir) + self.assertEqual(dir_stat.st_uid, os.getuid()) + self.assertEqual(dir_stat.st_mode & stat.S_IRWXU, stat.S_IRWXU) + self.assertEqual(dir_stat.st_mode & stat.S_IRWXG, 40) + self.assertEqual(dir_stat.st_mode & stat.S_IRWXO, 5) @requires_network(only_local_network=True) def test_verify_socket(self): From c2f681a9c2a649db94825f5096d19ca1f2b18fa3 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Thu, 8 Feb 2018 14:40:57 -0500 Subject: [PATCH 073/223] Fix documentation in snap_eradicate function (purefa module) --- salt/modules/purefa.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index b85ed7d5ec..2988008a25 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -287,7 +287,7 @@ def snap_eradicate(name, suffix=None): Eradicate a deleted volume snapshot on a Pure Storage FlashArray. - Will retunr False is snapshot is not in a deleted state. + Will return False if snapshot is not in a deleted state. .. versionadded:: Oxygen @@ -300,7 +300,7 @@ def snap_eradicate(name, suffix=None): .. code-block:: bash - salt '*' purefa.snap_delete foo suffix=snap eradicate=True + salt '*' purefa.snap_eradicate foo suffix=snap ''' array = _get_system() From 26b532300ed6708842551468605128bd17314d30 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Thu, 8 Feb 2018 13:05:49 -0700 Subject: [PATCH 074/223] Revert "Handle deprecation path for pki_dir in verify_env util" This reverts commit d50b7e0062f88a04789ddfef3b7fee62bd3f8dc1. --- salt/utils/verify.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/salt/utils/verify.py b/salt/utils/verify.py index 1d00257115..0eeb95b402 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -31,7 +31,6 @@ import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.user -import salt.utils.versions log = logging.getLogger(__name__) @@ -209,16 +208,6 @@ def verify_env( Verify that the named directories are in place and that the environment can shake the salt ''' - if pki_dir: - salt.utils.versions.warn_until( - 'Neon', - 'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated ' - 'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be ' - 'removed in Salt Neon.' 
- ) - sensitive_dirs = sensitive_dirs or [] - sensitive_dirs.append(list(pki_dir)) - if salt.utils.platform.is_windows(): return win_verify_env(root_dir, dirs, @@ -557,16 +546,6 @@ def win_verify_env( Verify that the named directories are in place and that the environment can shake the salt ''' - if pki_dir: - salt.utils.versions.warn_until( - 'Neon', - 'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated ' - 'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be ' - 'removed in Salt Neon.' - ) - sensitive_dirs = sensitive_dirs or [] - sensitive_dirs.append(list(pki_dir)) - import salt.utils.win_functions import salt.utils.win_dacl import salt.utils.path From c54fcf7a2de6a0b0364475c38cbd58737adb4717 Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 8 Feb 2018 21:43:02 +0100 Subject: [PATCH 075/223] cmd: move separate DRY logging blocks into _run, prevent logging on bg=True, don't use_vt on bg --- salt/modules/cmdmod.py | 118 +++++++++-------------------------------- 1 file changed, 25 insertions(+), 93 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index fa169abf5d..dc5ffb1e10 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -290,6 +290,7 @@ def _run(cmd, 'Check to ensure that the shell <{0}> is valid for this user.' .format(shell)) + output_loglevel = _check_loglevel(output_loglevel) log_callback = _check_cb(log_callback) if runas is None and '__context__' in globals(): @@ -319,6 +320,7 @@ def _run(cmd, if bg: ignore_retcode = True + use_vt = False if not salt.utils.is_windows(): if not os.path.isfile(shell) or not os.access(shell, os.X_OK): @@ -375,7 +377,7 @@ def _run(cmd, else: return cmd - if _check_loglevel(output_loglevel) is not None: + if output_loglevel is not None: # Always log the shell commands at INFO unless quiet logging is # requested. The command output is what will be controlled by the # 'loglevel' parameter. 
@@ -551,7 +553,7 @@ def _run(cmd, msg = ( 'Unable to run command \'{0}\' with the context \'{1}\', ' 'reason: '.format( - cmd if _check_loglevel(output_loglevel) is not None + cmd if output_loglevel is not None else 'REDACTED', kwargs ) @@ -598,7 +600,7 @@ def _run(cmd, to = '' if timeout: to = ' (timeout: {0}s)'.format(timeout) - if _check_loglevel(output_loglevel) is not None: + if output_loglevel is not None: msg = 'Running {0} in VT{1}'.format(cmd, to) log.debug(log_callback(msg)) stdout, stderr = '', '' @@ -672,6 +674,26 @@ def _run(cmd, except NameError: # Ignore the context error during grain generation pass + + # Log the output + if output_loglevel is not None: + if not ignore_retcode and ret['retcode'] != 0: + if output_loglevel < LOG_LEVELS['error']: + output_loglevel = LOG_LEVELS['error'] + msg = ( + 'Command \'{0}\' failed with return code: {1}'.format( + cmd, + ret['retcode'] + ) + ) + log.error(log_callback(msg)) + if ret['stdout']: + log.log(output_loglevel, 'stdout: {0}'.format(log_callback(ret['stdout']))) + if ret['stderr']: + log.log(output_loglevel, 'stderr: {0}'.format(log_callback(ret['stderr']))) + if ret['retcode']: + log.log(output_loglevel, 'retcode: {0}'.format(ret['retcode'])) + return ret @@ -953,21 +975,6 @@ def run(cmd, encoded_cmd=encoded_cmd, **kwargs) - log_callback = _check_cb(log_callback) - - lvl = _check_loglevel(output_loglevel) - if lvl is not None: - if not ignore_retcode and ret['retcode'] != 0: - if lvl < LOG_LEVELS['error']: - lvl = LOG_LEVELS['error'] - msg = ( - 'Command \'{0}\' failed with return code: {1}'.format( - cmd, - ret['retcode'] - ) - ) - log.error(log_callback(msg)) - log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout']))) return ret['stdout'] @@ -1319,26 +1326,6 @@ def run_stdout(cmd, password=password, **kwargs) - log_callback = _check_cb(log_callback) - - lvl = _check_loglevel(output_loglevel) - if lvl is not None: - if not ignore_retcode and ret['retcode'] != 0: - if lvl < LOG_LEVELS['error']: - 
lvl = LOG_LEVELS['error'] - msg = ( - 'Command \'{0}\' failed with return code: {1}'.format( - cmd, - ret['retcode'] - ) - ) - log.error(log_callback(msg)) - if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) - if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) - if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret['stdout'] @@ -1501,26 +1488,6 @@ def run_stderr(cmd, password=password, **kwargs) - log_callback = _check_cb(log_callback) - - lvl = _check_loglevel(output_loglevel) - if lvl is not None: - if not ignore_retcode and ret['retcode'] != 0: - if lvl < LOG_LEVELS['error']: - lvl = LOG_LEVELS['error'] - msg = ( - 'Command \'{0}\' failed with return code: {1}'.format( - cmd, - ret['retcode'] - ) - ) - log.error(log_callback(msg)) - if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) - if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) - if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret['stderr'] @@ -1703,26 +1670,6 @@ def run_all(cmd, password=password, **kwargs) - log_callback = _check_cb(log_callback) - - lvl = _check_loglevel(output_loglevel) - if lvl is not None: - if not ignore_retcode and ret['retcode'] != 0: - if lvl < LOG_LEVELS['error']: - lvl = LOG_LEVELS['error'] - msg = ( - 'Command \'{0}\' failed with return code: {1}'.format( - cmd, - ret['retcode'] - ) - ) - log.error(log_callback(msg)) - if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) - if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) - if ret['retcode']: - log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret @@ -1886,21 +1833,6 @@ def retcode(cmd, password=password, **kwargs) - log_callback = _check_cb(log_callback) - - lvl = _check_loglevel(output_loglevel) - if lvl is not None: - if not ignore_retcode and ret['retcode'] 
!= 0: - if lvl < LOG_LEVELS['error']: - lvl = LOG_LEVELS['error'] - msg = ( - 'Command \'{0}\' failed with return code: {1}'.format( - cmd, - ret['retcode'] - ) - ) - log.error(log_callback(msg)) - log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout']))) return ret['retcode'] From 4b6351cda6776662b07f6daa025fef615b73e0c2 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 12:46:26 -0600 Subject: [PATCH 076/223] Check the effective saltenv for cached archive This fixes a regression caused by using a saltenv other than `base`. --- salt/states/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index 025ccf43bf..ba8c94031c 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -975,7 +975,7 @@ def extracted(name, if result['result']: # Get the path of the file in the minion cache - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) else: log.debug( 'failed to download %s', From cdda66d75963efc4887efdbc175eb37736353dc8 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 13:12:30 -0600 Subject: [PATCH 077/223] Remove duplicated section in docstring and fix example --- salt/states/file.py | 32 +------------------------------- 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index ac94d77502..da982a13d7 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6533,37 +6533,7 @@ def cached(name, .. code-block:: python - cached = __salt__['cp.is_cached'](source_match) - - This function will return the cached path of the file, or an empty string - if the file is not present in the minion cache. - - This state will in most cases not be useful in SLS files, but it is useful - when writing a state or remote-execution module that needs to make sure - that a file at a given URL has been downloaded to the cachedir. 
One example - of this is in the :py:func:`archive.extracted ` - state: - - .. code-block:: python - - result = __states__['file.cached'](source_match, - source_hash=source_hash, - source_hash_name=source_hash_name, - skip_verify=skip_verify, - saltenv=__env__) - - This will return a dictionary containing the state's return data, including - a ``result`` key which will state whether or not the state was successful. - Note that this will not catch exceptions, so it is best used within a - try/except. - - Once this state has been run from within another state or remote-execution - module, the actual location of the cached file can be obtained using - :py:func:`cp.is_cached `: - - .. code-block:: python - - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) This function will return the cached path of the file, or an empty string if the file is not present in the minion cache. From bb83e8b345184d79476e7dca15d9df8b5b080489 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 14:32:59 -0600 Subject: [PATCH 078/223] Add regression test for issue 45893 --- .../files/file/prod/issue45893/custom.tar.gz | Bin 0 -> 152 bytes .../files/file/prod/issue45893/init.sls | 5 +++ tests/integration/states/test_archive.py | 33 ++++++++++++------ 3 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 tests/integration/files/file/prod/issue45893/custom.tar.gz create mode 100644 tests/integration/files/file/prod/issue45893/init.sls diff --git a/tests/integration/files/file/prod/issue45893/custom.tar.gz b/tests/integration/files/file/prod/issue45893/custom.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..584852716c8ead74cc1a9a88fed42eb2c7e7d22e GIT binary patch literal 152 zcmb2|=3tn;W>Po<^V^Gixtt7n+8)Muy+5F~?5$3PoA`T&T^2Hee|FYa2RMo!U3mE5 z_eiEpMb7xW^Yo7&cB<;~)y!NKHEG{9^+4@+CpXRX4%vE*Yvt=O{Tc!({ij{R|om3;<=J BMb-cS literal 0 HcmV?d00001 diff --git 
a/tests/integration/files/file/prod/issue45893/init.sls b/tests/integration/files/file/prod/issue45893/init.sls new file mode 100644 index 0000000000..28e4ff0fe2 --- /dev/null +++ b/tests/integration/files/file/prod/issue45893/init.sls @@ -0,0 +1,5 @@ +test_non_base_env: + archive.extracted: + - name: {{ pillar['issue45893.name'] }} + - source: salt://issue45893/custom.tar.gz + - keep: False diff --git a/tests/integration/states/test_archive.py b/tests/integration/states/test_archive.py index 3722c948fb..c1b9ca8f5b 100644 --- a/tests/integration/states/test_archive.py +++ b/tests/integration/states/test_archive.py @@ -68,6 +68,16 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): log.debug('Checking for extracted file: %s', path) self.assertTrue(os.path.isfile(path)) + def run_function(self, *args, **kwargs): + ret = super(ArchiveTest, self).run_function(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + + def run_state(self, *args, **kwargs): + ret = super(ArchiveTest, self).run_state(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + def test_archive_extracted_skip_verify(self): ''' test archive.extracted with skip_verify @@ -75,7 +85,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', skip_verify=True) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -91,7 +100,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -111,7 +119,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH, 
user='root', group=r_group) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -128,7 +135,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -145,7 +151,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip-components=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -160,7 +165,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -177,7 +181,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, use_cmd_unzip=False, archive_format='tar') - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -190,7 +193,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ''' ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar') - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -203,7 +205,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -216,7 +217,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, 
source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -229,6 +229,17 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH) - log.debug('ret = %s', ret) self.assertSaltFalseReturn(ret) + + def test_archive_extracted_with_non_base_saltenv(self): + ''' + test archive.extracted with a saltenv other than `base` + ''' + ret = self.run_function( + 'state.sls', + ['issue45893'], + pillar={'issue45893.name': ARCHIVE_DIR}, + saltenv='prod') + self.assertSaltTrueReturn(ret) + self._check_extracted(os.path.join(ARCHIVE_DIR, UNTAR_FILE)) From ad1150fad400ab6ef84f5fe611e61062e424cb30 Mon Sep 17 00:00:00 2001 From: "Marc Vieira-Cardinal (VA2MVC)" Date: Sun, 28 Jan 2018 18:46:33 -0500 Subject: [PATCH 079/223] list.copy() is not compatible with python 2.7 I doubt that a .copy() is required there since list() would already create one but since the previous committer added it I am improving by replacing the .copy() with a [:] which makes a copy in python 2.7 and 3+ --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index c70d9aff2e..c1e5a2932e 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -234,7 +234,7 @@ class CloudClient(object): if a.get('provider', '')] if providers: _providers = opts.get('providers', {}) - for provider in list(_providers).copy(): + for provider in list(_providers)[:]: if provider not in providers: _providers.pop(provider) return opts From 6a0b5f7af312b57f7e629e8f36084bf1c45b987d Mon Sep 17 00:00:00 2001 From: "Marc Vieira-Cardinal (VA2MVC)" Date: Mon, 5 Feb 2018 18:43:41 -0500 Subject: [PATCH 080/223] Removed the chained copy --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index c1e5a2932e..35b01aa241 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -234,7 +234,7 @@ class CloudClient(object): if a.get('provider', '')] if providers: _providers = opts.get('providers', {}) - for provider in list(_providers)[:]: + for provider in _providers.copy(): if provider not in providers: _providers.pop(provider) return opts From f5e56b9b29322de00367d4f6b37a5895de714622 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 8 Feb 2018 15:48:37 -0700 Subject: [PATCH 081/223] Fix Py2 unicode issue in win_pkg.py `salt.utils.templates` Fixes an issue where the template renderer was doing some unnecessary encoding Also ensures utf-8 encoding when writing the file `salt.modules.win_pkg` Fixes an issue the winrepo.p file was being decoded with the system decoding. This makes sure it's using utf-8 Removes the need to decode the names since they're already in utf-8 format --- salt/modules/win_pkg.py | 4 ++-- salt/utils/templates.py | 8 ++------ 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index f37eea8ccd..3c312c05fc 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1820,7 +1820,7 @@ def get_repo_data(saltenv='base'): serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile: try: - repodata = salt.utils.data.decode(serial.loads(repofile.read()) or {}) + repodata = salt.utils.data.decode(serial.loads(repofile.read(), encoding='utf-8') or {}) __context__['winrepo.data'] = repodata return repodata except Exception as exc: @@ -1843,7 +1843,7 @@ def _get_name_map(saltenv='base'): return name_map for k in name_map: - u_name_map[k.decode('utf-8')] = name_map[k] + u_name_map[k] = name_map[k] return u_name_map diff --git a/salt/utils/templates.py b/salt/utils/templates.py index cf5f0d6439..09984c9594 100644 --- a/salt/utils/templates.py +++ 
b/salt/utils/templates.py @@ -167,11 +167,9 @@ def wrap_tmpl_func(render_str): tmplsrc.close() try: output = render_str(tmplstr, context, tmplpath) - if six.PY2: - output = output.encode(SLS_ENCODING) if salt.utils.platform.is_windows(): newline = False - if salt.utils.stringutils.to_unicode(output).endswith(('\n', os.linesep)): + if salt.utils.stringutils.to_unicode(output, encoding=SLS_ENCODING).endswith(('\n', os.linesep)): newline = True # Write out with Windows newlines output = os.linesep.join(output.splitlines()) @@ -188,9 +186,7 @@ def wrap_tmpl_func(render_str): if to_str: # then render as string return dict(result=True, data=output) with tempfile.NamedTemporaryFile('wb', delete=False, prefix=salt.utils.files.TEMPFILE_PREFIX) as outf: - if six.PY3: - output = output.encode(SLS_ENCODING) - outf.write(output) + outf.write(salt.utils.stringutils.to_bytes(output, encoding=SLS_ENCODING)) # Note: If nothing is replaced or added by the rendering # function, then the contents of the output file will # be exactly the same as the input. From 2aa4483784a1ffdd7ce811085909402bb7c14db0 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 8 Feb 2018 14:54:29 -0800 Subject: [PATCH 082/223] Adding an import for salt.ext.six.moves to import queue to use in the exception for when the multiprocessing queue is full. 
--- salt/log/handlers/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index 8380cb9052..bd67a66f56 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -17,6 +17,7 @@ import logging.handlers # Import salt libs from salt.log.mixins import NewStyleClassMixIn, ExcInfoOnLogLevelFormatMixIn +from salt.ext.six.moves import queue log = logging.getLogger(__name__) @@ -176,7 +177,7 @@ if sys.version_info < (3, 2): ''' try: self.queue.put_nowait(record) - except self.queue.Full: + except queue.Full: sys.stderr.write('[WARNING ] Message queue is full, ' 'unable to write "{0}" to log', record ) From dd2788419b112a327f62b3ce5c87fa37e1625dff Mon Sep 17 00:00:00 2001 From: David Murphy < dmurphy@saltstack.com> Date: Thu, 8 Feb 2018 17:43:30 -0700 Subject: [PATCH 083/223] Fix use of 'su' for AIX to use '-' --- salt/modules/cmdmod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 32653d08c1..233dca8647 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -407,7 +407,7 @@ def _run(cmd, elif __grains__['os_family'] in ['Solaris']: env_cmd = ('su', '-', runas, '-c', sys.executable) elif __grains__['os_family'] in ['AIX']: - env_cmd = ('su', runas, '-c', sys.executable) + env_cmd = ('su', '-', runas, '-c', sys.executable) else: env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable) env_encoded = subprocess.Popen( From 967b83940c076106aa26c6fd6a096b619a37cffe Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 29 Jan 2018 12:57:10 -0700 Subject: [PATCH 084/223] Fix rehash function Use SendMessageTimeout instead of SendMessage which was causing the system to hang in some scenarios Use ctypes to access the dll directly instead of using pywin32 as this seems to actually work --- salt/modules/win_path.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 
deletions(-) diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index 3b446f7cea..e2f292df4e 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -12,12 +12,12 @@ from __future__ import absolute_import import logging import re import os +import ctypes from salt.ext.six.moves import map # Third party libs try: - from win32con import HWND_BROADCAST, WM_SETTINGCHANGE - from win32api import SendMessage + from win32con import HWND_BROADCAST, WM_SETTINGCHANGE, SMTO_ABORTIFHUNG HAS_WIN32 = True except ImportError: HAS_WIN32 = False @@ -47,7 +47,15 @@ def _normalize_dir(string): def rehash(): ''' - Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment variables + Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment + variables for new processes. + + .. note:: + This will only affect new processes that aren't launched by services. To + apply changes to the path to services, the host must be restarted. The + ``salt-minion``, if running as a service, will not see changes to the + environment until the system is restarted. 
See + `MSDN Documentation ` CLI Example: @@ -55,7 +63,12 @@ def rehash(): salt '*' win_path.rehash ''' - return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 'Environment')) + broadcast_message = ctypes.create_unicode_buffer('Environment') + user32 = ctypes.windll('user32', use_last_error=True) + result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, + broadcast_message, SMTO_ABORTIFHUNG, + 5000, 0) + return result == 1 def get_path(): From 79299361c3abd729a293fae823c8cd70bc0801ef Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 31 Jan 2018 14:29:37 -0700 Subject: [PATCH 085/223] Create refresh_environment salt util --- salt/modules/reg.py | 10 +++------- salt/modules/win_path.py | 10 ++-------- salt/utils/win_functions.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/salt/modules/reg.py b/salt/modules/reg.py index 740bd59fc6..a7df4effa5 100644 --- a/salt/modules/reg.py +++ b/salt/modules/reg.py @@ -38,7 +38,6 @@ from salt.ext.six.moves import range # pylint: disable=W0622,import-error # Import third party libs try: - import win32gui import win32api import win32con import pywintypes @@ -48,6 +47,7 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.win_functions from salt.exceptions import CommandExecutionError PY2 = sys.version_info[0] == 2 @@ -68,7 +68,7 @@ def __virtual__(): if not HAS_WINDOWS_MODULES: return (False, 'reg execution module failed to load: ' 'One of the following libraries did not load: ' - + 'win32gui, win32con, win32api') + 'win32con, win32api, pywintypes') return __virtualname__ @@ -193,11 +193,7 @@ def broadcast_change(): salt '*' reg.broadcast_change ''' - # https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx - _, res = win32gui.SendMessageTimeout( - win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0, - win32con.SMTO_ABORTIFHUNG, 5000) - return not bool(res) + return 
salt.utils.win_functions.refresh_environment() def list_keys(hive, key=None, use_32bit_registry=False): diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index e2f292df4e..b70ab423fb 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -12,18 +12,17 @@ from __future__ import absolute_import import logging import re import os -import ctypes from salt.ext.six.moves import map # Third party libs try: - from win32con import HWND_BROADCAST, WM_SETTINGCHANGE, SMTO_ABORTIFHUNG HAS_WIN32 = True except ImportError: HAS_WIN32 = False # Import salt libs import salt.utils +import salt.utils.win_functions # Settings log = logging.getLogger(__name__) @@ -63,12 +62,7 @@ def rehash(): salt '*' win_path.rehash ''' - broadcast_message = ctypes.create_unicode_buffer('Environment') - user32 = ctypes.windll('user32', use_last_error=True) - result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, - broadcast_message, SMTO_ABORTIFHUNG, - 5000, 0) - return result == 1 + return salt.utils.win_functions.refresh_environment() def get_path(): diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 6c7ff4040b..a1f75a5a29 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -6,6 +6,7 @@ missing functions in other modules from __future__ import absolute_import import platform import re +import ctypes # Import Salt Libs from salt.exceptions import CommandExecutionError @@ -17,6 +18,7 @@ try: import win32api import win32net import win32security + from win32con import HWND_BROADCAST, WM_SETTINGCHANGE, SMTO_ABORTIFHUNG HAS_WIN32 = True except ImportError: HAS_WIN32 = False @@ -210,3 +212,30 @@ def escape_for_cmd_exe(arg): return meta_map[char] return meta_re.sub(escape_meta_chars, arg) + + +def refresh_environment(): + ''' + Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment + variables for new processes. + + .. 
note:: + This will only affect new processes that aren't launched by services. To + apply changes to the path or registry to services, the host must be + restarted. The ``salt-minion``, if running as a service, will not see + changes to the environment until the system is restarted. See + `MSDN Documentation ` + + CLI Example: + + ... code-block:: python + + import salt.utils.win_functions + salt.utils.win_functions.refresh_environment() + ''' + broadcast_message = ctypes.create_unicode_buffer('Environment') + user32 = ctypes.windll('user32', use_last_error=True) + result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, + broadcast_message, SMTO_ABORTIFHUNG, + 5000, 0) + return result == 1 From a3f9e99bc02c3887a38932511f4aa8302247cfaa Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 5 Feb 2018 15:47:16 -0700 Subject: [PATCH 086/223] Change to a generic function to broadcast change --- salt/modules/reg.py | 6 +++--- salt/modules/win_path.py | 2 +- salt/utils/win_functions.py | 26 ++++++++++++++++++++------ 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/salt/modules/reg.py b/salt/modules/reg.py index a7df4effa5..3221b58d05 100644 --- a/salt/modules/reg.py +++ b/salt/modules/reg.py @@ -29,8 +29,8 @@ Values/Entries are name/data pairs. There can be many values in a key. 
The # When production windows installer is using Python 3, Python 2 code can be removed # Import _future_ python libs first & before any other code -from __future__ import absolute_import -from __future__ import unicode_literals +from __future__ import absolute_import, unicode_literals + # Import python libs import sys import logging @@ -193,7 +193,7 @@ def broadcast_change(): salt '*' reg.broadcast_change ''' - return salt.utils.win_functions.refresh_environment() + return salt.utils.win_functions.broadcast_setting_change('Environment') def list_keys(hive, key=None, use_32bit_registry=False): diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index b70ab423fb..281e0211f2 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -62,7 +62,7 @@ def rehash(): salt '*' win_path.rehash ''' - return salt.utils.win_functions.refresh_environment() + return salt.utils.win_functions.broadcast_setting_change('Environment') def get_path(): diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index a1f75a5a29..f5ce159f6f 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -3,7 +3,7 @@ Various functions to be used by windows during start up and to monkey patch missing functions in other modules ''' -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals import platform import re import ctypes @@ -214,10 +214,24 @@ def escape_for_cmd_exe(arg): return meta_re.sub(escape_meta_chars, arg) -def refresh_environment(): +def broadcast_setting_change(setting='Environment'): ''' - Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment - variables for new processes. + Send a WM_SETTINGCHANGE Broadcast to all Windows + + Args: + + setting (str): + A string value representing the portion of the system that has been + updated and needs to be refreshed. Default is ``Environment``. 
These + are some common values: + + - "Environment" : to effect a change in the environment variables + - "intl" : to effect a change in locale settings + - "Policy" : to effect a change in Group Policy Settings + - a leaf node in the registry + - the name of a section in the ``Win.ini`` file + + `See here ` .. note:: This will only affect new processes that aren't launched by services. To @@ -233,8 +247,8 @@ def refresh_environment(): import salt.utils.win_functions salt.utils.win_functions.refresh_environment() ''' - broadcast_message = ctypes.create_unicode_buffer('Environment') - user32 = ctypes.windll('user32', use_last_error=True) + broadcast_message = ctypes.create_unicode_buffer(setting) + user32 = ctypes.WinDLL('user32', use_last_error=True) result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, broadcast_message, SMTO_ABORTIFHUNG, 5000, 0) From 3a54e09cd9e22d7b5512b4f242ad4beac1852080 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 6 Feb 2018 16:12:11 -0700 Subject: [PATCH 087/223] Rename setting to message --- salt/utils/win_functions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index f5ce159f6f..2a659299de 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -214,13 +214,13 @@ def escape_for_cmd_exe(arg): return meta_re.sub(escape_meta_chars, arg) -def broadcast_setting_change(setting='Environment'): +def broadcast_setting_change(message='Environment'): ''' Send a WM_SETTINGCHANGE Broadcast to all Windows Args: - setting (str): + message (str): A string value representing the portion of the system that has been updated and needs to be refreshed. Default is ``Environment``. 
These are some common values: @@ -247,7 +247,7 @@ def broadcast_setting_change(setting='Environment'): import salt.utils.win_functions salt.utils.win_functions.refresh_environment() ''' - broadcast_message = ctypes.create_unicode_buffer(setting) + broadcast_message = ctypes.create_unicode_buffer(message) user32 = ctypes.WinDLL('user32', use_last_error=True) result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, broadcast_message, SMTO_ABORTIFHUNG, From d50d5f582fdca5356b637ffd069ae6946f2b29ef Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 7 Feb 2018 14:58:11 -0700 Subject: [PATCH 088/223] Add additional info to docs for `broadcast_setting_change` --- salt/utils/win_functions.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 2a659299de..26a8674fd2 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -233,12 +233,23 @@ def broadcast_setting_change(message='Environment'): `See here ` + See lParam within msdn docs for + `WM_SETTINGCHANGE ` + for more information on Broadcasting Messages. + + See GWL_WNDPROC within msdn docs for + `SetWindowLong ` + for information on how to retrieve those messages. + .. note:: This will only affect new processes that aren't launched by services. To apply changes to the path or registry to services, the host must be restarted. The ``salt-minion``, if running as a service, will not see - changes to the environment until the system is restarted. See + changes to the environment until the system is restarted. Services + inherit their environment from ``services.exe`` which does not respond + to messaging events. See `MSDN Documentation ` + for more information. 
CLI Example: From e84628c1ebaef5e06ad0c900b7c118d7aec9a051 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 7 Feb 2018 15:28:53 -0700 Subject: [PATCH 089/223] Add some comments to the code --- salt/utils/win_functions.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 26a8674fd2..1853afd276 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -258,6 +258,25 @@ def broadcast_setting_change(message='Environment'): import salt.utils.win_functions salt.utils.win_functions.refresh_environment() ''' + # Listen for messages sent by this would involve working with the + # SetWindowLong function. This can be accessed via the win32gui or through + # ctypes. You can find examples on how to do this by searching for + # `Accessing WGL_WNDPROC` on the internet. Here are some examples of how + # this might work: + # + # # using win32gui + # import win32con + # import win32gui + # old_function = win32gui.SetWindowLong(window_handle, win32con.GWL_WNDPROC, new_function) + # + # # using ctypes + # import ctypes + # import win32con + # from ctypes import c_long, c_int + # user32 = ctypes.WinDLL('user32', use_last_error=True) + # WndProcType = ctypes.WINFUNCTYPE(c_int, c_long, c_int, c_int) + # new_function = WndProcType + # old_function = user32.SetWindowLongW(window_handle, win32con.GWL_WNDPROC, new_function) broadcast_message = ctypes.create_unicode_buffer(message) user32 = ctypes.WinDLL('user32', use_last_error=True) result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, From adc594c183c0ec28a67f70a6d41e8347bb720913 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 7 Feb 2018 15:30:51 -0700 Subject: [PATCH 090/223] Remove duplicate link --- salt/utils/win_functions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 1853afd276..145020bc84 100644 --- a/salt/utils/win_functions.py +++ 
b/salt/utils/win_functions.py @@ -231,8 +231,6 @@ def broadcast_setting_change(message='Environment'): - a leaf node in the registry - the name of a section in the ``Win.ini`` file - `See here ` - See lParam within msdn docs for `WM_SETTINGCHANGE ` for more information on Broadcasting Messages. From 29912adc15709993e93ab48d0da23e7d497f0698 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 8 Feb 2018 17:43:03 -0700 Subject: [PATCH 091/223] Move the test_rehash test to test_win_functions --- salt/utils/win_functions.py | 2 +- tests/unit/modules/test_win_path.py | 38 +------------------------- tests/unit/utils/test_win_functions.py | 8 ++++++ 3 files changed, 10 insertions(+), 38 deletions(-) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 145020bc84..662da797a9 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -254,7 +254,7 @@ def broadcast_setting_change(message='Environment'): ... code-block:: python import salt.utils.win_functions - salt.utils.win_functions.refresh_environment() + salt.utils.win_functions.broadcast_setting_change('Environment') ''' # Listen for messages sent by this would involve working with the # SetWindowLong function. 
This can be accessed via the win32gui or through diff --git a/tests/unit/modules/test_win_path.py b/tests/unit/modules/test_win_path.py index 8c80b1e1f1..c8d08b9418 100644 --- a/tests/unit/modules/test_win_path.py +++ b/tests/unit/modules/test_win_path.py @@ -20,49 +20,13 @@ from tests.support.mock import ( import salt.modules.win_path as win_path -class MockWin32API(object): - ''' - Mock class for win32api - ''' - def __init__(self): - pass - - @staticmethod - def SendMessage(*args): - ''' - Mock method for SendMessage - ''' - return [args[0]] - - -class MockWin32Con(object): - ''' - Mock class for win32con - ''' - HWND_BROADCAST = 1 - WM_SETTINGCHANGE = 1 - - def __init__(self): - pass - - @skipIf(NO_MOCK, NO_MOCK_REASON) class WinPathTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.win_path ''' def setup_loader_modules(self): - return {win_path: {'win32api': MockWin32API, - 'win32con': MockWin32Con, - 'SendMessage': MagicMock, - 'HWND_BROADCAST': MagicMock, - 'WM_SETTINGCHANGE': MagicMock}} - - def test_rehash(self): - ''' - Test to rehash the Environment variables - ''' - self.assertTrue(win_path.rehash()) + return {win_path: {}} def test_get_path(self): ''' diff --git a/tests/unit/utils/test_win_functions.py b/tests/unit/utils/test_win_functions.py index bbd0ef86fd..c35ab697da 100644 --- a/tests/unit/utils/test_win_functions.py +++ b/tests/unit/utils/test_win_functions.py @@ -12,6 +12,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils.win_functions as win_functions +import salt.utils @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -51,3 +52,10 @@ class WinFunctionsTestCase(TestCase): encoded = win_functions.escape_argument('C:\\Some Path\\With Spaces') self.assertEqual(encoded, '^"C:\\Some Path\\With Spaces^"') + + @skipIf(not salt.utils.is_windows(), 'WinDLL only available on Windows') + def test_broadcast_setting_change(self): + ''' + Test to rehash the Environment variables + ''' + 
self.assertTrue(win_functions.broadcast_setting_change()) From 15a1ab4acc2a57dcc0ac5a23dc1a8c53e021df59 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 8 Feb 2018 19:35:14 -0600 Subject: [PATCH 092/223] Roll back exception logging There are a lot of legit API errors such as 404 errors when inspecting something that doesn't exist. We don't want these to all log exceptions. --- salt/modules/dockermod.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index b91ede416e..14ba2a3d22 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -630,7 +630,6 @@ def _client_wrapper(attr, *args, **kwargs): ) ret = func(*args, **kwargs) except docker.errors.APIError as exc: - log.exception('Encountered error running API function %s', attr) if catch_api_errors: # Generic handling of Docker API errors raise CommandExecutionError( From f35d9f6feff67c971a0b114fe20bd8f63c5bd236 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 8 Feb 2018 22:32:24 -0600 Subject: [PATCH 093/223] Fix incorrect translation of docker port_bindings -> ports The logic which ensures that we expose ports which are being bound, even when not explicitly configured, was done incorrectly. UDP ports were being passed to the API as '1234/udp' instead of (1234, 'udp'). This results in the port not being exposed properly. The logic has been corrected. Additionally both the "ports" input translation function, as well as the post-processing code (where the port numbers configured in port_bindings were being added) both contained code to "fix" any ports which were configured using 'portnum/tcp', as these must be passed to the API simply as integers. To reduce code duplication, this normalization is now only performed at the very end of the post-processing function, after ports have been translated, and any missing ports from the port_bindings have been added. 
The unit test for the port_bindings input translation code, which was written based upon the same incorrect reading of the API docs that resulted in the incorrect behavior, has been updated to confirm the (now) correct behavior. The unit test for the ports input translation code has been updated to reflect the new normalization behavior. Finally, an integration test has been added to ensure that we properly expose UDP ports which are added as part of the post-processing function. --- salt/utils/docker/translate/container.py | 50 +++++++++---------- .../states/test_docker_container.py | 38 ++++++++++++++ tests/unit/utils/test_docker.py | 30 ++++++----- 3 files changed, 79 insertions(+), 39 deletions(-) diff --git a/salt/utils/docker/translate/container.py b/salt/utils/docker/translate/container.py index 1a167369ac..544e05c7a0 100644 --- a/salt/utils/docker/translate/container.py +++ b/salt/utils/docker/translate/container.py @@ -114,28 +114,32 @@ def _post_processing(kwargs, skip_translate, invalid): actual_volumes.sort() if kwargs.get('port_bindings') is not None \ - and (skip_translate is True or - all(x not in skip_translate - for x in ('port_bindings', 'expose', 'ports'))): + and all(x not in skip_translate + for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. 
- auto_ports = list(kwargs['port_bindings']) - if auto_ports: - actual_ports = [] - # Sort list to make unit tests more reliable - for port in auto_ports: - if port in actual_ports: - continue - if isinstance(port, six.integer_types): - actual_ports.append((port, 'tcp')) + ports_to_bind = list(kwargs['port_bindings']) + if ports_to_bind: + ports_to_open = set(kwargs.get('ports', [])) + for port_def in ports_to_bind: + if isinstance(port_def, six.integer_types): + ports_to_open.add(port_def) else: - port, proto = port.split('/') - actual_ports.append((int(port), proto)) - actual_ports.sort() - actual_ports = [ - port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports - ] - kwargs.setdefault('ports', actual_ports) + port_num, proto = port_def.split('/') + ports_to_open.add((int(port_num), proto)) + kwargs['ports'] = sorted(ports_to_open) + + if 'ports' in kwargs \ + and all(x not in skip_translate for x in ('expose', 'ports')): + # TCP ports should only be passed as the port number. Normalize the + # input so a port definition of 80/tcp becomes just 80 instead of + # (80, 'tcp'). 
+ for index, _ in enumerate(kwargs['ports']): + try: + if kwargs['ports'][index][1] == 'tcp': + kwargs['ports'][index] = ports_to_open[index][0] + except TypeError: + continue # Functions below must match names of docker-py arguments @@ -552,13 +556,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) - ordered_new_ports = [ - port if proto == 'tcp' else (port, proto) for (port, proto) in sorted( - [(new_port, 'tcp') if isinstance(new_port, six.integer_types) else new_port - for new_port in new_ports] - ) - ] - return ordered_new_ports + return sorted(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument diff --git a/tests/integration/states/test_docker_container.py b/tests/integration/states/test_docker_container.py index ffa639cf1c..0396087d2d 100644 --- a/tests/integration/states/test_docker_container.py +++ b/tests/integration/states/test_docker_container.py @@ -444,6 +444,44 @@ class DockerContainerTestCase(ModuleCase, SaltReturnAssertsMixin): image_info['Config']['Cmd'] ) + @container_name + def test_running_with_port_bindings(self, name): + ''' + This tests that the ports which are being bound are also exposed, even + when not explicitly configured. This test will create a container with + only some of the ports exposed, including some which aren't even bound. + The resulting containers exposed ports should contain all of the ports + defined in the "ports" argument, as well as each of the ports which are + being bound. + ''' + # Create the container + ret = self.run_state( + 'docker_container.running', + name=name, + image=self.image, + command='sleep 600', + shutdown_timeout=1, + port_bindings=[1234, '1235-1236', '2234/udp', '2235-2236/udp'], + ports=[1235, '2235/udp', 9999], + ) + self.assertSaltTrueReturn(ret) + + # Check the created container's port bindings and exposed ports. 
The + # port bindings should only contain the ports defined in the + # port_bindings argument, while the exposed ports should also contain + # the extra port (9999/tcp) which was included in the ports argument. + cinfo = self.run_function('docker.inspect_container', [name]) + ports = ['1234/tcp', '1235/tcp', '1236/tcp', + '2234/udp', '2235/udp', '2236/udp'] + self.assertEqual( + sorted(cinfo['HostConfig']['PortBindings']), + ports + ) + self.assertEqual( + sorted(cinfo['Config']['ExposedPorts']), + ports + ['9999/tcp'] + ) + @container_name def test_absent_with_stopped_container(self, name): ''' diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index fd32242c1e..eae512485e 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -1302,8 +1302,9 @@ class TranslateContainerInputTestCase(TranslateBase): '3334/udp': ('10.4.5.6', 3334), '5505/udp': ('10.7.8.9', 15505), '5506/udp': ('10.7.8.9', 15506)}, - 'ports': [80, '81/udp', 3333, '3334/udp', - 4505, 4506, '5505/udp', '5506/udp']} + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')]} ) # ip::containerPort - Bind a specific IP and an ephemeral port to a @@ -1327,8 +1328,9 @@ class TranslateContainerInputTestCase(TranslateBase): '3334/udp': ('10.4.5.6',), '5505/udp': ('10.7.8.9',), '5506/udp': ('10.7.8.9',)}, - 'ports': [80, '81/udp', 3333, '3334/udp', - 4505, 4506, '5505/udp', '5506/udp']} + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')]} ) # hostPort:containerPort - Bind a specific port on all of the host's @@ -1351,8 +1353,9 @@ class TranslateContainerInputTestCase(TranslateBase): '3334/udp': 3334, '5505/udp': 15505, '5506/udp': 15506}, - 'ports': [80, '81/udp', 3333, '3334/udp', - 4505, 4506, '5505/udp', '5506/udp']} + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')]} ) # containerPort - Bind an ephemeral port on all of the 
host's @@ -1372,8 +1375,9 @@ class TranslateContainerInputTestCase(TranslateBase): '3334/udp': None, '5505/udp': None, '5506/udp': None}, - 'ports': [80, '81/udp', 3333, '3334/udp', - 4505, 4506, '5505/udp', '5506/udp']}, + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')]} ) # Test a mixture of different types of input @@ -1402,10 +1406,10 @@ class TranslateContainerInputTestCase(TranslateBase): '19999/udp': None, '20000/udp': None, '20001/udp': None}, - 'ports': [80, '81/udp', 3333, '3334/udp', - 4505, 4506, '5505/udp', '5506/udp', - 9999, 10000, 10001, '19999/udp', - '20000/udp', '20001/udp']} + 'ports': [80, 3333, 4505, 4506, 9999, 10000, 10001, + (81, 'udp'), (3334, 'udp'), (5505, 'udp'), + (5506, 'udp'), (19999, 'udp'), + (20000, 'udp'), (20001, 'udp')]} ) # Error case: too many items (max 3) @@ -1510,7 +1514,7 @@ class TranslateContainerInputTestCase(TranslateBase): self.translator, ports=val, ), - {'ports': [1111, 2222, (3333, 'udp'), 4505, 4506]} + {'ports': [1111, 2222, 4505, 4506, (3333, 'udp')]} ) # Error case: non-integer and non/string value From 9cd47b39dd8bf8bb2c7d89aead1bc8783cd75f24 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 8 Feb 2018 23:23:43 -0600 Subject: [PATCH 094/223] Fix incorrect translation of docker port_bindings -> ports The logic which ensures that we expose ports which are being bound, even when not explicitly configured, was done incorrectly. UDP ports were being passed to the API as '1234/udp' instead of (1234, 'udp'). This results in the port not being exposed properly. The logic has been corrected. Additionally both the "ports" input translation function, as well as the post-processing code (where the port numbers configured in port_bindings were being added) both contained code to "fix" any ports which were configured using 'portnum/tcp', as these must be passed to the API simply as integers. 
To reduce code duplication, this normalization is now only performed at the very end of the post-processing function, after ports have been translated, and any missing ports from the port_bindings have been added. The unit test for the port_bindings input translation code, which was written based upon the same incorrect reading of the API docs that resulted in the incorrect behavior, have been updated to confirm the (now) correct behavior. The unit test for the ports input translation code has been updated to reflect the new normalization behavior. Finally, an integration test has been added to ensure that we properly expose UDP ports which are added as part of the post-processing function. --- salt/utils/docker/__init__.py | 42 ++++++++++-------- salt/utils/docker/translate.py | 8 +--- .../states/test_docker_container.py | 44 +++++++++++++++++++ tests/unit/utils/test_docker.py | 25 ++++++++--- 4 files changed, 86 insertions(+), 33 deletions(-) diff --git a/salt/utils/docker/__init__.py b/salt/utils/docker/__init__.py index e186d639d7..e9ce305c68 100644 --- a/salt/utils/docker/__init__.py +++ b/salt/utils/docker/__init__.py @@ -287,27 +287,31 @@ def translate_input(**kwargs): actual_volumes.sort() if kwargs.get('port_bindings') is not None \ - and (skip_translate is True or - all(x not in skip_translate - for x in ('port_bindings', 'expose', 'ports'))): + and all(x not in skip_translate + for x in ('port_bindings', 'expose', 'ports')): # Make sure that all ports defined in "port_bindings" are included in # the "ports" param. 
- auto_ports = list(kwargs['port_bindings']) - if auto_ports: - actual_ports = [] - # Sort list to make unit tests more reliable - for port in auto_ports: - if port in actual_ports: - continue - if isinstance(port, six.integer_types): - actual_ports.append((port, 'tcp')) + ports_to_bind = list(kwargs['port_bindings']) + if ports_to_bind: + ports_to_open = set(kwargs.get('ports', [])) + for port_def in ports_to_bind: + if isinstance(port_def, six.integer_types): + ports_to_open.add(port_def) else: - port, proto = port.split('/') - actual_ports.append((int(port), proto)) - actual_ports.sort() - actual_ports = [ - port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports - ] - kwargs.setdefault('ports', actual_ports) + port_num, proto = port_def.split('/') + ports_to_open.add((int(port_num), proto)) + kwargs['ports'] = sorted(ports_to_open) + + if 'ports' in kwargs \ + and all(x not in skip_translate for x in ('expose', 'ports')): + # TCP ports should only be passed as the port number. Normalize the + # input so a port definition of 80/tcp becomes just 80 instead of + # (80, 'tcp'). 
+ for index, _ in enumerate(kwargs['ports']): + try: + if kwargs['ports'][index][1] == 'tcp': + kwargs['ports'][index] = ports_to_open[index][0] + except TypeError: + continue return kwargs, invalid, sorted(collisions) diff --git a/salt/utils/docker/translate.py b/salt/utils/docker/translate.py index 372596a759..7268d6c06b 100644 --- a/salt/utils/docker/translate.py +++ b/salt/utils/docker/translate.py @@ -705,13 +705,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument raise SaltInvocationError(exc.__str__()) new_ports.update([_get_port_def(x, proto) for x in range(range_start, range_end + 1)]) - ordered_new_ports = [ - port if proto == 'tcp' else (port, proto) for (port, proto) in sorted( - [(new_port, 'tcp') if isinstance(new_port, six.integer_types) else new_port - for new_port in new_ports] - ) - ] - return ordered_new_ports + return sorted(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument diff --git a/tests/integration/states/test_docker_container.py b/tests/integration/states/test_docker_container.py index 061bcbb060..9cc14b0fbd 100644 --- a/tests/integration/states/test_docker_container.py +++ b/tests/integration/states/test_docker_container.py @@ -448,6 +448,50 @@ class DockerContainerTestCase(ModuleCase, SaltReturnAssertsMixin): if name in self.run_function('docker.list_containers', all=True): self.run_function('docker.rm', [name], force=True) + @with_random_name + def test_running_with_port_bindings(self, name): + ''' + This tests that the ports which are being bound are also exposed, even + when not explicitly configured. This test will create a container with + only some of the ports exposed, including some which aren't even bound. + The resulting containers exposed ports should contain all of the ports + defined in the "ports" argument, as well as each of the ports which are + being bound. 
+ ''' + try: + # Create the container + ret = self.run_state( + 'docker_container.running', + name=name, + image=self.image, + command='sleep 600', + shutdown_timeout=1, + port_bindings=[1234, '1235-1236', '2234/udp', '2235-2236/udp'], + ports=[1235, '2235/udp', 9999], + ) + self.assertSaltTrueReturn(ret) + + # Check the created container's port bindings and exposed ports. + # The port bindings should only contain the ports defined in the + # port_bindings argument, while the exposed ports should also + # contain the extra port (9999/tcp) which was included in the ports + # argument. + + cinfo = self.run_function('docker.inspect_container', [name]) + ports = ['1234/tcp', '1235/tcp', '1236/tcp', + '2234/udp', '2235/udp', '2236/udp'] + self.assertEqual( + sorted(cinfo['HostConfig']['PortBindings']), + ports + ) + self.assertEqual( + sorted(cinfo['Config']['ExposedPorts']), + ports + ['9999/tcp'] + ) + finally: + if name in self.run_function('docker.list_containers', all=True): + self.run_function('docker.rm', [name], force=True) + @with_random_name def test_absent_with_stopped_container(self, name): ''' diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index 5ee6edc236..ea6760d5ea 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -1108,7 +1108,9 @@ class TranslateInputTestCase(TestCase): '3334/udp': ('10.4.5.6', 3334), '5505/udp': ('10.7.8.9', 15505), '5506/udp': ('10.7.8.9', 15506)}, - 'ports': [80, '81/udp', 3333, '3334/udp', 4505, 4506, '5505/udp', '5506/udp'], + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')] }, {}, [] ) @@ -1146,7 +1148,9 @@ class TranslateInputTestCase(TestCase): '3334/udp': ('10.4.5.6',), '5505/udp': ('10.7.8.9',), '5506/udp': ('10.7.8.9',)}, - 'ports': [80, '81/udp', 3333, '3334/udp', 4505, 4506, '5505/udp', '5506/udp'], + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')] }, {}, [] ) @@ 
-1182,7 +1186,9 @@ class TranslateInputTestCase(TestCase): '3334/udp': 3334, '5505/udp': 15505, '5506/udp': 15506}, - 'ports': [80, '81/udp', 3333, '3334/udp', 4505, 4506, '5505/udp', '5506/udp'], + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')] }, {}, [] ) @@ -1217,7 +1223,9 @@ class TranslateInputTestCase(TestCase): '3334/udp': None, '5505/udp': None, '5506/udp': None}, - 'ports': [80, '81/udp', 3333, '3334/udp', 4505, 4506, '5505/udp', '5506/udp'], + 'ports': [80, 3333, 4505, 4506, + (81, 'udp'), (3334, 'udp'), + (5505, 'udp'), (5506, 'udp')] }, {}, [] ) @@ -1251,8 +1259,11 @@ class TranslateInputTestCase(TestCase): '19999/udp': None, '20000/udp': None, '20001/udp': None}, - 'ports': [80, '81/udp', 3333, '3334/udp', 4505, 4506, '5505/udp', '5506/udp', - 9999, 10000, 10001, '19999/udp', '20000/udp', '20001/udp'] + 'ports': [80, 3333, 4505, 4506, 9999, 10000, 10001, + (81, 'udp'), (3334, 'udp'), (5505, 'udp'), + (5506, 'udp'), (19999, 'udp'), + (20000, 'udp'), (20001, 'udp')] + }, {}, [] ) @@ -1439,7 +1450,7 @@ class TranslateInputTestCase(TestCase): the port numbers must end up as integers. None of the decorators will suffice so this one must be tested specially. 
''' - expected = ({'ports': [1111, 2222, (3333, 'udp'), 4505, 4506]}, {}, []) + expected = ({'ports': [1111, 2222, 4505, 4506, (3333, 'udp')]}, {}, []) # Comma-separated list self.assertEqual( docker_utils.translate_input(ports='1111,2222/tcp,3333/udp,4505-4506'), From ed69b987cfa3cd07e9e2e51757d616eba7b22032 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Fri, 9 Feb 2018 09:44:07 +0000 Subject: [PATCH 095/223] Add NetBox module autodoc --- doc/ref/modules/all/index.rst | 1 + doc/ref/modules/all/salt.modules.netbox.rst | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 doc/ref/modules/all/salt.modules.netbox.rst diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index 6ef587d1b4..6ef716905b 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -276,6 +276,7 @@ execution modules napalm_users napalm_yang_mod netaddress + netbox netbsd_sysctl netbsdservice netscaler diff --git a/doc/ref/modules/all/salt.modules.netbox.rst b/doc/ref/modules/all/salt.modules.netbox.rst new file mode 100644 index 0000000000..f020177e85 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.netbox.rst @@ -0,0 +1,7 @@ +========================== +salt.modules.netbox module +========================== + +.. 
automodule:: salt.modules.netbox + :members: + From e04e7bdfc91730a7800f507b06b923e83d158c7f Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 9 Feb 2018 09:40:22 -0500 Subject: [PATCH 096/223] Remove extra patch --- tests/unit/modules/test_localemod.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/unit/modules/test_localemod.py b/tests/unit/modules/test_localemod.py index 5332367031..2fc7387de1 100644 --- a/tests/unit/modules/test_localemod.py +++ b/tests/unit/modules/test_localemod.py @@ -241,7 +241,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): @patch('salt.modules.localemod.dbus', None) @patch('salt.modules.localemod.__salt__', {'cmd.run': MagicMock()}) @patch('salt.utils.systemd.booted', MagicMock(return_value=False)) - def test_get_locale_with_no_systemd_slowlaris(self): + def test_get_locale_with_no_systemd_solaris(self): ''' Test getting current system locale with systemd and dbus available on Solaris. :return: @@ -329,7 +329,6 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value='/usr/sbin/update-locale')) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) @@ -390,9 +389,9 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): 'file.replace': MagicMock()}) @patch('salt.modules.localemod._localectl_set', MagicMock()) @patch('salt.utils.systemd.booted', MagicMock(return_value=False)) - def test_set_locale_with_no_systemd_slowlaris_with_list_avail(self): + def test_set_locale_with_no_systemd_solaris_with_list_avail(self): ''' - Test setting current system locale with systemd and dbus available on Slowlaris. 
+ Test setting current system locale with systemd and dbus available on Solaris. The list_avail returns the proper locale. :return: ''' @@ -411,9 +410,9 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): 'file.replace': MagicMock()}) @patch('salt.modules.localemod._localectl_set', MagicMock()) @patch('salt.utils.systemd.booted', MagicMock(return_value=False)) - def test_set_locale_with_no_systemd_slowlaris_without_list_avail(self): + def test_set_locale_with_no_systemd_solaris_without_list_avail(self): ''' - Test setting current system locale with systemd and dbus is not available on Slowlaris. + Test setting current system locale with systemd and dbus is not available on Solaris. The list_avail does not return the proper locale. :return: ''' From 9c49c8d47cb70ca4fc87ccf252656338a74b66da Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 9 Feb 2018 09:41:35 -0500 Subject: [PATCH 097/223] Remove extra patch --- tests/unit/modules/test_localemod.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/modules/test_localemod.py b/tests/unit/modules/test_localemod.py index 5332367031..4a6ce052eb 100644 --- a/tests/unit/modules/test_localemod.py +++ b/tests/unit/modules/test_localemod.py @@ -329,7 +329,6 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): assert localemod.__salt__['file.replace'].call_args[0][1] == '^LANG=.*' assert localemod.__salt__['file.replace'].call_args[0][2] == 'LANG="{}"'.format(loc) - @patch('salt.utils.path.which', MagicMock(return_value=None)) @patch('salt.utils.path.which', MagicMock(return_value='/usr/sbin/update-locale')) @patch('salt.modules.localemod.__grains__', {'os_family': 'Debian', 'osmajorrelease': 42}) @patch('salt.modules.localemod.dbus', None) From dd378bba9b663fe22804d67d4ddbd83c3719ac69 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 12:46:26 -0600 Subject: [PATCH 098/223] Check the effective saltenv for cached archive This fixes a regression caused by using a saltenv other than 
`base`. --- salt/states/archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index e49465cdef..c4ea3019b1 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -966,7 +966,7 @@ def extracted(name, if result['result']: # Get the path of the file in the minion cache - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) else: log.debug( 'failed to download %s', From 4f522eee2fa1303ad842566e9db02d26fb50001a Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 13:12:30 -0600 Subject: [PATCH 099/223] Remove duplicated section in docstring and fix example --- salt/states/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index 2822433c80..7e16bf706d 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6626,7 +6626,7 @@ def cached(name, .. code-block:: python - cached = __salt__['cp.is_cached'](source_match) + cached = __salt__['cp.is_cached'](source_match, saltenv=__env__) This function will return the cached path of the file, or an empty string if the file is not present in the minion cache. 
From 0ff41c5835301696a7a89b428bfdb4f8b8b5b60f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 7 Feb 2018 14:32:59 -0600 Subject: [PATCH 100/223] Add regression test for issue 45893 --- .../files/file/prod/issue45893/custom.tar.gz | Bin 0 -> 152 bytes .../files/file/prod/issue45893/init.sls | 5 +++ tests/integration/states/test_archive.py | 33 ++++++++++++------ 3 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 tests/integration/files/file/prod/issue45893/custom.tar.gz create mode 100644 tests/integration/files/file/prod/issue45893/init.sls diff --git a/tests/integration/files/file/prod/issue45893/custom.tar.gz b/tests/integration/files/file/prod/issue45893/custom.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..584852716c8ead74cc1a9a88fed42eb2c7e7d22e GIT binary patch literal 152 zcmb2|=3tn;W>Po<^V^Gixtt7n+8)Muy+5F~?5$3PoA`T&T^2Hee|FYa2RMo!U3mE5 z_eiEpMb7xW^Yo7&cB<;~)y!NKHEG{9^+4@+CpXRX4%vE*Yvt=O{Tc!({ij{R|om3;<=J BMb-cS literal 0 HcmV?d00001 diff --git a/tests/integration/files/file/prod/issue45893/init.sls b/tests/integration/files/file/prod/issue45893/init.sls new file mode 100644 index 0000000000..28e4ff0fe2 --- /dev/null +++ b/tests/integration/files/file/prod/issue45893/init.sls @@ -0,0 +1,5 @@ +test_non_base_env: + archive.extracted: + - name: {{ pillar['issue45893.name'] }} + - source: salt://issue45893/custom.tar.gz + - keep: False diff --git a/tests/integration/states/test_archive.py b/tests/integration/states/test_archive.py index ec3e227f12..cc7495b951 100644 --- a/tests/integration/states/test_archive.py +++ b/tests/integration/states/test_archive.py @@ -68,6 +68,16 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): log.debug('Checking for extracted file: %s', path) self.assertTrue(os.path.isfile(path)) + def run_function(self, *args, **kwargs): + ret = super(ArchiveTest, self).run_function(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + + def run_state(self, *args, **kwargs): + 
ret = super(ArchiveTest, self).run_state(*args, **kwargs) + log.debug('ret = %s', ret) + return ret + def test_archive_extracted_skip_verify(self): ''' test archive.extracted with skip_verify @@ -75,7 +85,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', skip_verify=True) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -91,7 +100,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -111,7 +119,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source=self.archive_tar_source, archive_format='tar', source_hash=ARCHIVE_TAR_HASH, user='root', group=r_group) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -128,7 +135,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -145,7 +151,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, options='--strip-components=1', enforce_toplevel=False) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') @@ -160,7 +165,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') 
self.assertSaltTrueReturn(ret) @@ -177,7 +181,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): source_hash=ARCHIVE_TAR_HASH, use_cmd_unzip=False, archive_format='tar') - log.debug('ret = %s', ret) if 'Timeout' in ret: self.skipTest('Timeout talking to local tornado server.') self.assertSaltTrueReturn(ret) @@ -190,7 +193,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ''' ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar') - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -203,7 +205,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -216,7 +217,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_HASH) - log.debug('ret = %s', ret) self.assertSaltTrueReturn(ret) @@ -229,6 +229,17 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar', source_hash=ARCHIVE_TAR_BAD_HASH) - log.debug('ret = %s', ret) self.assertSaltFalseReturn(ret) + + def test_archive_extracted_with_non_base_saltenv(self): + ''' + test archive.extracted with a saltenv other than `base` + ''' + ret = self.run_function( + 'state.sls', + ['issue45893'], + pillar={'issue45893.name': ARCHIVE_DIR}, + saltenv='prod') + self.assertSaltTrueReturn(ret) + self._check_extracted(os.path.join(ARCHIVE_DIR, UNTAR_FILE)) From 65fa24aa2620b80f17d6f0bcedb89880fa932bb4 Mon Sep 17 00:00:00 2001 From: "Marc Vieira-Cardinal (VA2MVC)" Date: Sun, 28 Jan 2018 18:46:33 -0500 Subject: [PATCH 101/223] list.copy() is not compatible with 
python 2.7 I doubt that a .copy() is required there since list() would already create one but since the previous committer added it I am improving by replacing the .copy() with a [:] which makes a copy in python 2.7 and 3+ --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index 1bb8107d14..f9c791ce13 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -240,7 +240,7 @@ class CloudClient(object): if a.get('provider', '')] if providers: _providers = opts.get('providers', {}) - for provider in list(_providers).copy(): + for provider in list(_providers)[:]: if provider not in providers: _providers.pop(provider) return opts From 3a2f1040f431df6b8ae8fe7a60161fc9fe8ae54a Mon Sep 17 00:00:00 2001 From: "Marc Vieira-Cardinal (VA2MVC)" Date: Mon, 5 Feb 2018 18:43:41 -0500 Subject: [PATCH 102/223] Removed the chained copy --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index f9c791ce13..cda7967e2c 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -240,7 +240,7 @@ class CloudClient(object): if a.get('provider', '')] if providers: _providers = opts.get('providers', {}) - for provider in list(_providers)[:]: + for provider in _providers.copy(): if provider not in providers: _providers.pop(provider) return opts From cda805fc3b51c3dd0eb89568eeea95ca340f90a7 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Thu, 8 Feb 2018 11:34:10 -0800 Subject: [PATCH 103/223] Fixing vault when used with pillar over salt-ssh --- salt/utils/vault.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/utils/vault.py b/salt/utils/vault.py index 37af694040..2bdd3c8a36 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -96,7 +96,7 @@ def _get_vault_connection(): Get the connection details for calling Vault, from local configuration if it exists, or from the master otherwise ''' - if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + def _use_local_config(): log.debug('Using Vault connection details from local config') try: if __opts__['vault']['auth']['method'] == 'approle': @@ -121,6 +121,11 @@ def _get_vault_connection(): except KeyError as err: errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message) raise salt.exceptions.CommandExecutionError(errmsg) + + if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + return _use_local_config() + elif '_ssh_version' in __opts__: + return _use_local_config() else: log.debug('Contacting master for Vault connection details') return _get_token_and_url_from_master() From 85363189d116efc7c707c6a0095113d359e4a2c0 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Thu, 8 Feb 2018 11:34:10 -0800 Subject: [PATCH 104/223] Fixing vault when used with pillar over salt-ssh --- salt/utils/vault.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/utils/vault.py b/salt/utils/vault.py index 0a96fbc1a1..98f71a107e 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -98,7 +98,7 @@ def _get_vault_connection(): Get the connection details for calling Vault, from local configuration if it exists, or from the master otherwise ''' - if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + def _use_local_config(): log.debug('Using Vault connection details from local config') try: return { @@ -108,6 +108,11 @@ def _get_vault_connection(): except KeyError as err: errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message) raise salt.exceptions.CommandExecutionError(errmsg) + + if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': + return _use_local_config() + elif '_ssh_version' in __opts__: + return _use_local_config() else: log.debug('Contacting master for Vault connection details') return _get_token_and_url_from_master() From b9a2bc7b294f5bb7cf41232d7cfe883cebcd6f71 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 9 Feb 2018 09:53:14 -0700 Subject: [PATCH 105/223] Fix hyperlinks --- salt/modules/win_path.py | 2 +- salt/utils/win_functions.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index 281e0211f2..833d605c01 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -54,7 +54,7 @@ def rehash(): apply changes to the path to services, the host must be restarted. The ``salt-minion``, if running as a service, will not see changes to the environment until the system is restarted. 
See - `MSDN Documentation ` + `MSDN Documentation `_ CLI Example: diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 662da797a9..db883566b8 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -232,11 +232,11 @@ def broadcast_setting_change(message='Environment'): - the name of a section in the ``Win.ini`` file See lParam within msdn docs for - `WM_SETTINGCHANGE ` + `WM_SETTINGCHANGE `_ for more information on Broadcasting Messages. See GWL_WNDPROC within msdn docs for - `SetWindowLong ` + `SetWindowLong `_ for information on how to retrieve those messages. .. note:: @@ -246,7 +246,7 @@ def broadcast_setting_change(message='Environment'): changes to the environment until the system is restarted. Services inherit their environment from ``services.exe`` which does not respond to messaging events. See - `MSDN Documentation ` + `MSDN Documentation `_ for more information. CLI Example: @@ -257,7 +257,7 @@ def broadcast_setting_change(message='Environment'): salt.utils.win_functions.broadcast_setting_change('Environment') ''' # Listen for messages sent by this would involve working with the - # SetWindowLong function. This can be accessed via the win32gui or through + # SetWindowLong function. This can be accessed via win32gui or through # ctypes. You can find examples on how to do this by searching for # `Accessing WGL_WNDPROC` on the internet. Here are some examples of how # this might work: From 29d0271b737f2e09bdd5aa7758e129225d5f45b2 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 9 Feb 2018 11:14:03 -0600 Subject: [PATCH 106/223] Don't try to sort ports when translating docker input This sorting was done mainly for the benefit of the test suite, but Python 3 will raise an error when you try to sort a mixture of int and tuple types, so sorting breaks down when there are UDP ports. 
Instead, this just leaves them as an unsorted list when passed to the API, and the test suite does the sorting before the assertEqual. --- salt/utils/docker/translate/container.py | 11 +--- tests/unit/utils/test_docker.py | 71 ++++++++++++++++++------ 2 files changed, 56 insertions(+), 26 deletions(-) diff --git a/salt/utils/docker/translate/container.py b/salt/utils/docker/translate/container.py index 544e05c7a0..265cde74ba 100644 --- a/salt/utils/docker/translate/container.py +++ b/salt/utils/docker/translate/container.py @@ -121,13 +121,8 @@ def _post_processing(kwargs, skip_translate, invalid): ports_to_bind = list(kwargs['port_bindings']) if ports_to_bind: ports_to_open = set(kwargs.get('ports', [])) - for port_def in ports_to_bind: - if isinstance(port_def, six.integer_types): - ports_to_open.add(port_def) - else: - port_num, proto = port_def.split('/') - ports_to_open.add((int(port_num), proto)) - kwargs['ports'] = sorted(ports_to_open) + ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind]) + kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): @@ -556,7 +551,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument raise SaltInvocationError(exc.__str__()) new_ports.update([helpers.get_port_def(x, proto) for x in range(range_start, range_end + 1)]) - return sorted(new_ports) + return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index eae512485e..80743e2f22 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -798,6 +798,29 @@ class TranslateContainerInputTestCase(TranslateBase): ''' translator = salt.utils.docker.translate.container + @staticmethod + def normalize_ports(ret): + ''' + When we translate exposed ports, we can end up with a mixture of ints + (representing TCP ports) and tuples (representing UDP ports). 
Python 2 + will sort an iterable containing these mixed types, but Python 3 will + not. This helper is used to munge the ports in the return data so that + the resulting list is sorted in a way that can reliably be compared to + the expected results in the test. + + This helper should only be needed for port_bindings and ports. + ''' + if 'ports' in ret: + tcp_ports = [] + udp_ports = [] + for item in ret['ports']: + if isinstance(item, six.integer_types): + tcp_ports.append(item) + else: + udp_ports.append(item) + ret['ports'] = sorted(tcp_ports) + sorted(udp_ports) + return ret + @assert_bool(salt.utils.docker.translate.container) def test_auto_remove(self): ''' @@ -1288,9 +1311,11 @@ class TranslateContainerInputTestCase(TranslateBase): ) for val in (bindings, bindings.split(',')): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - port_bindings=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + port_bindings=val, + ) ), {'port_bindings': {80: [('10.1.2.3', 8080), ('10.1.2.3', 8888)], @@ -1316,9 +1341,11 @@ class TranslateContainerInputTestCase(TranslateBase): ) for val in (bindings, bindings.split(',')): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - port_bindings=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + port_bindings=val, + ) ), {'port_bindings': {80: [('10.1.2.3',), ('10.1.2.3',)], 3333: ('10.4.5.6',), @@ -1341,9 +1368,11 @@ class TranslateContainerInputTestCase(TranslateBase): ) for val in (bindings, bindings.split(',')): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - port_bindings=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + port_bindings=val, + ) ), {'port_bindings': {80: [8080, 8888], 3333: 3333, @@ -1363,9 +1392,11 @@ class TranslateContainerInputTestCase(TranslateBase): bindings = '80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp' for val in 
(bindings, bindings.split(',')): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - port_bindings=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + port_bindings=val, + ) ), {'port_bindings': {80: None, 3333: None, @@ -1388,9 +1419,11 @@ class TranslateContainerInputTestCase(TranslateBase): ) for val in (bindings, bindings.split(',')): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - port_bindings=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + port_bindings=val, + ) ), {'port_bindings': {80: ('10.1.2.3', 8080), 3333: ('10.4.5.6',), @@ -1510,9 +1543,11 @@ class TranslateContainerInputTestCase(TranslateBase): [1111, '2222/tcp', '3333/udp', '4505-4506'], ['1111', '2222/tcp', '3333/udp', '4505-4506']): self.assertEqual( - salt.utils.docker.translate_input( - self.translator, - ports=val, + self.normalize_ports( + salt.utils.docker.translate_input( + self.translator, + ports=val, + ) ), {'ports': [1111, 2222, 4505, 4506, (3333, 'udp')]} ) From 89cbd72a0ddfe600b08842e3f4a3b960c6d7badc Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 9 Feb 2018 11:33:10 -0600 Subject: [PATCH 107/223] Don't try to sort ports when translating docker input This sorting was done mainly for the benefit of the test suite, but Python 3 will raise an error when you try to sort a mixture of int and tuple types, so sorting breaks down when there are UDP ports. Instead, this just leaves them as an unsorted list when passed to the API, and the test suite does the sorting before the assertEqual. 
--- salt/utils/docker/__init__.py | 2 +- salt/utils/docker/translate.py | 2 +- tests/unit/utils/test_docker.py | 163 +++++++++++++++++++++----------- 3 files changed, 108 insertions(+), 59 deletions(-) diff --git a/salt/utils/docker/__init__.py b/salt/utils/docker/__init__.py index e9ce305c68..8ea7002d73 100644 --- a/salt/utils/docker/__init__.py +++ b/salt/utils/docker/__init__.py @@ -300,7 +300,7 @@ def translate_input(**kwargs): else: port_num, proto = port_def.split('/') ports_to_open.add((int(port_num), proto)) - kwargs['ports'] = sorted(ports_to_open) + kwargs['ports'] = list(ports_to_open) if 'ports' in kwargs \ and all(x not in skip_translate for x in ('expose', 'ports')): diff --git a/salt/utils/docker/translate.py b/salt/utils/docker/translate.py index 7268d6c06b..2c3504e9ef 100644 --- a/salt/utils/docker/translate.py +++ b/salt/utils/docker/translate.py @@ -705,7 +705,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument raise SaltInvocationError(exc.__str__()) new_ports.update([_get_port_def(x, proto) for x in range(range_start, range_end + 1)]) - return sorted(new_ports) + return list(new_ports) def privileged(val, **kwargs): # pylint: disable=unused-argument diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index ea6760d5ea..9ba14e2cec 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -520,6 +520,29 @@ class TranslateInputTestCase(TestCase): ''' maxDiff = None + @staticmethod + def normalize_ports(ret): + ''' + When we translate exposed ports, we can end up with a mixture of ints + (representing TCP ports) and tuples (representing UDP ports). Python 2 + will sort an iterable containing these mixed types, but Python 3 will + not. This helper is used to munge the ports in the return data so that + the resulting list is sorted in a way that can reliably be compared to + the expected results in the test. + + This helper should only be needed for port_bindings and ports. 
+ ''' + if 'ports' in ret[0]: + tcp_ports = [] + udp_ports = [] + for item in ret[0]['ports']: + if isinstance(item, six.integer_types): + tcp_ports.append(item) + else: + udp_ports.append(item) + ret[0]['ports'] = sorted(tcp_ports) + sorted(udp_ports) + return ret + def tearDown(self): ''' Test skip_translate kwarg @@ -1114,24 +1137,26 @@ class TranslateInputTestCase(TestCase): }, {}, [] ) - translated_input = docker_utils.translate_input( + translated_input = self.normalize_ports(docker_utils.translate_input( port_bindings='10.1.2.3:8080:80,10.1.2.3:8888:80,10.4.5.6:3333:3333,' '10.7.8.9:14505-14506:4505-4506,10.1.2.3:8080:81/udp,' '10.1.2.3:8888:81/udp,10.4.5.6:3334:3334/udp,' '10.7.8.9:15505-15506:5505-5506/udp', - ) + )) self.assertEqual(translated_input, expected) self.assertEqual( - docker_utils.translate_input( - port_bindings=[ - '10.1.2.3:8080:80', - '10.1.2.3:8888:80', - '10.4.5.6:3333:3333', - '10.7.8.9:14505-14506:4505-4506', - '10.1.2.3:8080:81/udp', - '10.1.2.3:8888:81/udp', - '10.4.5.6:3334:3334/udp', - '10.7.8.9:15505-15506:5505-5506/udp'] + self.normalize_ports( + docker_utils.translate_input( + port_bindings=[ + '10.1.2.3:8080:80', + '10.1.2.3:8888:80', + '10.4.5.6:3333:3333', + '10.7.8.9:14505-14506:4505-4506', + '10.1.2.3:8080:81/udp', + '10.1.2.3:8888:81/udp', + '10.4.5.6:3334:3334/udp', + '10.7.8.9:15505-15506:5505-5506/udp'] + ) ), expected ) @@ -1155,22 +1180,26 @@ class TranslateInputTestCase(TestCase): {}, [] ) self.assertEqual( - docker_utils.translate_input( - port_bindings='10.1.2.3::80,10.1.2.3::80,10.4.5.6::3333,10.7.8.9::4505-4506,10.1.2.3::81/udp,10.1.2.3::81/udp,10.4.5.6::3334/udp,10.7.8.9::5505-5506/udp', + self.normalize_ports( + docker_utils.translate_input( + port_bindings='10.1.2.3::80,10.1.2.3::80,10.4.5.6::3333,10.7.8.9::4505-4506,10.1.2.3::81/udp,10.1.2.3::81/udp,10.4.5.6::3334/udp,10.7.8.9::5505-5506/udp', + ) ), expected ) self.assertEqual( - docker_utils.translate_input( - port_bindings=[ - '10.1.2.3::80', - 
'10.1.2.3::80', - '10.4.5.6::3333', - '10.7.8.9::4505-4506', - '10.1.2.3::81/udp', - '10.1.2.3::81/udp', - '10.4.5.6::3334/udp', - '10.7.8.9::5505-5506/udp'] + self.normalize_ports( + docker_utils.translate_input( + port_bindings=[ + '10.1.2.3::80', + '10.1.2.3::80', + '10.4.5.6::3333', + '10.7.8.9::4505-4506', + '10.1.2.3::81/udp', + '10.1.2.3::81/udp', + '10.4.5.6::3334/udp', + '10.7.8.9::5505-5506/udp'] + ) ), expected ) @@ -1193,21 +1222,25 @@ class TranslateInputTestCase(TestCase): {}, [] ) self.assertEqual( - docker_utils.translate_input( - port_bindings='8080:80,8888:80,3333:3333,14505-14506:4505-4506,8080:81/udp,8888:81/udp,3334:3334/udp,15505-15506:5505-5506/udp', + self.normalize_ports( + docker_utils.translate_input( + port_bindings='8080:80,8888:80,3333:3333,14505-14506:4505-4506,8080:81/udp,8888:81/udp,3334:3334/udp,15505-15506:5505-5506/udp', + ) ), expected ) self.assertEqual( - docker_utils.translate_input( - port_bindings=['8080:80', - '8888:80', - '3333:3333', - '14505-14506:4505-4506', - '8080:81/udp', - '8888:81/udp', - '3334:3334/udp', - '15505-15506:5505-5506/udp'] + self.normalize_ports( + docker_utils.translate_input( + port_bindings=['8080:80', + '8888:80', + '3333:3333', + '14505-14506:4505-4506', + '8080:81/udp', + '8888:81/udp', + '3334:3334/udp', + '15505-15506:5505-5506/udp'] + ) ), expected ) @@ -1230,15 +1263,19 @@ class TranslateInputTestCase(TestCase): {}, [] ) self.assertEqual( - docker_utils.translate_input( - port_bindings='80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp', + self.normalize_ports( + docker_utils.translate_input( + port_bindings='80,3333,4505-4506,81/udp,3334/udp,5505-5506/udp', + ) ), expected ) self.assertEqual( - docker_utils.translate_input( - port_bindings=['80', '3333', '4505-4506', - '81/udp', '3334/udp', '5505-5506/udp'] + self.normalize_ports( + docker_utils.translate_input( + port_bindings=['80', '3333', '4505-4506', + '81/udp', '3334/udp', '5505-5506/udp'] + ) ), expected ) @@ -1268,22 +1305,26 @@ class 
TranslateInputTestCase(TestCase): {}, [] ) self.assertEqual( - docker_utils.translate_input( - port_bindings='10.1.2.3:8080:80,10.4.5.6::3333,14505-14506:4505-4506,9999-10001,10.1.2.3:8080:81/udp,10.4.5.6::3334/udp,15505-15506:5505-5506/udp,19999-20001/udp', + self.normalize_ports( + docker_utils.translate_input( + port_bindings='10.1.2.3:8080:80,10.4.5.6::3333,14505-14506:4505-4506,9999-10001,10.1.2.3:8080:81/udp,10.4.5.6::3334/udp,15505-15506:5505-5506/udp,19999-20001/udp', + ) ), expected ) self.assertEqual( - docker_utils.translate_input( - port_bindings=[ - '10.1.2.3:8080:80', - '10.4.5.6::3333', - '14505-14506:4505-4506', - '9999-10001', - '10.1.2.3:8080:81/udp', - '10.4.5.6::3334/udp', - '15505-15506:5505-5506/udp', - '19999-20001/udp'] + self.normalize_ports( + docker_utils.translate_input( + port_bindings=[ + '10.1.2.3:8080:80', + '10.4.5.6::3333', + '14505-14506:4505-4506', + '9999-10001', + '10.1.2.3:8080:81/udp', + '10.4.5.6::3334/udp', + '15505-15506:5505-5506/udp', + '19999-20001/udp'] + ) ), expected ) @@ -1453,21 +1494,29 @@ class TranslateInputTestCase(TestCase): expected = ({'ports': [1111, 2222, 4505, 4506, (3333, 'udp')]}, {}, []) # Comma-separated list self.assertEqual( - docker_utils.translate_input(ports='1111,2222/tcp,3333/udp,4505-4506'), + self.normalize_ports( + docker_utils.translate_input( + ports='1111,2222/tcp,3333/udp,4505-4506' + ) + ), expected ) # Python list self.assertEqual( - docker_utils.translate_input( - ports=[1111, '2222/tcp', '3333/udp', '4505-4506'] + self.normalize_ports( + docker_utils.translate_input( + ports=[1111, '2222/tcp', '3333/udp', '4505-4506'] + ) ), expected ) # Same as above but with the first port as a string (it should be # converted to an integer). 
self.assertEqual( - docker_utils.translate_input( - ports=['1111', '2222/tcp', '3333/udp', '4505-4506'] + self.normalize_ports( + docker_utils.translate_input( + ports=['1111', '2222/tcp', '3333/udp', '4505-4506'] + ) ), expected ) From 5adcb6e3b260aff832f74a8023937e711275aa00 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 9 Feb 2018 15:17:28 -0500 Subject: [PATCH 108/223] Add missing six import in boto_cfn module Since pylint was disabled for this file, the lint check must have missed that the six import was missing. I also updated the CLI Example formatting to match our doc boxes. --- salt/modules/boto_cfn.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/salt/modules/boto_cfn.py b/salt/modules/boto_cfn.py index 7f7e94d3bd..bb7bfcc33b 100644 --- a/salt/modules/boto_cfn.py +++ b/salt/modules/boto_cfn.py @@ -32,12 +32,12 @@ Connection module for Amazon Cloud Formation # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 -from __future__ import absolute_import, print_function, unicode_literals - # Import Python libs +from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs +from salt.ext import six import salt.utils.versions log = logging.getLogger(__name__) @@ -72,7 +72,9 @@ def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a stack exists. - CLI example:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.exists mystack region=us-east-1 ''' @@ -94,7 +96,9 @@ def describe(name, region=None, key=None, keyid=None, profile=None): .. versionadded:: 2015.8.0 - CLI example:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.describe mystack region=us-east-1 ''' @@ -135,7 +139,9 @@ def create(name, template_body=None, template_url=None, parameters=None, notific ''' Create a CFN stack. - CLI example to create a stack:: + CLI Example: + + .. 
code-block:: bash salt myminion boto_cfn.create mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \ region=us-east-1 @@ -161,7 +167,9 @@ def update_stack(name, template_body=None, template_url=None, parameters=None, n .. versionadded:: 2015.8.0 - CLI example to update a stack:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.update_stack mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \ region=us-east-1 @@ -186,7 +194,9 @@ def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete a CFN stack. - CLI example to delete a stack:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.delete mystack region=us-east-1 ''' @@ -205,7 +215,9 @@ def get_template(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if attributes are set on a CFN stack. - CLI example:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.get_template mystack ''' @@ -228,7 +240,9 @@ def validate_template(template_body=None, template_url=None, region=None, key=No .. versionadded:: 2015.8.0 - CLI example:: + CLI Example: + + .. code-block:: bash salt myminion boto_cfn.validate_template mystack-template ''' From 25dffaae91ea92fe86844cd910dbe081a49fb7f9 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Fri, 9 Feb 2018 12:34:47 -0800 Subject: [PATCH 109/223] Backporting #45935 --- salt/log/handlers/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index ae02e364fa..6657811775 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -17,6 +17,7 @@ import logging.handlers # Import salt libs from salt.log.mixins import NewStyleClassMixIn, ExcInfoOnLogLevelFormatMixIn +from salt.ext.six.moves import queue log = logging.getLogger(__name__) @@ -176,7 +177,7 @@ if sys.version_info < (3, 2): ''' try: self.queue.put_nowait(record) - except self.queue.Full: + except queue.Full: sys.stderr.write('[WARNING ] Message queue is full, ' 'unable to write "{0}" to log', record ) From 6f781cb95d5e797c034eaec6079333730ab2f9e4 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 9 Feb 2018 15:46:00 -0500 Subject: [PATCH 110/223] A couple of grammar updates for the state compiler docs --- salt/state.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/state.py b/salt/state.py index 9ba668fd3b..a454721487 100644 --- a/salt/state.py +++ b/salt/state.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- ''' -The module used to execute states in salt. A state is unlike a module -execution in that instead of just executing a command it ensure that a -certain state is present on the system. +The State Compiler is used to execute states in Salt. A state is unlike +an execution module in that instead of just executing a command, it +ensures that a certain state is present on the system. 
The data sent to the state calls is as follows: { 'state': '', From f89a20bf3e1e3635f061a0816ac1681774d8517a Mon Sep 17 00:00:00 2001 From: Michael Calmer Date: Sun, 11 Feb 2018 19:15:27 +0100 Subject: [PATCH 111/223] move log_file option to changeable defaults --- salt/client/ssh/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index 310c5550fc..57286b5fb7 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -880,6 +880,7 @@ class Single(object): # Pre apply changeable defaults self.minion_opts = { 'grains_cache': True, + 'log_file': 'salt-call.log', } self.minion_opts.update(opts.get('ssh_minion_opts', {})) if minion_opts is not None: @@ -889,7 +890,6 @@ class Single(object): 'root_dir': os.path.join(self.thin_dir, 'running_data'), 'id': self.id, 'sock_dir': '/', - 'log_file': 'salt-call.log', 'fileserver_list_cache_time': 3, }) self.minion_config = salt.serializers.yaml.serialize(self.minion_opts) From bcde1db2fb8be995798a4e488b1a131715bd7f37 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:00:56 +0100 Subject: [PATCH 112/223] Add logging --- salt/utils/parsers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index b19cf068ce..fb2b92d096 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -47,6 +47,8 @@ import salt.exceptions from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin +log = logging.getLogger(__name__) + def _sorted(mixins_or_funcs): return sorted( From 4d056a0452184bc3332ae303c5370a134ff69ff1 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:02:25 +0100 Subject: [PATCH 113/223] Suppress logging PID file removal if running on non-root. 
--- salt/utils/parsers.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index fb2b92d096..4ef5abc7da 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -975,11 +975,11 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): try: os.unlink(self.config['pidfile']) except OSError as err: - logging.getLogger(__name__).info( - 'PIDfile could not be deleted: {0}'.format( - self.config['pidfile'] - ) - ) + # Log error only when running salt-master as a root user. + # Otherwise this can be ignored, since salt-master is able to + # overwrite the PIDfile on the next start. + if not os.getuid(): + log.info('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile'])) def set_pidfile(self): from salt.utils.process import set_pidfile From 51ba019a1a904a3c4580a7ad5a02f14dafe52258 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:03:23 +0100 Subject: [PATCH 114/223] Lintfix: remove extra-parenthesis. Remove unnecessary back-slashes --- salt/utils/parsers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 4ef5abc7da..9f2c5a66fb 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -964,7 +964,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): default=os.path.join( syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name()) ), - help=('Specify the location of the pidfile. Default: \'%default\'.') + help="Specify the location of the pidfile. Default: '%default'." 
) def _mixin_before_exit(self): From 20c952a8cb76ade434dd74ac091b189f42b65aff Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:17:37 +0100 Subject: [PATCH 115/223] Rename logger variable --- salt/utils/parsers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 9f2c5a66fb..3d26e7d9ab 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -47,7 +47,7 @@ import salt.exceptions from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) def _sorted(mixins_or_funcs): @@ -979,7 +979,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): # Otherwise this can be ignored, since salt-master is able to # overwrite the PIDfile on the next start. if not os.getuid(): - log.info('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile'])) + logger.info('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile'])) def set_pidfile(self): from salt.utils.process import set_pidfile From 0c40c4a4b681f41feda74a870f7ba97b71bfcdc4 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:22:54 +0100 Subject: [PATCH 116/223] Remove extra-parenthesis and extra-backslashes --- salt/utils/parsers.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 3d26e7d9ab..83a391d205 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -490,8 +490,7 @@ class HardCrashMixin(six.with_metaclass(MixInMeta, object)): hard_crash = os.environ.get('SALT_HARD_CRASH', False) self.add_option( '--hard-crash', action='store_true', default=hard_crash, - help=('Raise any original exception rather than exiting gracefully. ' - 'Default: %default.') + help='Raise any original exception rather than exiting gracefully. Default: %default.' 
) @@ -502,9 +501,8 @@ class NoParseMixin(six.with_metaclass(MixInMeta, object)): no_parse = os.environ.get('SALT_NO_PARSE', '') self.add_option( '--no-parse', default=no_parse, - help=('Comma-separated list of named CLI arguments (i.e. ' - 'argname=value) which should not be parsed as Python ' - 'data types'), + help='Comma-separated list of named CLI arguments (i.e. argname=value) ' + 'which should not be parsed as Python data types', metavar='argname1,argname2,...', ) @@ -532,8 +530,7 @@ class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)): logging.getLogger(__name__).debug('SYSPATHS setup as: %s', syspaths.CONFIG_DIR) self.add_option( '-c', '--config-dir', default=config_dir, - help=('Pass in an alternative configuration directory. Default: ' - '\'%default\'.') + help="Pass in an alternative configuration directory. Default: '%default'." ) def process_config_dir(self): @@ -541,7 +538,7 @@ class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)): if not os.path.isdir(self.options.config_dir): # No logging is configured yet sys.stderr.write( - 'WARNING: CONFIG \'{0}\' directory does not exist.\n'.format( + "WARNING: CONFIG '{0}' directory does not exist.\n".format( self.options.config_dir ) ) From 628df440ede01082ed27af420e9e47e20b6d2591 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:23:15 +0100 Subject: [PATCH 117/223] Remove extra-backslashes --- salt/utils/parsers.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 83a391d205..a8bde0b9b8 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -66,8 +66,8 @@ class MixInMeta(type): instance = super(MixInMeta, mcs).__new__(mcs, name, bases, attrs) if not hasattr(instance, '_mixin_setup'): raise RuntimeError( - 'Don\'t subclass {0} in {1} if you\'re not going to use it ' - 'as a salt parser mix-in.'.format(mcs.__name__, name) + "Don't subclass {0} in {1} if you're not going " + "to use it as a salt 
parser mix-in.".format(mcs.__name__, name) ) return instance @@ -401,9 +401,7 @@ class SaltfileMixIn(six.with_metaclass(MixInMeta, object)): return if not os.path.isfile(self.options.saltfile): - self.error( - '\'{0}\' file does not exist.\n'.format(self.options.saltfile) - ) + self.error("'{0}' file does not exist.\n".format(self.options.saltfile)) # Make sure we have an absolute path self.options.saltfile = os.path.abspath(self.options.saltfile) From 62b5a0c3ea1da6008c3123d4a234cfc1ac8dbe55 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:42:45 +0100 Subject: [PATCH 118/223] Use logger variable --- salt/utils/parsers.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index a8bde0b9b8..719313a71b 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -208,7 +208,7 @@ class OptionParser(optparse.OptionParser, object): try: process_option_func() except Exception as err: # pylint: disable=broad-except - logging.getLogger(__name__).exception(err) + logger.exception(err) self.error( 'Error while processing {0}: {1}'.format( process_option_func, traceback.format_exc(err) @@ -220,7 +220,7 @@ class OptionParser(optparse.OptionParser, object): try: mixin_after_parsed_func(self) except Exception as err: # pylint: disable=broad-except - logging.getLogger(__name__).exception(err) + logger.exception(err) self.error( 'Error while processing {0}: {1}'.format( mixin_after_parsed_func, traceback.format_exc(err) @@ -228,7 +228,7 @@ class OptionParser(optparse.OptionParser, object): ) if self.config.get('conf_file', None) is not None: # pylint: disable=no-member - logging.getLogger(__name__).debug( + logger.debug( 'Configuration file path: %s', self.config['conf_file'] # pylint: disable=no-member ) @@ -407,9 +407,7 @@ class SaltfileMixIn(six.with_metaclass(MixInMeta, object)): self.options.saltfile = os.path.abspath(self.options.saltfile) # Make sure we let the user know 
that we will be loading a Saltfile - logging.getLogger(__name__).info( - 'Loading Saltfile from \'%s\'', self.options.saltfile - ) + logger.info("Loading Saltfile from '%s'", six.text_type(self.options.saltfile)) try: saltfile_config = config._read_conf_file(saltfile) @@ -525,7 +523,7 @@ class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)): config_dir = os.environ.get(self._default_config_dir_env_var_, None) if not config_dir: config_dir = self._default_config_dir_ - logging.getLogger(__name__).debug('SYSPATHS setup as: %s', syspaths.CONFIG_DIR) + logger.debug('SYSPATHS setup as: %s', six.text_type(syspaths.CONFIG_DIR)) self.add_option( '-c', '--config-dir', default=config_dir, help="Pass in an alternative configuration directory. Default: '%default'." @@ -811,10 +809,10 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): # Not supported on platforms other than Windows. # Other platforms may use an external tool such as 'logrotate' if log_rotate_max_bytes != 0: - logging.getLogger(__name__).warning('\'log_rotate_max_bytes\' is only supported on Windows') + logger.warning("'log_rotate_max_bytes' is only supported on Windows") log_rotate_max_bytes = 0 if log_rotate_backup_count != 0: - logging.getLogger(__name__).warning('\'log_rotate_backup_count\' is only supported on Windows') + logger.warning("'log_rotate_backup_count' is only supported on Windows") log_rotate_backup_count = 0 # Save the settings back to the configuration From e83490032d8b6504cfc40ff9c779bafc087ef24b Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:43:13 +0100 Subject: [PATCH 119/223] Fix unicode support for the logging --- salt/utils/parsers.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 719313a71b..f6ae1b4e29 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -261,12 +261,10 @@ class OptionParser(optparse.OptionParser, object): try: mixin_before_exit_func(self) 
except Exception as err: # pylint: disable=broad-except - logger = logging.getLogger(__name__) logger.exception(err) - logger.error( - 'Error while processing %s: %s', - mixin_before_exit_func, traceback.format_exc(err) - ) + logger.error('Error while processing %s: %s', + six.text_type(mixin_before_exit_func), + traceback.format_exc(err)) if self._setup_mp_logging_listener_ is True: # Stop logging through the queue log.shutdown_multiprocessing_logging() From 5b9c49d6bb1ca6d4d8e495d561ed33805da11a21 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:43:34 +0100 Subject: [PATCH 120/223] Use logger variable and fix unicode support --- salt/utils/parsers.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index f6ae1b4e29..fd11a346b1 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -787,12 +787,11 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): logfile_basename = os.path.basename( self._default_logging_logfile_ ) - logging.getLogger(__name__).debug( - 'The user \'%s\' is not allowed to write to \'%s\'. ' - 'The log file will be stored in ' - '\'~/.salt/\'%s\'.log\'', - current_user, logfile, logfile_basename - ) + logger.debug("The user '%s' is not allowed to write to '%s'. 
" + "The log file will be stored in '~/.salt/'%s'.log'", + six.text_type(current_user), + six.text_type(logfile), + six.text_type(logfile_basename)) logfile = os.path.join( user_salt_dir, '{0}.log'.format(logfile_basename) ) From 2a92f4bc16fd1f081d2763e5508d67964561760b Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 12 Feb 2018 07:40:02 -0700 Subject: [PATCH 121/223] use local config for vault when masterless --- salt/utils/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/vault.py b/salt/utils/vault.py index 98f71a107e..ed2cfa9a3f 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -111,7 +111,7 @@ def _get_vault_connection(): if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': return _use_local_config() - elif '_ssh_version' in __opts__: + elif any((__opts__['local'], __opts__['file_client'] == 'local', __opts__['master_type'] == 'disable')): return _use_local_config() else: log.debug('Contacting master for Vault connection details') From 1be8f8d82011f211d62c6819dd6d55f883fbbc79 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:44:55 +0100 Subject: [PATCH 122/223] Bugfix: do not crash on import error, but log that incident instead --- salt/utils/parsers.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index fd11a346b1..e1b8f3b05a 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2750,12 +2750,13 @@ class SaltCallOptionParser(six.with_metaclass(OptionParserMeta, raise ValueError(emsg) if kind == kinds.APPL_KIND_NAMES[kinds.applKinds.minion]: # minion check - from raet.lane.yarding import Yard # pylint: disable=3rd-party-module-not-gated - ha, dirpath = Yard.computeHa(dirpath, lanename, yardname) # pylint: disable=invalid-name - if (os.path.exists(ha) and - not os.path.isfile(ha) and - not os.path.isdir(ha)): # minion manor yard - return True + try: + from raet.lane.yarding 
import Yard # pylint: disable=3rd-party-module-not-gated + ha, dirpath = Yard.computeHa(dirpath, lanename, yardname) # pylint: disable=invalid-name + if os.path.exists(ha) and not os.path.isfile(ha) and not os.path.isdir(ha): # minion manor yard + return True + except ImportError as ex: + logger.error("Error while importing Yard: %s", six.text_type(ex)) return False def process_module_dirs(self): From f7887e5f7cce9fe27e6682093a81d47e0a90ec4e Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:45:15 +0100 Subject: [PATCH 123/223] PEP8: remove extra-parenthesis --- salt/utils/parsers.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index e1b8f3b05a..0c13ca82b9 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2815,13 +2815,13 @@ class SaltRunOptionParser(six.with_metaclass(OptionParserMeta, '--async', default=False, action='store_true', - help=('Start the runner operation and immediately return control.') + help='Start the runner operation and immediately return control.' ) self.add_option( '--skip-grains', default=False, action='store_true', - help=('Do not load grains.') + help='Do not load grains.' ) group = self.output_options_group = optparse.OptionGroup( self, 'Output Options', 'Configure your preferred output format.' @@ -2939,14 +2939,13 @@ class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta, '-v', '--verbose', default=False, action='store_true', - help=('Turn on command verbosity, display jid.') + help='Turn on command verbosity, display jid.' ) self.add_option( '-s', '--static', default=False, action='store_true', - help=('Return the data from minions as a group after they ' - 'all return.') + help='Return the data from minions as a group after they all return.' 
) self.add_option( '-w', '--wipe', From 879c40b1c3f0398dc2436d60a32d9ac862ed20e5 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 12:46:00 +0100 Subject: [PATCH 124/223] Fix logging, remove extra-parenthesis (PEP8) --- salt/utils/parsers.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 0c13ca82b9..797942eb5b 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2731,22 +2731,21 @@ class SaltCallOptionParser(six.with_metaclass(OptionParserMeta, role = opts.get('id') if not role: - emsg = ("Missing role required to setup RAET SaltCaller.") - logging.getLogger(__name__).error(emsg + "\n") + emsg = "Missing role required to setup RAET SaltCaller." + logger.error(emsg) raise ValueError(emsg) kind = opts.get('__role') # application kind 'master', 'minion', etc if kind not in kinds.APPL_KINDS: - emsg = ("Invalid application kind = '{0}' for RAET SaltCaller.".format(kind)) - logging.getLogger(__name__).error(emsg + "\n") + emsg = "Invalid application kind = '{0}' for RAET SaltCaller.".format(six.text_type(kind)) + logger.error(emsg) raise ValueError(emsg) - if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], - kinds.APPL_KIND_NAMES[kinds.applKinds.caller], ]: + if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]: lanename = "{0}_{1}".format(role, kind) else: - emsg = ("Unsupported application kind '{0}' for RAET SaltCaller.".format(kind)) - logging.getLogger(__name__).error(emsg + '\n') + emsg = "Unsupported application kind '{0}' for RAET SaltCaller.".format(six.text_type(kind)) + logger.error(emsg) raise ValueError(emsg) if kind == kinds.APPL_KIND_NAMES[kinds.applKinds.minion]: # minion check From 7a0e3e983cb5b3c3fd6a4c19aad22694a190c560 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:08:50 +0100 Subject: [PATCH 125/223] Use pytest --- tests/unit/utils/test_parsers.py | 6 
++++++ 1 file changed, 6 insertions(+) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 4fc85b7a5b..0a7f8af4b2 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -25,6 +25,11 @@ import salt.syspaths import salt.utils.parsers import salt.utils.platform +try: + import pytest +except ImportError: + pytest = None + class ErrorMock(object): # pylint: disable=too-few-public-methods ''' @@ -995,6 +1000,7 @@ class SaltAPIParserTestCase(LogSettingsParserTests): self.addCleanup(delattr, self, 'parser') +@skipIf(not pytest, False) @skipIf(NO_MOCK, NO_MOCK_REASON) class DaemonMixInTestCase(TestCase): ''' From 3a8e43f8062442f84075cb62fceaadd8d765985c Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:09:05 +0100 Subject: [PATCH 126/223] Add tear-down method --- tests/unit/utils/test_parsers.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 0a7f8af4b2..023386ca32 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1022,6 +1022,15 @@ class DaemonMixInTestCase(TestCase): # logger self.logger = logging.getLogger('salt.utils.parsers') + def tearDown(self): + ''' + Tear down test + :return: + ''' + del self.logger + del self.mixin + del self.pid + def test_pid_file_deletion(self): ''' PIDfile deletion without exception. 
From 2f5b13314929730b8b2bc0d84e84889022ed3739 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:11:22 +0100 Subject: [PATCH 127/223] Fix unit test to attempt remove PID file as root --- tests/unit/utils/test_parsers.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 023386ca32..7da92c3c68 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1042,17 +1042,17 @@ class DaemonMixInTestCase(TestCase): assert mock_logger.call_count == 0 assert os_unlink.call_count == 1 - def test_pid_file_deletion_with_oserror(self): + @patch('os.unlink', MagicMock(side_effect=OSError())) + @patch('os.path.isfile', MagicMock(return_value=True)) + @patch('os.getuid', MagicMock(return_value=0)) + @patch('salt.utils.parsers.logger', MagicMock()) + def test_pid_deleted_oserror_as_root(self): ''' - PIDfile deletion with exception + PIDfile deletion with exception, running as root. 
''' - with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink: - with patch('os.path.isfile', MagicMock(return_value=True)): - with patch.object(self.logger, 'info') as mock_logger: - self.mixin._mixin_before_exit() - assert os_unlink.call_count == 1 - mock_logger.assert_called_with( - 'PIDfile could not be deleted: {0}'.format(self.pid)) + self.mixin._mixin_before_exit() + assert salt.utils.parsers.os.unlink.call_count == 1 + salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', format(self.pid)) # Hide the class from unittest framework when it searches for TestCase classes in the module del LogSettingsParserTests From 56818045b667679884af219e46accab68e276c43 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:13:27 +0100 Subject: [PATCH 128/223] Add unit test for calling PID deletion as non-root user --- tests/unit/utils/test_parsers.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 7da92c3c68..1754962500 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1054,5 +1054,18 @@ class DaemonMixInTestCase(TestCase): assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', format(self.pid)) + @patch('os.unlink', MagicMock(side_effect=OSError())) + @patch('os.path.isfile', MagicMock(return_value=True)) + @patch('os.getuid', MagicMock(return_value=1000)) + @patch('salt.utils.parsers.logger', MagicMock()) + def test_pid_deleted_oserror_as_non_root(self): + ''' + PIDfile deletion with exception, running as non-root. 
+ ''' + self.mixin._mixin_before_exit() + assert salt.utils.parsers.os.unlink.call_count == 1 + salt.utils.parsers.logger.info.assert_not_called + + # Hide the class from unittest framework when it searches for TestCase classes in the module del LogSettingsParserTests From 29084225390b5cbfec47ab7157f9d1b4b9d447dd Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:18:43 +0100 Subject: [PATCH 129/223] Add debugging information to the log --- salt/utils/parsers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 797942eb5b..2e5e671fca 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -970,6 +970,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): # overwrite the PIDfile on the next start. if not os.getuid(): logger.info('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile'])) + logger.debug(six.text_type(err)) def set_pidfile(self): from salt.utils.process import set_pidfile From 06158e424250873ff2128e60f24fefedc347562b Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:19:01 +0100 Subject: [PATCH 130/223] Remove unnecessary string on get->return --- salt/utils/parsers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 2e5e671fca..be82585dca 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -958,7 +958,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): ) def _mixin_before_exit(self): - if hasattr(self, 'config') and self.config.get('pidfile', ''): + if hasattr(self, 'config') and self.config.get('pidfile'): # We've loaded and merged options into the configuration, it's safe # to query about the pidfile if self.check_pidfile(): From d9941ce1f34dfe611710ad7ae37d7758810f6359 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:19:36 +0100 Subject: [PATCH 131/223] Throw away unnecessary variable handling --- 
tests/unit/utils/test_parsers.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 1754962500..c6cee67fa2 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1011,13 +1011,10 @@ class DaemonMixInTestCase(TestCase): ''' Setting up ''' - # Set PID - self.pid = '/some/fake.pid' - # Setup mixin self.mixin = salt.utils.parsers.DaemonMixIn() self.mixin.config = {} - self.mixin.config['pidfile'] = self.pid + self.mixin.config['pidfile'] = '/some/fake.pid' # logger self.logger = logging.getLogger('salt.utils.parsers') @@ -1029,7 +1026,6 @@ class DaemonMixInTestCase(TestCase): ''' del self.logger del self.mixin - del self.pid def test_pid_file_deletion(self): ''' @@ -1052,7 +1048,8 @@ class DaemonMixInTestCase(TestCase): ''' self.mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 - salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', format(self.pid)) + salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', + format(self.mixin.config['pidfile'])) @patch('os.unlink', MagicMock(side_effect=OSError())) @patch('os.path.isfile', MagicMock(return_value=True)) From fe4502b0e46f6ff60da5ba4e688ce66977b79cb0 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:19:52 +0100 Subject: [PATCH 132/223] Add debug logging call to the unit test check --- tests/unit/utils/test_parsers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index c6cee67fa2..f6613c0fc7 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1050,6 +1050,7 @@ class DaemonMixInTestCase(TestCase): assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', format(self.mixin.config['pidfile'])) + 
salt.utils.parsers.logger.debug.assert_called @patch('os.unlink', MagicMock(side_effect=OSError())) @patch('os.path.isfile', MagicMock(return_value=True)) @@ -1062,6 +1063,7 @@ class DaemonMixInTestCase(TestCase): self.mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_not_called + salt.utils.parsers.logger.debug.assert_not_called # Hide the class from unittest framework when it searches for TestCase classes in the module From 9fa6647bd01d2d09764d14898394b31caf70bde1 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:24:25 +0100 Subject: [PATCH 133/223] Refactor a unit test for calling PID file removal without the exception --- tests/unit/utils/test_parsers.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index f6613c0fc7..4f1d644cf6 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1027,16 +1027,17 @@ class DaemonMixInTestCase(TestCase): del self.logger del self.mixin + @patch('os.unlink', MagicMock()) + @patch('os.path.isfile', MagicMock(return_value=True)) + @patch('salt.utils.parsers.logger', MagicMock()) def test_pid_file_deletion(self): ''' PIDfile deletion without exception. 
''' - with patch('os.unlink', MagicMock()) as os_unlink: - with patch('os.path.isfile', MagicMock(return_value=True)): - with patch.object(self.logger, 'info') as mock_logger: - self.mixin._mixin_before_exit() - assert mock_logger.call_count == 0 - assert os_unlink.call_count == 1 + self.mixin._mixin_before_exit() + assert salt.utils.parsers.os.unlink.call_count == 1 + salt.utils.parsers.logger.info.assert_called + salt.utils.parsers.logger.debug.assert_called @patch('os.unlink', MagicMock(side_effect=OSError())) @patch('os.path.isfile', MagicMock(return_value=True)) From 07ef909ea9a5b909abd9ad971a06aedfb9af3b40 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 13:25:51 +0100 Subject: [PATCH 134/223] Remove unused variables, rename mixin variable to more exact name --- tests/unit/utils/test_parsers.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 4f1d644cf6..6b5df85728 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1012,20 +1012,16 @@ class DaemonMixInTestCase(TestCase): Setting up ''' # Setup mixin - self.mixin = salt.utils.parsers.DaemonMixIn() - self.mixin.config = {} - self.mixin.config['pidfile'] = '/some/fake.pid' - - # logger - self.logger = logging.getLogger('salt.utils.parsers') + self.daemon_mixin = salt.utils.parsers.DaemonMixIn() + self.daemon_mixin.config = {} + self.daemon_mixin.config['pidfile'] = '/some/fake.pid' def tearDown(self): ''' Tear down test :return: ''' - del self.logger - del self.mixin + del self.daemon_mixin @patch('os.unlink', MagicMock()) @patch('os.path.isfile', MagicMock(return_value=True)) @@ -1034,7 +1030,7 @@ class DaemonMixInTestCase(TestCase): ''' PIDfile deletion without exception. 
''' - self.mixin._mixin_before_exit() + self.daemon_mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_called salt.utils.parsers.logger.debug.assert_called @@ -1047,10 +1043,10 @@ class DaemonMixInTestCase(TestCase): ''' PIDfile deletion with exception, running as root. ''' - self.mixin._mixin_before_exit() + self.daemon_mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', - format(self.mixin.config['pidfile'])) + format(self.daemon_mixin.config['pidfile'])) salt.utils.parsers.logger.debug.assert_called @patch('os.unlink', MagicMock(side_effect=OSError())) @@ -1061,7 +1057,7 @@ class DaemonMixInTestCase(TestCase): ''' PIDfile deletion with exception, running as non-root. ''' - self.mixin._mixin_before_exit() + self.daemon_mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_not_called salt.utils.parsers.logger.debug.assert_not_called From 8b5af580668ff283c6f30897778ef277ae59c37d Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 14:42:29 +0100 Subject: [PATCH 135/223] Fix lints --- tests/unit/utils/test_parsers.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 6b5df85728..eb88197e84 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -5,7 +5,6 @@ # Import python libs from __future__ import absolute_import, print_function, unicode_literals -import logging import os # Import Salt Testing Libs @@ -1032,8 +1031,8 @@ class DaemonMixInTestCase(TestCase): ''' self.daemon_mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 - salt.utils.parsers.logger.info.assert_called - salt.utils.parsers.logger.debug.assert_called + 
salt.utils.parsers.logger.info.assert_not_called() + salt.utils.parsers.logger.debug.assert_not_called() @patch('os.unlink', MagicMock(side_effect=OSError())) @patch('os.path.isfile', MagicMock(return_value=True)) @@ -1047,7 +1046,7 @@ class DaemonMixInTestCase(TestCase): assert salt.utils.parsers.os.unlink.call_count == 1 salt.utils.parsers.logger.info.assert_called_with('PIDfile could not be deleted: %s', format(self.daemon_mixin.config['pidfile'])) - salt.utils.parsers.logger.debug.assert_called + salt.utils.parsers.logger.debug.assert_called() @patch('os.unlink', MagicMock(side_effect=OSError())) @patch('os.path.isfile', MagicMock(return_value=True)) @@ -1059,8 +1058,8 @@ class DaemonMixInTestCase(TestCase): ''' self.daemon_mixin._mixin_before_exit() assert salt.utils.parsers.os.unlink.call_count == 1 - salt.utils.parsers.logger.info.assert_not_called - salt.utils.parsers.logger.debug.assert_not_called + salt.utils.parsers.logger.info.assert_not_called() + salt.utils.parsers.logger.debug.assert_not_called() # Hide the class from unittest framework when it searches for TestCase classes in the module From 888939116ad34c6ec464b344b21e70bb645c9c75 Mon Sep 17 00:00:00 2001 From: Bo Maryniuk Date: Mon, 12 Feb 2018 18:55:55 +0100 Subject: [PATCH 136/223] Drop forcing to unicode exception object --- salt/utils/parsers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index be82585dca..88ce272fa9 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2756,7 +2756,7 @@ class SaltCallOptionParser(six.with_metaclass(OptionParserMeta, if os.path.exists(ha) and not os.path.isfile(ha) and not os.path.isdir(ha): # minion manor yard return True except ImportError as ex: - logger.error("Error while importing Yard: %s", six.text_type(ex)) + logger.error("Error while importing Yard: %s", ex) return False def process_module_dirs(self): From ae7791d30bdcde97332f6777102a6cfdf9a4d66e Mon Sep 17 00:00:00 2001 
From: "Gareth J. Greenaway" Date: Mon, 12 Feb 2018 10:38:51 -0800 Subject: [PATCH 137/223] Missing `format` in the call to write. --- salt/log/handlers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index 8380cb9052..f1b87b3bf3 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -178,7 +178,7 @@ if sys.version_info < (3, 2): self.queue.put_nowait(record) except self.queue.Full: sys.stderr.write('[WARNING ] Message queue is full, ' - 'unable to write "{0}" to log', record + 'unable to write "{0}" to log'.format(record) ) def prepare(self, record): From 7b8dc14433a0b0582ccd3893cd131550e247f7d3 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 12 Feb 2018 11:07:11 -0800 Subject: [PATCH 138/223] Missing `format` in the call to write. --- salt/log/handlers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index 6657811775..cb498b1bae 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -179,7 +179,7 @@ if sys.version_info < (3, 2): self.queue.put_nowait(record) except queue.Full: sys.stderr.write('[WARNING ] Message queue is full, ' - 'unable to write "{0}" to log', record + 'unable to write "{0}" to log'.format(record) ) def prepare(self, record): From 0d448457dccc67e92627f35dfa7e2de963a4d3d1 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 12 Feb 2018 12:53:59 -0700 Subject: [PATCH 139/223] apparently local is not set by default --- salt/config/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index f25cff293a..236cd3ca81 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -256,6 +256,7 @@ VALID_OPTS = { # Location of the files a minion should look for. Set to 'local' to never ask the master. 
'file_client': str, + 'local': bool, # When using a local file_client, this parameter is used to allow the client to connect to # a master for remote execution. @@ -1149,6 +1150,7 @@ DEFAULT_MINION_OPTS = { 'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR], }, 'file_client': 'remote', + 'local': False, 'use_master_when_local': False, 'file_roots': { 'base': [salt.syspaths.BASE_FILE_ROOTS_DIR, From 2f712691cf79f9ec3fd249b2003d35426a5d7a64 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 17 Jan 2018 13:52:42 -0700 Subject: [PATCH 140/223] Exclude hidden directories in pkg.refresh_db --- salt/modules/win_pkg.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 4ba44d9b07..302dd714c7 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -650,7 +650,8 @@ def refresh_db(**kwargs): __salt__['cp.cache_dir']( repo_details.winrepo_source_dir, saltenv, - include_pat='*.sls' + include_pat='*.sls', + exclude_pat='E@\/\..*\/' # Exclude all hidden directories (.git) ) return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard) From 08b82e08752c4e452a66f96183241475e1b9726a Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 18 Jan 2018 10:52:46 -0700 Subject: [PATCH 141/223] Fix lint error, use raw --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 302dd714c7..3ab30cd353 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -651,7 +651,7 @@ def refresh_db(**kwargs): repo_details.winrepo_source_dir, saltenv, include_pat='*.sls', - exclude_pat='E@\/\..*\/' # Exclude all hidden directories (.git) + exclude_pat=r'E@\/\..*\/' # Exclude all hidden directories (.git) ) return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard) From 4803d92707d4325db8e655b744347d912d033de2 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 22 Jan 2018 15:19:59 -0700 Subject: [PATCH 
142/223] Add some documentation --- salt/modules/win_pkg.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 3ab30cd353..61fd4ef00e 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -581,9 +581,22 @@ def _refresh_db_conditional(saltenv, **kwargs): def refresh_db(**kwargs): ''' - Fetches metadata files and calls :py:func:`pkg.genrepo + Fetches metadata files from the winrepo_dir and calls :py:func:`pkg.genrepo ` to compile updated repository metadata. + The default winrepo directory on the master is `/srv/salt/win/repo-ng`. All + files that end with `.sls` in this and all subdirectories will be used to + generate the repository metadata (`winrepo.p`). + + .. note:: + Hidden directories (directories beginning with `.`, such as `.git`) will + be ignored. + + .. note:: + Directories under `/srv/salt/win/repo-ng` will be processed in + alphabetical order. If two software definition files contain the same + name, the last one processed wins. + Kwargs: saltenv (str): Salt environment. Default: ``base`` From 91c3da8dfd6445a3ade27ab209aa740b749e2d09 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 26 Jan 2018 12:56:08 -0700 Subject: [PATCH 143/223] Improve docs for pkg.refresh_db --- salt/modules/win_pkg.py | 63 ++++++++++++++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 14 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 61fd4ef00e..2dc7c29cce 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -580,41 +580,76 @@ def _refresh_db_conditional(saltenv, **kwargs): def refresh_db(**kwargs): - ''' - Fetches metadata files from the winrepo_dir and calls :py:func:`pkg.genrepo - ` to compile updated repository metadata. + r''' + Generates the local software metadata database (`winrepo.p`) on the minion. 
+ The database is stored in a serialized format located by default at the + following location: + + `C:\salt\var\cache\salt\minion\files\base\win\repo-ng\winrepo.p` + + This module performs the following steps to generate the software metadata + database: + + - Fetch the package definition files (.sls) from `winrepo_source_dir` + (default `salt://win/repo-ng`) and cache them in + `\files\\` + (default: `C:\salt\var\cache\salt\minion\files\base\win\repo-ng`) + - Call :py:func:`pkg.genrepo ` to parse the + package definition files and generate the repository metadata database + file (`winrepo.p`) + - Return the report received from + :py:func:`pkg.genrepo ` The default winrepo directory on the master is `/srv/salt/win/repo-ng`. All files that end with `.sls` in this and all subdirectories will be used to - generate the repository metadata (`winrepo.p`). + generate the repository metadata database (`winrepo.p`). .. note:: - Hidden directories (directories beginning with `.`, such as `.git`) will - be ignored. + - Hidden directories (directories beginning with '`.`', such as + '`.git`') will be ignored. .. note:: - Directories under `/srv/salt/win/repo-ng` will be processed in - alphabetical order. If two software definition files contain the same - name, the last one processed wins. + There is no need to call `pkg.refresh_db` every time you work with the + pkg module. Automatic refresh will occur based on the following minion + configuration settings: + - `winrepo_cache_expire_min` + - `winrepo_cache_expire_max` + However, if the package definition files have changed, this function + should be called to ensure the minion has the latest information about + packages available to it. + + .. warning:: + Directories and files fetched from + (`/srv/salt/win/repo-ng`) will be processed in alphabetical order. If + two or more software definition files contain the same name, the last + one processed replaces all data from the files processed before it. 
+ + For more information see + :ref:`Windows Software Repository ` Kwargs: saltenv (str): Salt environment. Default: ``base`` verbose (bool): - Return verbose data structure which includes 'success_list', a list - of all sls files and the package names contained within. Default - 'False' + Return a verbose data structure which includes 'success_list', a + list of all sls files and the package names contained within. + Default is 'False' failhard (bool): - If ``True``, an error will be raised if any repo SLS files failed to + If ``True``, an error will be raised if any repo SLS files fails to process. If ``False``, no error will be raised, and a dictionary containing the full results will be returned. Returns: dict: A dictionary containing the results of the database refresh. - .. Warning:: + .. note:: + A result with a `total: 0` generally means that the files are in the + wrong location on the master. Try running the following command on the + minion: `salt-call -l debug pkg.refresh saltenv=base` + + .. warning:: When calling this command from a state using `module.run` be sure to pass `failhard: False`. Otherwise the state will report failure if it encounters a bad software definition file. 
From 35c81faf5a88d67de76e24bb99bb34f4684b71ca Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 26 Jan 2018 13:04:47 -0700 Subject: [PATCH 144/223] Log the source_dir when caching the files --- salt/modules/win_pkg.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 2dc7c29cce..476f8a39f5 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -695,6 +695,7 @@ def refresh_db(**kwargs): ) # Cache repo-ng locally + log.info('Fetching *.sls files from {0}'.format(repo_details.winrepo_source_dir)) __salt__['cp.cache_dir']( repo_details.winrepo_source_dir, saltenv, From 3646d5c8975ceee58056445a883c1f90c0b2bda3 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 26 Jan 2018 14:14:13 -0700 Subject: [PATCH 145/223] Fix some docs formatting, add some warnings --- .../windows/windows-package-manager.rst | 383 ++++++++++-------- salt/modules/win_pkg.py | 14 +- 2 files changed, 216 insertions(+), 181 deletions(-) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index cea071e888..597e389f45 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -301,25 +301,30 @@ can define multiple versions for the same piece of software. The lines following the version are indented two more spaces and contain all the information needed to install that package. -.. warning:: The package name and the ``full_name`` must be unique to all - other packages in the software repository. +.. warning:: + The package name and the ``full_name`` must be unique to all other packages + in the software repository. The version line is the version for the package to be installed. It is used when you need to install a specific version of a piece of software. -.. warning:: The version must be enclosed in quotes, otherwise the yaml parser - will remove trailing zeros. +.. 
warning:: + The version must be enclosed in quotes, otherwise the yaml parser will + remove trailing zeros. + +.. note:: + There are unique situations where previous versions are unavailable. Take + Google Chrome for example. There is only one url provided for a standalone + installation of Google Chrome. -.. note:: There are unique situations where previous versions are unavailable. - Take Google Chrome for example. There is only one url provided for a - standalone installation of Google Chrome. (https://dl.google.com/edgedl/chrome/install/GoogleChromeStandaloneEnterprise.msi) + When a new version is released, the url just points to the new version. To handle situations such as these, set the version to `latest`. Salt will install the version of Chrome at the URL and report that version. Here's an example: -.. code-block:: bash +.. code-block:: yaml chrome: latest: @@ -334,200 +339,230 @@ you need to install a specific version of a piece of software. Available parameters are as follows: -:param str full_name: The Full Name for the software as shown in "Programs and - Features" in the control panel. You can also get this information by - installing the package manually and then running ``pkg.list_pkgs``. Here's - an example of the output from ``pkg.list_pkgs``: +:param str full_name: + The Full Name for the software as shown in "Programs and Features" in the + control panel. You can also get this information by installing the package + manually and then running ``pkg.list_pkgs``. Here's an example of the output + from ``pkg.list_pkgs``: -.. code-block:: bash + .. 
code-block:: bash - salt 'test-2008' pkg.list_pkgs - test-2008 - ---------- - 7-Zip 9.20 (x64 edition): - 9.20.00.0 - Microsoft .NET Framework 4 Client Profile: - 4.0.30319,4.0.30319 - Microsoft .NET Framework 4 Extended: - 4.0.30319,4.0.30319 - Microsoft Visual C++ 2008 Redistributable - x64 9.0.21022: - 9.0.21022 - Mozilla Firefox 17.0.1 (x86 en-US): - 17.0.1 - Mozilla Maintenance Service: - 17.0.1 - NSClient++ (x64): - 0.3.8.76 - Notepad++: - 6.4.2 - Salt Minion 0.16.0: - 0.16.0 + salt 'test-2008' pkg.list_pkgs + test-2008 + ---------- + 7-Zip 9.20 (x64 edition): + 9.20.00.0 + Microsoft .NET Framework 4 Client Profile: + 4.0.30319,4.0.30319 + Microsoft .NET Framework 4 Extended: + 4.0.30319,4.0.30319 + Microsoft Visual C++ 2008 Redistributable - x64 9.0.21022: + 9.0.21022 + Mozilla Firefox 17.0.1 (x86 en-US): + 17.0.1 + Mozilla Maintenance Service: + 17.0.1 + NSClient++ (x64): + 0.3.8.76 + Notepad++: + 6.4.2 + Salt Minion 0.16.0: + 0.16.0 -Notice the Full Name for Firefox: Mozilla Firefox 17.0.0 (x86 en-US). That's -exactly what's in the ``full_name`` parameter in the software definition file. + Notice the Full Name for Firefox: ``Mozilla Firefox 17.0.0 (x86 en-US)``. + That's exactly what's in the ``full_name`` parameter in the software + definition file. -If any of the software insalled on the machine matches one of the software -definition files in the repository the full_name will be automatically renamed -to the package name. The example below shows the ``pkg.list_pkgs`` for a -machine that already has Mozilla Firefox 17.0.1 installed. + If any of the software installed on the machine matches one of the software + definition files in the repository, the full_name will be automatically + renamed to the package name. The example below shows the ``pkg.list_pkgs`` + for a machine that already has Mozilla Firefox 17.0.1 installed. -.. code-block:: bash + .. 
code-block:: bash + + test-2008: + ---------- + 7zip: + 9.20.00.0 + Microsoft .NET Framework 4 Client Profile: + 4.0.30319,4.0.30319 + Microsoft .NET Framework 4 Extended: + 4.0.30319,4.0.30319 + Microsoft Visual C++ 2008 Redistributable - x64 9.0.21022: + 9.0.21022 + Mozilla Maintenance Service: + 17.0.1 + Notepad++: + 6.4.2 + Salt Minion 0.16.0: + 0.16.0 + firefox: + 17.0.1 + nsclient: + 0.3.9.328 + + .. important:: + The version number and ``full_name`` need to match the output from + ``pkg.list_pkgs`` so that the status can be verified when running a + highstate. + + .. note:: + It is still possible to successfully install packages using + ``pkg.install``, even if the ``full_name`` or the version number don't + match. However, this can make troubleshooting issues difficult, so be + careful. + +:param str installer: + The path to the ``.exe`` or ``.msi`` to use to install the package. This can + be a path or a URL. If it is a URL or a salt path (``salt://``), the package + will be cached locally and then executed. If it is a path to a file on disk + or a file share, it will be executed directly. + + .. note:: + If storing software in the same location as the winrepo it is best + practice to place each installer in its own directory rather than the + root of winrepo. Then you can place your package definition file in the + same directory with a name of ``init.sls``. This will be picked up + by ``pkg.refresh_db`` and processed properly. + +:param str install_flags: + Any flags that need to be passed to the installer to make it perform a + silent install. These can often be found by adding ``/?`` or ``/h`` when + running the installer from the command-line. A great resource for finding + these silent install flags can be found on the WPKG project's wiki_: + + .. warning:: + Salt will not return if the installer is waiting for user input so it is + imperative that the software package being installed has the ability to + install silently. 
+ +:param str uninstaller: + The path to the program used to uninstall this software. This can be the + path to the same `exe` or `msi` used to install the software. It can also be + a GUID. You can find this value in the registry under the following keys: + + - Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall + - Software\\Wow6432None\\Microsoft\\Windows\\CurrentVersion\\Uninstall + +:param str uninstall_flags: + Any flags that need to be passed to the uninstaller to make it perform a + silent uninstall. These can often be found by adding ``/?`` or ``/h`` when + running the uninstaller from the command-line. A great resource for finding + these silent install flags can be found on the WPKG project's wiki_: + + .. warning:: + Salt will not return if the uninstaller is waiting for user input so it + is imperative that the software package being uninstalled has the + ability to uninstall silently. + + Here are some examples of installer and uninstaller settings: + + .. code-block:: yaml - test-2008: - ---------- 7zip: - 9.20.00.0 - Microsoft .NET Framework 4 Client Profile: - 4.0.30319,4.0.30319 - Microsoft .NET Framework 4 Extended: - 4.0.30319,4.0.30319 - Microsoft Visual C++ 2008 Redistributable - x64 9.0.21022: - 9.0.21022 - Mozilla Maintenance Service: - 17.0.1 - Notepad++: - 6.4.2 - Salt Minion 0.16.0: - 0.16.0 - firefox: - 17.0.1 - nsclient: - 0.3.9.328 + '9.20.00.0': + installer: salt://win/repo/7zip/7z920-x64.msi + full_name: 7-Zip 9.20 (x64 edition) + reboot: False + install_flags: '/qn /norestart' + msiexec: True + uninstaller: '{23170F69-40C1-2702-0920-000001000000}' + uninstall_flags: '/qn /norestart' -.. important:: The version number and ``full_name`` need to match the output - from ``pkg.list_pkgs`` so that the status can be verified when running - highstate. + Alternatively the ``uninstaller`` can also simply repeat the URL of an msi + file: -.. 
note:: It is still possible to successfully install packages using - ``pkg.install`` even if they don't match. This can make troubleshooting - difficult so be careful. + .. code-block:: yaml -:param str installer: The path to the ``.exe`` or ``.msi`` to use to install the - package. This can be a path or a URL. If it is a URL or a salt path - (salt://), the package will be cached locally and then executed. If it is a - path to a file on disk or a file share, it will be executed directly. + 7zip: + '9.20.00.0': + installer: salt://win/repo/7zip/7z920-x64.msi + full_name: 7-Zip 9.20 (x64 edition) + reboot: False + install_flags: '/qn /norestart' + msiexec: True + uninstaller: salt://win/repo/7zip/7z920-x64.msi + uninstall_flags: '/qn /norestart' -:param str install_flags: Any flags that need to be passed to the installer to - make it perform a silent install. These can often be found by adding ``/?`` - or ``/h`` when running the installer from the command-line. A great resource - for finding these silent install flags can be found on the WPKG project's wiki_: +:param msiexec: + This tells salt to use ``msiexec /i`` to install the package and + ``msiexec /x`` to uninstall. This is for ``.msi`` installations. Possible + options are: True, False or the path to ``msiexec.exe`` on your system -Salt will not return if the installer is waiting for user input so these are -important. + .. code-block:: yaml -:param str uninstaller: The path to the program used to uninstall this software. - This can be the path to the same `exe` or `msi` used to install the - software. It can also be a GUID. 
You can find this value in the registry - under the following keys: + 7zip: + '9.20.00.0': + installer: salt://win/repo/7zip/7z920-x64.msi + full_name: 7-Zip 9.20 (x64 edition) + reboot: False + install_flags: '/qn /norestart' + msiexec: 'C:\Windows\System32\msiexec.exe' + uninstaller: salt://win/repo/7zip/7z920-x64.msi + uninstall_flags: '/qn /norestart' - - Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall - - Software\\Wow6432None\\Microsoft\\Windows\\CurrentVersion\\Uninstall +:param bool allusers: + This parameter is specific to ``.msi`` installations. It tells ``msiexec`` + to install the software for all users. The default is ``True``. -:param str uninstall_flags: Any flags that need to be passed to the uninstaller - to make it perform a silent uninstall. These can often be found by adding - ``/?`` or ``/h`` when running the uninstaller from the command-line. A great - resource for finding these silent install flags can be found on the WPKG - project's wiki_: +:param bool cache_dir: + If ``True`` and the installer URL begins with ``salt://``, the entire + directory where the installer resides will be recursively cached. This is + useful for installers that depend on other files in the same directory for + installation. -Salt will not return if the uninstaller is waiting for user input so these are -important. - -Here are some examples of installer and uninstaller settings: - -.. code-block:: yaml - - 7zip: - '9.20.00.0': - installer: salt://win/repo/7zip/7z920-x64.msi - full_name: 7-Zip 9.20 (x64 edition) - reboot: False - install_flags: '/qn /norestart' - msiexec: True - uninstaller: '{23170F69-40C1-2702-0920-000001000000}' - uninstall_flags: '/qn /norestart' - -Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file. - -.. 
code-block:: yaml - - 7zip: - '9.20.00.0': - installer: salt://win/repo/7zip/7z920-x64.msi - full_name: 7-Zip 9.20 (x64 edition) - reboot: False - install_flags: '/qn /norestart' - msiexec: True - uninstaller: salt://win/repo/7zip/7z920-x64.msi - uninstall_flags: '/qn /norestart' - -:param msiexec: This tells salt to use ``msiexec /i`` to install the - package and ``msiexec /x`` to uninstall. This is for `.msi` installations. - Possible options are: True, False or path to msiexec on your system - - 7zip: - '9.20.00.0': - installer: salt://win/repo/7zip/7z920-x64.msi - full_name: 7-Zip 9.20 (x64 edition) - reboot: False - install_flags: '/qn /norestart' - msiexec: 'C:\Windows\System32\msiexec.exe' - uninstaller: salt://win/repo/7zip/7z920-x64.msi - uninstall_flags: '/qn /norestart' - -:param str arch: This selects which ``msiexec.exe`` to use. Possible values: - ``x86``, ``x64`` - -:param bool allusers: This parameter is specific to `.msi` installations. It - tells `msiexec` to install the software for all users. The default is True. - -:param bool cache_dir: If true when installer URL begins with salt://, the - entire directory where the installer resides will be recursively cached. - This is useful for installers that depend on other files in the same - directory for installation. + .. warning:: + Do not place installer files in the root of winrepo + (``/srv/salt/win/repo-ng``). If the installer is in the root of winrepo + and the package definition for that installer has ``cache_dir: True`` + then the entire contents of winrepo will be cached to the minion. :param str cache_file: - When installer URL begins with salt://, this indicates single file to copy - down for use with the installer. Copied to the same location as the - installer. Use this over ``cache_dir`` if there are many files in the + When the installer URL begins with ``salt://``, this indicates a single file + to copy down for use with the installer. 
It is copied to the same location + as the installer. Use this over ``cache_dir`` if there are many files in the directory and you only need a specific file and don't want to cache additional files that may reside in the installer directory. -Here's an example for a software package that has dependent files: + Here's an example for a software package that has dependent files: -.. code-block:: yaml + .. code-block:: yaml - sqlexpress: - '12.0.2000.8': - installer: 'salt://win/repo/sqlexpress/setup.exe' - full_name: Microsoft SQL Server 2014 Setup (English) - reboot: False - install_flags: '/ACTION=install /IACCEPTSQLSERVERLICENSETERMS /Q' - cache_dir: True + sqlexpress: + '12.0.2000.8': + installer: 'salt://win/repo/sqlexpress/setup.exe' + full_name: Microsoft SQL Server 2014 Setup (English) + reboot: False + install_flags: '/ACTION=install /IACCEPTSQLSERVERLICENSETERMS /Q' + cache_dir: True -:param bool use_scheduler: If true, windows will use the task scheduler to run - the installation. This is useful for running the salt installation itself as - the installation process kills any currently running instances of salt. +:param bool use_scheduler: + If ``True``, Windows will use the task scheduler to run the installation. + This is useful for running the Salt installation itself as the installation + process kills any currently running instances of Salt. -:param str source_hash: This tells salt to compare a hash sum of the installer -to the provided hash sum before execution. The value can be formatted as -``hash_algorithm=hash_sum``, or it can be a URI to a file containing the hash -sum. -For a list of supported algorithms, see the `hashlib documentation -`_. +:param str source_hash: + This tells Salt to compare a hash sum of the installer to the provided hash + sum before execution. The value can be formatted as + ``=``, or it can be a URI to a file containing the + hash sum. 
-Here's an example of source_hash usage: + For a list of supported algorithms, see the `hashlib documentation + `_. -.. code-block:: yaml + Here's an example of source_hash usage: - messageanalyzer: - '4.0.7551.0': - full_name: 'Microsoft Message Analyzer' - installer: 'salt://win/repo/messageanalyzer/MessageAnalyzer64.msi' - install_flags: '/quiet /norestart' - uninstaller: '{1CC02C23-8FCD-487E-860C-311EC0A0C933}' - uninstall_flags: '/quiet /norestart' - msiexec: True - source_hash: 'sha1=62875ff451f13b10a8ff988f2943e76a4735d3d4' + .. code-block:: yaml + + messageanalyzer: + '4.0.7551.0': + full_name: 'Microsoft Message Analyzer' + installer: 'salt://win/repo/messageanalyzer/MessageAnalyzer64.msi' + install_flags: '/quiet /norestart' + uninstaller: '{1CC02C23-8FCD-487E-860C-311EC0A0C933}' + uninstall_flags: '/quiet /norestart' + msiexec: True + source_hash: 'sha1=62875ff451f13b10a8ff988f2943e76a4735d3d4' :param bool reboot: Not implemented diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 476f8a39f5..145697f1d1 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -697,8 +697,8 @@ def refresh_db(**kwargs): # Cache repo-ng locally log.info('Fetching *.sls files from {0}'.format(repo_details.winrepo_source_dir)) __salt__['cp.cache_dir']( - repo_details.winrepo_source_dir, - saltenv, + path=repo_details.winrepo_source_dir, + saltenv=saltenv, include_pat='*.sls', exclude_pat=r'E@\/\..*\/' # Exclude all hidden directories (.git) ) @@ -1293,11 +1293,11 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # single files if cache_dir and installer.startswith('salt:'): path, _ = os.path.split(installer) - __salt__['cp.cache_dir'](path, - saltenv, - False, - None, - 'E@init.sls$') + __salt__['cp.cache_dir'](path=path, + saltenv=saltenv, + include_empty=False, + include_pat=None, + exclude_pat='E@init.sls$') # Check to see if the cache_file is cached... 
if passed if cache_file and cache_file.startswith('salt:'): From 87dc554dc388e36b091b4e448acc9ffb2ea77810 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 31 Jan 2018 14:48:13 -0700 Subject: [PATCH 146/223] Add final updates to docs --- doc/topics/windows/windows-package-manager.rst | 14 ++++++++------ salt/modules/win_pkg.py | 3 ++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index 597e389f45..6d5186ff49 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -422,8 +422,8 @@ Available parameters are as follows: If storing software in the same location as the winrepo it is best practice to place each installer in its own directory rather than the root of winrepo. Then you can place your package definition file in the - same directory with a name of ``init.sls``. This will be picked up - by ``pkg.refresh_db`` and processed properly. + same directory. It is best practice to name the file ``init.sls``. This + will be picked up by ``pkg.refresh_db`` and processed properly. :param str install_flags: Any flags that need to be passed to the installer to make it perform a @@ -512,10 +512,12 @@ Available parameters are as follows: installation. .. warning:: - Do not place installer files in the root of winrepo - (``/srv/salt/win/repo-ng``). If the installer is in the root of winrepo - and the package definition for that installer has ``cache_dir: True`` - then the entire contents of winrepo will be cached to the minion. + Be aware that all files and directories in the same location as the + installer file will be copied down to the minion. If you place your + installer file in the root of winrepo (``/srv/salt/win/repo-ng``) and + ``cache_dir: True`` the entire contents of winrepo will be cached to + the minion. 
Therefore, it is best practice to place your installer files + in a subdirectory if they are to be stored in winrepo. :param str cache_file: When the installer URL begins with ``salt://``, this indicates a single file diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 145697f1d1..a8fa9bb183 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -614,7 +614,8 @@ def refresh_db(**kwargs): configuration settings: - `winrepo_cache_expire_min` - `winrepo_cache_expire_max` - However, if the package definition files have changed, this function + However, if the package definition files have changed, as would be the + case if you are developing a new package definition, this function should be called to ensure the minion has the latest information about packages available to it. From 437a457911e8dfd3d80db93a69859e62c83b5af3 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 5 Feb 2018 16:51:22 -0700 Subject: [PATCH 147/223] Skip hidden dirs in genrepo --- salt/modules/win_pkg.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index a8fa9bb183..56c18a1dde 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -838,6 +838,10 @@ def genrepo(**kwargs): to process. If ``False``, no error will be raised, and a dictionary containing the full results will be returned. + .. note:: + - Hidden directories (directories beginning with '`.`', such as + '`.git`') will be ignored. 
+ Returns: dict: A dictionary of the results of the command @@ -864,6 +868,10 @@ def genrepo(**kwargs): short_path = os.path.relpath(root, repo_details.local_dest) if short_path == '.': short_path = '' + if re.search(r'[\\/]\..*', root): + log.debug('Skipping files in directory: {0}'.format(root)) + continue + for name in files: if name.endswith('.sls'): total_files_processed += 1 From 1282ae3a93df123e7196b8257367a7e5b8d6e651 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 5 Feb 2018 17:08:11 -0700 Subject: [PATCH 148/223] Skip hidden first --- salt/modules/win_pkg.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 56c18a1dde..2cfa7f8172 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -865,13 +865,16 @@ def genrepo(**kwargs): repo_details = _get_repo_details(saltenv) for root, _, files in os.walk(repo_details.local_dest, followlinks=False): - short_path = os.path.relpath(root, repo_details.local_dest) - if short_path == '.': - short_path = '' + + # Skip hidden directories (.git) if re.search(r'[\\/]\..*', root): log.debug('Skipping files in directory: {0}'.format(root)) continue + short_path = os.path.relpath(root, repo_details.local_dest) + if short_path == '.': + short_path = '' + for name in files: if name.endswith('.sls'): total_files_processed += 1 From 6d223cffa76b8a9a224490933dcca3edd0251f7b Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 6 Feb 2018 16:05:40 -0700 Subject: [PATCH 149/223] Add tip about passing bogus saltenv --- doc/topics/windows/windows-package-manager.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index 6d5186ff49..1c5be46a9a 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -412,6 +412,11 @@ Available parameters are as follows: match. 
However, this can make troubleshooting issues difficult, so be careful. + .. tip:: + To force salt to display the full name when there's already an existing + package definition file on the system, you can pass a bogus ``saltenv`` + parameter to the command like so: ``pkg.list_pkgs saltenv=NotARealEnv`` + :param str installer: The path to the ``.exe`` or ``.msi`` to use to install the package. This can be a path or a URL. If it is a URL or a salt path (``salt://``), the package From ea41215646c765f026c179a8cbe034649ded2928 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 12 Feb 2018 14:03:04 -0700 Subject: [PATCH 150/223] Make the regex pattern less greedy --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 2cfa7f8172..588e2ed38a 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -701,7 +701,7 @@ def refresh_db(**kwargs): path=repo_details.winrepo_source_dir, saltenv=saltenv, include_pat='*.sls', - exclude_pat=r'E@\/\..*\/' # Exclude all hidden directories (.git) + exclude_pat=r'E@\/\..*?\/' # Exclude all hidden directories (.git) ) return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard) From 953a400d797ab56941a60a8274f833bd8e5b1792 Mon Sep 17 00:00:00 2001 From: Lars Wagner Date: Wed, 31 Jan 2018 13:53:40 +0100 Subject: [PATCH 151/223] follow symlinks --- salt/modules/aliases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/aliases.py b/salt/modules/aliases.py index 0be801837b..1f55f7e267 100644 --- a/salt/modules/aliases.py +++ b/salt/modules/aliases.py @@ -33,7 +33,7 @@ def __get_aliases_filename(): ''' Return the path to the appropriate aliases file ''' - return __salt__['config.option']('aliases.file') + return os.path.realpath(__salt__['config.option']('aliases.file')) def __parse_aliases(): From a8c2cd806637fa795ba99ba8c260ad8d54424602 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Mon, 12 Feb 2018 10:38:51 -0800 Subject: [PATCH 152/223] Missing `format` in the call to write. --- salt/log/handlers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index bd67a66f56..bea9cfe502 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -179,7 +179,7 @@ if sys.version_info < (3, 2): self.queue.put_nowait(record) except queue.Full: sys.stderr.write('[WARNING ] Message queue is full, ' - 'unable to write "{0}" to log', record + 'unable to write "{0}" to log'.format(record) ) def prepare(self, record): From 9e98f9dcdb24802d86c4e6464758efcda22fb49b Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 12 Feb 2018 15:29:56 -0700 Subject: [PATCH 153/223] fix rootdir prepending on windows --- salt/config/__init__.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 6090be4cc7..8cac3fa459 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -2306,6 +2306,12 @@ def prepend_root_dir(opts, path_options): path = tmp_path_root_dir else: path = tmp_path_def_root_dir + elif salt.utils.platform.is_windows() and not os.path.splitdrive(path)[0]: + # In windows, os.path.isabs resolves '/' to 'C:\\' or whatever + # the root drive is. 
This elif prevents the next from being + # hit, so that the root_dir is prefixed in cases where the + # drive is not prefixed on a config option + pass elif os.path.isabs(path): # Absolute path (not default or overriden root_dir) # No prepending required @@ -3641,7 +3647,7 @@ def _adjust_log_file_override(overrides, default_log_file): if overrides.get('log_dir'): # Adjust log_file if a log_dir override is introduced if overrides.get('log_file'): - if not os.path.abspath(overrides['log_file']): + if not os.path.isabs(overrides['log_file']): # Prepend log_dir if log_file is relative overrides['log_file'] = os.path.join(overrides['log_dir'], overrides['log_file']) From 679787699c0c972b909521a8b9ce21b43f6f39ec Mon Sep 17 00:00:00 2001 From: Ben Gridley Date: Mon, 12 Feb 2018 16:06:32 -0700 Subject: [PATCH 154/223] Add vpc_peering_connection_id to describe_route_tables route_keys This is required to fix state module function _routes_present when using a vpc_peering_connection. --- salt/modules/boto_vpc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py index bebaafdd57..2fa6da3c2d 100644 --- a/salt/modules/boto_vpc.py +++ b/salt/modules/boto_vpc.py @@ -2576,6 +2576,7 @@ def describe_route_tables(route_table_id=None, route_table_name=None, 'instance_id': 'Instance', 'interface_id': 'NetworkInterfaceId', 'nat_gateway_id': 'NatGatewayId', + 'vpc_peering_connection_id': 'VpcPeeringConnectionId', } assoc_keys = {'id': 'RouteTableAssociationId', 'main': 'Main', From b752775c4483134775d9075f1f845dba5855c783 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 12 Feb 2018 20:21:16 -0800 Subject: [PATCH 155/223] A couple bug fixes in the scheduler when once is used for the job run time. 
--- salt/utils/schedule.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 0df058094a..648848b27e 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -940,31 +940,31 @@ class Schedule(object): data['_next_scheduled_fire_time'] = now + data['_seconds'] elif 'once' in data: - if data['_next_fire_time'] and \ - data['_next_fire_time'] < now - self.opts['loop_interval'] and \ - data['_next_fire_time'] > now and \ - not data['_splay']: - continue + if data['_next_fire_time']: + if data['_next_fire_time'] < now - self.opts['loop_interval'] or \ + data['_next_fire_time'] > now and \ + not data['_splay']: + continue if not data['_next_fire_time'] and \ not data['_splay']: once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S') try: - once = datetime.datetime.strptime(data['once'], + _once = datetime.datetime.strptime(data['once'], once_fmt) - data['_next_fire_time'] = int( - time.mktime(once.timetuple())) - data['_next_scheduled_fire_time'] = int( - time.mktime(once.timetuple())) + + once = int(time.mktime(_once.timetuple())) except (TypeError, ValueError): log.error('Date string could not be parsed: %s, %s', data['once'], once_fmt) continue # If _next_fire_time is less than now or greater # than now, continue. 
- if data['_next_fire_time'] < now - self.opts['loop_interval'] and \ - data['_next_fire_time'] > now: + if once < now - self.opts['loop_interval']: continue + else: + data['_next_fire_time'] = once + data['_next_scheduled_fire_time'] = once elif 'when' in data: if not _WHEN_SUPPORTED: @@ -1363,8 +1363,8 @@ class Schedule(object): else: if not self.standalone: data = self._check_max_running(func, data, self.opts) + run = data['run'] - run = data['run'] if run: if multiprocessing_enabled: thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess From 6cb1a38407fb3276e560ebce2e447a3a104ed826 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 12 Feb 2018 22:35:45 -0600 Subject: [PATCH 156/223] Properly handle undecodable bytestrings in nested outputter Note however that these bytestrings will typically end up being whitespace when printed. But, at least they will no longer result in a traceback. --- salt/output/__init__.py | 5 ++++- salt/output/nested.py | 39 ++++++++++++++++++++++++++++++--------- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/salt/output/__init__.py b/salt/output/__init__.py index 30d849e350..bca1573d44 100644 --- a/salt/output/__init__.py +++ b/salt/output/__init__.py @@ -226,6 +226,9 @@ def strip_esc_sequence(txt): from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): - return txt.replace('\033', '?') + try: + return txt.replace('\033', '?') + except UnicodeDecodeError: + return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt diff --git a/salt/output/nested.py b/salt/output/nested.py index 9ece388c82..19666780c0 100644 --- a/salt/output/nested.py +++ b/salt/output/nested.py @@ -71,20 +71,34 @@ class NestDisplay(object): endc, suffix) except UnicodeDecodeError: - return fmt.format( - indent, - color, - prefix, - salt.utils.stringutils.to_unicode(msg), - endc, - suffix) + try: + return fmt.format( + indent, + color, + prefix, + 
salt.utils.stringutils.to_unicode(msg), + endc, + suffix) + except UnicodeDecodeError: + # msg contains binary data that can't be decoded + return str(fmt).format( # future lint: disable=blacklisted-function + indent, + color, + prefix, + msg, + endc, + suffix) def display(self, ret, indent, prefix, out): ''' Recursively iterate down through data structures to determine output ''' if isinstance(ret, bytes): - ret = salt.utils.stringutils.to_unicode(ret) + try: + ret = salt.utils.stringutils.to_unicode(ret) + except UnicodeDecodeError: + # ret contains binary data that can't be decoded + pass if ret is None or ret is True or ret is False: out.append( @@ -183,4 +197,11 @@ def output(ret, **kwargs): base_indent = kwargs.get('nested_indent', 0) \ or __opts__.get('nested_indent', 0) nest = NestDisplay(retcode=retcode) - return '\n'.join(nest.display(ret, base_indent, '', [])) + lines = nest.display(ret, base_indent, '', []) + try: + return '\n'.join(lines) + except UnicodeDecodeError: + # output contains binary data that can't be decoded + return str('\n').join( # future lint: disable=blacklisted-function + [salt.utils.stringutils.to_str(x) for x in lines] + ) From ac0baf4b340ccb124047b45d84feb77a6e33d005 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Tue, 13 Feb 2018 11:31:20 -0500 Subject: [PATCH 157/223] Add 2017.7.4 Release Notes with PRs --- doc/topics/releases/2017.7.4.rst | 80 ++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 doc/topics/releases/2017.7.4.rst diff --git a/doc/topics/releases/2017.7.4.rst b/doc/topics/releases/2017.7.4.rst new file mode 100644 index 0000000000..70a538d4d7 --- /dev/null +++ b/doc/topics/releases/2017.7.4.rst @@ -0,0 +1,80 @@ +=========================== +Salt 2017.7.4 Release Notes +=========================== + +Version 2017.7.4 is a bugfix release for :ref:`2017.7.0 `. 
+ +Changes for v2017.7.3..v2017.7.4 +--------------------------------------------------------------- + +Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): + +*Generated at: 2018-02-13T16:29:07Z* + +Statistics: + +- Total Merges: **4** +- Total Issue references: **3** +- Total PR references: **7** + +Changes: + + +- **PR** `#45981`_: (*gtmanfred*) use local config for vault when masterless + @ *2018-02-13T15:22:01Z* + + - **ISSUE** `#45976`_: (*grobinson-blockchain*) 6a5e0f9 introduces regression that breaks Vault module for salt masterless + | refs: `#45981`_ + * ca76a0b328 Merge pull request `#45981`_ from gtmanfred/2017.7.3 + * 0d448457dc apparently local is not set by default + + * 2a92f4bc16 use local config for vault when masterless + +- **PR** `#45953`_: (*rallytime*) Back-port `#45928`_ to 2017.7.3 + @ *2018-02-09T22:29:10Z* + + - **ISSUE** `#45915`_: (*MatthiasKuehneEllerhold*) 2017.7.3: Salt-SSH & Vault Pillar: Permission denied "minion.pem" + | refs: `#45928`_ + - **PR** `#45928`_: (*garethgreenaway*) [2017.7] Fixing vault when used with pillar over salt-ssh + | refs: `#45953`_ + * 6530649dbc Merge pull request `#45953`_ from rallytime/`bp-45928`_-2017.7.3 + * 85363189d1 Fixing vault when used with pillar over salt-ssh + +- **PR** `#45934`_: (*rallytime*) Back-port `#45902`_ to 2017.7.3 + @ *2018-02-09T16:31:08Z* + + - **ISSUE** `#45893`_: (*CrackerJackMack*) archive.extracted ValueError "No path specified" in 2017.7.3 + | refs: `#45902`_ + - **PR** `#45902`_: (*terminalmage*) Check the effective saltenv for cached archive + | refs: `#45934`_ + * fb378cebb0 Merge pull request `#45934`_ from rallytime/`bp-45902`_ + * bb83e8b345 Add regression test for issue 45893 + + * cdda66d759 Remove duplicated section in docstring and fix example + + * 4b6351cda6 Check the effective saltenv for cached archive + +- **PR** `#45935`_: (*rallytime*) Back-port `#45742`_ to 2017.7.3 + @ *2018-02-09T14:02:26Z* + + - **PR** `#45742`_: 
(*marccardinal*) list.copy() is not compatible with python 2.7 + | refs: `#45935`_ + * 0d74151c71 Merge pull request `#45935`_ from rallytime/`bp-45742`_ + * 6a0b5f7af3 Removed the chained copy + + * ad1150fad4 list.copy() is not compatible with python 2.7 + + +.. _`#45742`: https://github.com/saltstack/salt/pull/45742 +.. _`#45893`: https://github.com/saltstack/salt/issues/45893 +.. _`#45902`: https://github.com/saltstack/salt/pull/45902 +.. _`#45915`: https://github.com/saltstack/salt/issues/45915 +.. _`#45928`: https://github.com/saltstack/salt/pull/45928 +.. _`#45934`: https://github.com/saltstack/salt/pull/45934 +.. _`#45935`: https://github.com/saltstack/salt/pull/45935 +.. _`#45953`: https://github.com/saltstack/salt/pull/45953 +.. _`#45976`: https://github.com/saltstack/salt/issues/45976 +.. _`#45981`: https://github.com/saltstack/salt/pull/45981 +.. _`bp-45742`: https://github.com/saltstack/salt/pull/45742 +.. _`bp-45902`: https://github.com/saltstack/salt/pull/45902 +.. _`bp-45928`: https://github.com/saltstack/salt/pull/45928 From 13cdb526905e3cff1de8975e0c4099f1f89d4ca2 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 13 Feb 2018 11:41:41 -0600 Subject: [PATCH 158/223] cmdmod.py: runas workaround for platforms that don't set a USER env var Solaris doesn't set a USER env var in its default environment, that is if you run `su - someuser -c env` you get a HOME, PWD, LOGNAME, etc. env var, but not a USER. This commit makes sure that the USER env var is set to the runas user. --- salt/modules/cmdmod.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 233dca8647..976b56d488 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -465,6 +465,10 @@ def _run(cmd, else: run_env = os.environ.copy() run_env.update(env) + # Fix platforms like Solaris that don't set a USER env var in the + # user's default environment as obtained above. 
+ if runas is not None and run_env.get('USER') != runas: + run_env['USER'] = runas if python_shell is None: python_shell = False From 9f76836a6c8f5502e9e0f616baa457b2cd9b3edb Mon Sep 17 00:00:00 2001 From: Jeremy McMillan Date: Tue, 16 Jan 2018 08:04:31 -0600 Subject: [PATCH 159/223] emit port cli version, variants as separate args --- salt/modules/mac_ports.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/salt/modules/mac_ports.py b/salt/modules/mac_ports.py index 8a842cdb8e..d3c75769e5 100644 --- a/salt/modules/mac_ports.py +++ b/salt/modules/mac_ports.py @@ -306,13 +306,13 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): if pkgs is None: version_num = kwargs.get('version') variant_spec = kwargs.get('variant') - spec = None + spec = {} if version_num: - spec = (spec or '') + '@' + version_num + spec['version'] = version_num if variant_spec: - spec = (spec or '') + variant_spec + spec['variant'] = variant_spec pkg_params = {name: spec} @@ -321,7 +321,14 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): formulas_array = [] for pname, pparams in six.iteritems(pkg_params): - formulas_array.append(pname + (pparams or '')) + formulas_array.append(pname) + + if pparams: + if 'version' in pparams: + formulas_array.append('@' + pparams['version']) + + if 'variant' in pparams: + formulas_array.append(pparams['variant']) old = list_pkgs() cmd = ['port', 'install'] From ebb244396b536fcd332e9c2838a1995a218ca4af Mon Sep 17 00:00:00 2001 From: Lee Webb Date: Fri, 19 Jan 2018 09:28:16 +1100 Subject: [PATCH 160/223] Patch around ResourceRecords needing to be present for AliasTarget entries to work --- salt/states/boto3_route53.py | 92 ++++++++++++++++++------------------ 1 file changed, 47 insertions(+), 45 deletions(-) diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 0a49dcc3fe..78ac92cdba 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -595,54 +595,55 @@ 
def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name # Convert any magic RR values to something AWS will understand, and otherwise clean them up. fixed_rrs = [] - for rr in ResourceRecords: - if rr.startswith('magic:'): - fields = rr.split(':') - if fields[1] == 'ec2_instance_tag': - if len(fields) != 5: - log.warning("Invalid magic RR value seen: '{}'. Passing as-is.".format(rr)) - fixed_rrs += [rr] - continue - tag_name = fields[2] - tag_value = fields[3] - instance_attr = fields[4] - good_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') - r = __salt__['boto_ec2.find_instances']( - tags={tag_name: tag_value}, return_objs=True, in_states=good_states, - region=region, key=key, keyid=keyid, profile=profile) - if len(r) < 1: - ret['comment'] = 'No EC2 instance with tag {} == {} found'.format(tag_name, - tag_value) - log.error(ret['comment']) - ret['result'] = False - return ret - if len(r) > 1: - ret['comment'] = 'Multiple EC2 instances with tag {} == {} found'.format( - tag_name, tag_value) - log.error(ret['comment']) - ret['result'] = False - return ret - instance = r[0] - res = getattr(instance, instance_attr, None) - if res: - log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id)) - fixed_rrs += [_to_aws_encoding(res)] + if ResourceRecords: + for rr in ResourceRecords: + if rr.startswith('magic:'): + fields = rr.split(':') + if fields[1] == 'ec2_instance_tag': + if len(fields) != 5: + log.warning("Invalid magic RR value seen: '{}'. 
Passing as-is.".format(rr)) + fixed_rrs += [rr] + continue + tag_name = fields[2] + tag_value = fields[3] + instance_attr = fields[4] + good_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') + r = __salt__['boto_ec2.find_instances']( + tags={tag_name: tag_value}, return_objs=True, in_states=good_states, + region=region, key=key, keyid=keyid, profile=profile) + if len(r) < 1: + ret['comment'] = 'No EC2 instance with tag {} == {} found'.format(tag_name, + tag_value) + log.error(ret['comment']) + ret['result'] = False + return ret + if len(r) > 1: + ret['comment'] = 'Multiple EC2 instances with tag {} == {} found'.format( + tag_name, tag_value) + log.error(ret['comment']) + ret['result'] = False + return ret + instance = r[0] + res = getattr(instance, instance_attr, None) + if res: + log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id)) + fixed_rrs += [_to_aws_encoding(res)] + else: + ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr, + instance.id) + log.error(ret['comment']) + ret['result'] = False + return ret else: - ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr, - instance.id) + ret['comment'] = ('Unknown RR magic value seen: {}. Please extend the ' + 'boto3_route53 state module to add support for your preferred ' + 'incantation.'.format(fields[1])) log.error(ret['comment']) ret['result'] = False return ret else: - ret['comment'] = ('Unknown RR magic value seen: {}. 
Please extend the ' - 'boto3_route53 state module to add support for your preferred ' - 'incantation.'.format(fields[1])) - log.error(ret['comment']) - ret['result'] = False - return ret - else: - fixed_rrs += [rr] - ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)] + fixed_rrs += [rr] + ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)] recordsets = __salt__['boto3_route53.get_resource_records'](HostedZoneId=HostedZoneId, StartRecordName=Name, StartRecordType=Type, region=region, key=key, keyid=keyid, @@ -691,9 +692,10 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name return ret ResourceRecordSet = { 'Name': Name, - 'Type': Type, - 'ResourceRecords': ResourceRecords + 'Type': Type } + if ResourceRecords: + ResourceRecordSet['ResourceRecords'] = ResourceRecords for u in updatable: ResourceRecordSet.update({u: locals().get(u)}) if locals().get(u) else None From f427b0febc84232a5f63cdc7fad450eb8538ce0b Mon Sep 17 00:00:00 2001 From: Lee Webb Date: Thu, 25 Jan 2018 10:35:18 +1100 Subject: [PATCH 161/223] Change formatting style of logging lines per review --- salt/states/boto3_route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 78ac92cdba..28c9117ae6 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -601,7 +601,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name fields = rr.split(':') if fields[1] == 'ec2_instance_tag': if len(fields) != 5: - log.warning("Invalid magic RR value seen: '{}'. Passing as-is.".format(rr)) + log.warning("Invalid magic RR value seen: '%s'. Passing as-is." 
% (rr)) fixed_rrs += [rr] continue tag_name = fields[2] @@ -626,7 +626,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name instance = r[0] res = getattr(instance, instance_attr, None) if res: - log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id)) + log.debug('Found %s %s for instance %s' % (instance_attr, res, instance.id)) fixed_rrs += [_to_aws_encoding(res)] else: ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr, From 2ea3fef543395121a76a7cce06c3b11f7131863f Mon Sep 17 00:00:00 2001 From: Lee Webb Date: Mon, 29 Jan 2018 08:52:15 +1100 Subject: [PATCH 162/223] No lazy logging --- salt/states/boto3_route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 28c9117ae6..54a66bdb07 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -601,7 +601,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name fields = rr.split(':') if fields[1] == 'ec2_instance_tag': if len(fields) != 5: - log.warning("Invalid magic RR value seen: '%s'. Passing as-is." % (rr)) + log.warning("Invalid magic RR value seen: '%s'. 
Passing as-is.", rr) fixed_rrs += [rr] continue tag_name = fields[2] @@ -626,7 +626,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name instance = r[0] res = getattr(instance, instance_attr, None) if res: - log.debug('Found %s %s for instance %s' % (instance_attr, res, instance.id)) + log.debug('Found %s %s for instance %s', instance_attr, res, instance.id) fixed_rrs += [_to_aws_encoding(res)] else: ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr, From a830a6e819edcebc737799836c5d936540984cb9 Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Tue, 30 Jan 2018 18:05:44 +0000 Subject: [PATCH 163/223] m/selinux.fcontext_get_policy allow long filespecs The previous logic of matching the output of `semanage fcontext --list` did not allow for filespecs that were longer than 49 characters. This was due to the output of the semanage tool not conforming to the expected output. We used to expect that the after the filespec would be at least two spaces. However, with long filespecs there is only a single space separating it and the next field (the file type). This modifies the regular expression that we use to match the line to accept one or more spaces as field delimeters. However, this causes problems when we attempt to split the three fields into a python dictionary. We cannot use the same logic as previously of using the field delimeter as the file type field itself can contain a space. Instead we use a separate regular expression to parse the line into its component parts. Fixes #45784. 
--- salt/modules/selinux.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index aecadd7a14..17f0aed17a 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -453,7 +453,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l ''' if filetype: _validate_filetype(filetype) - re_spacer = '[ ]{2,}' + re_spacer = '[ ]+' cmd_kwargs = {'spacer': re_spacer, 'filespec': re.escape(name), 'sel_user': sel_user or '[^:]+', @@ -466,11 +466,14 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True) if current_entry_text == '': return None - ret = {} - current_entry_list = re.split(re_spacer, current_entry_text) - ret['filespec'] = current_entry_list[0] - ret['filetype'] = current_entry_list[1] - ret.update(_context_string_to_dict(current_entry_list[2])) + + parts = re.match(r'^({filespec}) +([a-z ]+) (.*)$'.format(**{'filespec': re.escape(name)}), current_entry_text) + ret = { + 'filespec': parts.group(1), + 'filetype': parts.group(2), + } + ret.update(_context_string_to_dict(parts.group(3))) + return ret From bafb7b4e6e3fdffc559f685fe9e9b4cec4725dad Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Wed, 7 Feb 2018 18:25:21 +0000 Subject: [PATCH 164/223] Ensure parsed fields are stripped --- salt/modules/selinux.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index 17f0aed17a..6ebc2f1614 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -469,10 +469,10 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l parts = re.match(r'^({filespec}) +([a-z ]+) (.*)$'.format(**{'filespec': re.escape(name)}), current_entry_text) ret = { - 'filespec': parts.group(1), - 'filetype': parts.group(2), + 'filespec': parts.group(1).strip(), + 
'filetype': parts.group(2).strip(), } - ret.update(_context_string_to_dict(parts.group(3))) + ret.update(_context_string_to_dict(parts.group(3).strip())) return ret From 8f7c45935a360b769ded4160804efc122fcb6124 Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Wed, 7 Feb 2018 18:26:42 +0000 Subject: [PATCH 165/223] Add tests for salt.modules.selinux.fcontext_get_policy --- tests/unit/modules/test_selinux.py | 88 ++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 tests/unit/modules/test_selinux.py diff --git a/tests/unit/modules/test_selinux.py b/tests/unit/modules/test_selinux.py new file mode 100644 index 0000000000..0376fba835 --- /dev/null +++ b/tests/unit/modules/test_selinux.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +import os + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt libs +import salt.modules.selinux as selinux + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SelinuxModuleTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.selinux + ''' + def setup_loader_modules(self): + return {selinux: {}} + + def test_fcontext_get_policy_parsing(self): + ''' + Test to verify that the parsing of the semanage output into fields is + correct. Added with #45784. + ''' + cases = [ + { + 'semanage_out': '/var/www(/.*)? all files system_u:object_r:httpd_sys_content_t:s0', + 'name': '/var/www(/.*)?', + 'filetype': 'all files', + 'sel_user': 'system_u', + 'sel_role': 'object_r', + 'sel_type': 'httpd_sys_content_t', + 'sel_level': 's0' + }, + { + 'semanage_out': '/var/www(/.*)? 
all files system_u:object_r:httpd_sys_content_t:s0', + 'name': '/var/www(/.*)?', + 'filetype': 'all files', + 'sel_user': 'system_u', + 'sel_role': 'object_r', + 'sel_type': 'httpd_sys_content_t', + 'sel_level': 's0' + }, + { + 'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0', + 'name': '/var/lib/dhcp3?', + 'filetype': 'directory', + 'sel_user': 'system_u', + 'sel_role': 'object_r', + 'sel_type': 'dhcp_state_t', + 'sel_level': 's0' + }, + { + 'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0', + 'name': '/var/lib/dhcp3?', + 'filetype': 'directory', + 'sel_user': 'system_u', + 'sel_role': 'object_r', + 'sel_type': 'dhcp_state_t', + 'sel_level': 's0' + }, + { + 'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0', + 'name': '/var/lib/dhcp3?', + 'filetype': 'directory', + 'sel_user': 'system_u', + 'sel_role': 'object_r', + 'sel_type': 'dhcp_state_t', + 'sel_level': 's0' + } + ] + + for case in cases: + with patch.dict(selinux.__salt__, {'cmd.shell': MagicMock(return_value=case['semanage_out'])}): + ret = selinux.fcontext_get_policy(case['name']) + self.assertEqual(ret['filespec'], case['name']) + self.assertEqual(ret['filetype'], case['filetype']) + self.assertEqual(ret['sel_user'], case['sel_user']) + self.assertEqual(ret['sel_role'], case['sel_role']) + self.assertEqual(ret['sel_type'], case['sel_type']) + self.assertEqual(ret['sel_level'], case['sel_level']) From 8047066c4693e2fd6d6723120db4186109bc088b Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Wed, 7 Feb 2018 18:33:37 +0000 Subject: [PATCH 166/223] Remove unused import --- tests/unit/modules/test_selinux.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/unit/modules/test_selinux.py b/tests/unit/modules/test_selinux.py index 0376fba835..ca1e53a6b7 100644 --- a/tests/unit/modules/test_selinux.py +++ b/tests/unit/modules/test_selinux.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -# Import Python libs -import os - # 
Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf From ef6ffb14921446b2ffe0f02455b635e038fa3ac8 Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Fri, 9 Feb 2018 14:09:17 +0000 Subject: [PATCH 167/223] Resolve linting errors --- tests/unit/modules/test_selinux.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/modules/test_selinux.py b/tests/unit/modules/test_selinux.py index ca1e53a6b7..a13618b5dd 100644 --- a/tests/unit/modules/test_selinux.py +++ b/tests/unit/modules/test_selinux.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Import Salt Testing Libs +from __future__ import absolute_import from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import ( @@ -13,6 +14,7 @@ from tests.support.mock import ( # Import Salt libs import salt.modules.selinux as selinux + @skipIf(NO_MOCK, NO_MOCK_REASON) class SelinuxModuleTestCase(TestCase, LoaderModuleMockMixin): ''' From 1916e5c4a4b732dfc1fa685a70988221191b5db0 Mon Sep 17 00:00:00 2001 From: Philippe Pepiot Date: Thu, 1 Feb 2018 18:19:32 +0100 Subject: [PATCH 168/223] Fix selinux.fcontext_policy_present for Centos 6 'a' is not a valid filetype for semanage on Centos 6. Since "a" (all files) is the default behavior of semanage, don't specify a `--ftype` when invoking semanage. 
Closes #45825 --- salt/modules/selinux.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index aecadd7a14..004a2ea8a7 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -514,7 +514,9 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se if action not in ['add', 'delete']: raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action)) cmd = 'semanage fcontext --{0}'.format(action) - if filetype is not None: + # "semanage --ftype a" isn't valid on Centos 6, + # don't pass --ftype since "a" is the default filetype. + if filetype is not None and filetype != 'a': _validate_filetype(filetype) cmd += ' --ftype {0}'.format(filetype) if sel_type is not None: From 9d004f6512d0619df895e0aca56143490a34337c Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 13 Feb 2018 13:16:03 -0600 Subject: [PATCH 169/223] Use mock 2.0.0 instead of unittest.mock on Python 3 (#2) This preserves the custom mock_open we backported from unittest.mock, but otherwise ditches unittest.mock as it does not have MagicMock.assert_called in Python releases before 3.6. This allows us to maintain a uniform mock version across all platforms and Python releases. --- tests/support/mock.py | 60 +++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 39 deletions(-) diff --git a/tests/support/mock.py b/tests/support/mock.py index 6da421dc35..da3d09480d 100644 --- a/tests/support/mock.py +++ b/tests/support/mock.py @@ -5,9 +5,12 @@ tests.support.mock ~~~~~~~~~~~~~~~~~~ - Helper module that wraps :mod:`mock ` and provides - some fake objects in order to properly set the function/class decorators - and yet skip the test cases execution. + Helper module that wraps `mock` and provides some fake objects in order to + properly set the function/class decorators and yet skip the test case's + execution. 
+ + Note: mock >= 2.0.0 required since unittest.mock does not have + MagicMock.assert_called in Python < 3.6. ''' # pylint: disable=unused-import,function-redefined,blacklisted-module,blacklisted-external-module @@ -18,37 +21,20 @@ import sys from salt.ext import six try: - if sys.version_info >= (3,): - # Python 3 - from unittest.mock import ( - Mock, - MagicMock, - patch, - sentinel, - DEFAULT, - # ANY and call will be imported further down - create_autospec, - FILTER_DIR, - NonCallableMock, - NonCallableMagicMock, - PropertyMock, - __version__ - ) - else: - from mock import ( - Mock, - MagicMock, - patch, - sentinel, - DEFAULT, - # ANY and call will be imported further down - create_autospec, - FILTER_DIR, - NonCallableMock, - NonCallableMagicMock, - PropertyMock, - __version__ - ) + from mock import ( + Mock, + MagicMock, + patch, + sentinel, + DEFAULT, + # ANY and call will be imported further down + create_autospec, + FILTER_DIR, + NonCallableMock, + NonCallableMagicMock, + PropertyMock, + __version__ + ) NO_MOCK = False NO_MOCK_REASON = '' mock_version = [] @@ -99,11 +85,7 @@ except ImportError as exc: if NO_MOCK is False: try: - if sys.version_info >= (3,): - # Python 3 - from unittest.mock import call, ANY - else: - from mock import call, ANY + from mock import call, ANY except ImportError: NO_MOCK = True NO_MOCK_REASON = 'you need to upgrade your mock version to >= 0.8.0' From 8ee0a3a28bd5f7516cc897d48af4e88504f20b07 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 13 Feb 2018 13:42:20 -0600 Subject: [PATCH 170/223] Move Solaris USER workaround up a bit In its previous position, the run_env would not get the workaround when clean_env was set to True. 
--- salt/modules/cmdmod.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 976b56d488..794e924216 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -423,6 +423,10 @@ def _run(cmd, env_encoded = env_encoded.encode(__salt_system_encoding__) env_runas = dict(list(zip(*[iter(env_encoded.split(b'\0'))]*2))) env_runas.update(env) + # Fix platforms like Solaris that don't set a USER env var in the + # user's default environment as obtained above. + if env_runas.get('USER') != runas: + env_runas['USER'] = runas env = env_runas # Encode unicode kwargs to filesystem encoding to avoid a # UnicodeEncodeError when the subprocess is invoked. @@ -465,10 +469,6 @@ def _run(cmd, else: run_env = os.environ.copy() run_env.update(env) - # Fix platforms like Solaris that don't set a USER env var in the - # user's default environment as obtained above. - if runas is not None and run_env.get('USER') != runas: - run_env['USER'] = runas if python_shell is None: python_shell = False From 8cf13325eec08ba8e9418e2d26677f7900185b8c Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 12 Feb 2018 22:47:59 -0600 Subject: [PATCH 171/223] salt.states.reg.present: Prevent traceback when reg data is binary This prevents a failed decode of undecodable data from resulting in a traceback by catching the exception and just using the original value in the changes dict. 
--- salt/states/reg.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/reg.py b/salt/states/reg.py index d9bc8a60e5..99cca3b364 100644 --- a/salt/states/reg.py +++ b/salt/states/reg.py @@ -192,9 +192,14 @@ def present(name, salt.utils.to_unicode(name, 'utf-8')) return ret + try: + vdata_decoded = salt.utils.to_unicode(vdata, 'utf-8') + except UnicodeDecodeError: + # vdata contains binary data that can't be decoded + vdata_decoded = vdata add_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': u'{0}'.format(salt.utils.to_unicode(vname, 'utf-8') if vname else u'(Default)'), - 'Value': salt.utils.to_unicode(vdata, 'utf-8')} + 'Value': vdata_decoded} # Check for test option if __opts__['test']: From 916766f6519a21efd9908a93a174aad79fc772c9 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 12 Feb 2018 16:34:51 -0600 Subject: [PATCH 172/223] yumpkg: Fix a couple issues with _get_extra_opts `_get_extra_opts()` and `_get_branch_option()` were unnecessarily quoting the value, causing it to be interpreted as a literal quote by `subprocess.Popen()`. Also, because there were separate helpers for repo options, disableexcludes, branch options, and extra options, and specifically because `_get_extra_opts()` parses *all* kwargs, any of the options from the other helper funcs would end up being added to the command line twice if `_get_extra_opts()` was used. This commit consolidates all of the kwarg inspection and CLI opts construction to a single helper function. It also adds unit tests to make sure that we are formatting our commands properly. Additionally, it makes a minor fix in `refresh_db()` which was not accounted for when we changed the osmajorrelease grain to an integer in 2017.7.0. 
--- salt/modules/yumpkg.py | 116 +++----- tests/unit/modules/test_yumpkg.py | 441 ++++++++++++++++++++++++++++++ 2 files changed, 486 insertions(+), 71 deletions(-) create mode 100644 tests/unit/modules/test_yumpkg.py diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 5464d752dc..3db8aaa27f 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -204,25 +204,29 @@ def _check_versionlock(): ) -def _get_repo_options(**kwargs): +def _get_options(**kwargs): ''' - Returns a list of '--enablerepo' and '--disablerepo' options to be used - in the yum command, based on the kwargs. + Returns a list of options to be used in the yum/dnf command, based on the + kwargs passed. ''' # Get repo options from the kwargs fromrepo = kwargs.pop('fromrepo', '') repo = kwargs.pop('repo', '') disablerepo = kwargs.pop('disablerepo', '') enablerepo = kwargs.pop('enablerepo', '') + disableexcludes = kwargs.pop('disableexcludes', '') + branch = kwargs.pop('branch', '') + get_extra_options = kwargs.pop('get_extra_options', False) # Support old 'repo' argument if repo and not fromrepo: fromrepo = repo ret = [] + if fromrepo: log.info('Restricting to repo \'%s\'', fromrepo) - ret.extend(['--disablerepo=*', '--enablerepo=' + fromrepo]) + ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)]) else: if disablerepo: targets = [disablerepo] \ @@ -238,46 +242,30 @@ def _get_repo_options(**kwargs): else enablerepo log.info('Enabling repo(s): %s', ', '.join(targets)) ret.extend(['--enablerepo={0}'.format(x) for x in targets]) - return ret + if disableexcludes: + log.info('Disabling excludes for \'%s\'', disableexcludes) + ret.append('--disableexcludes={0}'.format(disableexcludes)) -def _get_excludes_option(**kwargs): - ''' - Returns a list of '--disableexcludes' option to be used in the yum command, - based on the kwargs. 
- ''' - disable_excludes = kwargs.pop('disableexcludes', '') - ret = [] - if disable_excludes: - log.info('Disabling excludes for \'%s\'', disable_excludes) - ret.append('--disableexcludes={0}'.format(disable_excludes)) - return ret - - -def _get_branch_option(**kwargs): - ''' - Returns a list of '--branch' option to be used in the yum command, - based on the kwargs. This feature requires 'branch' plugin for YUM. - ''' - branch = kwargs.pop('branch', '') - ret = [] if branch: log.info('Adding branch \'%s\'', branch) - ret.append('--branch=\'{0}\''.format(branch)) - return ret + ret.append('--branch={0}'.format(branch)) + if get_extra_options: + # sorting here to make order uniform, makes unit testing more reliable + for key in sorted(kwargs): + if key.startswith('__'): + continue + value = kwargs[key] + if isinstance(value, six.string_types): + log.info('Found extra option --%s=%s', key, value) + ret.append('--{0}={1}'.format(key, value)) + elif value is True: + log.info('Found extra option --%s', key) + ret.append('--{0}'.format(key)) + if ret: + log.info('Adding extra options: %s', ret) -def _get_extra_options(**kwargs): - ''' - Returns list of extra options for yum - ''' - ret = [] - kwargs = salt.utils.clean_kwargs(**kwargs) - for key, value in six.iteritems(kwargs): - if isinstance(value, six.string_types): - ret.append('--{0}=\'{1}\''.format(key, value)) - elif value is True: - ret.append('--{0}'.format(key)) return ret @@ -441,8 +429,7 @@ def latest_version(*names, **kwargs): if len(names) == 0: return '' - repo_arg = _get_repo_options(**kwargs) - exclude_arg = _get_excludes_option(**kwargs) + options = _get_options(**kwargs) # Refresh before looking for the latest version available if refresh: @@ -452,8 +439,7 @@ def latest_version(*names, **kwargs): # Get available versions for specified package(s) cmd = [_yum(), '--quiet'] - cmd.extend(repo_arg) - cmd.extend(exclude_arg) + cmd.extend(options) cmd.extend(['list', 'available']) cmd.extend(names) out = 
__salt__['cmd.run_all'](cmd, @@ -761,7 +747,7 @@ def list_repo_pkgs(*args, **kwargs): disablerepo = kwargs.pop('disablerepo', '') or '' enablerepo = kwargs.pop('enablerepo', '') or '' - repo_arg = _get_repo_options(fromrepo=fromrepo, **kwargs) + repo_arg = _get_options(fromrepo=fromrepo, **kwargs) if fromrepo and not isinstance(fromrepo, list): try: @@ -913,15 +899,13 @@ def list_upgrades(refresh=True, **kwargs): salt '*' pkg.list_upgrades ''' - repo_arg = _get_repo_options(**kwargs) - exclude_arg = _get_excludes_option(**kwargs) + options = _get_options(**kwargs) if salt.utils.is_true(refresh): refresh_db(check_update=False, **kwargs) cmd = [_yum(), '--quiet'] - cmd.extend(repo_arg) - cmd.extend(exclude_arg) + cmd.extend(options) cmd.extend(['list', 'upgrades' if _yum() == 'dnf' else 'updates']) out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', @@ -1039,21 +1023,19 @@ def refresh_db(**kwargs): check_update_ = kwargs.pop('check_update', True) - repo_arg = _get_repo_options(**kwargs) - exclude_arg = _get_excludes_option(**kwargs) - branch_arg = _get_branch_option(**kwargs) + options = _get_options(**kwargs) clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache'] update_cmd = [_yum(), '--quiet', 'check-update'] - if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7': - # This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL + if __grains__.get('os_family') == 'RedHat' \ + and __grains__.get('osmajorrelease') == 7: + # This feature is disabled because it is not used by Salt and adds a + # lot of extra time to the command with large repos like EPEL update_cmd.append('--setopt=autocheck_running_kernel=false') - for args in (repo_arg, exclude_arg, branch_arg): - if args: - clean_cmd.extend(args) - update_cmd.extend(args) + clean_cmd.extend(options) + update_cmd.extend(options) __salt__['cmd.run'](clean_cmd, python_shell=False) if check_update_: @@ -1090,6 +1072,7 @@ def 
install(name=None, reinstall=False, normalize=True, update_holds=False, + saltenv='base', **kwargs): ''' .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 @@ -1227,9 +1210,7 @@ def install(name=None, {'': {'old': '', 'new': ''}} ''' - repo_arg = _get_repo_options(**kwargs) - exclude_arg = _get_excludes_option(**kwargs) - branch_arg = _get_branch_option(**kwargs) + options = _get_options(**kwargs) if salt.utils.is_true(refresh): refresh_db(**kwargs) @@ -1237,7 +1218,7 @@ def install(name=None, try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( - name, pkgs, sources, normalize=normalize, **kwargs + name, pkgs, sources, saltenv=saltenv, normalize=normalize ) except MinionError as exc: raise CommandExecutionError(exc) @@ -1439,9 +1420,7 @@ def install(name=None, ''' DRY function to add args common to all yum/dnf commands ''' - for arg in (repo_arg, exclude_arg, branch_arg): - if arg: - cmd.extend(arg) + cmd.extend(options) if skip_verify: cmd.append('--nogpgcheck') if downloadonly: @@ -1706,17 +1685,14 @@ def upgrade(name=None, .. note:: To add extra arguments to the ``yum upgrade`` command, pass them as key - word arguments. For arguments without assignments, pass ``True`` + word arguments. For arguments without assignments, pass ``True`` .. 
code-block:: bash salt '*' pkg.upgrade security=True exclude='kernel*' ''' - repo_arg = _get_repo_options(**kwargs) - exclude_arg = _get_excludes_option(**kwargs) - branch_arg = _get_branch_option(**kwargs) - extra_args = _get_extra_options(**kwargs) + options = _get_options(get_extra_options=True, **kwargs) if salt.utils.is_true(refresh): refresh_db(**kwargs) @@ -1745,9 +1721,7 @@ def upgrade(name=None, and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) cmd.extend([_yum(), '--quiet', '-y']) - for args in (repo_arg, exclude_arg, branch_arg, extra_args): - if args: - cmd.extend(args) + cmd.extend(options) if skip_verify: cmd.append('--nogpgcheck') cmd.append('upgrade') diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py new file mode 100644 index 0000000000..c7adcb28d0 --- /dev/null +++ b/tests/unit/modules/test_yumpkg.py @@ -0,0 +1,441 @@ +# -*- coding: utf-8 -*- + +# Import Python Libs +from __future__ import absolute_import + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + Mock, + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt libs +import salt.modules.yumpkg as yumpkg + +LIST_REPOS = { + 'base': { + 'file': '/etc/yum.repos.d/CentOS-Base.repo', + 'gpgcheck': '1', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7', + 'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra', + 'name': 'CentOS-$releasever - Base' + }, + 'base-source': { + 'baseurl': 'http://vault.centos.org/centos/$releasever/os/Source/', + 'enabled': '0', + 'file': '/etc/yum.repos.d/CentOS-Sources.repo', + 'gpgcheck': '1', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7', + 'name': 'CentOS-$releasever - Base Sources' + }, + 'updates': { + 'file': '/etc/yum.repos.d/CentOS-Base.repo', + 'gpgcheck': '1', + 'gpgkey': 
'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7', + 'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra', + 'name': 'CentOS-$releasever - Updates' + }, + 'updates-source': { + 'baseurl': 'http://vault.centos.org/centos/$releasever/updates/Source/', + 'enabled': '0', + 'file': '/etc/yum.repos.d/CentOS-Sources.repo', + 'gpgcheck': '1', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7', + 'name': 'CentOS-$releasever - Updates Sources' + } +} + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class YumTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.yumpkg + ''' + def setup_loader_modules(self): + return { + yumpkg: { + '__context__': { + 'yum_bin': 'yum', + }, + '__grains__': { + 'osarch': 'x86_64', + 'os_family': 'RedHat', + 'osmajorrelease': 7, + }, + } + } + + def test_latest_version_with_options(self): + with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})): + + # with fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.latest_version( + 'foo', + refresh=False, + fromrepo='good', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good', + '--branch=foo', 'list', 'available', 'foo'], + ignore_retcode=True, + output_loglevel='trace', + python_shell=False) + + # without fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.latest_version( + 'foo', + refresh=False, + enablerepo='good', + disablerepo='bad', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good', + '--branch=foo', 'list', 'available', 'foo'], + ignore_retcode=True, + output_loglevel='trace', + python_shell=False) + + def test_list_repo_pkgs_with_options(self): + ''' + Test list_repo_pkgs with and without fromrepo + + NOTE: mock_calls is a stack. 
The most recent call is indexed + with 0, while the first call would have the highest index. + ''' + really_old_yum = MagicMock(return_value='3.2.0') + older_yum = MagicMock(return_value='3.4.0') + newer_yum = MagicMock(return_value='3.4.5') + list_repos_mock = MagicMock(return_value=LIST_REPOS) + kwargs = {'output_loglevel': 'trace', + 'ignore_retcode': True, + 'python_shell': False} + + with patch.object(yumpkg, 'list_repos', list_repos_mock): + + # Test with really old yum. The fromrepo argument has no effect on + # the yum commands we'd run. + with patch.dict(yumpkg.__salt__, {'cmd.run': really_old_yum}): + + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_repo_pkgs('foo') + # We should have called cmd.run_all twice + self.assertEqual(len(cmd.mock_calls), 2) + + # Check args from first call + self.assertEqual( + cmd.mock_calls[1][1], + (['yum', '--quiet', 'list', 'available'],) + ) + # Check kwargs from first call + self.assertEqual(cmd.mock_calls[1][2], kwargs) + + # Check args from second call + self.assertEqual( + cmd.mock_calls[0][1], + (['yum', '--quiet', 'list', 'installed'],) + ) + # Check kwargs from second call + self.assertEqual(cmd.mock_calls[0][2], kwargs) + + # Test with really old yum. The fromrepo argument has no effect on + # the yum commands we'd run. 
+ with patch.dict(yumpkg.__salt__, {'cmd.run': older_yum}): + + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_repo_pkgs('foo') + # We should have called cmd.run_all twice + self.assertEqual(len(cmd.mock_calls), 2) + + # Check args from first call + self.assertEqual( + cmd.mock_calls[1][1], + (['yum', '--quiet', '--showduplicates', 'list', 'available'],) + ) + # Check kwargs from first call + self.assertEqual(cmd.mock_calls[1][2], kwargs) + + # Check args from second call + self.assertEqual( + cmd.mock_calls[0][1], + (['yum', '--quiet', '--showduplicates', 'list', 'installed'],) + ) + # Check kwargs from second call + self.assertEqual(cmd.mock_calls[0][2], kwargs) + + # Test with newer yum. We should run one yum command per repo, so + # fromrepo would limit how many calls we make. + with patch.dict(yumpkg.__salt__, {'cmd.run': newer_yum}): + + # When fromrepo is used, we would only run one yum command, for + # that specific repo. + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_repo_pkgs('foo', fromrepo='base') + # We should have called cmd.run_all once + self.assertEqual(len(cmd.mock_calls), 1) + + # Check args + self.assertEqual( + cmd.mock_calls[0][1], + (['yum', '--quiet', '--showduplicates', + 'repository-packages', 'base', 'list', 'foo'],) + ) + # Check kwargs + self.assertEqual(cmd.mock_calls[0][2], kwargs) + + # Test enabling base-source and disabling updates. We should + # get two calls, one for each enabled repo. Because dict + # iteration order will vary, different Python versions will be + # do them in different orders, which is OK, but it will just + # mean that we will have to check both the first and second + # mock call both times. 
+ cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_repo_pkgs( + 'foo', + enablerepo='base-source', + disablerepo='updates') + # We should have called cmd.run_all twice + self.assertEqual(len(cmd.mock_calls), 2) + + for repo in ('base', 'base-source'): + for index in (0, 1): + try: + # Check args + self.assertEqual( + cmd.mock_calls[index][1], + (['yum', '--quiet', '--showduplicates', + 'repository-packages', repo, 'list', + 'foo'],) + ) + # Check kwargs + self.assertEqual(cmd.mock_calls[index][2], kwargs) + break + except AssertionError: + continue + else: + self.fail("repo '{0}' not checked".format(repo)) + + def test_list_upgrades_dnf(self): + ''' + The subcommand should be "upgrades" with dnf + ''' + with patch.dict(yumpkg.__context__, {'yum_bin': 'dnf'}): + # with fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_upgrades( + refresh=False, + fromrepo='good', + branch='foo') + cmd.assert_called_once_with( + ['dnf', '--quiet', '--disablerepo=*', '--enablerepo=good', + '--branch=foo', 'list', 'upgrades'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + # without fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_upgrades( + refresh=False, + enablerepo='good', + disablerepo='bad', + branch='foo') + cmd.assert_called_once_with( + ['dnf', '--quiet', '--disablerepo=bad', '--enablerepo=good', + '--branch=foo', 'list', 'upgrades'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + def test_list_upgrades_yum(self): + ''' + The subcommand should be "updates" with yum + ''' + # with fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_upgrades( + refresh=False, + 
fromrepo='good', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good', + '--branch=foo', 'list', 'updates'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + # without fromrepo + cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.list_upgrades( + refresh=False, + enablerepo='good', + disablerepo='bad', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good', + '--branch=foo', 'list', 'updates'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + def test_refresh_db_with_options(self): + + with patch('salt.utils.pkg.clear_rtag', Mock()): + + # With check_update=True we will do a cmd.run to run the clean_cmd, and + # then a separate cmd.retcode to check for updates. + + # with fromrepo + clean_cmd = Mock() + update_cmd = MagicMock(return_value=0) + with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd, + 'cmd.retcode': update_cmd}): + yumpkg.refresh_db( + check_update=True, + fromrepo='good', + branch='foo') + clean_cmd.assert_called_once_with( + ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*', + '--enablerepo=good', '--branch=foo'], + python_shell=False) + update_cmd.assert_called_once_with( + ['yum', '--quiet', 'check-update', + '--setopt=autocheck_running_kernel=false', '--disablerepo=*', + '--enablerepo=good', '--branch=foo'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + # without fromrepo + clean_cmd = Mock() + update_cmd = MagicMock(return_value=0) + with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd, + 'cmd.retcode': update_cmd}): + yumpkg.refresh_db( + check_update=True, + enablerepo='good', + disablerepo='bad', + branch='foo') + clean_cmd.assert_called_once_with( + ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad', + '--enablerepo=good', '--branch=foo'], + 
python_shell=False) + update_cmd.assert_called_once_with( + ['yum', '--quiet', 'check-update', + '--setopt=autocheck_running_kernel=false', '--disablerepo=bad', + '--enablerepo=good', '--branch=foo'], + output_loglevel='trace', + ignore_retcode=True, + python_shell=False) + + # With check_update=False we will just do a cmd.run for the clean_cmd + + # with fromrepo + clean_cmd = Mock() + with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}): + yumpkg.refresh_db( + check_update=False, + fromrepo='good', + branch='foo') + clean_cmd.assert_called_once_with( + ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*', + '--enablerepo=good', '--branch=foo'], + python_shell=False) + + # without fromrepo + clean_cmd = Mock() + with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}): + yumpkg.refresh_db( + check_update=False, + enablerepo='good', + disablerepo='bad', + branch='foo') + clean_cmd.assert_called_once_with( + ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad', + '--enablerepo=good', '--branch=foo'], + python_shell=False) + + def test_install_with_options(self): + parse_targets = MagicMock(return_value=({'foo': None}, 'repository')) + with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \ + patch.object(yumpkg, 'list_holds', MagicMock(return_value=[])), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.parse_targets': parse_targets}), \ + patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)): + + # with fromrepo + cmd = MagicMock(return_value={'retcode': 0}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.install( + refresh=False, + fromrepo='good', + branch='foo') + cmd.assert_called_once_with( + ['yum', '-y', '--disablerepo=*', '--enablerepo=good', + '--branch=foo', 'install', 'foo'], + output_loglevel='trace', + python_shell=False, + redirect_stderr=True) + + # without fromrepo + cmd = MagicMock(return_value={'retcode': 0}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + 
yumpkg.install( + refresh=False, + enablerepo='good', + disablerepo='bad', + branch='foo') + cmd.assert_called_once_with( + ['yum', '-y', '--disablerepo=bad', '--enablerepo=good', + '--branch=foo', 'install', 'foo'], + output_loglevel='trace', + python_shell=False, + redirect_stderr=True) + + def test_upgrade_with_options(self): + with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \ + patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)): + + # with fromrepo + cmd = MagicMock(return_value={'retcode': 0}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.upgrade( + refresh=False, + fromrepo='good', + exclude='kernel*', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '-y', '--disablerepo=*', '--enablerepo=good', + '--branch=foo', '--exclude=kernel*', 'upgrade'], + output_loglevel='trace', + python_shell=False) + + # without fromrepo + cmd = MagicMock(return_value={'retcode': 0}) + with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}): + yumpkg.upgrade( + refresh=False, + enablerepo='good', + disablerepo='bad', + exclude='kernel*', + branch='foo') + cmd.assert_called_once_with( + ['yum', '--quiet', '-y', '--disablerepo=bad', '--enablerepo=good', + '--branch=foo', '--exclude=kernel*', 'upgrade'], + output_loglevel='trace', + python_shell=False) From 5a3316ea20ac748fca85146df678871d5408110b Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 14 Feb 2018 16:16:57 +0300 Subject: [PATCH 173/223] Workaroung python bug in traceback.format_exc() The function raises an AttributeError if there is no current exception. 
https://bugs.python.org/issue23003 --- salt/scripts.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/scripts.py b/salt/scripts.py index df8e9b6f30..e677368a0f 100644 --- a/salt/scripts.py +++ b/salt/scripts.py @@ -43,7 +43,12 @@ def _handle_interrupt(exc, original_exc, hardfail=False, trace=''): def _handle_signals(client, signum, sigframe): - trace = traceback.format_exc() + try: + # This raises AttributeError on Python 3.4 and 3.5 if there is no current exception. + # Ref: https://bugs.python.org/issue23003 + trace = traceback.format_exc() + except AttributeError: + trace = '' try: hardcrash = client.options.hard_crash except (AttributeError, KeyError): From bb1cdc451e40634895c54345b3e828aac467367a Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 13 Feb 2018 23:48:50 -0800 Subject: [PATCH 174/223] salt.pillar.file_tree no stack trace when nodegroups undefined --- salt/pillar/file_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index 1a825f80a8..7ab69d1252 100644 --- a/salt/pillar/file_tree.py +++ b/salt/pillar/file_tree.py @@ -328,7 +328,7 @@ def ext_pillar(minion_id, ngroup_pillar = {} nodegroups_dir = os.path.join(root_dir, 'nodegroups') - if os.path.exists(nodegroups_dir) and len(__opts__['nodegroups']) > 0: + if os.path.exists(nodegroups_dir) and len(__opts__.get('nodegroups', ())) > 0: master_ngroups = __opts__['nodegroups'] ext_pillar_dirs = os.listdir(nodegroups_dir) if len(ext_pillar_dirs) > 0: From 3c6a5bf96760b69ec86a7b43e1039f9a4036bc0f Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 13 Feb 2018 23:49:32 -0800 Subject: [PATCH 175/223] salt.pillar.file_tree provide better debug info --- salt/pillar/file_tree.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index 7ab69d1252..ede20f798b 100644 --- a/salt/pillar/file_tree.py +++ 
b/salt/pillar/file_tree.py @@ -354,8 +354,8 @@ def ext_pillar(minion_id, else: if debug is True: log.debug( - 'file_tree: no nodegroups found in file tree directory ' - 'ext_pillar_dirs, skipping...' + 'file_tree: no nodegroups found in file tree directory %s, skipping...', + ext_pillar_dirs ) else: if debug is True: @@ -363,7 +363,12 @@ def ext_pillar(minion_id, host_dir = os.path.join(root_dir, 'hosts', minion_id) if not os.path.exists(host_dir): - # No data for host with this ID + if debug is True: + log.debug( + 'file_tree: no pillar data for minion %s found in file tree directory %s', + minion_id, + host_dir + ) return ngroup_pillar if not os.path.isdir(host_dir): From 0ba4954a4ba4ca91c188354d46cba16e7c27d23d Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 13 Feb 2018 23:46:55 -0800 Subject: [PATCH 176/223] salt.pillar.file_tree revise module documentation --- salt/pillar/file_tree.py | 338 ++++++++++++++++++++++----------------- 1 file changed, 188 insertions(+), 150 deletions(-) diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index ede20f798b..97187bedde 100644 --- a/salt/pillar/file_tree.py +++ b/salt/pillar/file_tree.py @@ -1,175 +1,97 @@ # -*- coding: utf-8 -*- ''' +The ``file_tree`` external pillar allows values from all files in a directory +tree to be imported as Pillar data. -``File_tree`` is an external pillar that allows -values from all files in a directory tree to be imported as Pillar data. +.. note:: -Note this is an external pillar, and is subject to the rules and constraints -governing external pillars detailed here: :ref:`external-pillars`. + This is an external pillar and is subject to the :ref:`rules and + constraints ` governing external pillars. .. versionadded:: 2015.5.0 -Example Configuration ---------------------- +In this pillar, data is organized by either Minion ID or Nodegroup name. To +setup pillar data for a specific Minion, place it in +``/hosts/``. 
To setup pillar data for an entire +Nodegroup, place it in ``/nodegroups/`` where +```` is the Nodegroup's name. + +Example ``file_tree`` Pillar +============================ + +Master Configuration +-------------------- .. code-block:: yaml ext_pillar: - file_tree: - root_dir: /path/to/root/directory + root_dir: /srv/ext_pillar follow_dir_links: False keep_newline: True -The ``root_dir`` parameter is required and points to the directory where files -for each host are stored. The ``follow_dir_links`` parameter is optional and -defaults to False. If ``follow_dir_links`` is set to True, this external pillar -will follow symbolic links to other directories. + node_groups: + internal_servers: 'L@bob,stuart,kevin' -.. warning:: - Be careful when using ``follow_dir_links``, as a recursive symlink chain - will result in unexpected results. +Pillar Configuration +-------------------- -If ``keep_newline`` is set to ``True``, then the pillar values for files ending -in newlines will keep that newline. The default behavior is to remove the -end-of-file newline. ``keep_newline`` should be turned on if the pillar data is -intended to be used to deploy a file using ``contents_pillar`` with a -:py:func:`file.managed ` state. +.. code-block:: bash -.. versionchanged:: 2015.8.4 - The ``raw_data`` parameter has been renamed to ``keep_newline``. In earlier - releases, ``raw_data`` must be used. Also, this parameter can now be a list - of globs, allowing for more granular control over which pillar values keep - their end-of-file newline. The globs match paths relative to the - directories named for minion IDs and nodegroups underneath the ``root_dir`` - (see the layout examples in the below sections). 
+ (salt-master) # tree /srv/ext_pillar + /srv/ext_pillar/ + |-- hosts + | |-- bob + | | |-- apache + | | | `-- config.d + | | | |-- 00_important.conf + | | | `-- 20_bob_extra.conf + | | `-- corporate_app + | | `-- settings + | | `-- bob_settings.cfg + | `-- kevin + | |-- apache + | | `-- config.d + | | `-- 00_important.conf + | `-- corporate_app + | `-- settings + | `-- kevin_settings.cfg + `-- nodegroups + `-- internal_servers + `-- corporate_app + `-- settings + `-- common_settings.cfg - .. code-block:: yaml +Verify Pillar Data +------------------ - ext_pillar: - - file_tree: - root_dir: /path/to/root/directory - keep_newline: - - files/testdir/* +.. code-block:: bash + + (salt-master) # salt bob pillar.items + bob: + ---------- + apache: + ---------- + config.d: + ---------- + 00_important.conf: + + 20_bob_extra.conf: + + corporate_app: + ---------- + settings: + ---------- + common_settings: + // This is the main settings file for the corporate + // internal web app + main_setting: probably + bob_settings: + role: bob .. note:: - In earlier releases, this documentation incorrectly stated that binary - files would not affected by the ``keep_newline`` configuration. However, - this module does not actually distinguish between binary and text files. -.. versionchanged:: 2017.7.0 - Templating/rendering has been added. You can now specify a default render - pipeline and a black- and whitelist of (dis)allowed renderers. - - ``template`` must be set to ``True`` for templating to happen. - - .. 
code-block:: yaml - - ext_pillar: - - file_tree: - root_dir: /path/to/root/directory - render_default: jinja|yaml - renderer_blacklist: - - gpg - renderer_whitelist: - - jinja - - yaml - template: True - -Assigning Pillar Data to Individual Hosts ------------------------------------------ - -To configure pillar data for each host, this external pillar will recursively -iterate over ``root_dir``/hosts/``id`` (where ``id`` is a minion ID), and -compile pillar data with each subdirectory as a dictionary key and each file -as a value. - -For example, the following ``root_dir`` tree: - -.. code-block:: text - - ./hosts/ - ./hosts/test-host/ - ./hosts/test-host/files/ - ./hosts/test-host/files/testdir/ - ./hosts/test-host/files/testdir/file1.txt - ./hosts/test-host/files/testdir/file2.txt - ./hosts/test-host/files/another-testdir/ - ./hosts/test-host/files/another-testdir/symlink-to-file1.txt - -will result in the following pillar tree for minion with ID ``test-host``: - -.. code-block:: text - - test-host: - ---------- - files: - ---------- - another-testdir: - ---------- - symlink-to-file1.txt: - Contents of file #1. - - testdir: - ---------- - file1.txt: - Contents of file #1. - - file2.txt: - Contents of file #2. - -.. note:: - Subdirectories underneath ``root_dir``/hosts/``id`` become nested - dictionaries, as shown above. - - -Assigning Pillar Data to Entire Nodegroups ------------------------------------------- - -To assign Pillar data to all minions in a given nodegroup, this external pillar -recursively iterates over ``root_dir``/nodegroups/``nodegroup`` (where -``nodegroup`` is the name of a nodegroup), and like for individual hosts, -compiles pillar data with each subdirectory as a dictionary key and each file -as a value. - -.. important:: - If the same Pillar key is set for a minion both by nodegroup and by - individual host, then the value set for the individual host will take - precedence. - -For example, the following ``root_dir`` tree: - -.. 
code-block:: text - - ./nodegroups/ - ./nodegroups/test-group/ - ./nodegroups/test-group/files/ - ./nodegroups/test-group/files/testdir/ - ./nodegroups/test-group/files/testdir/file1.txt - ./nodegroups/test-group/files/testdir/file2.txt - ./nodegroups/test-group/files/another-testdir/ - ./nodegroups/test-group/files/another-testdir/symlink-to-file1.txt - -will result in the following pillar data for minions in the node group -``test-group``: - -.. code-block:: text - - test-host: - ---------- - files: - ---------- - another-testdir: - ---------- - symlink-to-file1.txt: - Contents of file #1. - - testdir: - ---------- - file1.txt: - Contents of file #1. - - file2.txt: - Contents of file #2. + The leaf data in the example shown is the contents of the pillar files. ''' from __future__ import absolute_import @@ -302,7 +224,123 @@ def ext_pillar(minion_id, renderer_whitelist=None, template=False): ''' - Compile pillar data for the specified minion ID + Compile pillar data from the given ``root_dir`` specific to Nodegroup names + and Minion IDs. + + If a Minion's ID is not found at ``/host/`` or if it + is not included in any Nodegroups named at + ``/nodegroups/``, no pillar data provided by this + pillar module will be available for that Minion. + + .. versionchanged:: 2017.7.0 + Templating/rendering has been added. You can now specify a default + render pipeline and a black- and whitelist of (dis)allowed renderers. + + :param:`template` must be set to ``True`` for templating to happen. + + .. code-block:: yaml + + ext_pillar: + - file_tree: + root_dir: /path/to/root/directory + render_default: jinja|yaml + renderer_blacklist: + - gpg + renderer_whitelist: + - jinja + - yaml + template: True + + :param minion_id: + The ID of the Minion whose pillar data is to be collected + + :param pillar: + Unused by the ``file_tree`` pillar module + + :param root_dir: + Filesystem directory used as the root for pillar data (e.g. 
+ ``/srv/ext_pillar``) + + :param follow_dir_links: + Follow symbolic links to directories while collecting pillar files. + Defaults to ``False``. + + .. warning:: + + Care should be exercised when enabling this option as it will + follow links that point outside of :param:`root_dir`. + + .. warning:: + + Symbolic links that lead to infinite recursion are not filtered. + + :param debug: + Enable debug information at log level ``debug``. Defaults to + ``False``. This option may be useful to help debug errors when setting + up the ``file_tree`` pillar module. + + :param keep_newline: + Preserve the end-of-file newline in files. Defaults to ``False``. + This option may either be a boolean or a list of file globs (as defined + by the `Python fnmatch package + <https://docs.python.org/library/fnmatch.html>`_) for which end-of-file + newlines are to be kept. + + ``keep_newline`` should be turned on if the pillar data is intended to + be used to deploy a file using ``contents_pillar`` with a + :py:func:`file.managed <salt.states.file.managed>` state. + + .. versionchanged:: 2015.8.4 + The ``raw_data`` parameter has been renamed to ``keep_newline``. In + earlier releases, ``raw_data`` must be used. Also, this parameter + can now be a list of globs, allowing for more granular control over + which pillar values keep their end-of-file newline. The globs match + paths relative to the directories named for Minion IDs and + Nodegroup names underneath the :param:`root_dir`. + + .. code-block:: yaml + + ext_pillar: + - file_tree: + root_dir: /srv/ext_pillar + keep_newline: + - apache/config.d/* + - corporate_app/settings/* + + .. note:: + In earlier releases, this documentation incorrectly stated that + binary files would not be affected by the ``keep_newline``. However, + this module does not actually distinguish between binary and text + files. + + + :param render_default: + Override Salt's :conf_master:`default global renderer <renderer>` for + the ``file_tree`` pillar. + + ..
code-block:: yaml + + render_default: jinja + + :param renderer_blacklist: + Disallow renderers for pillar files. + + .. code-block:: yaml + + renderer_blacklist: + - json + + :param renderer_whitelist: + Allow renderers for pillar files. + + .. code-block:: yaml + + renderer_whitelist: + - yaml + - jinja + + :param template: + Enable templating of pillar files. Defaults to ``False``. ''' # Not used del pillar From e44f5133c51f166db8a14f109b570c0245fad572 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 15 Feb 2018 16:17:25 -0500 Subject: [PATCH 177/223] Fix mac_assistive module not loading --- salt/modules/mac_assistive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/mac_assistive.py b/salt/modules/mac_assistive.py index 962c3350f1..484efcd718 100644 --- a/salt/modules/mac_assistive.py +++ b/salt/modules/mac_assistive.py @@ -30,7 +30,7 @@ def __virtual__(): Only work on Mac OS ''' if salt.utils.platform.is_darwin() \ - and _LooseVersion(__grains__['osrelease']) >= '10.9': + and _LooseVersion(__grains__['osrelease']) >= _LooseVersion('10.9'): return True return ( False, From a5fbe4e95eda002813abdf7cfc7517d4ea8f4115 Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Thu, 15 Feb 2018 16:15:20 +0200 Subject: [PATCH 178/223] Fix typo in postgres_user.present state function --- salt/states/postgres_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/postgres_user.py b/salt/states/postgres_user.py index f85264da20..d9f686871c 100644 --- a/salt/states/postgres_user.py +++ b/salt/states/postgres_user.py @@ -95,7 +95,7 @@ def present(name, encrypted to the previous format if it is not already done. - default_passwoord + default_password The password used only when creating the user, unless password is set. .. 
versionadded:: 2016.3.0 From e8678f633ddb0cfd5caece29b25c28bf94141565 Mon Sep 17 00:00:00 2001 From: Lee Webb Date: Tue, 13 Feb 2018 12:00:09 +1100 Subject: [PATCH 179/223] Fix Comment being None not '' and inject quotes into the TXT ChangeRecords --- salt/states/boto3_route53.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 54a66bdb07..095afba829 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -137,7 +137,7 @@ def _from_aws_encoding(string): # XXX TODO def hosted_zone_present(name, Name=None, PrivateZone=False, - CallerReference=None, Comment='', VPCs=None, + CallerReference=None, Comment=None, VPCs=None, region=None, key=None, keyid=None, profile=None): ''' Ensure a hosted zone exists with the given attributes. @@ -642,6 +642,11 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name ret['result'] = False return ret else: + # for TXT records the entry must be encapsulated in quotes as required by the API + # this appears to be incredibly difficult with the jinja templating engine + # so inject the quotations here to make a viable ChangeBatch + if Type == 'TXT': + rr = '"%s"' % (rr) fixed_rrs += [rr] ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)] From a07bb487264b92d7d5078e77f70a5012eeb2939a Mon Sep 17 00:00:00 2001 From: Lee Webb Date: Wed, 14 Feb 2018 09:09:17 +1100 Subject: [PATCH 180/223] Correct formatting for lint --- salt/states/boto3_route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 095afba829..dbeb2e2c80 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -646,7 +646,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name # this appears to be incredibly difficult with the jinja templating engine # so inject the quotations here to make a viable 
ChangeBatch if Type == 'TXT': - rr = '"%s"' % (rr) + rr = '"{}"'.format(rr) fixed_rrs += [rr] ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)] From b82c8bd6304c49ca1c40fe8ae15798467ee49321 Mon Sep 17 00:00:00 2001 From: Nathan Grennan Date: Wed, 14 Feb 2018 15:04:10 -0800 Subject: [PATCH 181/223] Allow zookeeper znode creation to not require an ACL --- salt/states/zookeeper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/zookeeper.py b/salt/states/zookeeper.py index 8574f93c99..46f6e41e39 100644 --- a/salt/states/zookeeper.py +++ b/salt/states/zookeeper.py @@ -158,7 +158,7 @@ def present(name, value, acls=None, ephemeral=False, sequence=False, makepath=Fa value_result = new_value == value changes.setdefault('new', {}).setdefault('value', new_value) changes.setdefault('old', {}).setdefault('value', cur_value) - if not _check_acls(chk_acls, cur_acls): + if chk_acls and not _check_acls(chk_acls, cur_acls): __salt__['zookeeper.set_acls'](name, acls, version, **connkwargs) new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs) acl_result = _check_acls(new_acls, chk_acls) From 2a5d855d976acca23b5a034c53d9614581415bb7 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Fri, 16 Feb 2018 10:48:57 -0500 Subject: [PATCH 182/223] add required arg to dns_check jinja doc example --- doc/topics/jinja/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/jinja/index.rst b/doc/topics/jinja/index.rst index a833507269..f60d9955d0 100644 --- a/doc/topics/jinja/index.rst +++ b/doc/topics/jinja/index.rst @@ -1348,7 +1348,7 @@ Example: .. code-block:: jinja - {{ 'www.google.com' | dns_check }} + {{ 'www.google.com' | dns_check(port=443) }} Returns: From b94d73c53e85100e27cdf476f421dcc4f22a23c5 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 16 Feb 2018 09:02:36 -0500 Subject: [PATCH 183/223] Pin tornado version in requirements file tornado needs to be >= 4.2.1, but less that 5.0. 
Tornado 5.0 is introducing backwards-incompatible changes. Therefore, we need to pin the version of tornado in base.txt until we can fix supporting Tornado 5.0 in Salt. Refs #45790 --- requirements/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/base.txt b/requirements/base.txt index 4e735d3a21..1a16e368d0 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -3,6 +3,6 @@ msgpack-python>0.3 PyYAML MarkupSafe requests>=1.0.0 -tornado>=4.2.1 +tornado>=4.2.1,<5.0 # Required by Tornado to handle threads stuff. futures>=2.0 From 5a0fe104f7b15b1023119bab1d88faf767c987c1 Mon Sep 17 00:00:00 2001 From: Wedge Jarrad Date: Sun, 18 Feb 2018 10:57:28 -0800 Subject: [PATCH 184/223] Fix contributing doc typo --- doc/topics/development/contributing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/development/contributing.rst b/doc/topics/development/contributing.rst index ed4fd2aa12..3c21c9b9c0 100644 --- a/doc/topics/development/contributing.rst +++ b/doc/topics/development/contributing.rst @@ -223,7 +223,7 @@ branches, and dot release branches. .. note:: GitHub will open pull requests against Salt's main branch, ``develop``, - byndefault. Be sure to check which branch is selected when creating the + by default. Be sure to check which branch is selected when creating the pull request. The Develop Branch From aba00805f48cb1e9ad8a28d2d083b7a6fdb434cd Mon Sep 17 00:00:00 2001 From: gwiyeong Date: Mon, 19 Feb 2018 11:07:36 +0900 Subject: [PATCH 185/223] Adds set_close_callback function to removes stream instance after closed from a set streams. 
--- salt/transport/ipc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index d8d8235920..d93a62154e 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -559,6 +559,11 @@ class IPCMessagePublisher(object): io_loop=self.io_loop ) self.streams.add(stream) + + def discard_after_closed(): + self.streams.discard(stream) + + stream.set_close_callback(discard_after_closed) except Exception as exc: log.error('IPC streaming error: {0}'.format(exc)) From 48080a1bae77b43a6315c114a5c28daf747a5259 Mon Sep 17 00:00:00 2001 From: gwiyeong Date: Mon, 19 Feb 2018 11:07:36 +0900 Subject: [PATCH 186/223] Fixes memory leak, saltclients should be cleaned after used. --- salt/netapi/rest_tornado/saltnado.py | 36 ++++++++++------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index 38e665b7df..b5fce465ed 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -236,28 +236,6 @@ logger = logging.getLogger() # - "wheel" (need async api...) -class SaltClientsMixIn(object): - ''' - MixIn class to container all of the salt clients that the API needs - ''' - # TODO: load this proactively, instead of waiting for a request - __saltclients = None - - @property - def saltclients(self): - if SaltClientsMixIn.__saltclients is None: - local_client = salt.client.get_local_client(mopts=self.application.opts) - # TODO: refreshing clients using cachedict - SaltClientsMixIn.__saltclients = { - 'local': local_client.run_job_async, - # not the actual client we'll use.. 
but its what we'll use to get args - 'local_async': local_client.run_job_async, - 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, - 'runner_async': None, # empty, since we use the same client as `runner` - } - return SaltClientsMixIn.__saltclients - - AUTH_TOKEN_HEADER = 'X-Auth-Token' AUTH_COOKIE_NAME = 'session_id' @@ -388,7 +366,7 @@ class EventListener(object): del self.timeout_map[future] -class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylint: disable=W0223 +class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', json.dumps), ('application/x-yaml', yaml.safe_dump), @@ -416,6 +394,16 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin self.application.opts, ) + if not hasattr(self, 'saltclients'): + local_client = salt.client.get_local_client(mopts=self.application.opts) + self.saltclients = { + 'local': local_client.run_job_async, + # not the actual client we'll use.. 
but its what we'll use to get args + 'local_async': local_client.run_job_async, + 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, + 'runner_async': None, # empty, since we use the same client as `runner` + } + @property def token(self): ''' @@ -745,7 +733,7 @@ class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 self.write(self.serialize(ret)) -class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W0223 +class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' From 2062fd0e5cb809ecf3db9ce14ca7440eaaab6369 Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Mon, 19 Feb 2018 12:59:00 +0200 Subject: [PATCH 187/223] [DOC] Put https link to the formulas doc page --- doc/topics/development/conventions/formulas.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/development/conventions/formulas.rst b/doc/topics/development/conventions/formulas.rst index 57a8d51075..de4e3e28bb 100644 --- a/doc/topics/development/conventions/formulas.rst +++ b/doc/topics/development/conventions/formulas.rst @@ -1245,7 +1245,7 @@ A sample skeleton for the ``README.rst`` file: .. note:: See the full `Salt Formulas installation and usage instructions - `_. + `_. Available states ================ From dd3f93655719a280567ece827c05c4da93c7e4fe Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Mon, 19 Feb 2018 12:32:14 +0100 Subject: [PATCH 188/223] Fix skipping Kubernetes tests if client is not installed When the Kubernetes client is not installed, the import of salt.modules.kubernetes will still succeed, but HAS_LIBS will be set to False (since the library import will be covered by a try-except clause). Therefore expect the salt.modules.kubernetes to always succeed and check kubernetes.HAS_LIBS instead for the presence of the kubernetes library. 
Signed-off-by: Benjamin Drung --- tests/unit/modules/test_kubernetes.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index 4e8f6cd4b5..92879e0359 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -16,15 +16,12 @@ from tests.support.mock import ( NO_MOCK_REASON ) -try: - from salt.modules import kubernetes -except ImportError: - kubernetes = False +from salt.modules import kubernetes @skipIf(NO_MOCK, NO_MOCK_REASON) -@skipIf(kubernetes is False, "Probably Kubernetes client lib is not installed. \ - Skipping test_kubernetes.py") +@skipIf(not kubernetes.HAS_LIBS, "Kubernetes client lib is not installed. " + "Skipping test_kubernetes.py") class KubernetesTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.kubernetes From 8d9a432fb2859e12337460a41e0175118d33d90b Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Mon, 19 Feb 2018 07:35:18 -0700 Subject: [PATCH 189/223] Add --assumeyes to yum/dnf commands in yumpkg.refresh_db Without --assumeyes, these commands can hang waiting for user confirmation if there is an unaccepted repository key. 
--- salt/modules/yumpkg.py | 4 ++-- tests/unit/modules/test_yumpkg.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 3db8aaa27f..0fc2034e04 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -1025,8 +1025,8 @@ def refresh_db(**kwargs): options = _get_options(**kwargs) - clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache'] - update_cmd = [_yum(), '--quiet', 'check-update'] + clean_cmd = [_yum(), '--quiet', '--assumeyes', 'clean', 'expire-cache'] + update_cmd = [_yum(), '--quiet', '--assumeyes', 'check-update'] if __grains__.get('os_family') == 'RedHat' \ and __grains__.get('osmajorrelease') == 7: diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py index c7adcb28d0..882c1c50ab 100644 --- a/tests/unit/modules/test_yumpkg.py +++ b/tests/unit/modules/test_yumpkg.py @@ -311,11 +311,11 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): fromrepo='good', branch='foo') clean_cmd.assert_called_once_with( - ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*', + ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*', '--enablerepo=good', '--branch=foo'], python_shell=False) update_cmd.assert_called_once_with( - ['yum', '--quiet', 'check-update', + ['yum', '--quiet', '--assumeyes', 'check-update', '--setopt=autocheck_running_kernel=false', '--disablerepo=*', '--enablerepo=good', '--branch=foo'], output_loglevel='trace', @@ -333,11 +333,11 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): disablerepo='bad', branch='foo') clean_cmd.assert_called_once_with( - ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad', + ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad', '--enablerepo=good', '--branch=foo'], python_shell=False) update_cmd.assert_called_once_with( - ['yum', '--quiet', 'check-update', + ['yum', '--quiet', '--assumeyes', 'check-update', 
'--setopt=autocheck_running_kernel=false', '--disablerepo=bad', '--enablerepo=good', '--branch=foo'], output_loglevel='trace', @@ -354,7 +354,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): fromrepo='good', branch='foo') clean_cmd.assert_called_once_with( - ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*', + ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*', '--enablerepo=good', '--branch=foo'], python_shell=False) @@ -367,7 +367,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): disablerepo='bad', branch='foo') clean_cmd.assert_called_once_with( - ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad', + ['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad', '--enablerepo=good', '--branch=foo'], python_shell=False) From 2a2c23c66b292c457ccdcbb7eadeb7fcade25a42 Mon Sep 17 00:00:00 2001 From: Ollie Armstrong Date: Mon, 20 Nov 2017 15:56:16 +0000 Subject: [PATCH 190/223] Fix acme state to correctly return on test Previously, the acme.cert state would return that a change was pending during a test run even in the case that the certificate would not have been touched. Changed the return value in this case so that it is not thought to require a change. This is reported in #40208 [0] and is also referenced in #42763 [1]. The issue #40208 looks to go on to recommend further changes beyond the scope of this 'quick fix'. 
[0] https://github.com/saltstack/salt/issues/40208#issuecomment-289637588 [1] https://github.com/saltstack/salt/issues/42763#issuecomment-345728031 --- salt/states/acme.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/states/acme.py b/salt/states/acme.py index 43649a6426..ad1c9d0564 100644 --- a/salt/states/acme.py +++ b/salt/states/acme.py @@ -85,6 +85,7 @@ def cert(name, comment += 'would have been renewed' else: comment += 'would not have been touched' + ret['result'] = True ret['comment'] = comment return ret From 6875e9bee8bfca3fdaeb5f44336f0981f1d038db Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 21 Feb 2018 06:37:26 -0500 Subject: [PATCH 191/223] Update release versions for the develop branch --- doc/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 1a713d2a69..ef06527810 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -248,7 +248,7 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2017.7.3' # latest release +latest_release = '2017.7.4' # latest release previous_release = '2016.11.9' # latest release from previous branch previous_release_dir = '2016.11' # path on web server for previous branch next_release = '' # next release From d8fb051e44b83d5ea753b8f75a977d9dfdbfa39f Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Wed, 21 Feb 2018 15:37:57 +0200 Subject: [PATCH 192/223] [DOC] Fix code-blocks for reStructuredText --- doc/topics/development/conventions/formulas.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/topics/development/conventions/formulas.rst b/doc/topics/development/conventions/formulas.rst index de4e3e28bb..b11433f187 100644 --- a/doc/topics/development/conventions/formulas.rst +++ b/doc/topics/development/conventions/formulas.rst @@ -1234,7 +1234,7 @@ target platform, and any other installation or usage instructions or tips. 
A sample skeleton for the ``README.rst`` file: -.. code-block:: rest +.. code-block:: restructuredtext === foo @@ -1274,7 +1274,7 @@ A sample skeleton for the `CHANGELOG.rst` file: :file:`CHANGELOG.rst`: -.. code-block:: rest +.. code-block:: restructuredtext foo formula =========== From 57a60f62a313fe0ba8ed045aed8edf4acd1c18d8 Mon Sep 17 00:00:00 2001 From: zr Date: Sat, 17 Feb 2018 23:31:26 +0800 Subject: [PATCH 193/223] fix kernel subpackages install bug --- salt/modules/yumpkg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 0fc2034e04..7b4e4e6d36 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -1396,8 +1396,8 @@ def install(name=None, break else: if pkgname is not None: - if re.match('kernel(-.+)?', pkgname): - # kernel and its subpackages support multiple + if re.match('^kernel(|-devel)$', pkgname): + # kernel and kernel-devel support multiple # installs as their paths do not conflict. # Performing a yum/dnf downgrade will be a # no-op so just do an install instead. 
It will From 0a481d707fa145307da21dd6fb7ac7f51492d6f2 Mon Sep 17 00:00:00 2001 From: Volodymyr Samodid Date: Tue, 20 Feb 2018 23:04:17 +0200 Subject: [PATCH 194/223] update digitalocean salt-cloud driver fix #45837 --- salt/cloud/clouds/digital_ocean.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/digital_ocean.py b/salt/cloud/clouds/digital_ocean.py index daabcbddfe..358faaea4e 100644 --- a/salt/cloud/clouds/digital_ocean.py +++ b/salt/cloud/clouds/digital_ocean.py @@ -159,7 +159,7 @@ def avail_sizes(call=None): '-f or --function, or with the --list-sizes option' ) - items = query(method='sizes') + items = query(method='sizes', command='?per_page=100') ret = {} for size in items['sizes']: ret[size['slug']] = {} From 8d36730ef762a76323653b88724b12eaec82c654 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 20 Feb 2018 17:43:53 -0700 Subject: [PATCH 195/223] If no pubkey is passed in openmode fail If the pub entry in the load is empty, we should fail authentication in open mode. This is usually caught elsewhere for the other modes, because we would just write it to a file, but in this case, we only write it to a file if it actually exists, and if it is different from disk_key, so we would catch all other options when trying to load the public key. 
Fixes #46085 --- salt/transport/mixins/auth.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py index 866493b854..f6a01a1d30 100644 --- a/salt/transport/mixins/auth.py +++ b/salt/transport/mixins/auth.py @@ -417,6 +417,10 @@ class AESReqServerMixin(object): log.debug('Host key change detected in open mode.') with salt.utils.fopen(pubfn, 'w+') as fp_: fp_.write(load['pub']) + elif not load['pub']: + log.error('Public key is empty: {0}'.format(load['id'])) + return {'enc': 'clear', + 'load': {'ret': False}} pub = None From c818d4b7911cad6bcd216d1a0b473ac08eab6cee Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 7 Feb 2018 16:13:43 -0700 Subject: [PATCH 196/223] Convert reg values to unicode for debug --- salt/modules/win_lgpo.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index ebaff9f040..3318693196 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -2442,7 +2442,7 @@ class _policy_info(object): elif ord(val) == 1: return 'Enabled' else: - return 'Invalid Value' + return 'Invalid Value: %s' % repr(val) else: return 'Not Defined' except TypeError: @@ -5047,7 +5047,10 @@ def get(policy_class=None, return_full_policy_names=True, class_vals[policy_name] = __salt__['reg.read_value'](_pol['Registry']['Hive'], _pol['Registry']['Path'], _pol['Registry']['Value'])['vdata'] - log.debug('Value {0} found for reg policy {1}'.format(class_vals[policy_name], policy_name)) + log.debug( + 'Value {0} found for reg policy {1}'.format( + salt.utils.to_unicode(class_vals[policy_name]), + policy_name)) elif 'Secedit' in _pol: # get value from secedit _ret, _val = _findOptionValueInSeceditFile(_pol['Secedit']['Option']) From e9fa53d3b759970cf21993d4e95048814fd8ab3d Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 9 Feb 2018 13:45:24 -0700 Subject: [PATCH 197/223] Change the Invalid Data Message --- 
salt/modules/win_lgpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 3318693196..2b467ca2ed 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -2442,7 +2442,7 @@ class _policy_info(object): elif ord(val) == 1: return 'Enabled' else: - return 'Invalid Value: %s' % repr(val) + return 'Invalid Value: {0!r}'.format(val) # pylint: disable=repr-flag-used-in-string else: return 'Not Defined' except TypeError: From bcde5cc62550b09575f4f7fef0ab78f9b6d3eb72 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 21 Feb 2018 09:47:20 -0700 Subject: [PATCH 198/223] Update log statement --- salt/modules/win_lgpo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 2b467ca2ed..89b48b8b79 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -5048,9 +5048,9 @@ def get(policy_class=None, return_full_policy_names=True, _pol['Registry']['Path'], _pol['Registry']['Value'])['vdata'] log.debug( - 'Value {0} found for reg policy {1}'.format( - salt.utils.to_unicode(class_vals[policy_name]), - policy_name)) + 'Value %r found for reg policy %s', + class_vals[policy_name], policy_name + ) elif 'Secedit' in _pol: # get value from secedit _ret, _val = _findOptionValueInSeceditFile(_pol['Secedit']['Option']) From 99079fc4426dd55c7a2a99d86e4447f844d37abf Mon Sep 17 00:00:00 2001 From: Mihai Dinca Date: Tue, 13 Feb 2018 16:11:20 +0100 Subject: [PATCH 199/223] Remove obsolete unicode handling in pkg.info_installed --- salt/modules/zypper.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py index a937106473..75ae752f3a 100644 --- a/salt/modules/zypper.py +++ b/salt/modules/zypper.py @@ -482,15 +482,6 @@ def info_installed(*names, **kwargs): t_nfo = dict() # Translate dpkg-specific keys to a common structure for key, value in six.iteritems(pkg_nfo): - 
if isinstance(value, six.string_types): - # Check, if string is encoded in a proper UTF-8 - if six.PY3: - value_ = value.encode('UTF-8', 'ignore').decode('UTF-8', 'ignore') - else: - value_ = value.decode('UTF-8', 'ignore').encode('UTF-8', 'ignore') - if value != value_: - value = kwargs.get('errors', 'ignore') == 'ignore' and value_ or 'N/A (invalid UTF-8)' - log.error('Package %s has bad UTF-8 code in %s: %s', pkg_name, key, value) if key == 'source_rpm': t_nfo['source'] = value else: From 49e49ae51b38f39d29fbdce7f0fecace05fe9a7b Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 22 Feb 2018 14:29:17 -0500 Subject: [PATCH 200/223] Mark 2 tests as flaky - integration.spm.test_man_spm.SPMManTest.test_man_spm - integration.ssh.test_state.SSHStateTest.test_state_running --- tests/integration/spm/test_man_spm.py | 3 ++- tests/integration/ssh/test_state.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/spm/test_man_spm.py b/tests/integration/spm/test_man_spm.py index 24fb77b846..51b9806c34 100644 --- a/tests/integration/spm/test_man_spm.py +++ b/tests/integration/spm/test_man_spm.py @@ -11,7 +11,7 @@ import tempfile # Import Salt Testing libs from tests.support.case import ModuleCase -from tests.support.helpers import destructiveTest +from tests.support.helpers import destructiveTest, flaky from tests.support.paths import CODE_DIR @@ -33,6 +33,7 @@ class SPMManTest(ModuleCase): def tearDown(self): shutil.rmtree(self.tmpdir) + @flaky def test_man_spm(self): ''' test man spm diff --git a/tests/integration/ssh/test_state.py b/tests/integration/ssh/test_state.py index 4df91890d6..4524cf2f9c 100644 --- a/tests/integration/ssh/test_state.py +++ b/tests/integration/ssh/test_state.py @@ -9,6 +9,7 @@ import time # Import Salt Testing Libs from tests.support.case import SSHCase +from tests.support.helpers import flaky from tests.support.paths import TMP # Import Salt Libs @@ -162,6 +163,7 @@ class SSHStateTest(SSHCase): check_file = 
self.run_function('file.file_exists', [SSH_SLS_FILE], wipe=False) self.assertTrue(check_file) + @flaky def test_state_running(self): ''' test state.running with salt-ssh From ac99bd26dbc77588a502475f5a102be7c4079de2 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 23 Feb 2018 11:55:43 -0700 Subject: [PATCH 201/223] driver and provider should be specified This is not needed in Oxygen --- salt/cloud/clouds/xen.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 5823fd7cc9..0b79d4dfb9 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -565,11 +565,6 @@ def create(vm_): record = {} ret = {} - # Since using "provider: " is deprecated, alias provider - # to use driver: "driver: " - if 'provider' in vm_: - vm_['driver'] = vm_.pop('provider') - # fire creating event __utils__['cloud.fire_event']( 'event', From cffbf52c106c6a3f94d9139e0de9fe4e1bdb1936 Mon Sep 17 00:00:00 2001 From: rallytime Date: Sat, 24 Feb 2018 06:51:25 -0500 Subject: [PATCH 202/223] Lint fix: remove extra line --- tests/unit/modules/test_win_path.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/modules/test_win_path.py b/tests/unit/modules/test_win_path.py index c1d85e7a26..5986294dc9 100644 --- a/tests/unit/modules/test_win_path.py +++ b/tests/unit/modules/test_win_path.py @@ -49,7 +49,6 @@ class WinPathTestCase(TestCase, LoaderModuleMockMixin): salt.utils.stringutils.to_str(self.pathsep.join(new_path)) ) - def test_get_path(self): ''' Test to Returns the system path From 69ac94bacae7eec66a0c76f36b977757e3bbabaf Mon Sep 17 00:00:00 2001 From: rallytime Date: Sat, 24 Feb 2018 06:55:09 -0500 Subject: [PATCH 203/223] Update utils paths --- salt/states/reg.py | 2 +- tests/unit/utils/test_win_functions.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/states/reg.py b/salt/states/reg.py index b1795ef0d3..d4ff1e74d3 100644 --- a/salt/states/reg.py +++ b/salt/states/reg.py @@ 
-193,7 +193,7 @@ def present(name, return ret try: - vdata_decoded = salt.utils.to_unicode(vdata, 'utf-8') + vdata_decoded = salt.utils.stringutils.to_unicode(vdata, 'utf-8') except UnicodeDecodeError: # vdata contains binary data that can't be decoded vdata_decoded = vdata diff --git a/tests/unit/utils/test_win_functions.py b/tests/unit/utils/test_win_functions.py index c35ab697da..370994c479 100644 --- a/tests/unit/utils/test_win_functions.py +++ b/tests/unit/utils/test_win_functions.py @@ -11,8 +11,8 @@ from tests.support.mock import ( ) # Import Salt Libs +import salt.utils.platform import salt.utils.win_functions as win_functions -import salt.utils @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -53,7 +53,7 @@ class WinFunctionsTestCase(TestCase): self.assertEqual(encoded, '^"C:\\Some Path\\With Spaces^"') - @skipIf(not salt.utils.is_windows(), 'WinDLL only available on Windows') + @skipIf(not salt.utils.platform.is_windows(), 'WinDLL only available on Windows') def test_broadcast_setting_change(self): ''' Test to rehash the Environment variables From 46996178e266f83f05749a7da21c7427b378653c Mon Sep 17 00:00:00 2001 From: mephi42 Date: Mon, 19 Feb 2018 13:25:58 +0100 Subject: [PATCH 204/223] Allow configuring HTTP connect timeout --- doc/ref/configuration/master.rst | 34 ++++++++++++++++++++++++++++++++ doc/ref/configuration/minion.rst | 34 ++++++++++++++++++++++++++++++++ salt/config/__init__.py | 6 ++++++ salt/utils/http.py | 2 ++ 4 files changed, 76 insertions(+) diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index f793b90d8b..87af51320f 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -1044,6 +1044,40 @@ cache events are fired when a minion requests a minion data cache refresh. minion_data_cache_events: True +.. conf_master:: http_connect_timeout + +``http_connect_timeout`` +------------------------ + +.. versionadded:: Fluorine + +Default: ``20`` + +HTTP connection timeout in seconds. 
+Applied when fetching files using tornado back-end. +Should be greater than overall download time. + +.. code-block:: yaml + + http_connect_timeout: 20 + +.. conf_master:: http_request_timeout + +``http_request_timeout`` +------------------------ + +.. versionadded:: 2015.8.0 + +Default: ``3600`` + +HTTP request timeout in seconds. +Applied when fetching files using tornado back-end. +Should be greater than overall download time. + +.. code-block:: yaml + + http_request_timeout: 3600 + .. _salt-ssh-configuration: Salt-SSH Configuration diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index cc3535b207..3ebc562531 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -1267,6 +1267,40 @@ talking to the intended master. syndic_finger: 'ab:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:50:10' +.. conf_minion:: http_connect_timeout + +``http_connect_timeout`` +------------------------ + +.. versionadded:: Fluorine + +Default: ``20`` + +HTTP connection timeout in seconds. +Applied when fetching files using tornado back-end. +Should be greater than overall download time. + +.. code-block:: yaml + + http_connect_timeout: 20 + +.. conf_minion:: http_request_timeout + +``http_request_timeout`` +------------------------ + +.. versionadded:: 2015.8.0 + +Default: ``3600`` + +HTTP request timeout in seconds. +Applied when fetching files using tornado back-end. +Should be greater than overall download time. + +.. code-block:: yaml + + http_request_timeout: 3600 + .. conf_minion:: proxy_host ``proxy_host`` diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 1aaab99c79..0fa40af4d9 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -1050,6 +1050,10 @@ VALID_OPTS = { # If set, all minion exec module actions will be rerouted through sudo as this user 'sudo_user': six.string_types, + # HTTP connection timeout in seconds. 
Applied for tornado http fetch functions like cp.get_url + # should be greater than overall download time + 'http_connect_timeout': float, + # HTTP request timeout in seconds. Applied for tornado http fetch functions like cp.get_url # should be greater than overall download time 'http_request_timeout': float, @@ -1451,6 +1455,7 @@ DEFAULT_MINION_OPTS = { 'cache_sreqs': True, 'cmd_safe': True, 'sudo_user': '', + 'http_connect_timeout': 20.0, # tornado default - 20 seconds 'http_request_timeout': 1 * 60 * 60.0, # 1 hour 'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB 'event_match_type': 'startswith', @@ -1785,6 +1790,7 @@ DEFAULT_MASTER_OPTS = { 'rotate_aes_key': True, 'cache_sreqs': True, 'dummy_pub': False, + 'http_connect_timeout': 20.0, # tornado default - 20 seconds 'http_request_timeout': 1 * 60 * 60.0, # 1 hour 'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB 'python2_bin': 'python2', diff --git a/salt/utils/http.py b/salt/utils/http.py index 373bbc9418..918c5643cb 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -491,6 +491,7 @@ def query(url, req_kwargs['ca_certs'] = ca_bundle max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body']) + connect_timeout = opts.get('http_connect_timeout', salt.config.DEFAULT_MINION_OPTS['http_connect_timeout']) timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout']) client_argspec = None @@ -532,6 +533,7 @@ def query(url, allow_nonstandard_methods=True, streaming_callback=streaming_callback, header_callback=header_callback, + connect_timeout=connect_timeout, request_timeout=timeout, proxy_host=proxy_host, proxy_port=proxy_port, From 7b32faa0ced71817e88ab49fb83176bd0941a581 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:33:52 +0100 Subject: [PATCH 205/223] Phase 0 - salt.utils.zfs tests --- tests/unit/utils/test_zfs.py | 1722 ++++++++++++++++++++++++++++++++++ 1 file changed, 1722 insertions(+) create mode 100644 
tests/unit/utils/test_zfs.py diff --git a/tests/unit/utils/test_zfs.py b/tests/unit/utils/test_zfs.py new file mode 100644 index 0000000000..0d5b3e8025 --- /dev/null +++ b/tests/unit/utils/test_zfs.py @@ -0,0 +1,1722 @@ +# -*- coding: utf-8 -*- +''' +Tests for the zfs utils library + +:codeauthor: Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:platform: illumos,freebsd,linux + +.. versionadded:: Fluorine +''' + +# Import Python libs +from __future__ import absolute_import, unicode_literals, print_function + +# Import Salt Testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON, +) + +# Import Salt Execution module to test +import salt.utils.zfs as zfs + +# Import Salt Utils +import salt.loader +from salt.utils.odict import OrderedDict + +# property_map mocks +pmap_exec_zpool = { + 'retcode': 2, + 'stdout': '', + 'stderr': "\n".join([ + 'missing property argument', + 'usage:', + ' get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> ...', + '', + 'the following properties are supported:', + '', + ' PROPERTY EDIT VALUES', + '', + ' allocated NO ', + ' capacity NO ', + ' dedupratio NO <1.00x or higher if deduped>', + ' expandsize NO ', + ' fragmentation NO ', + ' free NO ', + ' freeing NO ', + ' guid NO ', + ' health NO ', + ' leaked NO ', + ' size NO ', + ' altroot YES ', + ' autoexpand YES on | off', + ' autoreplace YES on | off', + ' bootfs YES ', + ' bootsize YES ', + ' cachefile YES | none', + ' comment YES ', + ' dedupditto YES ', + ' delegation YES on | off', + ' failmode YES wait | continue | panic', + ' listsnapshots YES on | off', + ' readonly YES on | off', + ' version YES ', + ' feature@... YES disabled | enabled | active', + '', + 'The feature@ properties must be appended with a feature name.', + 'See zpool-features(5). 
', + ]), +} +pmap_zpool = { + 'comment': { + 'edit': True, + 'type': 'str', + 'values': '' + }, + 'freeing': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'listsnapshots': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'leaked': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'version': { + 'edit': True, + 'type': 'numeric', + 'values': '' + }, + 'write': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'replace': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'delegation': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'dedupditto': { + 'edit': True, + 'type': 'str', + 'values': '' + }, + 'autoexpand': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'alloc': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'allocated': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'guid': { + 'edit': False, + 'type': 'numeric', + 'values': '' + }, + 'size': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'cap': { + 'edit': False, + 'type': 'numeric', + 'values': '' + }, + 'capacity': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + "capacity-alloc": { + "edit": False, + "type": "size", + "values": "" + }, + "capacity-free": { + "edit": False, + "type": "size", + "values": "" + }, + 'cachefile': { + 'edit': True, + 'type': 'str', + 'values': ' | none' + }, + "cksum": { + "edit": False, + "type": "numeric", + "values": "" + }, + 'bootfs': { + 'edit': True, + 'type': 'str', + 'values': '' + }, + 'autoreplace': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + "bandwith-read": { + "edit": False, + "type": "size", + "values": "" + }, + "bandwith-write": { + "edit": False, + "type": "size", + "values": "" + }, + "operations-read": { + "edit": False, + "type": "size", + "values": "" + }, + "operations-write": { + "edit": False, + "type": "size", + "values": "" + }, + "read": { + "edit": False, + "type": "size", + 
"values": "" + }, + 'readonly': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'dedupratio': { + 'edit': False, + 'type': 'str', + 'values': '<1.00x or higher if deduped>' + }, + 'health': { + 'edit': False, + 'type': 'str', + 'values': '' + }, + 'feature@': { + 'edit': True, + 'type': 'str', + 'values': 'disabled | enabled | active' + }, + 'expandsize': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'listsnaps': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'bootsize': { + 'edit': True, + 'type': 'size', + 'values': '' + }, + 'free': { + 'edit': False, + 'type': 'size', + 'values': '' + }, + 'failmode': { + 'edit': True, + 'type': 'str', + 'values': 'wait | continue | panic' + }, + 'altroot': { + 'edit': True, + 'type': 'str', + 'values': '' + }, + 'expand': { + 'edit': True, + 'type': 'bool', + 'values': 'on | off' + }, + 'frag': { + 'edit': False, + 'type': 'str', + 'values': '' + }, + 'fragmentation': { + 'edit': False, + 'type': 'str', + 'values': '' + } +} +pmap_exec_zfs = { + 'retcode': 2, + 'stdout': '', + 'stderr': "\n".join([ + 'missing property argument', + 'usage:', + ' get [-crHp] [-d max] [-o "all" | field[,...]]', + ' [-t type[,...]] [-s source[,...]]', + ' <"all" | property[,...]> [filesystem|volume|snapshot|bookmark] ...', + '', + 'The following properties are supported:', + '', + ' PROPERTY EDIT INHERIT VALUES', + '', + ' available NO NO ', + ' clones NO NO [,...]', + ' compressratio NO NO <1.00x or higher if compressed>', + ' creation NO NO ', + ' defer_destroy NO NO yes | no', + ' filesystem_count NO NO ', + ' logicalreferenced NO NO ', + ' logicalused NO NO ', + ' mounted NO NO yes | no', + ' origin NO NO ', + ' receive_resume_token NO NO ', + ' refcompressratio NO NO <1.00x or higher if compressed>', + ' referenced NO NO ', + ' snapshot_count NO NO ', + ' type NO NO filesystem | volume | snapshot | bookmark', + ' used NO NO ', + ' usedbychildren NO NO ', + ' usedbydataset NO NO ', + ' 
usedbyrefreservation NO NO ', + ' usedbysnapshots NO NO ', + ' userrefs NO NO ', + ' written NO NO ', + ' aclinherit YES YES discard | noallow | restricted | passthrough | passthrough-x', + ' aclmode YES YES discard | groupmask | passthrough | restricted', + ' atime YES YES on | off', + ' canmount YES NO on | off | noauto', + ' casesensitivity NO YES sensitive | insensitive | mixed', + ' checksum YES YES on | off | fletcher2 | fletcher4 | sha256 | sha512 | skein | edonr', + ' compression YES YES on | off | lzjb | gzip | gzip-[1-9] | zle | lz4', + ' copies YES YES 1 | 2 | 3', + ' dedup YES YES on | off | verify | sha256[,verify], sha512[,verify], skein[,verify], edonr,verify', + ' devices YES YES on | off', + ' exec YES YES on | off', + ' filesystem_limit YES NO | none', + ' logbias YES YES latency | throughput', + ' mlslabel YES YES ', + ' mountpoint YES YES | legacy | none', + ' nbmand YES YES on | off', + ' normalization NO YES none | formC | formD | formKC | formKD', + ' primarycache YES YES all | none | metadata', + ' quota YES NO | none', + ' readonly YES YES on | off', + ' recordsize YES YES 512 to 1M, power of 2', + ' redundant_metadata YES YES all | most', + ' refquota YES NO | none', + ' refreservation YES NO | none', + ' reservation YES NO | none', + ' secondarycache YES YES all | none | metadata', + ' setuid YES YES on | off', + ' sharenfs YES YES on | off | share(1M) options', + ' sharesmb YES YES on | off | sharemgr(1M) options', + ' snapdir YES YES hidden | visible', + ' snapshot_limit YES NO | none', + ' sync YES YES standard | always | disabled', + ' utf8only NO YES on | off', + ' version YES NO 1 | 2 | 3 | 4 | 5 | current', + ' volblocksize NO YES 512 to 128k, power of 2', + ' volsize YES NO ', + ' vscan YES YES on | off', + ' xattr YES YES on | off', + ' zoned YES YES on | off', + ' userused@... NO NO ', + ' groupused@... NO NO ', + ' userquota@... YES NO | none', + ' groupquota@... 
YES NO | none', + ' written@ NO NO ', + '', + 'Sizes are specified in bytes with standard units such as K, M, G, etc.', + '', + 'User-defined properties can be specified by using a name containing a colon (:).', + '', + 'The {user|group}{used|quota}@ properties must be appended with', + 'a user or group specifier of one of these forms:', + ' POSIX name (eg: "matt")', + ' POSIX id (eg: "126829")', + ' SMB name@domain (eg: "matt@sun")', + ' SMB SID (eg: "S-1-234-567-89")', + ]), +} +pmap_zfs = { + "origin": { + "edit": False, + "inherit": False, + "values": "", + "type": "str" + }, + "setuid": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "referenced": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "vscan": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "logicalused": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "userrefs": { + "edit": False, + "inherit": False, + "values": "", + "type": "numeric" + }, + "primarycache": { + "edit": True, + "inherit": True, + "values": "all | none | metadata", + "type": "str" + }, + "logbias": { + "edit": True, + "inherit": True, + "values": "latency | throughput", + "type": "str" + }, + "creation": { + "edit": False, + "inherit": False, + "values": "", + "type": "str" + }, + "sync": { + "edit": True, + "inherit": True, + "values": "standard | always | disabled", + "type": "str" + }, + "dedup": { + "edit": True, + "inherit": True, + "values": "on | off | verify | sha256[,verify], sha512[,verify], skein[,verify], edonr,verify", + "type": "bool" + }, + "sharenfs": { + "edit": True, + "inherit": True, + "values": "on | off | share(1m) options", + "type": "bool" + }, + "receive_resume_token": { + "edit": False, + "inherit": False, + "values": "", + "type": "str" + }, + "usedbyrefreservation": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "sharesmb": { + "edit": 
True, + "inherit": True, + "values": "on | off | sharemgr(1m) options", + "type": "bool" + }, + "rdonly": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "reservation": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "reserv": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "mountpoint": { + "edit": True, + "inherit": True, + "values": " | legacy | none", + "type": "str" + }, + "casesensitivity": { + "edit": False, + "inherit": True, + "values": "sensitive | insensitive | mixed", + "type": "str" + }, + "utf8only": { + "edit": False, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "usedbysnapshots": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "readonly": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "written@": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "avail": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "recsize": { + "edit": True, + "inherit": True, + "values": "512 to 1m, power of 2", + "type": "str" + }, + "atime": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "compression": { + "edit": True, + "inherit": True, + "values": "on | off | lzjb | gzip | gzip-[1-9] | zle | lz4", + "type": "bool" + }, + "snapdir": { + "edit": True, + "inherit": True, + "values": "hidden | visible", + "type": "str" + }, + "aclmode": { + "edit": True, + "inherit": True, + "values": "discard | groupmask | passthrough | restricted", + "type": "str" + }, + "zoned": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "copies": { + "edit": True, + "inherit": True, + "values": "1 | 2 | 3", + "type": "numeric" + }, + "snapshot_limit": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "numeric" + }, + "aclinherit": { + "edit": True, 
+ "inherit": True, + "values": "discard | noallow | restricted | passthrough | passthrough-x", + "type": "str" + }, + "compressratio": { + "edit": False, + "inherit": False, + "values": "<1.00x or higher if compressed>", + "type": "str" + }, + "xattr": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "written": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "version": { + "edit": True, + "inherit": False, + "values": "1 | 2 | 3 | 4 | 5 | current", + "type": "numeric" + }, + "recordsize": { + "edit": True, + "inherit": True, + "values": "512 to 1m, power of 2", + "type": "str" + }, + "refquota": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "filesystem_limit": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "numeric" + }, + "lrefer.": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "type": { + "edit": False, + "inherit": False, + "values": "filesystem | volume | snapshot | bookmark", + "type": "str" + }, + "secondarycache": { + "edit": True, + "inherit": True, + "values": "all | none | metadata", + "type": "str" + }, + "refer": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "available": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "used": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "exec": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "compress": { + "edit": True, + "inherit": True, + "values": "on | off | lzjb | gzip | gzip-[1-9] | zle | lz4", + "type": "bool" + }, + "volblock": { + "edit": False, + "inherit": True, + "values": "512 to 128k, power of 2", + "type": "str" + }, + "refcompressratio": { + "edit": False, + "inherit": False, + "values": "<1.00x or higher if compressed>", + "type": "str" + }, + "quota": { + "edit": True, + "inherit": False, + "values": " | 
none", + "type": "size" + }, + "groupquota@": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "userquota@": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "snapshot_count": { + "edit": False, + "inherit": False, + "values": "", + "type": "numeric" + }, + "volsize": { + "edit": True, + "inherit": False, + "values": "", + "type": "size" + }, + "clones": { + "edit": False, + "inherit": False, + "values": "[,...]", + "type": "str" + }, + "canmount": { + "edit": True, + "inherit": False, + "values": "on | off | noauto", + "type": "bool" + }, + "mounted": { + "edit": False, + "inherit": False, + "values": "yes | no", + "type": "bool_alt" + }, + "groupused@": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "normalization": { + "edit": False, + "inherit": True, + "values": "none | formc | formd | formkc | formkd", + "type": "str" + }, + "usedbychildren": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "usedbydataset": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "mlslabel": { + "edit": True, + "inherit": True, + "values": "", + "type": "str" + }, + "refreserv": { + "edit": True, + "inherit": False, + "values": " | none", + "type": "size" + }, + "defer_destroy": { + "edit": False, + "inherit": False, + "values": "yes | no", + "type": "bool_alt" + }, + "volblocksize": { + "edit": False, + "inherit": True, + "values": "512 to 128k, power of 2", + "type": "str" + }, + "lused.": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "redundant_metadata": { + "edit": True, + "inherit": True, + "values": "all | most", + "type": "str" + }, + "filesystem_count": { + "edit": False, + "inherit": False, + "values": "", + "type": "numeric" + }, + "devices": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + }, + "refreservation": { + "edit": True, + "inherit": 
False, + "values": " | none", + "type": "size" + }, + "userused@": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "logicalreferenced": { + "edit": False, + "inherit": False, + "values": "", + "type": "size" + }, + "checksum": { + "edit": True, + "inherit": True, + "values": "on | off | fletcher2 | fletcher4 | sha256 | sha512 | skein | edonr", + "type": "bool" + }, + "nbmand": { + "edit": True, + "inherit": True, + "values": "on | off", + "type": "bool" + } +} + + +def _from_auto(name, value, source='auto'): + ''' + some more complex patching for zfs.from_auto + ''' + with patch.object(salt.utils.zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)), \ + patch.object(salt.utils.zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + return salt.utils.zfs.from_auto(name, value, source) + + +def _from_auto_dict(values, source='auto'): + ''' + some more complex patching for zfs.from_auto_dict + ''' + with patch.object(salt.utils.zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)), \ + patch.object(salt.utils.zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + return salt.utils.zfs.from_auto_dict(values, source) + + +def _to_auto(name, value, source='auto', convert_to_human=True): + ''' + some more complex patching for zfs.to_auto + ''' + with patch.object(salt.utils.zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)), \ + patch.object(salt.utils.zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + return salt.utils.zfs.to_auto(name, value, source, convert_to_human) + + +def _to_auto_dict(values, source='auto', convert_to_human=True): + ''' + some more complex patching for zfs.to_auto_dict + ''' + with patch.object(salt.utils.zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)), \ + patch.object(salt.utils.zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + return salt.utils.zfs.to_auto_dict(values, source, convert_to_human) + + +utils_patch = { + 
'zfs.is_supported': MagicMock(return_value=True), + 'zfs.has_feature_flags': MagicMock(return_value=True), + 'zfs.property_data_zpool': MagicMock(return_value=pmap_zpool), + 'zfs.property_data_zfs': MagicMock(return_value=pmap_zfs), + # NOTE: we make zpool_command and zfs_command a NOOP + # these are extensively tested in tests.unit.utils.test_zfs + 'zfs.zpool_command': MagicMock(return_value='/bin/false'), + 'zfs.zfs_command': MagicMock(return_value='/bin/false'), + # NOTE: from_auto_dict is a special snowflake + # internally it calls multiple calls from + # salt.utils.zfs but we cannot patch those using + # the common methode, __utils__ is not available + # so they are direct calls, we do some voodoo here. + 'zfs.from_auto_dict': _from_auto_dict, + 'zfs.from_auto': _from_auto, + 'zfs.to_auto_dict': _to_auto_dict, + 'zfs.to_auto': _to_auto, +} + + +# Skip this test case if we don't have access to mock! +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ZfsUtilsTestCase(TestCase): + ''' + This class contains a set of functions that test salt.utils.zfs utils + ''' + ## NOTE: test parameter parsing + def test_property_data_zpool(self): + ''' + Test parsing of zpool get output + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, '_exec', MagicMock(return_value=pmap_exec_zpool)): + self.assertEqual(zfs.property_data_zpool(), pmap_zpool) + + def test_property_data_zfs(self): + ''' + Test parsing of zfs get output + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, '_exec', MagicMock(return_value=pmap_exec_zfs)): + self.assertEqual(zfs.property_data_zfs(), pmap_zfs) + + ## NOTE: testing from_bool results + def test_from_bool_on(self): + ''' + Test from_bool with 'on' + ''' + self.assertTrue(zfs.from_bool('on')) + 
self.assertTrue(zfs.from_bool(zfs.from_bool('on'))) + + def test_from_bool_off(self): + ''' + Test from_bool with 'off' + ''' + self.assertFalse(zfs.from_bool('off')) + self.assertFalse(zfs.from_bool(zfs.from_bool('off'))) + + def test_from_bool_none(self): + ''' + Test from_bool with 'none' + ''' + self.assertEqual(zfs.from_bool('none'), None) + self.assertEqual(zfs.from_bool(zfs.from_bool('none')), None) + + def test_from_bool_passthrough(self): + ''' + Test from_bool with 'passthrough' + ''' + self.assertEqual(zfs.from_bool('passthrough'), 'passthrough') + self.assertEqual(zfs.from_bool(zfs.from_bool('passthrough')), 'passthrough') + + def test_from_bool_alt_yes(self): + ''' + Test from_bool_alt with 'yes' + ''' + self.assertTrue(zfs.from_bool_alt('yes')) + self.assertTrue(zfs.from_bool_alt(zfs.from_bool_alt('yes'))) + + def test_from_bool_alt_no(self): + ''' + Test from_bool_alt with 'no' + ''' + self.assertFalse(zfs.from_bool_alt('no')) + self.assertFalse(zfs.from_bool_alt(zfs.from_bool_alt('no'))) + + def test_from_bool_alt_none(self): + ''' + Test from_bool_alt with 'none' + ''' + self.assertEqual(zfs.from_bool_alt('none'), None) + self.assertEqual(zfs.from_bool_alt(zfs.from_bool_alt('none')), None) + + def test_from_bool_alt_passthrough(self): + ''' + Test from_bool_alt with 'passthrough' + ''' + self.assertEqual(zfs.from_bool_alt('passthrough'), 'passthrough') + self.assertEqual(zfs.from_bool_alt(zfs.from_bool_alt('passthrough')), 'passthrough') + + ## NOTE: testing to_bool results + def test_to_bool_true(self): + ''' + Test to_bool with True + ''' + self.assertEqual(zfs.to_bool(True), 'on') + self.assertEqual(zfs.to_bool(zfs.to_bool(True)), 'on') + + def test_to_bool_false(self): + ''' + Test to_bool with False + ''' + self.assertEqual(zfs.to_bool(False), 'off') + self.assertEqual(zfs.to_bool(zfs.to_bool(False)), 'off') + + def test_to_bool_none(self): + ''' + Test to_bool with None + ''' + self.assertEqual(zfs.to_bool(None), 'none') + 
self.assertEqual(zfs.to_bool(zfs.to_bool(None)), 'none') + + def test_to_bool_passthrough(self): + ''' + Test to_bool with 'passthrough' + ''' + self.assertEqual(zfs.to_bool('passthrough'), 'passthrough') + self.assertEqual(zfs.to_bool(zfs.to_bool('passthrough')), 'passthrough') + + def test_to_bool_alt_true(self): + ''' + Test to_bool_alt with True + ''' + self.assertEqual(zfs.to_bool_alt(True), 'yes') + self.assertEqual(zfs.to_bool_alt(zfs.to_bool_alt(True)), 'yes') + + def test_to_bool_alt_false(self): + ''' + Test to_bool_alt with False + ''' + self.assertEqual(zfs.to_bool_alt(False), 'no') + self.assertEqual(zfs.to_bool_alt(zfs.to_bool_alt(False)), 'no') + + def test_to_bool_alt_none(self): + ''' + Test to_bool_alt with None + ''' + self.assertEqual(zfs.to_bool_alt(None), 'none') + self.assertEqual(zfs.to_bool_alt(zfs.to_bool_alt(None)), 'none') + + def test_to_bool_alt_passthrough(self): + ''' + Test to_bool_alt with 'passthrough' + ''' + self.assertEqual(zfs.to_bool_alt('passthrough'), 'passthrough') + self.assertEqual(zfs.to_bool_alt(zfs.to_bool_alt('passthrough')), 'passthrough') + + ## NOTE: testing from_numeric results + def test_from_numeric_str(self): + ''' + Test from_numeric with '42' + ''' + self.assertEqual(zfs.from_numeric('42'), 42) + self.assertEqual(zfs.from_numeric(zfs.from_numeric('42')), 42) + + def test_from_numeric_int(self): + ''' + Test from_numeric with 42 + ''' + self.assertEqual(zfs.from_numeric(42), 42) + self.assertEqual(zfs.from_numeric(zfs.from_numeric(42)), 42) + + def test_from_numeric_none(self): + ''' + Test from_numeric with 'none' + ''' + self.assertEqual(zfs.from_numeric('none'), None) + self.assertEqual(zfs.from_numeric(zfs.from_numeric('none')), None) + + def test_from_numeric_passthrough(self): + ''' + Test from_numeric with 'passthrough' + ''' + self.assertEqual(zfs.from_numeric('passthrough'), 'passthrough') + self.assertEqual(zfs.from_numeric(zfs.from_numeric('passthrough')), 'passthrough') + + ## NOTE: testing 
to_numeric results + def test_to_numeric_str(self): + ''' + Test to_numeric with '42' + ''' + self.assertEqual(zfs.to_numeric('42'), 42) + self.assertEqual(zfs.to_numeric(zfs.to_numeric('42')), 42) + + def test_to_numeric_int(self): + ''' + Test to_numeric with 42 + ''' + self.assertEqual(zfs.to_numeric(42), 42) + self.assertEqual(zfs.to_numeric(zfs.to_numeric(42)), 42) + + def test_to_numeric_none(self): + ''' + Test to_numeric with 'none' + ''' + self.assertEqual(zfs.to_numeric(None), 'none') + self.assertEqual(zfs.to_numeric(zfs.to_numeric(None)), 'none') + + def test_to_numeric_passthrough(self): + ''' + Test to_numeric with 'passthrough' + ''' + self.assertEqual(zfs.to_numeric('passthrough'), 'passthrough') + self.assertEqual(zfs.to_numeric(zfs.to_numeric('passthrough')), 'passthrough') + + ## NOTE: testing from_size results + def test_from_size_absolute(self): + ''' + Test from_size with '5G' + ''' + self.assertEqual(zfs.from_size('5G'), 5368709120) + self.assertEqual(zfs.from_size(zfs.from_size('5G')), 5368709120) + + def test_from_size_decimal(self): + ''' + Test from_size with '4.20M' + ''' + self.assertEqual(zfs.from_size('4.20M'), 4404019) + self.assertEqual(zfs.from_size(zfs.from_size('4.20M')), 4404019) + + def test_from_size_none(self): + ''' + Test from_size with 'none' + ''' + self.assertEqual(zfs.from_size('none'), None) + self.assertEqual(zfs.from_size(zfs.from_size('none')), None) + + def test_from_size_passthrough(self): + ''' + Test from_size with 'passthrough' + ''' + self.assertEqual(zfs.from_size('passthrough'), 'passthrough') + self.assertEqual(zfs.from_size(zfs.from_size('passthrough')), 'passthrough') + + ## NOTE: testing to_size results + def test_to_size_str_absolute(self): + ''' + Test to_size with '5368709120' + ''' + self.assertEqual(zfs.to_size('5368709120'), '5G') + self.assertEqual(zfs.to_size(zfs.to_size('5368709120')), '5G') + + def test_to_size_str_decimal(self): + ''' + Test to_size with '4404019' + ''' + 
self.assertEqual(zfs.to_size('4404019'), '4.20M') + self.assertEqual(zfs.to_size(zfs.to_size('4404019')), '4.20M') + + def test_to_size_int_absolute(self): + ''' + Test to_size with 5368709120 + ''' + self.assertEqual(zfs.to_size(5368709120), '5G') + self.assertEqual(zfs.to_size(zfs.to_size(5368709120)), '5G') + + def test_to_size_int_decimal(self): + ''' + Test to_size with 4404019 + ''' + self.assertEqual(zfs.to_size(4404019), '4.20M') + self.assertEqual(zfs.to_size(zfs.to_size(4404019)), '4.20M') + + def test_to_size_none(self): + ''' + Test to_size with 'none' + ''' + self.assertEqual(zfs.to_size(None), 'none') + self.assertEqual(zfs.to_size(zfs.to_size(None)), 'none') + + def test_to_size_passthrough(self): + ''' + Test to_size with 'passthrough' + ''' + self.assertEqual(zfs.to_size('passthrough'), 'passthrough') + self.assertEqual(zfs.to_size(zfs.to_size('passthrough')), 'passthrough') + + ## NOTE: testing from_str results + def test_from_str_space(self): + ''' + Test from_str with "\"my pool/my dataset\" + ''' + self.assertEqual(zfs.from_str('"my pool/my dataset"'), 'my pool/my dataset') + self.assertEqual(zfs.from_str(zfs.from_str('"my pool/my dataset"')), 'my pool/my dataset') + + def test_from_str_squote_space(self): + ''' + Test from_str with "my pool/jorge's dataset" + ''' + self.assertEqual(zfs.from_str("my pool/jorge's dataset"), "my pool/jorge's dataset") + self.assertEqual(zfs.from_str(zfs.from_str("my pool/jorge's dataset")), "my pool/jorge's dataset") + + def test_from_str_dquote_space(self): + ''' + Test from_str with "my pool/the \"good\" stuff" + ''' + self.assertEqual(zfs.from_str("my pool/the \"good\" stuff"), 'my pool/the "good" stuff') + self.assertEqual(zfs.from_str(zfs.from_str("my pool/the \"good\" stuff")), 'my pool/the "good" stuff') + + def test_from_str_none(self): + ''' + Test from_str with 'none' + ''' + self.assertEqual(zfs.from_str('none'), None) + self.assertEqual(zfs.from_str(zfs.from_str('none')), None) + + def 
test_from_str_passthrough(self): + ''' + Test from_str with 'passthrough' + ''' + self.assertEqual(zfs.from_str('passthrough'), 'passthrough') + self.assertEqual(zfs.from_str(zfs.from_str('passthrough')), 'passthrough') + + ## NOTE: testing to_str results + def test_to_str_space(self): + ''' + Test to_str with 'my pool/my dataset' + ''' + ## NOTE: for fun we use both the '"str"' and "\"str\"" way of getting the literal string: "str" + self.assertEqual(zfs.to_str('my pool/my dataset'), '"my pool/my dataset"') + self.assertEqual(zfs.to_str(zfs.to_str('my pool/my dataset')), "\"my pool/my dataset\"") + + def test_to_str_squote_space(self): + ''' + Test to_str with "my pool/jorge's dataset" + ''' + self.assertEqual(zfs.to_str("my pool/jorge's dataset"), "\"my pool/jorge's dataset\"") + self.assertEqual(zfs.to_str(zfs.to_str("my pool/jorge's dataset")), "\"my pool/jorge's dataset\"") + + def test_to_str_none(self): + ''' + Test to_str with 'none' + ''' + self.assertEqual(zfs.to_str(None), 'none') + self.assertEqual(zfs.to_str(zfs.to_str(None)), 'none') + + def test_to_str_passthrough(self): + ''' + Test to_str with 'passthrough' + ''' + self.assertEqual(zfs.to_str('passthrough'), 'passthrough') + self.assertEqual(zfs.to_str(zfs.to_str('passthrough')), 'passthrough') + + ## NOTE: testing is_snapshot + def test_is_snapshot_snapshot(self): + ''' + Test is_snapshot with a valid snapshot name + ''' + self.assertTrue(zfs.is_snapshot('zpool_name/dataset@backup')) + + def test_is_snapshot_bookmark(self): + ''' + Test is_snapshot with a valid bookmark name + ''' + self.assertFalse(zfs.is_snapshot('zpool_name/dataset#backup')) + + def test_is_snapshot_filesystem(self): + ''' + Test is_snapshot with a valid filesystem name + ''' + self.assertFalse(zfs.is_snapshot('zpool_name/dataset')) + + ## NOTE: testing is_bookmark + def test_is_bookmark_snapshot(self): + ''' + Test is_bookmark with a valid snapshot name + ''' + self.assertFalse(zfs.is_bookmark('zpool_name/dataset@backup')) + + 
def test_is_bookmark_bookmark(self): + ''' + Test is_bookmark with a valid bookmark name + ''' + self.assertTrue(zfs.is_bookmark('zpool_name/dataset#backup')) + + def test_is_bookmark_filesystem(self): + ''' + Test is_bookmark with a valid filesystem name + ''' + self.assertFalse(zfs.is_bookmark('zpool_name/dataset')) + + ## NOTE: testing is_dataset + def test_is_dataset_snapshot(self): + ''' + Test is_dataset with a valid snapshot name + ''' + self.assertFalse(zfs.is_dataset('zpool_name/dataset@backup')) + + def test_is_dataset_bookmark(self): + ''' + Test is_dataset with a valid bookmark name + ''' + self.assertFalse(zfs.is_dataset('zpool_name/dataset#backup')) + + def test_is_dataset_filesystem(self): + ''' + Test is_dataset with a valid filesystem/volume name + ''' + self.assertTrue(zfs.is_dataset('zpool_name/dataset')) + + ## NOTE: testing zfs_command + def test_zfs_command_simple(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + self.assertEqual( + zfs.zfs_command('list'), + "/sbin/zfs list" + ) + + def test_zfs_command_none_target(self): + ''' + Test if zfs_command builds the correct string with a target of None + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + self.assertEqual( + zfs.zfs_command('list', target=[None, 'mypool', None]), + "/sbin/zfs list mypool" + ) + + def test_zfs_command_flag(self): + ''' + Test if zfs_command builds the correct 
string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-r', # recursive + '-p', # parsable + ] + self.assertEqual( + zfs.zfs_command('list', flags=my_flags), + "/sbin/zfs list -r -p" + ) + + def test_zfs_command_opt(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_opts = { + '-t': 'snap', # only list snapshots + } + self.assertEqual( + zfs.zfs_command('list', opts=my_opts), + "/sbin/zfs list -t snap" + ) + + def test_zfs_command_flag_opt(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-r', # recursive + '-p', # parsable + ] + my_opts = { + '-t': 'snap', # only list snapshots + } + self.assertEqual( + zfs.zfs_command('list', flags=my_flags, opts=my_opts), + "/sbin/zfs list -r -p -t snap" + ) + + def test_zfs_command_target(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', 
MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-r', # recursive + '-p', # parsable + ] + my_opts = { + '-t': 'snap', # only list snapshots + } + self.assertEqual( + zfs.zfs_command('list', flags=my_flags, opts=my_opts, target='mypool'), + "/sbin/zfs list -r -p -t snap mypool" + ) + + def test_zfs_command_target_with_space(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-r', # recursive + '-p', # parsable + ] + my_opts = { + '-t': 'snap', # only list snapshots + } + self.assertEqual( + zfs.zfs_command('list', flags=my_flags, opts=my_opts, target='my pool'), + '/sbin/zfs list -r -p -t snap "my pool"' + ) + + def test_zfs_command_property(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + self.assertEqual( + zfs.zfs_command('get', property_name='quota', target='mypool'), + "/sbin/zfs get quota mypool" + ) + + def test_zfs_command_property_value(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', 
MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-r', # recursive + ] + self.assertEqual( + zfs.zfs_command('set', flags=my_flags, property_name='quota', property_value='5G', target='mypool'), + "/sbin/zfs set -r quota=5368709120 mypool" + ) + + def test_zfs_command_multi_property_value(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + property_name = ['quota', 'readonly'] + property_value = ['5G', 'no'] + self.assertEqual( + zfs.zfs_command('set', property_name=property_name, property_value=property_value, target='mypool'), + "/sbin/zfs set quota=5368709120 readonly=off mypool" + ) + + def test_zfs_command_fs_props(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-p', # create parent + ] + my_props = { + 'quota': '1G', + 'compression': 'lz4', + } + self.assertEqual( + zfs.zfs_command('create', flags=my_flags, filesystem_properties=my_props, target='mypool/dataset'), + "/sbin/zfs create -p -o compression=lz4 -o quota=1073741824 mypool/dataset" + ) + + def test_zfs_command_fs_props_with_space(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', 
MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_props = { + 'quota': '4.2M', + 'compression': 'lz4', + } + self.assertEqual( + zfs.zfs_command('create', filesystem_properties=my_props, target="my pool/jorge's dataset"), + '/sbin/zfs create -o compression=lz4 -o quota=4404019 "my pool/jorge\'s dataset"' + ) + + ## NOTE: testing zpool_command + def test_zpool_command_simple(self): + ''' + Test if zfs_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + self.assertEqual( + zfs.zpool_command('list'), + "/sbin/zpool list" + ) + + def test_zpool_command_opt(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_opts = { + '-o': 'name,size', # show only name and size + } + self.assertEqual( + zfs.zpool_command('list', opts=my_opts), + "/sbin/zpool list -o name,size" + ) + + def test_zpool_command_opt_list(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_opts = { + '-d': ['/tmp', '/zvol'], + } + self.assertEqual( + 
zfs.zpool_command('import', opts=my_opts, target='mypool'), + "/sbin/zpool import -d /tmp -d /zvol mypool" + ) + + def test_zpool_command_flag_opt(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-p', # parsable + ] + my_opts = { + '-o': 'name,size', # show only name and size + } + self.assertEqual( + zfs.zpool_command('list', flags=my_flags, opts=my_opts), + "/sbin/zpool list -p -o name,size" + ) + + def test_zpool_command_target(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-p', # parsable + ] + my_opts = { + '-o': 'name,size', # show only name and size + } + self.assertEqual( + zfs.zpool_command('list', flags=my_flags, opts=my_opts, target='mypool'), + "/sbin/zpool list -p -o name,size mypool" + ) + + def test_zpool_command_target_with_space(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + fs_props = { + 'quota': '100G', + } + pool_props = { + 'comment': "jorge's comment has a space", + } + self.assertEqual( + 
zfs.zpool_command('create', pool_properties=pool_props, filesystem_properties=fs_props, target='my pool'), + "/sbin/zpool create -O quota=107374182400 -o comment=\"jorge's comment has a space\" \"my pool\"" + ) + + def test_zpool_command_property(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + self.assertEqual( + zfs.zpool_command('get', property_name='comment', target='mypool'), + "/sbin/zpool get comment mypool" + ) + + def test_zpool_command_property_value(self): + ''' + Test if zpool_command builds the correct string + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + my_flags = [ + '-v', # verbose + ] + self.assertEqual( + zfs.zpool_command('iostat', flags=my_flags, target=['mypool', 60, 1]), + "/sbin/zpool iostat -v mypool 60 1" + ) + + def test_parse_command_result_success(self): + ''' + Test if parse_command_result returns the expected result + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 0 + res['stderr'] = '' + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res, 'tested'), + OrderedDict([('tested', True)]), + ) + + def 
test_parse_command_result_success_nolabel(self): + ''' + Test if parse_command_result returns the expected result + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 0 + res['stderr'] = '' + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res), + OrderedDict(), + ) + + def test_parse_command_result_fail(self): + ''' + Test if parse_command_result returns the expected result on failure + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 1 + res['stderr'] = '' + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res, 'tested'), + OrderedDict([('tested', False)]), + ) + + def test_parse_command_result_nolabel(self): + ''' + Test if parse_command_result returns the expected result on failure + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 1 + res['stderr'] = '' + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res), + OrderedDict(), + ) + + def test_parse_command_result_fail_message(self): + ''' + Test if parse_command_result returns the expected result on failure with stderr + ''' + with patch.object(zfs, '_zfs_cmd', 
MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 1 + res['stderr'] = "\n".join([ + 'ice is not hot', + 'usage:', + 'this should not be printed', + ]) + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res, 'tested'), + OrderedDict([('tested', False), ('error', 'ice is not hot')]), + ) + + def test_parse_command_result_fail_message_nolabel(self): + ''' + Test if parse_command_result returns the expected result on failure with stderr + ''' + with patch.object(zfs, '_zfs_cmd', MagicMock(return_value='/sbin/zfs')): + with patch.object(zfs, '_zpool_cmd', MagicMock(return_value='/sbin/zpool')): + with patch.object(zfs, 'property_data_zfs', MagicMock(return_value=pmap_zfs)): + with patch.object(zfs, 'property_data_zpool', MagicMock(return_value=pmap_zpool)): + res = {} + res['retcode'] = 1 + res['stderr'] = "\n".join([ + 'ice is not hot', + 'usage:', + 'this should not be printed', + ]) + res['stdout'] = '' + self.assertEqual( + zfs.parse_command_result(res), + OrderedDict([('error', 'ice is not hot')]), + ) + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From 505b816914a29306b0a05d9c137ede9f4bf4da03 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:34:03 +0100 Subject: [PATCH 206/223] Phase 0 - salt.utils.zfs --- salt/utils/zfs.py | 712 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 712 insertions(+) create mode 100644 salt/utils/zfs.py diff --git a/salt/utils/zfs.py b/salt/utils/zfs.py new file mode 100644 index 0000000000..3b5c158e22 --- /dev/null +++ b/salt/utils/zfs.py @@ -0,0 +1,712 @@ +# -*- coding: utf-8 -*- +''' +Utility functions for zfs + +These functions are for dealing with type conversion and basic execution + +:maintainer: Jorge Schrauwen 
def _check_retcode(cmd):
    '''
    Run ``cmd`` quietly and report whether it exited with retcode 0.
    '''
    retcode = salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True)
    return retcode == 0


def _exec(**kwargs):
    '''
    Thin wrapper around cmdmod.run_all that defaults to a quiet,
    retcode-tolerant execution unless the caller overrides those options.
    '''
    kwargs.setdefault('ignore_retcode', True)
    kwargs.setdefault('output_loglevel', 'quiet')
    return salt.modules.cmdmod.run_all(**kwargs)
def _property_create_dict(header, data):
    '''
    Build a property-metadata dict from a parsed header row and data row.

    Trailing columns beyond the header width are folded into the last
    column (the free-form VALUES text), then the row is zipped with the
    header names and enriched with normalized name, detected type and
    boolean edit/inherit flags.
    '''
    merged = _merge_last(data, len(header))
    prop = dict(zip(header, merged))
    prop['name'] = _property_normalize_name(prop.pop('property'))
    prop['type'] = _property_detect_type(prop['name'], prop['values'])
    prop['edit'] = from_bool(prop['edit'])
    if 'inherit' in prop:
        prop['inherit'] = from_bool(prop['inherit'])
    return prop
@real_memoize
def _zfs_cmd():
    '''
    Return the path of the zfs binary if present

    Memoized: resolved once per process.
    '''
    # Get the path to the zfs binary.
    return salt.utils.path.which('zfs')


@real_memoize
def _zpool_cmd():
    '''
    Return the path of the zpool binary if present

    Memoized: resolved once per process.
    '''
    # Get the path to the zpool binary.
    return salt.utils.path.which('zpool')
key/value) + # we pass through 'sorted' to garentee the same order + if pool_properties is None: + pool_properties = {} + for fsopt in sorted(pool_properties): + cmd.append('-o') + cmd.append('{key}={val}'.format( + key=fsopt, + val=to_auto(fsopt, pool_properties[fsopt], source='zpool', convert_to_human=False), + )) + + # NOTE: append property and value + # the set command takes a key=value pair, we need to support this + if property_name is not None: + if property_value is not None: + if not isinstance(property_name, list): + property_name = [property_name] + if not isinstance(property_value, list): + property_value = [property_value] + for key, val in zip(property_name, property_value): + cmd.append('{key}={val}'.format( + key=key, + val=to_auto(key, val, source=source, convert_to_human=False), + )) + else: + cmd.append(property_name) + + # NOTE: append the target(s) + if target is not None: + if not isinstance(target, list): + target = [target] + for tgt in target: + # NOTE: skip None list items + # we do not want to skip False and 0! 
@real_memoize
def is_supported():
    '''
    Check the system for ZFS support

    Returns True only when the current platform is known to support ZFS
    AND the zpool binary can be found.
    '''
    # Check for supported platforms
    # NOTE: ZFS on Windows is in development
    # NOTE: ZFS on NetBSD is in development
    on_supported_platform = False
    if salt.utils.platform.is_sunos():
        on_supported_platform = True
    elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
        # FreeBSD: zfs kernel module must be loaded
        on_supported_platform = True
    elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
        # Linux: in-kernel ZFS module present
        on_supported_platform = True
    elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
        # Linux: userspace zfs-fuse implementation
        on_supported_platform = True
    elif salt.utils.platform.is_darwin() and \
         os.path.exists('/Library/Extensions/zfs.kext') and \
         os.path.exists('/dev/zfs'):
        # macOS: kernel extension installed and device node available
        on_supported_platform = True

    # Additional check for the zpool command
    return (_zpool_cmd() and on_supported_platform) is True
@real_memoize
def property_data_zfs():
    '''
    Return a dict of zfs properties

    .. note::

        Each property will have an entry with the following info:
        - edit : boolean - is this property editable after pool creation
        - inherit : boolean - is this property inheritable
        - type : str - either bool, bool_alt, size, numeric, or string
        - values : str - list of possible values

    .. warning::

        This data is probed from the output of 'zfs get' with some supplemental
        data that is hardcoded. There is no better way to get this information aside
        from reading the code.

    '''
    # NOTE: the dict below registers the short-form aliases listed in the
    #       man page so lookups work under either name
    return _property_parse_cmd(_zfs_cmd(), {
        'available': 'avail',
        'logicalreferenced': 'lrefer.',
        'logicalused': 'lused.',
        'referenced': 'refer',
        'volblocksize': 'volblock',
        'compression': 'compress',
        'readonly': 'rdonly',
        'recordsize': 'recsize',
        'refreservation': 'refreserv',
        'reservation': 'reserv',
    })
def from_str(value):
    '''
    Decode a zfs quoted string (used for name, path, ...).

    Strips one layer of surrounding double quotes and unescapes embedded
    quotes; 'none' decodes to None.
    '''
    if value == 'none':
        return None
    if not value:
        return value
    text = str(value)
    if text.startswith('"') and text.endswith('"'):
        text = text[1:-1]
    return text.replace('\\"', '"')


def to_str(value):
    '''
    Encode a python string as a zfs safe string (used for name, path, ...).

    Escapes embedded double quotes, wraps values containing spaces in
    double quotes, and encodes None as 'none'. Idempotent: already-encoded
    input is decoded first.
    '''
    value = from_str(value)
    if value is None:
        return 'none'
    if not value:
        return value
    escaped = value.replace('"', '\\"')
    if ' ' in escaped:
        escaped = '"' + escaped + '"'
    return escaped
def is_snapshot(name):
    '''
    Check if name is a valid snapshot name (exactly one '@').
    '''
    return from_str(name).count('@') == 1


def is_bookmark(name):
    '''
    Check if name is a valid bookmark name (exactly one '#').
    '''
    return from_str(name).count('#') == 1


def is_dataset(name):
    '''
    Check if name is a valid filesystem or volume name
    (i.e. neither a snapshot nor a bookmark).
    '''
    return not (is_snapshot(name) or is_bookmark(name))
+ + ''' + return _command( + 'zpool', + command=command, + flags=flags, + opts=opts, + property_name=property_name, + property_value=property_value, + filesystem_properties=filesystem_properties, + pool_properties=pool_properties, + target=target, + ) + + +def parse_command_result(res, label=None): + ''' + Parse the result of a zpool/zfs command + + .. note:: + + Output on failure is rather predicatable. + - retcode > 0 + - each 'error' is a line on stderr + - optional 'Usage:' block under those with hits + + We simple check those and return a OrderedDict were + we set label = True|False and error = error_messages + + ''' + ret = OrderedDict() + + if label: + ret[label] = res['retcode'] == 0 + + if res['retcode'] != 0: + ret['error'] = [] + for error in res['stderr'].splitlines(): + if error.lower().startswith('usage:'): + break + if error.lower().startswith("use '-f'"): + error = error.replace('-f', 'force=True') + if error.lower().startswith("use '-r'"): + error = error.replace('-r', 'recursive=True') + ret['error'].append(error) + + if len(ret['error']): + ret['error'] = "\n".join(ret['error']) + else: + del ret['error'] + + return ret + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From a64d81d2cdaf47a471ad2a0cd9ef242e6a42ed45 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:34:19 +0100 Subject: [PATCH 207/223] Phase 1 - salt.grains.zfs --- salt/grains/zfs.py | 71 ++++++++++++---------------------------------- 1 file changed, 18 insertions(+), 53 deletions(-) diff --git a/salt/grains/zfs.py b/salt/grains/zfs.py index ee55930766..035bacdab0 100644 --- a/salt/grains/zfs.py +++ b/salt/grains/zfs.py @@ -16,25 +16,25 @@ from __future__ import absolute_import, print_function, unicode_literals import logging # Import salt libs -import salt.utils.dictupdate +import salt.utils.dictupdate as dictupdate import salt.utils.path import salt.utils.platform -try: - # The zfs_support grain will only be set to True if this module is supported - # This 
allows the grain to be set to False on systems that don't support zfs - # _conform_value is only called if zfs_support is set to True - from salt.modules.zfs import _conform_value -except ImportError: - pass # Solve the Chicken and egg problem where grains need to run before any # of the modules are loaded and are generally available for any usage. import salt.modules.cmdmod +import salt.utils.zfs __virtualname__ = 'zfs' __salt__ = { 'cmd.run': salt.modules.cmdmod.run, } +__utils__ = { + 'zfs.is_supported': salt.utils.zfs.is_supported, + 'zfs.has_feature_flags': salt.utils.zfs.has_feature_flags, + 'zfs.zpool_command': salt.utils.zfs.zpool_command, + 'zfs.to_auto': salt.utils.zfs.to_auto, +} log = logging.getLogger(__name__) @@ -48,45 +48,6 @@ def __virtual__(): return __virtualname__ -def _check_retcode(cmd): - ''' - Simple internal wrapper for cmdmod.retcode - ''' - return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0 - - -def _zfs_support(): - ''' - Provide information about zfs kernel module - ''' - grains = {'zfs_support': False} - - # Check for zfs support - # NOTE: ZFS on Windows is in development - # NOTE: ZFS on NetBSD is in development - on_supported_platform = False - if salt.utils.platform.is_sunos() and salt.utils.path.which('zfs'): - on_supported_platform = True - elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'): - on_supported_platform = True - elif salt.utils.platform.is_linux(): - modinfo = salt.utils.path.which('modinfo') - if modinfo: - on_supported_platform = _check_retcode('{0} zfs'.format(modinfo)) - else: - on_supported_platform = _check_retcode('ls /sys/module/zfs') - - # NOTE: fallback to zfs-fuse if needed - if not on_supported_platform and salt.utils.path.which('zfs-fuse'): - on_supported_platform = True - - # Additional check for the zpool command - if on_supported_platform and salt.utils.path.which('zpool'): - grains['zfs_support'] = True - - return grains - - def 
_zfs_pool_data(): ''' Provide grains about zpools @@ -94,12 +55,16 @@ def _zfs_pool_data(): grains = {} # collect zpool data - zpool_cmd = salt.utils.path.which('zpool') - for zpool in __salt__['cmd.run']('{zpool} list -H -p -o name,size'.format(zpool=zpool_cmd)).splitlines(): + zpool_list_cmd = __utils__['zfs.zpool_command']( + 'list', + flags=['-H', '-p'], + opts={'-o': 'name,size'}, + ) + for zpool in __salt__['cmd.run'](zpool_list_cmd).splitlines(): if 'zpool' not in grains: grains['zpool'] = {} zpool = zpool.split() - grains['zpool'][zpool[0]] = _conform_value(zpool[1], True) + grains['zpool'][zpool[0]] = __utils__['zfs.to_auto'](zpool[1], True) # return grain data return grains @@ -110,10 +75,10 @@ def zfs(): Provide grains for zfs/zpool ''' grains = {} - - grains = salt.utils.dictupdate.update(grains, _zfs_support(), merge_lists=True) + grains['zfs_support'] = __utils__['zfs.is_supported']() + grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']() if grains['zfs_support']: - grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True) + grains = dictupdate.update(grains, _zfs_pool_data(), merge_lists=True) return grains From e7e902662bbcc82200ddd7ac48a3b07d7d48143d Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:34:40 +0100 Subject: [PATCH 208/223] Phase 1 - salt.modules.zfs tests --- tests/unit/modules/test_zfs.py | 552 ++++++++++++++++++++++++++------- 1 file changed, 442 insertions(+), 110 deletions(-) diff --git a/tests/unit/modules/test_zfs.py b/tests/unit/modules/test_zfs.py index a7dcc63ce9..62ba14fe82 100644 --- a/tests/unit/modules/test_zfs.py +++ b/tests/unit/modules/test_zfs.py @@ -1,9 +1,12 @@ # -*- coding: utf-8 -*- ''' - :codeauthor: Nitin Madhok ` +Tests for salt.modules.zfs - tests.unit.modules.zfs_test - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:codeauthor: Nitin Madhok , Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:depends: salt.utils.zfs +:platform: illumos,freebsd,linux ''' # 
Import Python libs @@ -19,9 +22,17 @@ from tests.support.mock import ( NO_MOCK_REASON, ) +# Import test data from salt.utils.zfs test +from tests.unit.utils.test_zfs import utils_patch + # Import Salt Execution module to test +import salt.utils.zfs import salt.modules.zfs as zfs + +# Import Salt Utils +import salt.loader from salt.utils.odict import OrderedDict +from salt.utils.dateutils import strftime # Skip this test case if we don't have access to mock! @@ -31,10 +42,16 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): This class contains a set of functions that test salt.modules.zfs module ''' def setup_loader_modules(self): - patcher = patch('salt.modules.zfs._check_zfs', MagicMock(return_value='/sbin/zfs')) - patcher.start() - self.addCleanup(patcher.stop) - return {zfs: {}} + self.opts = opts = salt.config.DEFAULT_MINION_OPTS + utils = salt.loader.utils(opts, whitelist=['zfs']) + zfs_obj = { + zfs: { + '__opts__': opts, + '__utils__': utils, + } + } + + return zfs_obj def test_exists_success(self): ''' @@ -45,7 +62,8 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = '' ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertTrue(zfs.exists('myzpool/mydataset')) def test_exists_failure_not_exists(self): @@ -57,7 +75,8 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertFalse(zfs.exists('myzpool/mydataset')) def test_exists_failure_invalid_name(self): @@ -69,46 +88,50 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "cannot open 
'myzpool/': invalid dataset name" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertFalse(zfs.exists('myzpool/')) def test_create_success(self): ''' Tests successful return of create function on ZFS file system creation ''' - res = {'myzpool/mydataset': 'created'} + res = OrderedDict([('created', True)]) ret = {} ret['stdout'] = "" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.create('myzpool/mydataset'), res) def test_create_success_with_create_parent(self): ''' Tests successful return of create function when ``create_parent=True`` ''' - res = {'myzpool/mydataset/mysubdataset': 'created'} + res = OrderedDict([('created', True)]) ret = {} ret['stdout'] = "" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset', create_parent=True), res) def test_create_success_with_properties(self): ''' Tests successful return of create function on ZFS file system creation (with properties) ''' - res = {'myzpool/mydataset': 'created'} + res = OrderedDict([('created', True)]) ret = {} ret['stdout'] = "" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual( zfs.create( 'myzpool/mydataset', @@ -123,177 +146,429 @@ class ZfsTestCase(TestCase, 
LoaderModuleMockMixin): ''' Tests unsuccessful return of create function if dataset name is missing ''' - res = {'myzpool': 'cannot create \'myzpool\': missing dataset name'} + res = OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool': missing dataset name"), + ]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot create 'myzpool': missing dataset name" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.create('myzpool'), res) def test_create_error_trailing_slash(self): ''' Tests unsuccessful return of create function if trailing slash in name is present ''' - res = {'myzpool/': 'cannot create \'myzpool/\': trailing slash in name'} + res = OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool/': trailing slash in name"), + ]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot create 'myzpool/': trailing slash in name" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.create('myzpool/'), res) def test_create_error_no_such_pool(self): ''' Tests unsuccessful return of create function if the pool is not present ''' - res = {'myzpool/mydataset': 'cannot create \'myzpool/mydataset\': no such pool \'myzpool\''} + res = OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool/mydataset': no such pool 'myzpool'"), + ]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot create 'myzpool/mydataset': no such pool 'myzpool'" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): 
self.assertEqual(zfs.create('myzpool/mydataset'), res) def test_create_error_missing_parent(self): ''' Tests unsuccessful return of create function if the parent datasets do not exist ''' - res = {'myzpool/mydataset/mysubdataset': 'cannot create \'myzpool/mydataset/mysubdataset\': parent does not exist'} + res = OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool/mydataset/mysubdataset': parent does not exist"), + ]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot create 'myzpool/mydataset/mysubdataset': parent does not exist" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset'), res) - def test_list_success(self): + def test_destroy_success(self): ''' - Tests zfs list + Tests successful return of destroy function on ZFS file system destruction ''' - res = OrderedDict([('myzpool', {'avail': '954G', 'mountpoint': '/myzpool', 'used': '844G', 'refer': '96K'})]) - ret = {'pid': 31817, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\t844G\t954G\t96K\t/myzpool'} - mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): - self.assertEqual(zfs.list_('myzpool'), res) - - def test_list_parsable_success(self): - ''' - Tests zfs list with parsable output - ''' - res = OrderedDict([('myzpool', {'avail': 1024795238400, 'mountpoint': '/myzpool', 'used': 905792561152, 'refer': 98304})]) - ret = {'pid': 31817, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\t905792561152\t1024795238400\t98304\t/myzpool'} - mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): - self.assertEqual(zfs.list_('myzpool', parsable=True), res) - - def test_mount_success(self): - ''' - Tests zfs mount of filesystem - ''' - res = {'myzpool/mydataset': 'mounted'} + res = 
OrderedDict([('destroyed', True)]) ret = {} ret['stdout'] = "" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + + def test_destroy_error_not_exists(self): + ''' + Tests failure return of destroy function on ZFS file system destruction + ''' + res = OrderedDict([ + ('destroyed', False), + ('error', "cannot open 'myzpool/mydataset': dataset does not exist"), + ]) + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + + def test_destroy_error_has_children(self): + ''' + Tests failure return of destroy function on ZFS file system destruction + ''' + res = OrderedDict([ + ('destroyed', False), + ('error', "\n".join([ + "cannot destroy 'myzpool/mydataset': filesystem has children", + "use 'recursive=True' to destroy the following datasets:", + "myzpool/mydataset@snapshot", + ])), + ]) + ret = {} + ret['stdout'] = "" + ret['stderr'] = "\n".join([ + "cannot destroy 'myzpool/mydataset': filesystem has children", + "use '-r' to destroy the following datasets:", + "myzpool/mydataset@snapshot", + ]) + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + + def test_rename_success(self): + ''' + Tests successful return of rename function + ''' + res = OrderedDict([('renamed', True)]) + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = 
MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.rename('myzpool/mydataset', 'myzpool/newdataset'), res) + + def test_rename_error_not_exists(self): + ''' + Tests failure return of rename function + ''' + res = OrderedDict([ + ('renamed', False), + ('error', "cannot open 'myzpool/mydataset': dataset does not exist"), + ]) + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.rename('myzpool/mydataset', 'myzpool/newdataset'), res) + + def test_list_success(self): + ''' + Tests zfs list + ''' + res = OrderedDict([ + ('myzpool', OrderedDict([ + ('used', 905792561152), + ('avail', 1024795238400), + ('refer', 98304), + ('mountpoint', '/myzpool'), + ])), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = 'myzpool\t905792561152\t1024795238400\t98304\t/myzpool' + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.list_('myzpool'), res) + + def test_list_parsable_success(self): + ''' + Tests zfs list with parsable set to False + ''' + res = OrderedDict([ + ('myzpool', OrderedDict([ + ('used', '844G'), + ('avail', '954G'), + ('refer', '96K'), + ('mountpoint', '/myzpool'), + ])), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = 'myzpool\t905792561152\t1024795238400\t98304\t/myzpool' + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.list_('myzpool', parsable=False), res) + + def test_list_custom_success(self): + ''' + Tests zfs list + ''' + res = 
OrderedDict([ + ('myzpool', OrderedDict([ + ('canmount', True), + ('used', 834786304), + ('avail', 87502848), + ('compression', False), + ])), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = 'myzpool\ton\t834786304\t87502848\toff' + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.list_('myzpool', properties='canmount,used,avail,compression'), res) + + def test_list_custom_parsable_success(self): + ''' + Tests zfs list + ''' + res = OrderedDict([ + ('myzpool', OrderedDict([ + ('canmount', 'on'), + ('used', '796M'), + ('avail', '83.4M'), + ('compression', 'off'), + ])), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = 'myzpool\ton\t834786304\t87502848\toff' + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.list_('myzpool', properties='canmount,used,avail,compression', parsable=False), res) + + def test_list_error_no_dataset(self): + ''' + Tests zfs list + ''' + res = OrderedDict() + ret = {} + ret['retcode'] = 1 + ret['stdout'] = "cannot open 'myzpool': dataset does not exist" + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.list_('myzpool'), res) + + def test_list_mount_success(self): + ''' + Tests zfs list_mount + ''' + res = OrderedDict([ + ('myzpool/data', '/data'), + ('myzpool/data/ares', '/data/ares'), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = "\n".join([ + "myzpool/data\t\t\t\t/data", + "myzpool/data/ares\t\t\t/data/ares", + ]) + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + 
self.assertEqual(zfs.list_mount(), res) + + def test_mount_success(self): + ''' + Tests zfs mount of filesystem + ''' + res = OrderedDict([('mounted', True)]) + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.mount('myzpool/mydataset'), res) def test_mount_failure(self): ''' Tests zfs mount of already mounted filesystem ''' - res = {'myzpool/mydataset': "cannot mount 'myzpool/mydataset': filesystem already mounted"} + res = OrderedDict([('mounted', False), ('error', "cannot mount 'myzpool/mydataset': filesystem already mounted")]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot mount 'myzpool/mydataset': filesystem already mounted" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.mount('myzpool/mydataset'), res) def test_unmount_success(self): ''' Tests zfs unmount of filesystem ''' - res = {'myzpool/mydataset': 'unmounted'} + res = OrderedDict([('unmounted', True)]) ret = {} ret['stdout'] = "" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.unmount('myzpool/mydataset'), res) def test_unmount_failure(self): ''' Tests zfs unmount of already mounted filesystem ''' - res = {'myzpool/mydataset': "cannot mount 'myzpool/mydataset': not currently mounted"} + res = OrderedDict([ + ('unmounted', False), + ('error', "cannot mount 'myzpool/mydataset': not currently mounted"), + ]) ret = {} ret['stdout'] = "" ret['stderr'] = "cannot mount 'myzpool/mydataset': not currently mounted" 
ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.unmount('myzpool/mydataset'), res) def test_inherit_success(self): ''' Tests zfs inherit of compression property ''' - res = {'myzpool/mydataset': {'compression': 'cleared'}} + res = OrderedDict([('inherited', True)]) ret = {'pid': 45193, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.inherit('compression', 'myzpool/mydataset'), res) def test_inherit_failure(self): ''' Tests zfs inherit of canmount ''' - res = { - 'myzpool/mydataset': { - 'canmount': "'canmount' property cannot be inherited, use revert=True to try and reset it to it's default value." 
- } - } + res = OrderedDict([ + ('inherited', False), + ('error', "'canmount' property cannot be inherited"), + ]) ret = {'pid': 43898, 'retcode': 1, 'stderr': "'canmount' property cannot be inherited", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.inherit('canmount', 'myzpool/mydataset'), res) def test_diff(self): ''' Tests zfs diff ''' - res = ['M\t/\t/myzpool/mydataset/', '+\tF\t/myzpool/mydataset/hello'] - ret = {'pid': 51495, 'retcode': 0, 'stderr': '', 'stdout': 'M\t/\t/myzpool/mydataset/\n+\tF\t/myzpool/mydataset/hello'} + res = [ + "1517063879.144517494\tM\t\t/data/test/", + "1517063875.296592355\t+\t\t/data/test/world", + "1517063879.274438467\t+\t\t/data/test/hello", + ] + ret = {} + ret['retcode'] = 0 + ret['stdout'] = "\n".join([ + "1517063879.144517494\tM\t\t/data/test/", + "1517063875.296592355\t+\t\t/data/test/world", + "1517063879.274438467\t+\t\t/data/test/hello", + ]) + ret['stderr'] = '' mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.diff('myzpool/mydataset@yesterday', 'myzpool/mydataset'), res) + def test_diff_parsed_time(self): + ''' + Tests zfs diff + ''' + ## NOTE: do not hardcode parsed timestamps, timezone play a role here. 
+ ## zfs diff output seems to be timezone aware + res = OrderedDict([ + (strftime(1517063879.144517494, '%Y-%m-%d.%H:%M:%S.%f'), 'M\t\t/data/test/'), + (strftime(1517063875.296592355, '%Y-%m-%d.%H:%M:%S.%f'), '+\t\t/data/test/world'), + (strftime(1517063879.274438467, '%Y-%m-%d.%H:%M:%S.%f'), '+\t\t/data/test/hello'), + ]) + ret = {} + ret['retcode'] = 0 + ret['stdout'] = "\n".join([ + "1517063879.144517494\tM\t\t/data/test/", + "1517063875.296592355\t+\t\t/data/test/world", + "1517063879.274438467\t+\t\t/data/test/hello", + ]) + ret['stderr'] = '' + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.diff('myzpool/data@yesterday', 'myzpool/data', parsable=False), res) + def test_rollback_success(self): ''' Tests zfs rollback success ''' - res = {'myzpool/mydataset': 'rolledback to snapshot: yesterday'} + res = OrderedDict([('rolledback', True)]) ret = {'pid': 56502, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res) def test_rollback_failure(self): ''' Tests zfs rollback failure ''' - res = {'myzpool/mydataset': "cannot rollback to 'myzpool/mydataset@yesterday': more recent snapshots " - "or bookmarks exist\nuse '-r' to force deletion of the following snapshots " - "and bookmarks:\nmyzpool/mydataset@today"} + res = OrderedDict([ + ('rolledback', False), + ('error', "\n".join([ + "cannot rollback to 'myzpool/mydataset@yesterday': more recent snapshots or bookmarks exist", + "use 'recursive=True' to force deletion of the following snapshots and bookmarks:", + "myzpool/mydataset@today" + ]), + ), + ]) ret = { 'pid': 57471, 'retcode': 1, @@ -303,47 +578,58 @@ class ZfsTestCase(TestCase, 
LoaderModuleMockMixin): 'stdout': '' } mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res) def test_clone_success(self): ''' Tests zfs clone success ''' - res = {'myzpool/yesterday': 'cloned from myzpool/mydataset@yesterday'} + res = OrderedDict([('cloned', True)]) ret = {'pid': 64532, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/yesterday'), res) def test_clone_failure(self): ''' Tests zfs clone failure ''' - res = {'myzpool/archive/yesterday': "cannot create 'myzpool/archive/yesterday': parent does not exist"} + res = OrderedDict([ + ('cloned', False), + ('error', "cannot create 'myzpool/archive/yesterday': parent does not exist"), + ]) ret = {'pid': 64864, 'retcode': 1, 'stderr': "cannot create 'myzpool/archive/yesterday': parent does not exist", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/archive/yesterday'), res) def test_promote_success(self): ''' Tests zfs promote success ''' - res = {'myzpool/yesterday': 'promoted'} + res = OrderedDict([('promoted', True)]) ret = {'pid': 69075, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): 
self.assertEqual(zfs.promote('myzpool/yesterday'), res) def test_promote_failure(self): ''' Tests zfs promote failure ''' - res = {'myzpool/yesterday': "cannot promote 'myzpool/yesterday': not a cloned filesystem"} + res = OrderedDict([ + ('promoted', False), + ('error', "cannot promote 'myzpool/yesterday': not a cloned filesystem"), + ]) ret = {'pid': 69209, 'retcode': 1, 'stderr': "cannot promote 'myzpool/yesterday': not a cloned filesystem", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.promote('myzpool/yesterday'), res) def test_bookmark_success(self): @@ -351,138 +637,184 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): Tests zfs bookmark success ''' with patch('salt.utils.path.which', MagicMock(return_value='/usr/bin/man')): - res = {'myzpool/mydataset@yesterday': 'bookmarked as myzpool/mydataset#important'} + res = OrderedDict([('bookmarked', True)]) ret = {'pid': 20990, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.bookmark('myzpool/mydataset@yesterday', 'myzpool/mydataset#important'), res) def test_holds_success(self): ''' Tests zfs holds success ''' - res = {'myzpool/mydataset@baseline': {'important ': 'Wed Dec 23 21:06 2015', 'release-1.0': 'Wed Dec 23 21:08 2015'}} + res = OrderedDict([ + ('important', 'Wed Dec 23 21:06 2015'), + ('release-1.0', 'Wed Dec 23 21:08 2015'), + ]) ret = {'pid': 40216, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool/mydataset@baseline\timportant \tWed Dec 23 21:06 2015\nmyzpool/mydataset@baseline\trelease-1.0\tWed Dec 23 21:08 2015'} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, 
{'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res) def test_holds_failure(self): ''' Tests zfs holds failure ''' - res = {'myzpool/mydataset@baseline': "cannot open 'myzpool/mydataset@baseline': dataset does not exist"} + res = OrderedDict([ + ('error', "cannot open 'myzpool/mydataset@baseline': dataset does not exist"), + ]) ret = {'pid': 40993, 'retcode': 1, 'stderr': "cannot open 'myzpool/mydataset@baseline': dataset does not exist", 'stdout': 'no datasets available'} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res) def test_hold_success(self): ''' Tests zfs hold success ''' - res = {'myzpool/mydataset@baseline': {'important': 'held'}, 'myzpool/mydataset@release-1.0': {'important': 'held'}} + res = OrderedDict([('held', True)]) ret = {'pid': 50876, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res) def test_hold_failure(self): ''' Tests zfs hold failure ''' - res = {'myzpool/mydataset@baseline': {'important': 'tag already exists on this dataset'}} + res = OrderedDict([ + ('held', False), + ('error', "cannot hold snapshot 'myzpool/mydataset@baseline': tag already exists on this dataset"), + ]) ret = {'pid': 51006, 'retcode': 1, 'stderr': "cannot hold snapshot 'myzpool/mydataset@baseline': tag already exists on this dataset", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, 
{'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline'), res) def test_release_success(self): ''' Tests zfs release success ''' - res = {'myzpool/mydataset@baseline': {'important': 'released'}, 'myzpool/mydataset@release-1.0': {'important': 'released'}} + res = OrderedDict([('released', True)]) ret = {'pid': 50876, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res) def test_release_failure(self): ''' Tests zfs release failure ''' - res = {'myzpool/mydataset@baseline': {'important': 'no such tag on this dataset'}} + res = OrderedDict([ + ('released', False), + ('error', "cannot release hold from snapshot 'myzpool/mydataset@baseline': no such tag on this dataset"), + ]) ret = {'pid': 51006, 'retcode': 1, 'stderr': "cannot release hold from snapshot 'myzpool/mydataset@baseline': no such tag on this dataset", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline'), res) def test_snapshot_success(self): ''' Tests zfs snapshot success ''' - res = {'myzpool/mydataset@baseline': 'snapshotted'} + res = OrderedDict([('snapshotted', True)]) ret = {'pid': 69125, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): 
self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) def test_snapshot_failure(self): ''' Tests zfs snapshot failure ''' - res = {'myzpool/mydataset@baseline': 'dataset already exists'} + res = OrderedDict([ + ('snapshotted', False), + ('error', "cannot create snapshot 'myzpool/mydataset@baseline': dataset already exists"), + ]) ret = {'pid': 68526, 'retcode': 1, 'stderr': "cannot create snapshot 'myzpool/mydataset@baseline': dataset already exists", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) def test_snapshot_failure2(self): ''' Tests zfs snapshot failure ''' - res = {'myzpool/mydataset@baseline': 'dataset does not exist'} + res = OrderedDict([ + ('snapshotted', False), + ('error', "cannot open 'myzpool/mydataset': dataset does not exist"), + ]) ret = {'pid': 69256, 'retcode': 2, 'stderr': "cannot open 'myzpool/mydataset': dataset does not exist\nusage:\n\tsnapshot [-r] [-o property=value] ... 
@ ...\n\nFor the property list, run: zfs set|get\n\nFor the delegated permission list, run: zfs allow|unallow", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) def test_set_success(self): ''' Tests zfs set success ''' - res = {'myzpool/mydataset': {'compression': 'set'}} + res = OrderedDict([('set', True)]) ret = {'pid': 79736, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.set('myzpool/mydataset', compression='lz4'), res) def test_set_failure(self): ''' Tests zfs set failure ''' - res = {'myzpool/mydataset': {'canmount': "'canmount' must be one of 'on | off | noauto'"}} + res = OrderedDict([ + ('set', False), + ('error', "cannot set property for 'myzpool/mydataset': 'canmount' must be one of 'on | off | noauto'"), + ]) ret = {'pid': 79887, 'retcode': 1, 'stderr': "cannot set property for 'myzpool/mydataset': 'canmount' must be one of 'on | off | noauto'", 'stdout': ''} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.set('myzpool/mydataset', canmount='lz4'), res) def test_get_success(self): ''' Tests zfs get success ''' - res = OrderedDict([('myzpool', {'used': {'value': '844G'}})]) - ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t844G'} + res = OrderedDict([ + ('myzpool', OrderedDict([ + ('used', OrderedDict([ + ('value', 906238099456), + ])), + ])), + ]) + ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 
'myzpool\tused\t906238099456'} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): self.assertEqual(zfs.get('myzpool', properties='used', fields='value'), res) def test_get_parsable_success(self): ''' Tests zfs get with parsable output ''' - res = OrderedDict([('myzpool', {'used': {'value': 905792561152}})]) - ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t905792561152'} + res = OrderedDict([ + ('myzpool', OrderedDict([ + ('used', OrderedDict([ + ('value', '844G'), + ])), + ])), + ]) + ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t906238099456'} mock_cmd = MagicMock(return_value=ret) - with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}): - self.assertEqual(zfs.get('myzpool', properties='used', fields='value', parsable=True), res) + with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(zfs.get('myzpool', properties='used', fields='value', parsable=False), res) From 5ca342494f6e976638172c68b875698f79d86e54 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:34:48 +0100 Subject: [PATCH 209/223] Phase 1 - salt.modules.zfs --- salt/modules/zfs.py | 1283 ++++++++++++++++++++----------------------- 1 file changed, 601 insertions(+), 682 deletions(-) diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index a48977a346..8129d0aab5 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -1,32 +1,29 @@ # -*- coding: utf-8 -*- ''' -Salt interface to ZFS commands +Module for running ZFS command -:codeauthor: Nitin Madhok +:codeauthor: Nitin Madhok , Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:depends: salt.utils.zfs +:platform: illumos,freebsd,linux ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs -import re -import 
math import logging # Import Salt libs import salt.utils.args import salt.utils.path import salt.modules.cmdmod -import salt.utils.decorators as decorators from salt.utils.odict import OrderedDict -from salt.utils.stringutils import to_num as str_to_num -from salt.ext import six +from salt.ext.six.moves import zip __virtualname__ = 'zfs' log = logging.getLogger(__name__) -# Precompiled regex -re_zfs_size = re.compile(r'^(\d+|\d+(?=\d*)\.\d+)([KkMmGgTtPpEeZz][Bb]?)$') - # Function alias to set mapping. __func_alias__ = { 'list_': 'list', @@ -43,77 +40,8 @@ def __virtual__(): return (False, "The zfs module cannot be loaded: zfs not supported") -@decorators.memoize -def _check_zfs(): - ''' - Looks to see if zfs is present on the system. - ''' - # Get the path to the zfs binary. - return salt.utils.path.which('zfs') - - -@decorators.memoize -def _check_features(): - ''' - Looks to see if zpool-features is available - ''' - # get man location - man = salt.utils.path.which('man') - if not man: - return False - - cmd = '{man} zpool-features'.format( - man=man - ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - return res['retcode'] == 0 - - -def _conform_value(value, convert_size=False): - ''' - Ensure value always conform to what zfs expects - ''' - # NOTE: salt breaks the on/off/yes/no properties - if isinstance(value, bool): - return 'on' if value else 'off' - - if isinstance(value, six.text_type) or isinstance(value, str): - # NOTE: handle whitespaces - if ' ' in value: - # NOTE: quoting the string may be better - # but it is hard to know if we already quoted it before - # this can be improved in the future - return "'{0}'".format(value.strip("'")) - - # NOTE: handle ZFS size conversion - match_size = re_zfs_size.match(value) - if convert_size and match_size: - v_size = float(match_size.group(1)) - v_unit = match_size.group(2).upper()[0] - v_power = math.pow(1024, ['K', 'M', 'G', 'T', 'P', 'E', 'Z'].index(v_unit) + 1) - value = v_size * v_power - return 
int(value) if int(value) == value else value - - # NOTE: convert to numeric if needed - return str_to_num(value) - - # NOTE: passthrough - return value - - -def _zfs_quote_escape_path(name): - ''' - Quotes zfs path with single quotes and escapes single quotes in path if present - ''' - if name: - name = '\'' + name.replace('\'', '\\\'') + '\'' - return name - - def exists(name, **kwargs): ''' - .. versionadded:: 2015.5.0 - Check if a ZFS filesystem or volume or snapshot exists. name : string @@ -122,6 +50,9 @@ def exists(name, **kwargs): also check if dataset is of a certain type, valid choices are: filesystem, snapshot, volume, bookmark, or all. + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash @@ -129,20 +60,30 @@ def exists(name, **kwargs): salt '*' zfs.exists myzpool/mydataset salt '*' zfs.exists myzpool/myvolume type=volume ''' - zfs = _check_zfs() - ltype = kwargs.get('type', None) + ## Configure command + # NOTE: initialize the defaults + opts = {} - cmd = '{0} list {1}{2}'.format(zfs, '-t {0} '.format(ltype) if ltype else '', _zfs_quote_escape_path(name)) - res = __salt__['cmd.run_all'](cmd, ignore_retcode=True) + # NOTE: set extra config from kwargs + if kwargs.get('type', False): + opts['-t'] = kwargs.get('type') + + ## Check if 'name' of 'type' exists + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='list', + opts=opts, + target=name, + ), + python_shell=False, + ignore_retcode=True, + ) return res['retcode'] == 0 def create(name, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Create a ZFS File System. name : string @@ -165,6 +106,9 @@ def create(name, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash @@ -175,53 +119,39 @@ def create(name, **kwargs): salt '*' zfs.create myzpool/volume volume_size=1G properties="{'volblocksize': '512'}" [sparse=True|False] ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = [] + opts = {} - zfs = _check_zfs() - properties = kwargs.get('properties', None) - if properties and 'mountpoint' in properties: - properties['mountpoint'] = _zfs_quote_escape_path(properties['mountpoint']) - create_parent = kwargs.get('create_parent', False) - volume_size = kwargs.get('volume_size', None) - sparse = kwargs.get('sparse', False) - cmd = '{0} create'.format(zfs) + # NOTE: push filesystem properties + filesystem_properties = kwargs.get('properties', {}) - if create_parent: - cmd = '{0} -p'.format(cmd) + # NOTE: set extra config from kwargs + if kwargs.get('create_parent', False): + flags.append('-p') + if kwargs.get('sparse', False) and kwargs.get('volume_size', None): + flags.append('-s') + if kwargs.get('volume_size', None): + opts['-V'] = __utils__['zfs.to_size'](kwargs.get('volume_size'), convert_to_human=False) - if volume_size and sparse: - cmd = '{0} -s'.format(cmd) + ## Create filesystem/volume + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='create', + flags=flags, + opts=opts, + filesystem_properties=filesystem_properties, + target=name, + ), + python_shell=False, + ) - # if zpool properties specified, then - # create "-o property=value" pairs - if properties: - proplist = [] - for prop in properties: - proplist.append('-o {0}={1}'.format(prop, _conform_value(properties[prop]))) - cmd = '{0} {1}'.format(cmd, ' '.join(proplist)) - - if volume_size: - cmd = '{0} -V {1}'.format(cmd, volume_size) - - # append name - cmd = '{0} {1}'.format(cmd, _zfs_quote_escape_path(name)) - - # Create filesystem - res = __salt__['cmd.run_all'](cmd) - - # Check and see if the dataset is available - if res['retcode'] != 0: - ret[name] = res['stderr'] if 'stderr' in res else 
res['stdout'] - else: - ret[name] = 'created' - - return ret + return __utils__['zfs.parse_command_result'](res, 'created') def destroy(name, **kwargs): ''' - .. versionadded:: 2015.5.0 - Destroy a ZFS File System. name : string @@ -237,49 +167,42 @@ def destroy(name, **kwargs): .. warning:: watch out when using recursive and recursive_all + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zfs.destroy myzpool/mydataset [force=True|False] ''' - ret = {} - zfs = _check_zfs() - force = kwargs.get('force', False) - recursive = kwargs.get('recursive', False) - recursive_all = kwargs.get('recursive_all', False) - cmd = '{0} destroy'.format(zfs) + ## Configure command + # NOTE: initialize the defaults + flags = [] - if recursive_all: - cmd = '{0} -R'.format(cmd) + # NOTE: set extra config from kwargs + if kwargs.get('force', False): + flags.append('-f') + if kwargs.get('recursive_all', False): + flags.append('-R') + if kwargs.get('recursive', False): + flags.append('-r') - if force: - cmd = '{0} -f'.format(cmd) + ## Destroy filesystem/volume/snapshot/... + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='destroy', + flags=flags, + target=name, + ), + python_shell=False, + ) - if recursive: - cmd = '{0} -r'.format(cmd) - - cmd = '{0} {1}'.format(cmd, _zfs_quote_escape_path(name)) - res = __salt__['cmd.run_all'](cmd) - - if res['retcode'] != 0: - if "operation does not apply to pools" in res['stderr']: - ret[name] = '{0}, use zpool.destroy to destroy the pool'.format(res['stderr'].splitlines()[0]) - if "has children" in res['stderr']: - ret[name] = '{0}, you can add the "recursive=True" parameter'.format(res['stderr'].splitlines()[0]) - else: - ret[name] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name] = 'destroyed' - - return ret + return __utils__['zfs.parse_command_result'](res, 'destroyed') def rename(name, new_name, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. 
versionchanged:: 2016.3.0 - Rename or Relocate a ZFS File System. name : string @@ -296,50 +219,55 @@ def rename(name, new_name, **kwargs): recursively rename the snapshots of all descendent datasets. snapshots are the only dataset that can be renamed recursively. + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zfs.rename myzpool/mydataset myzpool/renameddataset ''' - ret = {} - zfs = _check_zfs() - create_parent = kwargs.get('create_parent', False) - force = kwargs.get('force', False) - recursive = kwargs.get('recursive', False) + ## Configure command + # NOTE: initialize the defaults + flags = [] + target = [] - # fix up conflicting parameters - if recursive: - if '@' in name: # -p and -f don't work with -r - create_parent = False - force = False - else: # -r only works with snapshots - recursive = False - if create_parent and '@' in name: # doesn't work with snapshots - create_parent = False - - res = __salt__['cmd.run_all']('{zfs} rename {force}{create_parent}{recursive}{name} {new_name}'.format( - zfs=zfs, - force='-f ' if force else '', - create_parent='-p ' if create_parent else '', - recursive='-r ' if recursive else '', - name=_zfs_quote_escape_path(name), - new_name=_zfs_quote_escape_path(new_name) - )) - - if res['retcode'] != 0: - ret[name] = res['stderr'] if 'stderr' in res else res['stdout'] + # NOTE: set extra config from kwargs + if __utils__['zfs.is_snapshot'](name): + if kwargs.get('create_parent', False): + log.warning('zfs.rename - create_parent=True cannot be used with snapshots.') + if kwargs.get('force', False): + log.warning('zfs.rename - force=True cannot be used with snapshots.') + if kwargs.get('recursive', False): + flags.append('-r') else: - ret[name] = 'renamed to {0}'.format(new_name) + if kwargs.get('create_parent', False): + flags.append('-p') + if kwargs.get('force', False): + flags.append('-f') + if kwargs.get('recursive', False): + log.warning('zfs.rename - recursive=True 
can only be used with snapshots.') - return ret + # NOTE: update target + target.append(name) + target.append(new_name) + + ## Rename filesystem/volume/snapshot/... + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='rename', + flags=flags, + target=target, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'renamed') def list_(name=None, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: Oxygen - Return a list of all datasets or a specified dataset on the system and the values of their used, available, referenced, and mountpoint properties. @@ -362,6 +290,9 @@ def list_(name=None, **kwargs): display numbers in parsable (exact) values .. versionadded:: Oxygen + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash @@ -371,80 +302,120 @@ def list_(name=None, **kwargs): salt '*' zfs.list myzpool/mydataset properties="sharenfs,mountpoint" ''' ret = OrderedDict() - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) - depth = kwargs.get('depth', 0) + + ## update properties + # NOTE: properties should be a list properties = kwargs.get('properties', 'used,avail,refer,mountpoint') - sort = kwargs.get('sort', None) - ltype = kwargs.get('type', None) - order = kwargs.get('order', 'ascending') - parsable = kwargs.get('parsable', False) - cmd = '{0} list -H'.format(zfs) + if not isinstance(properties, list): + properties = properties.split(',') - # parsable output - if parsable: - cmd = '{0} -p'.format(cmd) - - # filter on type - if ltype: - cmd = '{0} -t {1}'.format(cmd, ltype) - - # recursively list - if recursive: - cmd = '{0} -r'.format(cmd) - if depth: - cmd = '{0} -d {1}'.format(cmd, depth) - - # add properties - properties = properties.split(',') - if 'name' in properties: # ensure name is first property + # NOTE: name should be first property + while 'name' in properties: properties.remove('name') properties.insert(0, 'name') - cmd = '{0} -o 
{1}'.format(cmd, ','.join(properties)) - # sorting - if sort and sort in properties: - if order.startswith('a'): - cmd = '{0} -s {1}'.format(cmd, sort) + ## Configure command + # NOTE: initialize the defaults + flags = ['-H', '-p'] + opts = {} + + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') + if kwargs.get('recursive', False) and kwargs.get('depth', False): + opts['-d'] = kwargs.get('depth') + if kwargs.get('type', False): + opts['-t'] = kwargs.get('type') + if kwargs.get('sort', False) and kwargs.get('sort') in properties: + if kwargs.get('order', 'ascending').startswith('a'): + opts['-s'] = kwargs.get('sort') else: - cmd = '{0} -S {1}'.format(cmd, sort) + opts['-S'] = kwargs.get('sort') + if isinstance(properties, list): + # NOTE: There can be only one -o and it takes a comma-seperated list + opts['-o'] = ','.join(properties) + else: + opts['-o'] = properties - # add name if set - if name: - cmd = '{0} {1}'.format(cmd, _zfs_quote_escape_path(name)) - - # parse output - res = __salt__['cmd.run_all'](cmd) + ## parse zfs list + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='list', + flags=flags, + opts=opts, + target=name, + ), + python_shell=False, + ) if res['retcode'] == 0: - for ds in [l for l in res['stdout'].splitlines()]: - ds = ds.split("\t") - ds_data = {} - - for prop in properties: - ds_data[prop] = _conform_value(ds[properties.index(prop)]) + for ds in res['stdout'].splitlines(): + if kwargs.get('parsable', True): + ds_data = __utils__['zfs.from_auto_dict']( + OrderedDict(list(zip(properties, ds.split("\t")))), + ) + else: + ds_data = __utils__['zfs.to_auto_dict']( + OrderedDict(list(zip(properties, ds.split("\t")))), + convert_to_human=True, + ) ret[ds_data['name']] = ds_data del ret[ds_data['name']]['name'] else: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] + return __utils__['zfs.parse_command_result'](res) return ret -def mount(name='-a', **kwargs): 
+def list_mount(): ''' - .. versionadded:: 2016.3.0 + List mounted zfs filesystems + .. versionadded:: Fluorine + + CLI Example: + + .. code-block:: bash + + salt '*' zfs.list_mount + ''' + ## List mounted filesystem + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='mount', + ), + python_shell=False, + ) + + if res['retcode'] == 0: + ret = OrderedDict() + for mount in res['stdout'].splitlines(): + mount = mount.split() + ret[mount[0]] = mount[-1] + return ret + else: + return __utils__['zfs.parse_command_result'](res) + + +def mount(name=None, **kwargs): + ''' Mounts ZFS file systems name : string - name of the filesystem, you can use '-a' to mount all unmounted filesystems. (this is the default) + name of the filesystem, having this set to None will mount all filesystems. (this is the default) overlay : boolean perform an overlay mount. options : string optional comma-separated list of mount options to use temporarily for the duration of the mount. + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + + .. warning:: + + Passing '-a' as name is deprecated and will be removed 2 verions after Flourine. + CLI Example: .. 
code-block:: bash @@ -453,36 +424,43 @@ def mount(name='-a', **kwargs): salt '*' zfs.mount myzpool/mydataset salt '*' zfs.mount myzpool/mydataset options=ro ''' - zfs = _check_zfs() - overlay = kwargs.get('overlay', False) - options = kwargs.get('options', None) + ## Configure command + # NOTE: initialize the defaults + flags = [] + opts = {} - res = __salt__['cmd.run_all']('{zfs} mount {overlay}{options}{filesystem}'.format( - zfs=zfs, - overlay='-O ' if overlay else '', - options='-o {0} '.format(options) if options else '', - filesystem=_zfs_quote_escape_path(name) - )) + # NOTE: set extra config from kwargs + if kwargs.get('overlay', False): + flags.append('-O') + if kwargs.get('options', False): + opts['-o'] = kwargs.get('options') + if name in [None, '-a']: + # NOTE: still accept '-a' as name for backwards compatibility + # two versions after Flourine this should just simplify + # this to just set '-a' if name is not set. + flags.append('-a') + name = None - ret = {} - if name == '-a': - ret = res['retcode'] == 0 - else: - if res['retcode'] != 0: - ret[name] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name] = 'mounted' - return ret + ## Mount filesystem + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='mount', + flags=flags, + opts=opts, + target=name, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'mounted') def unmount(name, **kwargs): ''' - .. versionadded:: 2016.3.0 - Unmounts ZFS file systems name : string - name of the filesystem, you can use '-a' to unmount all mounted filesystems. + name of the filesystem, you can use None to unmount all mounted filesystems. force : boolean forcefully unmount the file system, even if it is currently in use. @@ -490,36 +468,48 @@ def unmount(name, **kwargs): Using ``-a`` for the name parameter will probably break your system, unless your rootfs is not on zfs. + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + + .. 
warning:: + + Passing '-a' as name is deprecated and will be removed 2 verions after Flourine. + CLI Example: .. code-block:: bash salt '*' zfs.unmount myzpool/mydataset [force=True|False] ''' - zfs = _check_zfs() - force = kwargs.get('force', False) + ## Configure command + # NOTE: initialize the defaults + flags = [] - res = __salt__['cmd.run_all']('{zfs} unmount {force}{filesystem}'.format( - zfs=zfs, - force='-f ' if force else '', - filesystem=_zfs_quote_escape_path(name) - )) + # NOTE: set extra config from kwargs + if kwargs.get('force', False): + flags.append('-f') + if name in [None, '-a']: + # NOTE: still accept '-a' as name for backwards compatibility + # two versions after Flourine this should just simplify + # this to just set '-a' if name is not set. + flags.append('-a') + name = None - ret = {} - if name == '-a': - ret = res['retcode'] == 0 - else: - if res['retcode'] != 0: - ret[name] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name] = 'unmounted' - return ret + ## Unmount filesystem + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='unmount', + flags=flags, + target=name, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'unmounted') def inherit(prop, name, **kwargs): ''' - .. versionadded:: 2016.3.0 - Clears the specified property prop : string @@ -532,42 +522,41 @@ def inherit(prop, name, **kwargs): revert the property to the received value if one exists; otherwise operate as if the -S option was not specified. + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash salt '*' zfs.inherit canmount myzpool/mydataset [recursive=True|False] ''' - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) - revert = kwargs.get('revert', False) + ## Configure command + # NOTE: initialize the defaults + flags = [] - res = __salt__['cmd.run_all']('{zfs} inherit {recursive}{revert}{prop} {name}'.format( - zfs=zfs, - recursive='-r ' if recursive else '', - revert='-S ' if revert else '', - prop=prop, - name=_zfs_quote_escape_path(name) - )) + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') + if kwargs.get('revert', False): + flags.append('-S') - ret = {} - ret[name] = {} - if res['retcode'] != 0: - ret[name][prop] = res['stderr'] if 'stderr' in res else res['stdout'] - if 'property cannot be inherited' in res['stderr']: - ret[name][prop] = '{0}, {1}'.format( - ret[name][prop], - 'use revert=True to try and reset it to it\'s default value.' - ) - else: - ret[name][prop] = 'cleared' - return ret + ## Inherit property + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='inherit', + flags=flags, + property_name=prop, + target=name, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'inherited') -def diff(name_a, name_b, **kwargs): +def diff(name_a, name_b=None, **kwargs): ''' - .. versionadded:: 2016.3.0 - Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem. @@ -575,11 +564,16 @@ def diff(name_a, name_b, **kwargs): name_a : string name of snapshot name_b : string - name of snapshot or filesystem + (optional) name of snapshot or filesystem show_changetime : boolean - display the path's inode change time as the first column of output. (default = False) + display the path's inode change time as the first column of output. (default = True) show_indication : boolean display an indication of the type of file. 
(default = True) + parsable : boolean + if true we don't parse the timestamp to a more readable date (default = True) + + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine CLI Example: @@ -587,49 +581,51 @@ def diff(name_a, name_b, **kwargs): salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = ['-H'] + target = [] - zfs = _check_zfs() - show_changetime = kwargs.get('show_changetime', False) - show_indication = kwargs.get('show_indication', True) + # NOTE: set extra config from kwargs + if kwargs.get('show_changetime', True): + flags.append('-t') + if kwargs.get('show_indication', True): + flags.append('-F') - if '@' not in name_a: - ret[name_a] = 'MUST be a snapshot' - return ret + # NOTE: update target + target.append(name_a) + if name_b: + target.append(name_b) - res = __salt__['cmd.run_all']('{zfs} diff -H {changetime}{indication}{name_a} {name_b}'.format( - zfs=zfs, - changetime='-t ' if show_changetime else '', - indication='-F ' if show_indication else '', - name_a=_zfs_quote_escape_path(name_a), - name_b=_zfs_quote_escape_path(name_b) - )) + ## Diff filesystem/snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='diff', + flags=flags, + target=target, + ), + python_shell=False, + ) if res['retcode'] != 0: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] + return __utils__['zfs.parse_command_result'](res) else: - ret = [] - for line in res['stdout'].splitlines(): - ret.append(line) - return ret + if not kwargs.get('parsable', True) and kwargs.get('show_changetime', True): + ret = OrderedDict() + for entry in res['stdout'].splitlines(): + entry = entry.split() + entry_timestamp = __utils__['dateutils.strftime'](entry[0], '%Y-%m-%d.%H:%M:%S.%f') + entry_data = "\t\t".join(entry[1:]) + ret[entry_timestamp] = entry_data + else: + ret = res['stdout'].splitlines() + return ret def rollback(name, **kwargs): ''' - .. 
versionadded:: 2016.3.0 - Roll back the given dataset to a previous snapshot. - .. warning:: - - When a dataset is rolled back, all data that has changed since - the snapshot is discarded, and the dataset reverts to the state - at the time of the snapshot. By default, the command refuses to - roll back to a snapshot other than the most recent one. - - In order to do so, all intermediate snapshots and bookmarks - must be destroyed by specifying the -r option. - name : string name of snapshot recursive : boolean @@ -642,47 +638,55 @@ def rollback(name, **kwargs): used with the -R option to force an unmount of any clone file systems that are to be destroyed. + .. warning:: + + When a dataset is rolled back, all data that has changed since + the snapshot is discarded, and the dataset reverts to the state + at the time of the snapshot. By default, the command refuses to + roll back to a snapshot other than the most recent one. + + In order to do so, all intermediate snapshots and bookmarks + must be destroyed by specifying the -r option. + + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash salt '*' zfs.rollback myzpool/mydataset@yesterday ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = [] - zfs = _check_zfs() - force = kwargs.get('force', False) - recursive = kwargs.get('recursive', False) - recursive_all = kwargs.get('recursive_all', False) + # NOTE: set extra config from kwargs + if kwargs.get('recursive_all', False): + flags.append('-R') + if kwargs.get('recursive', False): + flags.append('-r') + if kwargs.get('force', False): + if kwargs.get('recursive_all', False) or kwargs.get('recursive', False): + flags.append('-f') + else: + log.warning('zfs.rollback - force=True can only be used with recursive_all=True or recursive=True') - if '@' not in name: - ret[name] = 'MUST be a snapshot' - return ret + ## Rollback to snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='rollback', + flags=flags, + target=name, + ), + python_shell=False, + ) - if force: - if not recursive and not recursive_all: # -f only works with -R - log.warning('zfs.rollback - force=True can only be used when recursive_all=True or recursive=True') - force = False - - res = __salt__['cmd.run_all']('{zfs} rollback {force}{recursive}{recursive_all}{snapshot}'.format( - zfs=zfs, - force='-f ' if force else '', - recursive='-r ' if recursive else '', - recursive_all='-R ' if recursive_all else '', - snapshot=_zfs_quote_escape_path(name) - )) - - if res['retcode'] != 0: - ret[name[:name.index('@')]] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name[:name.index('@')]] = 'rolledback to snapshot: {0}'.format(name[name.index('@')+1:]) - return ret + return __utils__['zfs.parse_command_result'](res, 'rolledback') def clone(name_a, name_b, **kwargs): ''' - .. versionadded:: 2016.3.0 - Creates a clone of the given snapshot. name_a : string @@ -703,49 +707,47 @@ def clone(name_a, name_b, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" + .. versionadded:: 2016.3.0 + .. 
versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = [] + target = [] - zfs = _check_zfs() - create_parent = kwargs.get('create_parent', False) - properties = kwargs.get('properties', None) + # NOTE: push filesystem properties + filesystem_properties = kwargs.get('properties', {}) - if '@' not in name_a: - ret[name_b] = 'failed to clone from {0} because it is not a snapshot'.format(name_a) - return ret + # NOTE: set extra config from kwargs + if kwargs.get('create_parent', False): + flags.append('-p') - # if zpool properties specified, then - # create "-o property=value" pairs - if properties: - proplist = [] - for prop in properties: - proplist.append('-o {0}={1}'.format(prop, properties[prop])) - properties = ' '.join(proplist) + # NOTE: update target + target.append(name_a) + target.append(name_b) - res = __salt__['cmd.run_all']('{zfs} clone {create_parent}{properties}{name_a} {name_b}'.format( - zfs=zfs, - create_parent='-p ' if create_parent else '', - properties='{0} '.format(properties) if properties else '', - name_a=_zfs_quote_escape_path(name_a), - name_b=_zfs_quote_escape_path(name_b) - )) + ## Clone filesystem/volume + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='clone', + flags=flags, + filesystem_properties=filesystem_properties, + target=target, + ), + python_shell=False, + ) - if res['retcode'] != 0: - ret[name_b] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name_b] = 'cloned from {0}'.format(name_a) - return ret + return __utils__['zfs.parse_command_result'](res, 'cloned') def promote(name): ''' - .. versionadded:: 2016.3.0 - Promotes a clone file system to no longer be dependent on its "origin" snapshot. @@ -767,32 +769,28 @@ def promote(name): name : string name of clone-filesystem + .. versionadded:: 2016.3.0 + CLI Example: .. 
code-block:: bash salt '*' zfs.promote myzpool/myclone ''' - ret = {} + ## Promote clone + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='promote', + target=name, + ), + python_shell=False, + ) - zfs = _check_zfs() - - res = __salt__['cmd.run_all']('{zfs} promote {name}'.format( - zfs=zfs, - name=_zfs_quote_escape_path(name) - )) - - if res['retcode'] != 0: - ret[name] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[name] = 'promoted' - return ret + return __utils__['zfs.parse_command_result'](res, 'promoted') def bookmark(snapshot, bookmark): ''' - .. versionadded:: 2016.3.0 - Creates a bookmark of the given snapshot .. note:: @@ -808,47 +806,40 @@ def bookmark(snapshot, bookmark): bookmark : string name of bookmark + .. versionadded:: 2016.3.0 + CLI Example: .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete ''' - ret = {} - # abort if we do not have feature flags - if not _check_features(): - ret['error'] = 'bookmarks are not supported' - return ret + if not __utils__['zfs.has_feature_flags'](): + return OrderedDict([('error', 'bookmarks are not supported')]) - zfs = _check_zfs() + ## Configure command + # NOTE: initialize the defaults + target = [] - if '@' not in snapshot: - ret[snapshot] = 'MUST be a snapshot' + # NOTE: update target + target.append(snapshot) + target.append(bookmark) - if '#' not in bookmark: - ret[bookmark] = 'MUST be a bookmark' + ## Bookmark snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='bookmark', + target=target, + ), + python_shell=False, + ) - if len(ret) > 0: - return ret - - res = __salt__['cmd.run_all']('{zfs} bookmark {snapshot} {bookmark}'.format( - zfs=zfs, - snapshot=_zfs_quote_escape_path(snapshot), - bookmark=_zfs_quote_escape_path(bookmark) - )) - - if res['retcode'] != 0: - ret[snapshot] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[snapshot] = 'bookmarked as 
{0}'.format(bookmark) - return ret + return __utils__['zfs.parse_command_result'](res, 'bookmarked') def holds(snapshot, **kwargs): ''' - .. versionadded:: 2016.3.0 - Lists all existing user references for the given snapshot or snapshots. snapshot : string @@ -856,51 +847,50 @@ def holds(snapshot, **kwargs): recursive : boolean lists the holds that are set on the named descendent snapshots also. + .. versionadded:: 2016.3.0 + CLI Example: .. code-block:: bash salt '*' zfs.holds myzpool/mydataset@baseline ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = ['-H'] + target = [] - if '@' not in snapshot: - ret[snapshot] = 'MUST be a snapshot' - return ret + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) + # NOTE: update target + target.append(snapshot) - res = __salt__['cmd.run_all']('{zfs} holds -H {recursive}{snapshot}'.format( - zfs=zfs, - recursive='-r ' if recursive else '', - snapshot=_zfs_quote_escape_path(snapshot) - )) + ## Lookup holds + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='holds', + flags=flags, + target=target, + ), + python_shell=False, + ) + ret = __utils__['zfs.parse_command_result'](res) if res['retcode'] == 0: - if res['stdout'] != '': - properties = "name,tag,timestamp".split(",") - for hold in [l for l in res['stdout'].splitlines()]: - hold = hold.split("\t") - hold_data = {} + for hold in res['stdout'].splitlines(): + hold_data = OrderedDict(list(zip( + ['name', 'tag', 'timestamp'], + hold.split("\t"), + ))) + ret[hold_data['tag'].strip()] = hold_data['timestamp'] - for prop in properties: - hold_data[prop] = hold[properties.index(prop)] - - if hold_data['name'] not in ret: - ret[hold_data['name']] = {} - ret[hold_data['name']][hold_data['tag']] = hold_data['timestamp'] - else: - ret[snapshot] = 'no holds' - else: - ret[snapshot] = res['stderr'] if 'stderr' in res else 
res['stdout'] return ret def hold(tag, *snapshot, **kwargs): ''' - .. versionadded:: 2016.3.0 - Adds a single reference, named with the tag argument, to the specified snapshot or snapshots. @@ -919,67 +909,54 @@ def hold(tag, *snapshot, **kwargs): specifies that a hold with the given tag is applied recursively to the snapshots of all descendent file systems. - .. note:: + .. versionadded:: 2016.3.0 + .. versionchanged:: Flourine - A comma-separated list can be provided for the tag parameter to hold multiple tags. + .. warning:: + + As of Flourine the tag parameter no longer accepts a comma-seprated value. + It's is now possible to create a tag that contains a comma, this was impossible before. CLI Example: .. code-block:: bash salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot [recursive=True] - salt '*' zfs.hold mytag,myothertag myzpool/mydataset@mysnapshot salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot ''' - ret = {} + ## warn about tag change + # NOTE: remove me 2 versions after Flourine + if ',' in tag: + log.warning('zfs.hold - on Flourine and later a comma in a tag will no longer create multiple tags!') - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) + ## Configure command + # NOTE: initialize the defaults + flags = [] + target = [] - # verify snapshots - if not snapshot: - ret['error'] = 'one or more snapshots must be specified' + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') - for snap in snapshot: - if '@' not in snap: - ret[snap] = 'not a snapshot' + # NOTE: update target + target.append(tag) + target.extend(snapshot) - if len(ret) > 0: - return ret + ## hold snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='hold', + flags=flags, + target=target, + ), + python_shell=False, + ) - for csnap in snapshot: - for ctag in tag.split(','): - res = __salt__['cmd.run_all']('{zfs} hold {recursive}{tag} {snapshot}'.format( - 
zfs=zfs, - recursive='-r ' if recursive else '', - tag=_zfs_quote_escape_path(ctag), - snapshot=_zfs_quote_escape_path(csnap) - )) - - if csnap not in ret: - ret[csnap] = {} - - if res['retcode'] != 0: - for err in res['stderr'].splitlines(): - if err.startswith('cannot hold snapshot'): - ret[csnap][ctag] = err[err.index(':')+2:] - elif err.startswith('cannot open'): - ret[csnap][ctag] = err[err.index(':')+2:] - else: - # fallback in case we hit a weird error - if err == 'usage:': - break - ret[csnap][ctag] = res['stderr'] - else: - ret[csnap][ctag] = 'held' - - return ret + return __utils__['zfs.parse_command_result'](res, 'held') def release(tag, *snapshot, **kwargs): ''' - .. versionadded:: 2016.3.0 - Removes a single reference, named with the tag argument, from the specified snapshot or snapshots. @@ -997,9 +974,13 @@ def release(tag, *snapshot, **kwargs): recursively releases a hold with the given tag on the snapshots of all descendent file systems. - .. note:: + .. versionadded:: 2016.3.0 + .. versionchanged:: Flourine - A comma-separated list can be provided for the tag parameter to release multiple tags. + .. warning:: + + As of Flourine the tag parameter no longer accepts a comma-seprated value. + It's is now possible to create a tag that contains a comma, this was impossible before. 
CLI Example: @@ -1008,55 +989,39 @@ def release(tag, *snapshot, **kwargs): salt '*' zfs.release mytag myzpool/mydataset@mysnapshot [recursive=True] salt '*' zfs.release mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot ''' - ret = {} + ## warn about tag change + # NOTE: remove me 2 versions after Flourine + if ',' in tag: + log.warning('zfs.release - on Flourine and later a comma in a tag will no longer create multiple tags!') - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) + ## Configure command + # NOTE: initialize the defaults + flags = [] + target = [] - # verify snapshots - if not snapshot: - ret['error'] = 'one or more snapshots must be specified' + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') - for snap in snapshot: - if '@' not in snap: - ret[snap] = 'not a snapshot' + # NOTE: update target + target.append(tag) + target.extend(snapshot) - if len(ret) > 0: - return ret + ## release snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='release', + flags=flags, + target=target, + ), + python_shell=False, + ) - for csnap in snapshot: - for ctag in tag.split(','): - res = __salt__['cmd.run_all']('{zfs} release {recursive}{tag} {snapshot}'.format( - zfs=zfs, - recursive='-r ' if recursive else '', - tag=_zfs_quote_escape_path(ctag), - snapshot=_zfs_quote_escape_path(csnap) - )) - - if csnap not in ret: - ret[csnap] = {} - - if res['retcode'] != 0: - for err in res['stderr'].splitlines(): - if err.startswith('cannot release hold from snapshot'): - ret[csnap][ctag] = err[err.index(':')+2:] - elif err.startswith('cannot open'): - ret[csnap][ctag] = err[err.index(':')+2:] - else: - # fallback in case we hit a weird error - if err == 'usage:': - break - ret[csnap][ctag] = res['stderr'] - else: - ret[csnap][ctag] = 'released' - - return ret + return __utils__['zfs.parse_command_result'](res, 'released') def snapshot(*snapshot, **kwargs): ''' - .. 
versionadded:: 2016.3.0 - Creates snapshots with the given names. *snapshot : string @@ -1074,6 +1039,9 @@ def snapshot(*snapshot, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" + .. versionadded:: 2016.3.0 + .. versionchanged:: Flourine + CLI Example: .. code-block:: bash @@ -1081,59 +1049,33 @@ def snapshot(*snapshot, **kwargs): salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True] salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True] ''' - ret = {} + ## Configure command + # NOTE: initialize the defaults + flags = [] - zfs = _check_zfs() - recursive = kwargs.get('recursive', False) - properties = kwargs.get('properties', None) + # NOTE: push filesystem properties + filesystem_properties = kwargs.get('properties', {}) - # verify snapshots - if not snapshot: - ret['error'] = 'one or more snapshots must be specified' + # NOTE: set extra config from kwargs + if kwargs.get('recursive', False): + flags.append('-r') - for snap in snapshot: - if '@' not in snap: - ret[snap] = 'not a snapshot' + ## Create snapshot + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='snapshot', + flags=flags, + filesystem_properties=filesystem_properties, + target=list(snapshot), + ), + python_shell=False, + ) - # if zpool properties specified, then - # create "-o property=value" pairs - if properties: - proplist = [] - for prop in properties: - proplist.append('-o {0}={1}'.format(prop, _conform_value((properties[prop])))) - properties = ' '.join(proplist) - - for csnap in snapshot: - if '@' not in csnap: - continue - - res = __salt__['cmd.run_all']('{zfs} snapshot {recursive}{properties}{snapshot}'.format( - zfs=zfs, - recursive='-r ' if recursive else '', - properties='{0} '.format(properties) if properties else '', - snapshot=_zfs_quote_escape_path(csnap) - )) - - if res['retcode'] != 0: - for err in res['stderr'].splitlines(): - if err.startswith('cannot create snapshot'): - 
ret[csnap] = err[err.index(':')+2:] - elif err.startswith('cannot open'): - ret[csnap] = err[err.index(':')+2:] - else: - # fallback in case we hit a weird error - if err == 'usage:': - break - ret[csnap] = res['stderr'] - else: - ret[csnap] = 'snapshotted' - return ret + return __utils__['zfs.parse_command_result'](res, 'snapshotted') def set(*dataset, **kwargs): ''' - .. versionadded:: 2016.3.0 - Sets the property or list of properties to the given value(s) for each dataset. *dataset : string @@ -1155,9 +1097,11 @@ def set(*dataset, **kwargs): can be set and acceptable values. Numeric values can be specified as exact values, or in a human-readable - form with a suffix of B, K, M, G, T, P, E, Z (for bytes, kilobytes, - megabytes, gigabytes, terabytes, petabytes, exabytes, or zettabytes, - respectively). + form with a suffix of B, K, M, G, T, P, E (for bytes, kilobytes, + megabytes, gigabytes, terabytes, petabytes, or exabytes respectively). + + .. versionadded:: 2016.3.0 + .. versionchanged:: Flourine CLI Example: @@ -1167,51 +1111,26 @@ def set(*dataset, **kwargs): salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=off salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=lz4 canmount=off ''' - ret = {} + ## Configure command + # NOTE: push filesystem properties + filesystem_properties = salt.utils.args.clean_kwargs(**kwargs) - zfs = _check_zfs() + ## Set property + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='set', + property_name=filesystem_properties.keys(), + property_value=filesystem_properties.values(), + target=list(dataset), + ), + python_shell=False, + ) - # verify snapshots - if not dataset: - ret['error'] = 'one or more snapshots must be specified' - - # clean kwargs - properties = salt.utils.args.clean_kwargs(**kwargs) - if len(properties) < 1: - ret['error'] = '{0}one or more properties must be specified'.format( - '{0},\n'.format(ret['error']) if 'error' in ret else '' - ) - - if 
len(ret) > 0: - return ret - - # for better error handling we don't do one big set command - for ds in dataset: - for prop in properties: - res = __salt__['cmd.run_all']('{zfs} set {prop}={value} {dataset}'.format( - zfs=zfs, - prop=prop, - value=_conform_value(properties[prop]), - dataset=_zfs_quote_escape_path(ds) - )) - if ds not in ret: - ret[ds] = {} - - if res['retcode'] != 0: - ret[ds][prop] = res['stderr'] if 'stderr' in res else res['stdout'] - if ':' in ret[ds][prop]: - ret[ds][prop] = ret[ds][prop][ret[ds][prop].index(':')+2:] - else: - ret[ds][prop] = 'set' - - return ret + return __utils__['zfs.parse_command_result'](res, 'set') def get(*dataset, **kwargs): ''' - .. versionadded:: 2016.3.0 - .. versionchanged:: Oxygen - Displays properties for the given datasets. *dataset : string @@ -1231,7 +1150,7 @@ def get(*dataset, **kwargs): comma-separated list of sources to display. Must be one of the following: local, default, inherited, temporary, and none. The default value is all sources. parsable : boolean - display numbers in parsable (exact) values + display numbers in parsable (exact) values (default = True) .. versionadded:: Oxygen .. note:: @@ -1239,6 +1158,9 @@ def get(*dataset, **kwargs): If no datasets are specified, then the command displays properties for all datasets on the system. + .. versionadded:: 2016.3.0 + .. versionchanged:: Flourine + CLI Example: .. 
code-block:: bash @@ -1248,75 +1170,72 @@ def get(*dataset, **kwargs): salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False] salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1 ''' - ret = OrderedDict() - zfs = _check_zfs() - properties = kwargs.get('properties', 'all') - recursive = kwargs.get('recursive', False) - depth = kwargs.get('depth', 0) - fields = kwargs.get('fields', 'value,source') - ltype = kwargs.get('type', None) - source = kwargs.get('source', None) - parsable = kwargs.get('parsable', False) - cmd = '{0} get -H'.format(zfs) + ## Configure command + # NOTE: initialize the defaults + flags = ['-H', '-p'] + opts = {} - # parsable output - if parsable: - cmd = '{0} -p'.format(cmd) - - # recursively get - if depth: - cmd = '{0} -d {1}'.format(cmd, depth) - elif recursive: - cmd = '{0} -r'.format(cmd) - - # fields - fields = fields.split(',') - if 'name' in fields: # ensure name is first + # NOTE: set extra config from kwargs + if kwargs.get('depth', False): + opts['-d'] = kwargs.get('depth') + elif kwargs.get('recursive', False): + flags.append('-r') + fields = kwargs.get('fields', 'value,source').split(',') + if 'name' in fields: # ensure name is first fields.remove('name') if 'property' in fields: # ensure property is second fields.remove('property') fields.insert(0, 'name') fields.insert(1, 'property') - cmd = '{0} -o {1}'.format(cmd, ','.join(fields)) + opts['-o'] = ",".join(fields) + if kwargs.get('type', False): + opts['-t'] = kwargs.get('type') + if kwargs.get('source', False): + opts['-s'] = kwargs.get('source') - # filter on type - if source: - cmd = '{0} -s {1}'.format(cmd, source) + # NOTE: set property_name + property_name = kwargs.get('properties', 'all') - # filter on type - if ltype: - cmd = '{0} -t {1}'.format(cmd, ltype) + ## Get properties + res = __salt__['cmd.run_all']( + __utils__['zfs.zfs_command']( + command='get', + flags=flags, + opts=opts, + 
property_name=property_name, + target=list(dataset), + ), + python_shell=False, + ) - # properties - cmd = '{0} {1}'.format(cmd, properties) - - # datasets - if dataset: - dataset = [_zfs_quote_escape_path(x) for x in dataset] - cmd = '{0} {1}'.format(cmd, ' '.join(dataset)) - - # parse output - res = __salt__['cmd.run_all'](cmd) + ret = __utils__['zfs.parse_command_result'](res) if res['retcode'] == 0: - for ds in [l for l in res['stdout'].splitlines()]: - ds = ds.split("\t") - ds_data = {} + for ds in res['stdout'].splitlines(): + ds_data = OrderedDict(list(zip( + fields, + ds.split("\t") + ))) - for field in fields: - ds_data[field] = _conform_value(ds[fields.index(field)]) + if 'value' in ds_data: + if kwargs.get('parsable', True): + ds_data['value'] = __utils__['zfs.from_auto']( + ds_data['property'], + ds_data['value'], + ) + else: + ds_data['value'] = __utils__['zfs.to_auto']( + ds_data['property'], + ds_data['value'], + convert_to_human=True, + ) - ds_name = ds_data['name'] - ds_prop = ds_data['property'] + if ds_data['name'] not in ret: + ret[ds_data['name']] = OrderedDict() + + ret[ds_data['name']][ds_data['property']] = ds_data del ds_data['name'] del ds_data['property'] - if ds_name not in ret: - ret[ds_name] = {} - - ret[ds_name][ds_prop] = ds_data - else: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] - return ret # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From fce3920b07afe28cf2945687d2668c77865ce8f4 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:34:56 +0100 Subject: [PATCH 210/223] Phase 1 - salt.modules.zpool tests --- tests/unit/modules/test_zpool.py | 700 ++++++++++++++++++++++++++----- 1 file changed, 603 insertions(+), 97 deletions(-) diff --git a/tests/unit/modules/test_zpool.py b/tests/unit/modules/test_zpool.py index 891baecc4f..cfb16d4f6b 100644 --- a/tests/unit/modules/test_zpool.py +++ b/tests/unit/modules/test_zpool.py @@ -1,9 +1,12 @@ # -*- coding: utf-8 -*- ''' - :codeauthor: Nitin 
Madhok ` +Tests for salt.modules.zpool - tests.unit.modules.zpool_test - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:codeauthor: Nitin Madhok , Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:depends: salt.utils.zfs +:platform: illumos,freebsd,linux ''' # Import Python libs @@ -13,18 +16,24 @@ from __future__ import absolute_import, print_function, unicode_literals from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase from tests.support.mock import ( - Mock, MagicMock, patch, NO_MOCK, NO_MOCK_REASON, ) +# Import test data from salt.utils.zfs test +from tests.unit.utils.test_zfs import utils_patch + # Import Salt Execution module to test +import salt.utils.zfs import salt.modules.zpool as zpool # Import Salt Utils +import salt.loader from salt.utils.odict import OrderedDict +import salt.utils.decorators +import salt.utils.decorators.path # Skip this test case if we don't have access to mock! @@ -34,11 +43,16 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): This class contains a set of functions that test salt.modules.zpool module ''' def setup_loader_modules(self): - patcher = patch('salt.modules.zpool._check_zpool', - MagicMock(return_value='/sbin/zpool')) - patcher.start() - self.addCleanup(patcher.stop) - return {zpool: {}} + self.opts = opts = salt.config.DEFAULT_MINION_OPTS + utils = salt.loader.utils(opts, whitelist=['zfs']) + zpool_obj = { + zpool: { + '__opts__': opts, + '__utils__': utils, + } + } + + return zpool_obj def test_exists_success(self): ''' @@ -50,7 +64,8 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): self.assertTrue(zpool.exists('myzpool')) def test_exists_failure(self): @@ -62,7 +77,9 @@ class ZpoolTestCase(TestCase, 
LoaderModuleMockMixin): ret['stderr'] = "cannot open 'myzpool': no such pool" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): self.assertFalse(zpool.exists('myzpool')) def test_healthy(self): @@ -74,7 +91,9 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): self.assertTrue(zpool.healthy()) def test_status(self): @@ -88,18 +107,19 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): " scan: scrub repaired 0 in 0h6m with 0 errors on Mon Dec 21 02:06:17 2015", "config:", "", - " NAME STATE READ WRITE CKSUM", - " mypool ONLINE 0 0 0", - " mirror-0 ONLINE 0 0 0", - " c2t0d0 ONLINE 0 0 0", - " c2t1d0 ONLINE 0 0 0", + "\tNAME STATE READ WRITE CKSUM", + "\tmypool ONLINE 0 0 0", + "\t mirror-0 ONLINE 0 0 0", + "\t c2t0d0 ONLINE 0 0 0", + "\t c2t1d0 ONLINE 0 0 0", "", "errors: No known data errors", ]) ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): ret = zpool.status() self.assertEqual('ONLINE', ret['mypool']['state']) @@ -121,25 +141,59 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.iostat('mypool') - self.assertEqual('46.7G', ret['mypool']['mypool']['capacity-alloc']) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = 
zpool.iostat('mypool', parsable=False) + self.assertEqual(ret['mypool']['capacity-alloc'], '46.7G') + + def test_iostat_parsable(self): + ''' + Tests successful return of iostat function + + .. note: + The command output is the same as the non parsable! + There is no -p flag for zpool iostat, but our type + conversions can handle this! + ''' + ret = {} + ret['stdout'] = "\n".join([ + " capacity operations bandwidth", + "pool alloc free read write read write", + "---------- ----- ----- ----- ----- ----- -----", + "mypool 46.7G 64.3G 4 19 113K 331K", + " mirror 46.7G 64.3G 4 19 113K 331K", + " c2t0d0 - - 1 10 114K 334K", + " c2t1d0 - - 1 10 114K 334K", + "---------- ----- ----- ----- ----- ----- -----", + ]) + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.iostat('mypool', parsable=True) + self.assertEqual(ret['mypool']['capacity-alloc'], 50143743180) def test_list(self): ''' Tests successful return of list function ''' ret = {} - ret['stdout'] = "mypool\t1.81T\t714G\t1.11T\t38%\tONLINE" + ret['stdout'] = "mypool\t1992864825344\t767076794368\t1225788030976\t38\t0%\tONLINE" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ - patch('salt.modules.zpool._check_features', - MagicMock(return_value=False)): - ret = zpool.list_() - res = OrderedDict([('mypool', {'alloc': '714G', 'cap': '38%', 'free': '1.11T', - 'health': 'ONLINE', 'size': '1.81T'})]) + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.list_(parsable=False) + res = OrderedDict([('mypool', OrderedDict([ + ('size', '1.81T'), + ('alloc', '714G'), + ('free', '1.11T'), + ('cap', 38), + ('frag', '0%'), + ('health', 'ONLINE'), + ]))]) self.assertEqual(res, ret) def test_list_parsable(self): @@ -147,17 +201,21 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): 
Tests successful return of list function with parsable output ''' ret = {} - ret['stdout'] = "mypool\t1992864825344\t767076794368\t1225788030976\t38\tONLINE" + ret['stdout'] = "mypool\t1992864825344\t767076794368\t1225788030976\t38\t0%\tONLINE" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ - patch('salt.modules.zpool._check_features', - MagicMock(return_value=False)): - ret = zpool.list_() - res = OrderedDict([('mypool', {'alloc': 767076794368, 'cap': 38, - 'free': 1225788030976, 'health': 'ONLINE', - 'size': 1992864825344})]) + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.list_(parsable=True) + res = OrderedDict([('mypool', OrderedDict([ + ('size', 1992864825344), + ('alloc', 767076794368), + ('free', 1225788030976), + ('cap', 38), + ('frag', '0%'), + ('health', 'ONLINE'), + ]))]) self.assertEqual(res, ret) def test_get(self): @@ -165,13 +223,14 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): Tests successful return of get function ''' ret = {} - ret['stdout'] = "size\t1.81T\t-\n" + ret['stdout'] = "size\t1992864825344\t-\n" ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.get('mypool', 'size') - res = OrderedDict([('mypool', OrderedDict([('size', '1.81T')]))]) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.get('mypool', 'size', parsable=False) + res = OrderedDict(OrderedDict([('size', '1.81T')])) self.assertEqual(res, ret) def test_get_parsable(self): @@ -183,9 +242,10 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.get('mypool', 'size') - res = OrderedDict([('mypool', OrderedDict([('size', 1992864825344)]))]) + with 
patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.get('mypool', 'size', parsable=True) + res = OrderedDict(OrderedDict([('size', 1992864825344)])) self.assertEqual(res, ret) def test_get_whitespace(self): @@ -197,9 +257,10 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): ret = zpool.get('mypool', 'comment') - res = OrderedDict([('mypool', OrderedDict([('comment', "'my testing pool'")]))]) + res = OrderedDict(OrderedDict([('comment', "my testing pool")])) self.assertEqual(res, ret) def test_scrub_start(self): @@ -213,11 +274,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) mock_exists = MagicMock(return_value=True) - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.scrub('mypool') - res = OrderedDict([('mypool', OrderedDict([('scrubbing', True)]))]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.scrub('mypool') + res = OrderedDict(OrderedDict([('scrubbing', True)])) + self.assertEqual(res, ret) def test_scrub_pause(self): ''' @@ -230,11 +292,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) mock_exists = MagicMock(return_value=True) - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.scrub('mypool', pause=True) - res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))]) - self.assertEqual(res, ret) + 
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.scrub('mypool', pause=True) + res = OrderedDict(OrderedDict([('scrubbing', False)])) + self.assertEqual(res, ret) def test_scrub_stop(self): ''' @@ -247,11 +310,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) mock_exists = MagicMock(return_value=True) - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.scrub('mypool', stop=True) - res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.scrub('mypool', stop=True) + res = OrderedDict(OrderedDict([('scrubbing', False)])) + self.assertEqual(res, ret) def test_split_success(self): ''' @@ -262,14 +326,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - mock_exists = Mock() - mock_exists.side_effect = [False, True] - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.split('datapool', 'backuppool') - res = OrderedDict([('backuppool', 'split off from datapool')]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.split('datapool', 'backuppool') + res = OrderedDict([('split', True)]) + self.assertEqual(res, ret) def test_split_exist_new(self): ''' @@ -277,17 +339,15 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ''' ret = {} ret['stdout'] = "" - ret['stderr'] = "" - ret['retcode'] = 0 + ret['stderr'] = 
"Unable to split datapool: pool already exists" + ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - mock_exists = Mock() - mock_exists.side_effect = [True, True] - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.split('datapool', 'backuppool') - res = OrderedDict([('backuppool', 'storage pool already exists')]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.split('datapool', 'backuppool') + res = OrderedDict([('split', False), ('error', 'Unable to split datapool: pool already exists')]) + self.assertEqual(res, ret) def test_split_missing_pool(self): ''' @@ -295,17 +355,15 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ''' ret = {} ret['stdout'] = "" - ret['stderr'] = "" - ret['retcode'] = 0 + ret['stderr'] = "cannot open 'datapool': no such pool" + ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - mock_exists = Mock() - mock_exists.side_effect = [False, False] - with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.split('datapool', 'backuppool') - res = OrderedDict([('datapool', 'storage pool does not exists')]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.split('datapool', 'backuppool') + res = OrderedDict([('split', False), ('error', "cannot open 'datapool': no such pool")]) + self.assertEqual(res, ret) def test_split_not_mirror(self): ''' @@ -316,15 +374,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "Unable to split datapool: Source pool must be composed only of mirrors" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - mock_exists = Mock() - mock_exists.side_effect = [False, True] - with 
patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}): - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): - ret = zpool.split('datapool', 'backuppool') - res = OrderedDict([('backuppool', 'Unable to split datapool: ' - 'Source pool must be composed only of mirrors')]) - self.assertEqual(res, ret) + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.split('datapool', 'backuppool') + res = OrderedDict([('split', False), ('error', 'Unable to split datapool: Source pool must be composed only of mirrors')]) + self.assertEqual(res, ret) def test_labelclear_success(self): ''' @@ -335,9 +390,30 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "" ret['retcode'] = 0 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False) - res = OrderedDict([('/dev/rdsk/c0t0d0', 'cleared')]) + res = OrderedDict([('labelcleared', True)]) + self.assertEqual(res, ret) + + def test_labelclear_nodevice(self): + ''' + Tests labelclear on non existing device + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "failed to open /dev/rdsk/c0t0d0: No such file or directory" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False) + res = OrderedDict([ + ('labelcleared', False), + ('error', 'failed to open /dev/rdsk/c0t0d0: No such file or directory'), + ]) self.assertEqual(res, ret) def test_labelclear_cleared(self): @@ -349,9 +425,14 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ret['stderr'] = "failed to read label from /dev/rdsk/c0t0d0" ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with 
patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False) - res = OrderedDict([('/dev/rdsk/c0t0d0', 'failed to read label from /dev/rdsk/c0t0d0')]) + res = OrderedDict([ + ('labelcleared', False), + ('error', 'failed to read label from /dev/rdsk/c0t0d0'), + ]) self.assertEqual(res, ret) def test_labelclear_exported(self): @@ -366,7 +447,432 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ]) ret['retcode'] = 1 mock_cmd = MagicMock(return_value=ret) - with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}): + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False) - res = OrderedDict([('/dev/rdsk/c0t0d0', '/dev/rdsk/c0t0d0 is a member of exported pool "mypool"')]) + res = OrderedDict([ + ('labelcleared', False), + ('error', 'use \'force=True\' to override the following error:\n/dev/rdsk/c0t0d0 is a member of exported pool "mypool"'), + ]) + self.assertEqual(res, ret) + + @skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable') + def test_create_file_vdev_success(self): + ''' + Tests create_file_vdev when out of space + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.create_file_vdev('64M', '/vdisks/disk0') + res = OrderedDict([ + ('/vdisks/disk0', 'created'), + ]) + self.assertEqual(res, ret) + + @skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable') + def test_create_file_vdev_nospace(self): + ''' + Tests create_file_vdev when out of space + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "/vdisks/disk0: initialized 10424320 of 67108864 bytes: No 
space left on device" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.create_file_vdev('64M', '/vdisks/disk0') + res = OrderedDict([ + ('/vdisks/disk0', 'failed'), + ('error', OrderedDict([ + ('/vdisks/disk0', ' initialized 10424320 of 67108864 bytes: No space left on device'), + ])), + ]) + self.assertEqual(res, ret) + + def test_export_success(self): + ''' + Tests export + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.export('mypool') + res = OrderedDict([('exported', True)]) + self.assertEqual(res, ret) + + def test_export_nopool(self): + ''' + Tests export when the pool does not exists + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.export('mypool') + res = OrderedDict([('exported', False), ('error', "cannot open 'mypool': no such pool")]) + self.assertEqual(res, ret) + + def test_import_success(self): + ''' + Tests import + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.import_('mypool') + res = OrderedDict([('imported', True)]) + self.assertEqual(res, ret) + + def test_import_duplicate(self): + ''' + Tests import with already imported pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "\n".join([ + "cannot import 'mypool': a pool with that name already exists", + "use the form 'zpool import ' to give 
it a new name", + ]) + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.import_('mypool') + res = OrderedDict([ + ('imported', False), + ('error', "cannot import 'mypool': a pool with that name already exists\nuse the form 'zpool import ' to give it a new name"), + ]) + self.assertEqual(res, ret) + + def test_import_nopool(self): + ''' + Tests import + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot import 'mypool': no such pool available" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.import_('mypool') + res = OrderedDict([ + ('imported', False), + ('error', "cannot import 'mypool': no such pool available"), + ]) + self.assertEqual(res, ret) + + def test_online_success(self): + ''' + Tests online + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.online('mypool', '/dev/rdsk/c0t0d0') + res = OrderedDict([('onlined', True)]) + self.assertEqual(res, ret) + + def test_online_nodevice(self): + ''' + Tests online + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot online /dev/rdsk/c0t0d1: no such device in pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.online('mypool', '/dev/rdsk/c0t0d1') + res = OrderedDict([ + ('onlined', False), + ('error', 'cannot online /dev/rdsk/c0t0d1: no such device in pool'), + ]) + self.assertEqual(res, ret) + + def test_offline_success(self): + ''' + Tests offline + ''' + ret = {} + ret['stdout'] = "" + 
ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.offline('mypool', '/dev/rdsk/c0t0d0') + res = OrderedDict([('offlined', True)]) + self.assertEqual(res, ret) + + def test_offline_nodevice(self): + ''' + Tests offline + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot offline /dev/rdsk/c0t0d1: no such device in pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.offline('mypool', '/dev/rdsk/c0t0d1') + res = OrderedDict([ + ('offlined', False), + ('error', 'cannot offline /dev/rdsk/c0t0d1: no such device in pool'), + ]) + self.assertEqual(res, ret) + + def test_offline_noreplica(self): + ''' + Tests offline + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot offline /dev/rdsk/c0t0d1: no valid replicas" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.offline('mypool', '/dev/rdsk/c0t0d1') + res = OrderedDict([ + ('offlined', False), + ('error', 'cannot offline /dev/rdsk/c0t0d1: no valid replicas'), + ]) + self.assertEqual(res, ret) + + def test_reguid_success(self): + ''' + Tests reguid + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.reguid('mypool') + res = OrderedDict([('reguided', True)]) + self.assertEqual(res, ret) + + def test_reguid_nopool(self): + ''' + Tests reguid with missing pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = 
MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.reguid('mypool') + res = OrderedDict([ + ('reguided', False), + ('error', "cannot open 'mypool': no such pool"), + ]) + self.assertEqual(res, ret) + + def test_reopen_success(self): + ''' + Tests reopen + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.reopen('mypool') + res = OrderedDict([('reopened', True)]) + self.assertEqual(res, ret) + + def test_reopen_nopool(self): + ''' + Tests reopen with missing pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.reopen('mypool') + res = OrderedDict([ + ('reopened', False), + ('error', "cannot open 'mypool': no such pool"), + ]) + self.assertEqual(res, ret) + + def test_upgrade_success(self): + ''' + Tests upgrade + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.upgrade('mypool') + res = OrderedDict([('upgraded', True)]) + self.assertEqual(res, ret) + + def test_upgrade_nopool(self): + ''' + Tests upgrade with missing pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.upgrade('mypool') + res = OrderedDict([ + ('upgraded', False), + 
('error', "cannot open 'mypool': no such pool"), + ]) + self.assertEqual(res, ret) + + def test_history_success(self): + ''' + Tests history + ''' + ret = {} + ret['stdout'] = "\n".join([ + "History for 'mypool':", + "2018-01-18.16:56:12 zpool create -f mypool /dev/rdsk/c0t0d0", + "2018-01-19.16:01:55 zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1", + ]) + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.history('mypool') + res = OrderedDict([ + ('mypool', OrderedDict([ + ('2018-01-18.16:56:12', 'zpool create -f mypool /dev/rdsk/c0t0d0'), + ('2018-01-19.16:01:55', 'zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1'), + ])), + ]) + self.assertEqual(res, ret) + + def test_history_nopool(self): + ''' + Tests history with missing pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.history('mypool') + res = OrderedDict([ + ('error', "cannot open 'mypool': no such pool"), + ]) + self.assertEqual(res, ret) + + def test_clear_success(self): + ''' + Tests clear + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "" + ret['retcode'] = 0 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.clear('mypool') + res = OrderedDict([('cleared', True)]) + self.assertEqual(res, ret) + + def test_clear_nopool(self): + ''' + Tests clear with missing pool + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot open 'mypool': no such pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + 
patch.dict(zpool.__utils__, utils_patch): + ret = zpool.clear('mypool') + res = OrderedDict([ + ('cleared', False), + ('error', "cannot open 'mypool': no such pool"), + ]) + + def test_clear_nodevice(self): + ''' + Tests clear with non existign device + ''' + ret = {} + ret['stdout'] = "" + ret['stderr'] = "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool" + ret['retcode'] = 1 + mock_cmd = MagicMock(return_value=ret) + + with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(zpool.__utils__, utils_patch): + ret = zpool.clear('mypool', '/dev/rdsk/c0t0d0') + res = OrderedDict([ + ('cleared', False), + ('error', "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"), + ]) + self.assertEqual(res, ret) self.assertEqual(res, ret) From 9d18da9e2ea300ab37bffc54f2b0daeec34d94c3 Mon Sep 17 00:00:00 2001 From: Super-User Date: Sun, 4 Feb 2018 22:35:03 +0100 Subject: [PATCH 211/223] Phase 1 - salt.modules.zpool --- salt/modules/zpool.py | 1601 +++++++++++++++++++++-------------------- 1 file changed, 825 insertions(+), 776 deletions(-) diff --git a/salt/modules/zpool.py b/salt/modules/zpool.py index b9ec6f7297..f7d1467588 100644 --- a/salt/modules/zpool.py +++ b/salt/modules/zpool.py @@ -2,21 +2,24 @@ ''' Module for running ZFS zpool command -:codeauthor: Nitin Madhok +:codeauthor: Nitin Madhok , Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:depends: salt.utils.zfs +:platform: illumos,freebsd,linux ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import os -import stat import logging # Import Salt libs import salt.utils.decorators import salt.utils.decorators.path import salt.utils.path +from salt.ext.six.moves import zip from salt.utils.odict import OrderedDict -from salt.modules.zfs import _conform_value log = logging.getLogger(__name__) @@ -37,179 +40,211 @@ def __virtual__(): return (False, "The zpool module cannot be loaded: zfs not supported") 
-@salt.utils.decorators.memoize -def _check_zpool(): +def _clean_vdev_config(config): ''' - Looks to see if zpool is present on the system + Return a simple vdev tree from zpool.status' config section ''' - return salt.utils.path.which('zpool') + cln_config = OrderedDict() + for label, sub_config in config.items(): + if label not in ['state', 'read', 'write', 'cksum']: + sub_config = _clean_vdev_config(sub_config) + if sub_config and isinstance(cln_config, list): + cln_config.append(OrderedDict([(label, sub_config)])) + elif sub_config and isinstance(cln_config, OrderedDict): + cln_config[label] = sub_config + elif isinstance(cln_config, list): + cln_config.append(label) + elif isinstance(cln_config, OrderedDict): + new_config = [] + for old_label, old_config in cln_config.items(): + new_config.append(OrderedDict([(old_label, old_config)])) + new_config.append(label) + cln_config = new_config + else: + cln_config = [label] -@salt.utils.decorators.memoize -def _check_features(): - ''' - Looks to see if zpool-features is available - ''' - # get man location - man = salt.utils.path.which('man') - if not man: - return False - - cmd = '{man} zpool-features'.format( - man=man - ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - return res['retcode'] == 0 - - -@salt.utils.decorators.memoize -def _check_mkfile(): - ''' - Looks to see if mkfile is present on the system - ''' - return salt.utils.path.which('mkfile') + return cln_config def healthy(): ''' - .. versionadded:: 2016.3.0 - Check if all zpools are healthy + .. versionadded:: 2016.3.0 + CLI Example: .. 
code-block:: bash salt '*' zpool.healthy - ''' - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} status -x'.format( - zpool_cmd=zpool_cmd + ''' + ## collect status output + # NOTE: we pass the -x flag, by doing this + # we will get 'all pools are healthy' on stdout + # if all pools are healthy, otherwise we will get + # the same output that we expect from zpool status + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']('status', flags=['-x']), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) return res['stdout'] == 'all pools are healthy' def status(zpool=None): ''' - .. versionchanged:: 2016.3.0 - Return the status of the named zpool zpool : string optional name of storage pool + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.status myzpool + ''' ret = OrderedDict() - # get zpool list data - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} status{zpool}'.format( - zpool_cmd=zpool_cmd, - zpool=' {0}'.format(zpool) if zpool else '' + ## collect status output + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']('status', target=zpool), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] - return ret - # parse zpool status data - zp_data = {} + if res['retcode'] != 0: + return __utils__['zfs.parse_command_result'](res) + + # NOTE: command output for reference + # ===================================================================== + # pool: data + # state: ONLINE + # scan: scrub repaired 0 in 2h27m with 0 errors on Mon Jan 8 03:27:25 2018 + # config: + # + # NAME STATE READ WRITE CKSUM + # data ONLINE 0 0 0 + # mirror-0 ONLINE 0 0 0 + # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 + # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 + # c0tXXXXCXXXXXXXXXXXd0 ONLINE 0 0 0 + # + # errors: No known data errors + # 
===================================================================== + + ## parse status output + # NOTE: output is 'key: value' except for the 'config' key. + # mulitple pools will repeat the output, so if switch pools if + # we see 'pool:' current_pool = None current_prop = None for zpd in res['stdout'].splitlines(): if zpd.strip() == '': continue if ':' in zpd: + # NOTE: line is 'key: value' format, we just update a dict prop = zpd.split(':')[0].strip() value = ":".join(zpd.split(':')[1:]).strip() if prop == 'pool' and current_pool != value: current_pool = value - zp_data[current_pool] = {} + ret[current_pool] = OrderedDict() if prop != 'pool': - zp_data[current_pool][prop] = value + ret[current_pool][prop] = value current_prop = prop else: - zp_data[current_pool][current_prop] = "{0}\n{1}".format( - zp_data[current_pool][current_prop], + # NOTE: we append the line output to the last property + # this should only happens once we hit the config + # section + ret[current_pool][current_prop] = "{0}\n{1}".format( + ret[current_pool][current_prop], zpd ) - # parse zpool config data - for pool in zp_data: - if 'config' not in zp_data[pool]: + ## parse config property for each pool + # NOTE: the config property has some structured data + # sadly this data is in a different format than + # the rest and it needs further processing + for pool in ret: + if 'config' not in ret[pool]: continue header = None root_vdev = None vdev = None dev = None - config = zp_data[pool]['config'] + rdev = None + config = ret[pool]['config'] config_data = OrderedDict() for line in config.splitlines(): + # NOTE: the first line is the header + # we grab all the none whitespace values if not header: header = line.strip().lower() header = [x for x in header.split(' ') if x not in ['']] continue - if line[0:1] == "\t": + # NOTE: data is indented by 1 tab, then multiples of 2 spaces + # to differential root vdev, vdev, and dev + # + # we just strip the intial tab (can't use .strip() here) + if 
line[0] == "\t": line = line[1:] - stat_data = OrderedDict() - stats = [x for x in line.strip().split(' ') if x not in ['']] - for prop in header: - if prop == 'name': - continue - if header.index(prop) < len(stats): - stat_data[prop] = stats[header.index(prop)] + # NOTE: transform data into dict + stat_data = OrderedDict(list(zip( + header, + [x for x in line.strip().split(' ') if x not in ['']], + ))) - dev = line.strip().split()[0] + # NOTE: decode the zfs values properly + stat_data = __utils__['zfs.from_auto_dict'](stat_data) - if line[0:4] != ' ': - if line[0:2] == ' ': - vdev = line.strip().split()[0] - dev = None - else: - root_vdev = line.strip().split()[0] - vdev = None - dev = None + # NOTE: store stat_data in the proper location + if line.startswith(' ' * 6): + rdev = stat_data['name'] + config_data[root_vdev][vdev][dev][rdev] = stat_data + elif line.startswith(' ' * 4): + rdev = None + dev = stat_data['name'] + config_data[root_vdev][vdev][dev] = stat_data + elif line.startswith(' ' * 2): + rdev = dev = None + vdev = stat_data['name'] + config_data[root_vdev][vdev] = stat_data + else: + rdev = dev = vdev = None + root_vdev = stat_data['name'] + config_data[root_vdev] = stat_data - if root_vdev: - if root_vdev not in config_data: - config_data[root_vdev] = {} - if len(stat_data) > 0: - config_data[root_vdev] = stat_data - if vdev: - if vdev not in config_data[root_vdev]: - config_data[root_vdev][vdev] = {} - if len(stat_data) > 0: - config_data[root_vdev][vdev] = stat_data - if dev and dev not in config_data[root_vdev][vdev]: - config_data[root_vdev][vdev][dev] = {} - if len(stat_data) > 0: - config_data[root_vdev][vdev][dev] = stat_data + # NOTE: name already used as identifier, drop duplicate data + del stat_data['name'] - zp_data[pool]['config'] = config_data + ret[pool]['config'] = config_data - return zp_data + return ret -def iostat(zpool=None, sample_time=0): +def iostat(zpool=None, sample_time=5, parsable=True): ''' - .. 
versionchanged:: 2016.3.0 - Display I/O statistics for the given pools zpool : string optional name of storage pool sample_time : int seconds to capture data before output + default a sample of 5 seconds is used + parsable : boolean + display data in pythonc values (True, False, Bytes,...) + + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + + Added ```parsable``` parameter that defaults to True CLI Example: @@ -219,109 +254,106 @@ def iostat(zpool=None, sample_time=0): ''' ret = OrderedDict() - # get zpool list data - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} iostat -v{zpool}{sample_time}'.format( - zpool_cmd=zpool_cmd, - zpool=' {0}'.format(zpool) if zpool else '', - sample_time=' {0} 2'.format(sample_time) if sample_time else '' + ## get iostat output + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='iostat', + flags=['-v'], + target=[zpool, sample_time, 2] + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] - return ret - # note: hardcoded header fields, the double header is hard to parse - # capacity operations bandwidth - #pool alloc free read write read write + if res['retcode'] != 0: + return __utils__['zfs.parse_command_result'](res) + + # NOTE: command output for reference + # ===================================================================== + # capacity operations bandwidth + # pool alloc free read write read write + # ------------------------- ----- ----- ----- ----- ----- ----- + # mypool 648G 1.18T 10 6 1.30M 817K + # mirror 648G 1.18T 10 6 1.30M 817K + # c0tXXXXCXXXXXXXXXXXd0 - - 9 5 1.29M 817K + # c0tXXXXCXXXXXXXXXXXd0 - - 9 5 1.29M 817K + # c0tXXXXCXXXXXXXXXXXd0 - - 9 5 1.29M 817K + # ------------------------- ----- ----- ----- ----- ----- ----- + # ===================================================================== + + ## parse iostat output + # NOTE: hardcode the header 
+ # the double header line is hard to parse, we opt to + # hardcode the header fields header = [ - 'pool', - 'capacity-alloc', - 'capacity-free', - 'operations-read', - 'operations-write', - 'bandwith-read', - 'bandwith-write' + 'name', + 'capacity-alloc', 'capacity-free', + 'operations-read', 'operations-write', + 'bandwith-read', 'bandwith-write', ] root_vdev = None vdev = None dev = None - config_data = None - current_pool = None + current_data = OrderedDict() for line in res['stdout'].splitlines(): - if line.strip() == '': - continue - - # ignore header - if line.startswith('pool') and line.endswith('write'): - continue - if line.endswith('bandwidth'): + # NOTE: skip header + if line.strip() == '' or \ + line.strip().split()[-1] in ['write', 'bandwidth']: continue + # NOTE: reset pool on line separator if line.startswith('-') and line.endswith('-'): - if config_data: - ret[current_pool] = config_data - config_data = OrderedDict() - current_pool = None + ret.update(current_data) + current_data = OrderedDict() + continue + + # NOTE: transform data into dict + io_data = OrderedDict(list(zip( + header, + [x for x in line.strip().split(' ') if x not in ['']], + ))) + + # NOTE: normalize values + if parsable: + # NOTE: raw numbers and pythonic types + io_data = __utils__['zfs.from_auto_dict'](io_data) else: - if not isinstance(config_data, salt.utils.odict.OrderedDict): - continue + # NOTE: human readable zfs types + io_data = __utils__['zfs.to_auto_dict'](io_data) - stat_data = OrderedDict() - stats = [x for x in line.strip().split(' ') if x not in ['']] - for prop in header: - if header.index(prop) < len(stats): - if prop == 'pool': - if not current_pool: - current_pool = stats[header.index(prop)] - continue - if stats[header.index(prop)] == '-': - continue - stat_data[prop] = stats[header.index(prop)] + # NOTE: store io_data in the proper location + if line.startswith(' ' * 4): + dev = io_data['name'] + current_data[root_vdev][vdev][dev] = io_data + elif 
line.startswith(' ' * 2): + dev = None + vdev = io_data['name'] + current_data[root_vdev][vdev] = io_data + else: + dev = vdev = None + root_vdev = io_data['name'] + current_data[root_vdev] = io_data - dev = line.strip().split()[0] - - if line[0:4] != ' ': - if line[0:2] == ' ': - vdev = line.strip().split()[0] - dev = None - else: - root_vdev = line.strip().split()[0] - vdev = None - dev = None - - if root_vdev: - if not config_data.get(root_vdev): - config_data[root_vdev] = {} - if len(stat_data) > 0: - config_data[root_vdev] = stat_data - if vdev: - if vdev not in config_data[root_vdev]: - config_data[root_vdev][vdev] = {} - if len(stat_data) > 0: - config_data[root_vdev][vdev] = stat_data - if dev and dev not in config_data[root_vdev][vdev]: - config_data[root_vdev][vdev][dev] = {} - if len(stat_data) > 0: - config_data[root_vdev][vdev][dev] = stat_data + # NOTE: name already used as identifier, drop duplicate data + del io_data['name'] return ret -def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=False): +def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=True): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: Oxygen - Return information about (all) storage pools zpool : string optional name of storage pool properties : string - comma-separated list of properties to list + comma-separated list of properties to display parsable : boolean - display numbers in parsable (exact) values - .. versionadded:: Oxygen + display data in pythonc values (True, False, Bytes,...) + + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + + Added ```parsable``` parameter that defaults to True .. 
note:: the 'name' property will always be included, the 'frag' property will get removed if not available @@ -343,46 +375,64 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=Fal ''' ret = OrderedDict() - # remove 'frag' property if not available - properties = properties.split(',') - if 'name' in properties: + ## update properties + # NOTE: properties should be a list + if not isinstance(properties, list): + properties = properties.split(',') + + # NOTE: name should be first property + while 'name' in properties: properties.remove('name') properties.insert(0, 'name') - if not _check_features() and 'frag' in properties: - properties.remove('frag') - # get zpool list data - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} list -H -o {properties}{parsable}{zpool}'.format( - zpool_cmd=zpool_cmd, - properties=','.join(properties), - parsable=' -p' if parsable else '', - zpool=' {0}'.format(zpool) if zpool else '' + # NOTE: remove 'frags' if we don't have feature flags + if not __utils__['zfs.has_feature_flags'](): + while 'frag' in properties: + properties.remove('frag') + + ## collect list output + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='list', + flags=['-H', '-p'], + opts={'-o': ','.join(properties)}, + target=zpool + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) + if res['retcode'] != 0: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] - return ret + return __utils__['zfs.parse_command_result'](res) - # parse zpool list data - for zp in res['stdout'].splitlines(): - zp = zp.split("\t") - zp_data = {} + # NOTE: command output for reference + # ======================================================================== + # data 1992864825344 695955501056 1296909324288 34 11% ONLINE + # ========================================================================= - for prop in properties: - zp_data[prop] = _conform_value(zp[properties.index(prop)]) + ## 
parse list output + for line in res['stdout'].splitlines(): + # NOTE: transform data into dict + zpool_data = OrderedDict(list(zip( + properties, + line.strip().split('\t'), + ))) - ret[zp_data['name']] = zp_data - del ret[zp_data['name']]['name'] + # NOTE: normalize values + if parsable: + # NOTE: raw numbers and pythonic types + zpool_data = __utils__['zfs.from_auto_dict'](zpool_data) + else: + # NOTE: human readable zfs types + zpool_data = __utils__['zfs.to_auto_dict'](zpool_data) + + ret[zpool_data['name']] = zpool_data + del ret[zpool_data['name']]['name'] return ret -def get(zpool, prop=None, show_source=False, parsable=False): +def get(zpool, prop=None, show_source=False, parsable=True): ''' - .. versionadded:: 2016.3.0 - .. versionchanged: Oxygen - Retrieves the given list of properties zpool : string @@ -392,8 +442,12 @@ def get(zpool, prop=None, show_source=False, parsable=False): show_source : boolean show source of property parsable : boolean - display numbers in parsable (exact) values - .. versionadded:: Oxygen + display data in pythonc values (True, False, Bytes,...) + + .. versionadded:: 2016.3.0 + .. 
versionchanged:: Fluorine + + Added ```parsable``` parameter that defaults to True CLI Example: @@ -402,45 +456,59 @@ def get(zpool, prop=None, show_source=False, parsable=False): salt '*' zpool.get myzpool ''' ret = OrderedDict() - ret[zpool] = OrderedDict() + value_properties = ['property', 'value', 'source'] - properties = 'property,value,source'.split(',') - - # get zpool list data - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} get -H -o {properties}{parsable} {prop} {zpool}'.format( - zpool_cmd=zpool_cmd, - properties=','.join(properties), - parsable=' -p' if parsable else '', - prop=prop if prop else 'all', - zpool=zpool + ## collect get output + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='get', + flags=['-H', '-p'], + opts={'-o': ','.join(value_properties)}, + property_name=prop if prop else 'all', + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) + if res['retcode'] != 0: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] - return ret + return __utils__['zfs.parse_command_result'](res) - # parse zpool list data - for zp in res['stdout'].splitlines(): - zp = zp.split("\t") - zp_data = {} + # NOTE: command output for reference + # ======================================================================== + # ... + # data mountpoint /data local + # data compression off default + # ... 
+ # ========================================================================= - for prop in properties: - zp_data[prop] = _conform_value(zp[properties.index(prop)]) + # parse get output + for line in res['stdout'].splitlines(): + # NOTE: transform data into dict + prop_data = OrderedDict(list(zip( + value_properties, + [x for x in line.strip().split('\t') if x not in ['']], + ))) - if show_source: - ret[zpool][zp_data['property']] = zp_data - del ret[zpool][zp_data['property']]['property'] + # NOTE: normalize values + if parsable: + # NOTE: raw numbers and pythonic types + prop_data['value'] = __utils__['zfs.from_auto'](prop_data['property'], prop_data['value']) else: - ret[zpool][zp_data['property']] = zp_data['value'] + # NOTE: human readable zfs types + prop_data['value'] = __utils__['zfs.to_auto'](prop_data['property'], prop_data['value']) + + # NOTE: show source if requested + if show_source: + ret[prop_data['property']] = prop_data + del ret[prop_data['property']]['property'] + else: + ret[prop_data['property']] = prop_data['value'] return ret def set(zpool, prop, value): ''' - .. versionadded:: 2016.3.0 - Sets the given property on the specified pool zpool : string @@ -450,32 +518,28 @@ def set(zpool, prop, value): value : string value to set property to + .. versionadded:: 2016.3.0 + CLI Example: .. 
code-block:: bash salt '*' zpool.set myzpool readonly yes ''' - ret = {} - ret[zpool] = {} + ret = OrderedDict() - # make sure value is what zfs expects - value = _conform_value(value) - - # get zpool list data - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} set {prop}={value} {zpool}'.format( - zpool_cmd=zpool_cmd, - prop=prop, - value=value, - zpool=zpool + # set property + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='set', + property_name=prop, + property_value=value, + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool][prop] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool][prop] = value - return ret + + return __utils__['zfs.parse_command_result'](res, 'set') def exists(zpool): @@ -491,21 +555,22 @@ def exists(zpool): salt '*' zpool.exists myzpool ''' - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} list {zpool}'.format( - zpool_cmd=zpool_cmd, - zpool=zpool + # list for zpool + # NOTE: retcode > 0 if zpool does not exists + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='list', + target=zpool, + ), + python_shell=False, + ignore_retcode=True, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True) - if res['retcode'] != 0: - return False - return True + + return res['retcode'] == 0 def destroy(zpool, force=False): ''' - .. versionchanged:: 2016.3.0 - Destroys a storage pool zpool : string @@ -513,38 +578,29 @@ def destroy(zpool, force=False): force : boolean force destroy of pool + .. versionchanged:: 2016.3.0 + CLI Example: .. 
code-block:: bash salt '*' zpool.destroy myzpool ''' - ret = {} - ret[zpool] = {} - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - else: - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} destroy {force}{zpool}'.format( - zpool_cmd=zpool_cmd, - force='-f ' if force else '', - zpool=zpool - ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = 'error destroying storage pool' - if 'stderr' in res and res['stderr'] != '': - ret[zpool] = res['stderr'] - else: - ret[zpool] = 'destroyed' + # destroy zpool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='destroy', + flags=['-f'] if force else None, + target=zpool, + ), + python_shell=False, + ) - return ret + return __utils__['zfs.parse_command_result'](res, 'destroyed') def scrub(zpool, stop=False, pause=False): ''' - .. versionchanged:: 2016.3.0 - Scrub a storage pool zpool : string @@ -558,6 +614,10 @@ def scrub(zpool, stop=False, pause=False): .. note:: If both pause and stop are true, stop will win. + Pause support was added in this PR: + https://github.com/openzfs/openzfs/pull/407 + + .. 
versionchanged:: 2016.3.0 CLI Example: @@ -565,51 +625,37 @@ def scrub(zpool, stop=False, pause=False): salt '*' zpool.scrub myzpool ''' - ret = {} - ret[zpool] = {} - if __salt__['zpool.exists'](zpool): - zpool_cmd = _check_zpool() - if stop: - action = '-s ' - elif pause: - # NOTE: https://github.com/openzfs/openzfs/pull/407 - action = '-p ' - else: - action = '' - cmd = '{zpool_cmd} scrub {action}{zpool}'.format( - zpool_cmd=zpool_cmd, - action=action, - zpool=zpool - ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - ret[zpool] = {} - if res['retcode'] != 0: - ret[zpool]['scrubbing'] = False - if 'stderr' in res: - if 'currently scrubbing' in res['stderr']: - ret[zpool]['scrubbing'] = True - elif 'no active scrub' not in res['stderr']: - ret[zpool]['error'] = res['stderr'] - else: - ret[zpool]['error'] = res['stdout'] - else: - if stop: - ret[zpool]['scrubbing'] = False - elif pause: - ret[zpool]['scrubbing'] = False - else: - ret[zpool]['scrubbing'] = True + ## select correct action + if stop: + action = ['-s'] + elif pause: + action = ['-p'] else: - ret[zpool] = 'storage pool does not exist' + action = None + ## Scrub storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='scrub', + flags=action, + target=zpool, + ), + python_shell=False, + ) + + if res['retcode'] != 0: + return __utils__['zfs.parse_command_result'](res, 'scrubbing') + + ret = OrderedDict() + if stop or pause: + ret['scrubbing'] = False + else: + ret['scrubbing'] = True return ret def create(zpool, *vdevs, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a hybrid zpool with cache, spare and log drives or a zpool with RAIDZ-1, RAIDZ-2 or RAIDZ-3 zpool : string @@ -630,6 +676,9 @@ def create(zpool, *vdevs, **kwargs): ..versionadded:: Oxygen create a boot partition + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash @@ -660,77 +709,55 @@ def create(zpool, *vdevs, **kwargs): salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}" ''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + flags = [] + opts = {} + target = [] - # Check if the pool_name is already being used - if __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool already exists' - return ret + # NOTE: push pool and filesystem properties + pool_properties = kwargs.get('properties', {}) + filesystem_properties = kwargs.get('filesystem_properties', {}) - if not vdevs: - ret[zpool] = 'no devices specified' - return ret + # NOTE: set extra config based on kwargs + if kwargs.get('force', False): + flags.append('-f') + if kwargs.get('createboot', False) or 'bootsize' in pool_properties: + flags.append('-B') + if kwargs.get('altroot', False): + opts['-R'] = kwargs.get('altroot') + if kwargs.get('mountpoint', False): + opts['-m'] = kwargs.get('mountpoint') - devs = ' '.join(vdevs) - zpool_cmd = _check_zpool() - force = kwargs.get('force', False) - altroot = kwargs.get('altroot', None) - createboot = kwargs.get('createboot', False) - mountpoint = kwargs.get('mountpoint', None) - properties = kwargs.get('properties', None) - filesystem_properties = kwargs.get('filesystem_properties', None) - cmd = '{0} create'.format(zpool_cmd) + # NOTE: append the pool name and specifications + target.append(zpool) + target.extend(vdevs) - # bootsize implies createboot - if properties and 'bootsize' in properties: - createboot = True + ## Create storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='create', + flags=flags, + opts=opts, + pool_properties=pool_properties, + filesystem_properties=filesystem_properties, + target=target, + ), + python_shell=False, + ) - # make sure values are in the format zfs expects - if properties: - for prop in properties: - properties[prop] = _conform_value(properties[prop]) - 
- if filesystem_properties: - for prop in filesystem_properties: - filesystem_properties[prop] = _conform_value(filesystem_properties[prop]) - - # apply extra arguments from kwargs - if force: # force creation - cmd = '{0} -f'.format(cmd) - if createboot: # create boot paritition - cmd = '{0} -B'.format(cmd) - if properties: # create "-o property=value" pairs - proplist = [] - for prop in properties: - proplist.append('-o {0}={1}'.format(prop, properties[prop])) - cmd = '{0} {1}'.format(cmd, ' '.join(proplist)) - if filesystem_properties: # create "-O property=value" pairs - fsproplist = [] - for prop in filesystem_properties: - fsproplist.append('-O {0}={1}'.format(prop, filesystem_properties[prop])) - cmd = '{0} {1}'.format(cmd, ' '.join(fsproplist)) - if mountpoint: # set mountpoint - cmd = '{0} -m {1}'.format(cmd, mountpoint) - if altroot: # set altroot - cmd = '{0} -R {1}'.format(cmd, altroot) - cmd = '{0} {1} {2}'.format(cmd, zpool, devs) - - # Create storage pool - res = __salt__['cmd.run_all'](cmd, python_shell=False) - - # Check and see if the pools is available - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = 'created with {0}'.format(devs) + ret = __utils__['zfs.parse_command_result'](res, 'created') + if ret['created']: + ## NOTE: lookup zpool status for vdev config + ret['vdevs'] = _clean_vdev_config( + __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], + ) return ret def add(zpool, *vdevs, **kwargs): ''' - .. versionchanged:: 2016.3.0 - Add the specified vdev\'s to the given storage pool zpool : string @@ -740,47 +767,49 @@ def add(zpool, *vdevs, **kwargs): force : boolean forces use of device + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.add myzpool /path/to/vdev1 /path/to/vdev2 [...] 
''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + flags = [] + target = [] - # check for pool - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret + # NOTE: set extra config based on kwargs + if kwargs.get('force', False): + flags.append('-f') - if not vdevs: - ret[zpool] = 'no devices specified' - return ret + # NOTE: append the pool name and specifications + target.append(zpool) + target.extend(vdevs) - force = kwargs.get('force', False) - devs = ' '.join(vdevs) - - # try and add watch out for mismatched replication levels - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} add {force}{zpool} {devs}'.format( - zpool_cmd=zpool_cmd, - force='-f ' if force else '', - zpool=zpool, - devs=devs + ## Update storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='add', + flags=flags, + target=target, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = 'added {0}'.format(devs) + + ret = __utils__['zfs.parse_command_result'](res, 'added') + if ret['added']: + ## NOTE: lookup zpool status for vdev config + ret['vdevs'] = _clean_vdev_config( + __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], + ) return ret def attach(zpool, device, new_device, force=False): ''' - .. versionchanged:: 2016.3.0 - Attach specified device to zpool zpool : string @@ -792,61 +821,51 @@ def attach(zpool, device, new_device, force=False): force : boolean forces use of device + .. versionchanged:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.attach myzpool /path/to/vdev1 /path/to/vdev2 [...] 
''' - ret = {} - dlist = [] + ## Configure pool + # NOTE: initialize the defaults + flags = [] + target = [] - # check for pool - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret + # NOTE: set extra config + if force: + flags.append('-f') - # check devices - ret[zpool] = {} - if not os.path.exists(device): - ret[zpool][device] = 'not present on filesystem' - else: - mode = os.stat(device).st_mode - if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode): - ret[zpool][device] = 'not a block device, a file vdev or character special device' - if not os.path.exists(new_device): - ret[zpool][new_device] = 'not present on filesystem' - else: - mode = os.stat(new_device).st_mode - if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode): - ret[zpool][new_device] = 'not a block device, a file vdev or character special device' + # NOTE: append the pool name and specifications + target.append(zpool) + target.append(device) + target.append(new_device) - if len(ret[zpool]) > 0: - return ret - - # try and add watch out for mismatched replication levels - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} attach {force}{zpool} {device} {new_device}'.format( - zpool_cmd=zpool_cmd, - force='-f ' if force else '', - zpool=zpool, - device=device, - new_device=new_device + ## Update storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='attach', + flags=flags, + target=target, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = {} - ret[zpool][new_device] = 'attached' + + ret = __utils__['zfs.parse_command_result'](res, 'attached') + if ret['attached']: + ## NOTE: lookup zpool status for vdev config + ret['vdevs'] = _clean_vdev_config( + __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], + ) return ret def detach(zpool, device): ''' - .. 
versionchanged:: 2016.3.0 - Detach specified device to zpool zpool : string @@ -854,41 +873,35 @@ def detach(zpool, device): device : string device to detach + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.detach myzpool /path/to/vdev1 ''' - ret = {} - dlist = [] - - # check for pool - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret - - # try and add watch out for mismatched replication levels - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} detach {zpool} {device}'.format( - zpool_cmd=zpool_cmd, - zpool=zpool, - device=device + ## Update storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='detach', + target=[zpool, device], + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = {} - ret[zpool][device] = 'detached' + + ret = __utils__['zfs.parse_command_result'](res, 'detatched') + if ret['detatched']: + ## NOTE: lookup zpool status for vdev config + ret['vdevs'] = _clean_vdev_config( + __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], + ) return ret def split(zpool, newzpool, **kwargs): ''' - .. versionadded:: Oxygen - Splits devices off pool creating newpool. .. note:: @@ -896,6 +909,8 @@ def split(zpool, newzpool, **kwargs): All vdevs in pool must be mirrors. At the time of the split, newpool will be a replica of pool. + After splitting, do not forget to import the new pool! + zpool : string name of storage pool newzpool : string @@ -907,6 +922,9 @@ def split(zpool, newzpool, **kwargs): properties : dict additional pool properties for newzpool + .. versionadded:: Oxygen + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash @@ -928,48 +946,33 @@ def split(zpool, newzpool, **kwargs): salt '*' zpool.split datamirror databackup properties="{'readonly': 'on'}" ''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + opts = {} - # Check if the pool_name is already being used - if __salt__['zpool.exists'](newzpool): - ret[newzpool] = 'storage pool already exists' - return ret + # NOTE: push pool and filesystem properties + pool_properties = kwargs.get('properties', {}) - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exists' - return ret + # NOTE: set extra config based on kwargs + if kwargs.get('altroot', False): + opts['-R'] = kwargs.get('altroot') - zpool_cmd = _check_zpool() - altroot = kwargs.get('altroot', None) - properties = kwargs.get('properties', None) - cmd = '{0} split'.format(zpool_cmd) + ## Split storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='split', + opts=opts, + pool_properties=pool_properties, + target=[zpool, newzpool], + ), + python_shell=False, + ) - # apply extra arguments from kwargs - if properties: # create "-o property=value" pairs - proplist = [] - for prop in properties: - proplist.append('-o {0}={1}'.format(prop, _conform_value(properties[prop]))) - cmd = '{0} {1}'.format(cmd, ' '.join(proplist)) - if altroot: # set altroot - cmd = '{0} -R {1}'.format(cmd, altroot) - cmd = '{0} {1} {2}'.format(cmd, zpool, newzpool) - - # Create storage pool - res = __salt__['cmd.run_all'](cmd, python_shell=False) - - # Check and see if the pools is available - if res['retcode'] != 0: - ret[newzpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[newzpool] = 'split off from {}'.format(zpool) - - return ret + return __utils__['zfs.parse_command_result'](res, 'split') def replace(zpool, old_device, new_device=None, force=False): ''' - .. versionchanged:: 2016.3.0 - Replaces old_device with new_device. .. 
note:: @@ -988,53 +991,45 @@ def replace(zpool, old_device, new_device=None, force=False): force : boolean Forces use of new_device, even if its appears to be in use. + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2 ''' - ret = {} - # Make sure pool is there - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret + ## Configure pool + # NOTE: initialize the defaults + flags = [] + target = [] - # check devices - ret[zpool] = {} - if not new_device: # if we have a new device, old_device is probably missing! - if not os.path.exists(old_device): - ret[zpool][old_device] = 'not present on filesystem' - else: - mode = os.stat(old_device).st_mode - if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode): - ret[zpool][old_device] = 'not a block device, a file vdev or character special device' + # NOTE: set extra config + if force: + flags.append('-f') - if new_device: # if we are replacing a device in the same slot, new device can be None - if not os.path.exists(new_device): - ret[zpool][new_device] = 'not present on filesystem' - else: - mode = os.stat(new_device).st_mode - if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode): - ret[zpool][new_device] = 'not a block device, a file vdev or character special device' + # NOTE: append the pool name and specifications + target.append(zpool) + target.append(old_device) + if new_device: + target.append(new_device) - if len(ret[zpool]) > 0: - return ret - - # Replace vdevs - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} replace {force}{zpool} {old_device}{new_device}'.format( - zpool_cmd=zpool_cmd, - zpool=zpool, - force='-f ' if force else '', - old_device=old_device, - new_device=' {0}'.format(new_device) if new_device else '' + ## Replace device + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='replace', + flags=flags, + target=target, + ), + python_shell=False, ) - res = 
__salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = 'replaced {0} with {1}'.format(old_device, new_device) + + ret = __utils__['zfs.parse_command_result'](res, 'replaced') + if ret['replaced']: + ## NOTE: lookup zpool status for vdev config + ret['vdevs'] = _clean_vdev_config( + __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool], + ) return ret @@ -1042,52 +1037,52 @@ def replace(zpool, old_device, new_device=None, force=False): @salt.utils.decorators.path.which('mkfile') def create_file_vdev(size, *vdevs): ''' - .. versionchanged:: 2016.3.0 - Creates file based ``virtual devices`` for a zpool ``*vdevs`` is a list of full paths for mkfile to create + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash - salt '*' zpool.create_file_vdev 7g /path/to/vdev1 [/path/to/vdev2] [...] + salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...] .. note:: Depending on file size, the above command may take a while to return. 
''' - ret = {} - dlist = [] - # Get file names to create + ret = OrderedDict() + err = OrderedDict() + + _mkfile_cmd = salt.utils.path.which('mkfile') for vdev in vdevs: - # check if file is present if not add it if os.path.isfile(vdev): ret[vdev] = 'existed' else: - dlist.append(vdev) - - mkfile = _check_mkfile() - cmd = [mkfile, '{0}'.format(size)] - cmd.extend(dlist) - __salt__['cmd.run_all'](cmd, python_shell=False) - - # Makesure the files are there - for vdev in vdevs: - if not os.path.isfile(vdev): - ret[vdev] = 'failed' - else: - if vdev not in ret: + res = __salt__['cmd.run_all']( + '{mkfile} {size} {vdev}'.format( + mkfile=_mkfile_cmd, + size=size, + vdev=vdev, + ), + python_shell=False, + ) + if res['retcode'] != 0: + if 'stderr' in res and ':' in res['stderr']: + ret[vdev] = 'failed' + err[vdev] = ":".join(res['stderr'].strip().split(':')[1:]) + else: ret[vdev] = 'created' + if err: + ret['error'] = err + return ret def export(*pools, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Export storage pools *pools : string @@ -1095,6 +1090,9 @@ def export(*pools, **kwargs): force : boolean force export of storage pools + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash @@ -1102,39 +1100,33 @@ def export(*pools, **kwargs): salt '*' zpool.export myzpool ... [force=True|False] salt '*' zpool.export myzpool2 myzpool2 ... 
[force=True|False] ''' - ret = {} - pool_present = [] - if not pools: - ret['error'] = 'atleast one storage pool must be specified' - return ret + ## Configure pool + # NOTE: initialize the defaults + flags = [] + targets = [] - for pool in pools: - if not __salt__['zpool.exists'](pool): - ret[pool] = 'storage pool does not exist' - else: - pool_present.append(pool) + # NOTE: set extra config based on kwargs + if kwargs.get('force', False): + flags.append('-f') - zpool = _check_zpool() - force = kwargs.get('force', False) - for pool in pool_present: - if force is True: - cmd = '{0} export -f {1}'.format(zpool, pool) - else: - cmd = '{0} export {1}'.format(zpool, pool) - res = __salt__['cmd.run_all'](cmd, ignore_retcode=True) - if res['retcode'] != 0: - ret[pool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[pool] = 'exported' + # NOTE: append the pool name and specifications + targets = list(pools) - return ret + ## Export pools + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='export', + flags=flags, + target=targets, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'exported') def import_(zpool=None, new_name=None, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Import storage pools or list pools available for import zpool : string @@ -1153,6 +1145,19 @@ def import_(zpool=None, new_name=None, **kwargs): import the pool without mounting any file systems. only_destroyed : boolean imports destroyed pools only. this also sets force=True. + recovery : bool|str + false: do not try to recovery broken pools + true: try to recovery the pool by rolling back the latest transactions + test: check if a pool can be recovered, but don't import it + nolog: allow import without log device, recent transactions might be lost + + .. note:: + If feature flags are not support this forced to the default of 'false' + + .. 
warning:: + When recovery is set to 'test' the result will be have imported set to True if the pool + can be imported. The pool might also be imported if the pool was not broken to begin with. + properties : dict additional pool properties @@ -1164,6 +1169,9 @@ def import_(zpool=None, new_name=None, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash @@ -1172,69 +1180,63 @@ def import_(zpool=None, new_name=None, **kwargs): salt '*' zpool.import myzpool [mynewzpool] [force=True|False] salt '*' zpool.import myzpool dir='/tmp' ''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + flags = [] + opts = {} + target = [] - zpool_cmd = _check_zpool() - force = kwargs.get('force', False) - altroot = kwargs.get('altroot', None) - mntopts = kwargs.get('mntopts', None) - properties = kwargs.get('properties', None) - dirs = kwargs.get('dir', None) - no_mount = kwargs.get('no_mount', False) - only_destroyed = kwargs.get('only_destroyed', False) - cmd = '{0} import'.format(zpool_cmd) + # NOTE: push pool and filesystem properties + pool_properties = kwargs.get('properties', {}) - # apply extra arguments from kwargs - if mntopts: # set mountpoint - cmd = '{0} -o {1}'.format(cmd, mntopts) - if properties: # create "-o property=value" pairs - optlist = [] - for prop in properties: - if ' ' in properties[prop]: - value = "'{0}'".format(properties[prop]) - else: - value = properties[prop] - optlist.append('-o {0}={1}'.format(prop, value)) - opts = ' '.join(optlist) - cmd = '{0} {1}'.format(cmd, opts) - if dirs: # append -d params - dirs = dirs.split(',') - for d in dirs: - cmd = '{0} -d {1}'.format(cmd, d) - if only_destroyed: # only import destroyed pools (-D) - force = True - cmd = '{0} -D'.format(cmd) - if force: # force import (-f) - cmd = '{0} -f'.format(cmd) - if no_mount: # set no mount (-N) - cmd = '{0} -N'.format(cmd) - if altroot: # set altroot - 
cmd = '{0} -R {1}'.format(cmd, altroot) + # NOTE: set extra config based on kwargs + if kwargs.get('force', False) or kwargs.get('only_destroyed', False): + flags.append('-f') + if kwargs.get('only_destroyed', False): + flags.append('-D') + if kwargs.get('no_mount', False): + flags.append('-N') + if kwargs.get('altroot', False): + opts['-R'] = kwargs.get('altroot') + if kwargs.get('mntopts', False): + # NOTE: -o is used for both mount options and pool properties! + # ```-o nodevices,noexec,nosetuid,ro``` vs ```-o prop=val``` + opts['-o'] = kwargs.get('mntopts') + if kwargs.get('dir', False): + opts['-d'] = kwargs.get('dir').split(',') + if kwargs.get('recovery', False) and __utils__['zfs.has_feature_flags'](): + recovery = kwargs.get('recovery') + if recovery in [True, 'test']: + flags.append('-F') + if recovery == 'test': + flags.append('-n') + if recovery == 'nolog': + flags.append('-m') - cmd = '{cmd} {zpool}{new_name}'.format( - cmd=cmd, - zpool='{0}'.format(zpool) if zpool else '-a', - new_name=' {0}'.format(new_name) if zpool and new_name else '' - ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0 and res['stderr'] != '': - if zpool: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret['error'] = res['stderr'] if 'stderr' in res else res['stdout'] + # NOTE: append the pool name and specifications + if zpool: + target.append(zpool) + target.append(new_name) else: - if zpool: - ret[zpool if not new_name else new_name] = 'imported' if __salt__['zpool.exists'](zpool if not new_name else new_name) else 'not found' - else: - ret = True - return ret + flags.append('-a') + + ## Import storage pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='import', + flags=flags, + opts=opts, + pool_properties=pool_properties, + target=target, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'imported') def online(zpool, *vdevs, **kwargs): ''' - .. 
versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Ensure that the specified devices are online zpool : string @@ -1248,6 +1250,9 @@ def online(zpool, *vdevs, **kwargs): If the device is part of a mirror or raidz then all devices must be expanded before the new space will become available to the pool. + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash @@ -1255,43 +1260,46 @@ def online(zpool, *vdevs, **kwargs): salt '*' zpool.online myzpool /path/to/vdev1 [...] ''' - ret = {} - dlist = [] + ## Configure pool + # default options + flags = [] + target = [] - # Check if the pool_name exists - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret + # set flags and options + if kwargs.get('expand', False): + flags.append('-e') + target.append(zpool) + if vdevs: + target.extend(vdevs) - if not vdevs: - ret[zpool] = 'no devices specified' - return ret + ## Configure pool + # NOTE: initialize the defaults + flags = [] + target = [] - # get expand option - expand = kwargs.get('expand', False) + # NOTE: set extra config based on kwargs + if kwargs.get('expand', False): + flags.append('-e') - devs = ' '.join(vdevs) - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} online {expand}{zpool} {devs}'.format( - zpool_cmd=zpool_cmd, - expand='-e ' if expand else '', - zpool=zpool, - devs=devs + # NOTE: append the pool name and specifications + target.append(zpool) + target.extend(vdevs) + + ## Bring online device + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='online', + flags=flags, + target=target, + ), + python_shell=False, ) - # Bring all specified devices online - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = 'onlined {0}'.format(devs) - return ret + + return __utils__['zfs.parse_command_result'](res, 'onlined') def offline(zpool, 
*vdevs, **kwargs): ''' - .. versionadded:: 2015.5.0 - .. versionchanged:: 2016.3.0 - Ensure that the specified devices are offline .. warning:: @@ -1306,47 +1314,43 @@ def offline(zpool, *vdevs, **kwargs): temporary : boolean enable temporarily offline + .. versionadded:: 2015.5.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.offline myzpool /path/to/vdev1 [...] [temporary=True|False] ''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + flags = [] + target = [] - # Check if the pool_name exists - if not __salt__['zpool.exists'](zpool): - ret[zpool] = 'storage pool does not exist' - return ret + # NOTE: set extra config based on kwargs + if kwargs.get('temporary', False): + flags.append('-t') - if not vdevs or len(vdevs) <= 0: - ret[zpool] = 'no devices specified' - return ret + # NOTE: append the pool name and specifications + target.append(zpool) + target.extend(vdevs) - # note: we don't check if the device exists - # a device can be offlined until a replacement is available - ret[zpool] = {} - devs = ' '.join(vdevs) - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} offline {temp}{zpool} {devs}'.format( - zpool_cmd=zpool_cmd, - temp='-t ' if kwargs.get('temporary', False) else '', - zpool=zpool, - devs=devs + ## Take a device offline + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='offline', + flags=flags, + target=target, + ), + python_shell=False, ) - # Bring all specified devices offline - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res else res['stdout'] - else: - ret[zpool] = 'offlined {0}'.format(devs) - return ret + + return __utils__['zfs.parse_command_result'](res, 'offlined') def labelclear(device, force=False): ''' - .. versionadded:: Oxygen - Removes ZFS label information from the specified device .. 
warning:: @@ -1358,39 +1362,72 @@ def labelclear(device, force=False): force : boolean treat exported or foreign devices as inactive + .. versionadded:: Oxygen + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.labelclear /path/to/dev ''' - ret = {} - - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} labelclear {force}{device}'.format( - zpool_cmd=zpool_cmd, - force='-f ' if force else '', - device=device, + ## clear label for all specified device + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='labelclear', + flags=['-f'] if force else None, + target=device, + ), + python_shell=False, ) - # Bring all specified devices offline - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ## NOTE: skip the "use '-f' hint" - res['stderr'] = res['stderr'].split("\n") - if len(res['stderr']) >= 1: - if res['stderr'][0].startswith("use '-f'"): - del res['stderr'][0] - res['stderr'] = "\n".join(res['stderr']) - ret[device] = res['stderr'] if 'stderr' in res and res['stderr'] else res['stdout'] - else: - ret[device] = 'cleared' - return ret + + return __utils__['zfs.parse_command_result'](res, 'labelcleared') + + +def clear(zpool, device=None): + ''' + Clears device errors in a pool. + + .. warning:: + + The device must not be part of an active pool configuration. + + zpool : string + name of storage pool + device : string + (optional) specific device to clear + + .. versionadded:: Fluorine + + CLI Example: + + .. 
code-block:: bash + + salt '*' zpool.clear mypool + salt '*' zpool.clear mypool /path/to/dev + ''' + ## Configure pool + # NOTE: initialize the defaults + target = [] + + # NOTE: append the pool name and specifications + target.append(zpool) + target.append(device) + + ## clear storage pool errors + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='clear', + target=target, + ), + python_shell=False, + ) + + return __utils__['zfs.parse_command_result'](res, 'cleared') def reguid(zpool): ''' - .. versionadded:: 2016.3.0 - Generates a new unique identifier for the pool .. warning:: @@ -1400,63 +1437,57 @@ def reguid(zpool): zpool : string name of storage pool + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. code-block:: bash salt '*' zpool.reguid myzpool ''' - ret = {} - ret[zpool] = {} - - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} reguid {zpool}'.format( - zpool_cmd=zpool_cmd, - zpool=zpool + ## generate new GUID for pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='reguid', + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] - else: - ret[zpool] = 'reguided' - return ret + + return __utils__['zfs.parse_command_result'](res, 'reguided') def reopen(zpool): ''' - .. versionadded:: 2016.3.0 - Reopen all the vdevs associated with the pool zpool : string name of storage pool + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash salt '*' zpool.reopen myzpool ''' - ret = {} - ret[zpool] = {} - - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} reopen {zpool}'.format( - zpool_cmd=zpool_cmd, - zpool=zpool + ## reopen all devices fro pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='reopen', + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - ret[zpool] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] - else: - ret[zpool] = 'reopened' - return ret + + return __utils__['zfs.parse_command_result'](res, 'reopened') def upgrade(zpool=None, version=None): ''' - .. versionadded:: 2016.3.0 - Enables all supported features on the given pool .. warning:: @@ -1469,38 +1500,42 @@ def upgrade(zpool=None, version=None): version : int version to upgrade to, if unspecified upgrade to the highest possible + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash salt '*' zpool.upgrade myzpool ''' - ret = {} + ## Configure pool + # NOTE: initialize the defaults + flags = [] + opts = {} - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} upgrade {version}{zpool}'.format( - zpool_cmd=zpool_cmd, - version='-V {0} '.format(version) if version else '', - zpool=zpool if zpool else '-a' + # NOTE: set extra config + if version: + opts['-V'] = version + if not zpool: + flags.append('-a') + + ## Upgrade pool + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='upgrade', + flags=flags, + opts=opts, + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] != 0: - if zpool: - ret[zpool] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] - else: - ret['error'] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] - else: - if zpool: - ret[zpool] = 'upgraded to {0}'.format('version {0}'.format(version) if version else 'the highest supported version') - else: - ret = 'all pools upgraded to {0}'.format('version {0}'.format(version) if version else 'the highest supported version') - return ret + + return __utils__['zfs.parse_command_result'](res, 'upgraded') def history(zpool=None, internal=False, verbose=False): ''' - .. versionadded:: 2016.3.0 - Displays the command history of the specified pools or all pools if no pool is specified zpool : string @@ -1510,37 +1545,51 @@ def history(zpool=None, internal=False, verbose=False): verbose : boolean toggle display of the user name, the hostname, and the zone in which the operation was performed + .. versionadded:: 2016.3.0 + .. versionchanged:: Fluorine + CLI Example: .. 
code-block:: bash salt '*' zpool.upgrade myzpool ''' - ret = {} + ret = OrderedDict() - zpool_cmd = _check_zpool() - cmd = '{zpool_cmd} history {verbose}{internal}{zpool}'.format( - zpool_cmd=zpool_cmd, - verbose='-l ' if verbose else '', - internal='-i ' if internal else '', - zpool=zpool if zpool else '' + ## Configure pool + # NOTE: initialize the defaults + flags = [] + + # NOTE: set extra config + if verbose: + flags.append('-l') + if internal: + flags.append('-i') + + ## Lookup history + res = __salt__['cmd.run_all']( + __utils__['zfs.zpool_command']( + command='history', + flags=flags, + target=zpool, + ), + python_shell=False, ) - res = __salt__['cmd.run_all'](cmd, python_shell=False) + if res['retcode'] != 0: - if zpool: - ret[zpool] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] - else: - ret['error'] = res['stderr'] if 'stderr' in res and res['stderr'] != '' else res['stdout'] + return __utils__['zfs.parse_command_result'](res) else: pool = 'unknown' for line in res['stdout'].splitlines(): if line.startswith('History for'): pool = line[13:-2] - ret[pool] = [] + ret[pool] = OrderedDict() else: if line == '': continue - ret[pool].append(line) + log_timestamp = line[0:19] + log_command = line[20:] + ret[pool][log_timestamp] = log_command return ret From f4bcf5fa71bd9ecd9c96159601867c19ce133f34 Mon Sep 17 00:00:00 2001 From: "sjorge@acheron.be" Date: Mon, 5 Feb 2018 09:58:10 +0000 Subject: [PATCH 212/223] Phase 2 - salt.states.zpool tests --- tests/unit/states/test_zpool.py | 450 ++++++++++++++++++++++++++++++++ 1 file changed, 450 insertions(+) create mode 100644 tests/unit/states/test_zpool.py diff --git a/tests/unit/states/test_zpool.py b/tests/unit/states/test_zpool.py new file mode 100644 index 0000000000..79621149d5 --- /dev/null +++ b/tests/unit/states/test_zpool.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +''' +Tests for salt.states.zpool + +:codeauthor: Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new 
+:depends: salt.utils.zfs, salt.modules.zpool +:platform: illumos,freebsd,linux +''' +# Import Python libs +from __future__ import absolute_import, unicode_literals, print_function + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import skipIf, TestCase +from tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON, + MagicMock, + patch) + +# Import test data from salt.utils.zfs test +from tests.unit.utils.test_zfs import utils_patch + +# Import Salt Execution module to test +import salt.utils.zfs +import salt.states.zpool as zpool + +# Import Salt Utils +import salt.loader +from salt.utils.odict import OrderedDict + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ZpoolTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.states.zpool + ''' + def setup_loader_modules(self): + self.opts = opts = salt.config.DEFAULT_MINION_OPTS + utils = salt.loader.utils(opts, whitelist=['zfs']) + zpool_obj = { + zpool: { + '__opts__': opts, + '__grains__': {'kernel': 'SunOS'}, + '__utils__': utils, + } + } + + return zpool_obj + + def test_absent_without_pool(self): + ''' + Test zpool absent without a pool + ''' + ret = {'name': 'myzpool', + 'result': True, + 'comment': 'storage pool myzpool is absent', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.absent('myzpool'), ret) + + def test_absent_destroy_pool(self): + ''' + Test zpool absent destroying pool + ''' + ret = { + 'name': 'myzpool', + 'result': True, + 'comment': 'storage pool myzpool was destroyed', + 'changes': {'myzpool': 'destroyed'}, + } + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('destroyed', True), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.destroy': mock_destroy}), \ 
+ patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.absent('myzpool'), ret) + + def test_absent_exporty_pool(self): + ''' + Test zpool absent exporting pool + ''' + ret = { + 'name': 'myzpool', + 'result': True, + 'comment': 'storage pool myzpool was exported', + 'changes': {'myzpool': 'exported'}, + } + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('exported', True), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.export': mock_destroy}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.absent('myzpool', export=True), ret) + + def test_absent_busy(self): + ''' + Test zpool absent on a busy pool + ''' + ret = { + 'name': 'myzpool', + 'result': False, + 'comment': "\n".join([ + "cannot unmount '/myzpool': Device busy", + "cannot export 'myzpool': pool is busy", + ]), + 'changes': {}, + } + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('exported', False), + ('error', "\n".join([ + "cannot unmount '/myzpool': Device busy", + "cannot export 'myzpool': pool is busy", + ])), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.export': mock_destroy}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.absent('myzpool', export=True), ret) + + def test_present_import_success(self): + ''' + Test zpool present with import allowed and unimported pool + ''' + ret = {'name': 'myzpool', + 'result': True, + 'comment': 'storage pool myzpool was imported', + 'changes': {'myzpool': 'imported'}} + + config = { + 'import': True, + } + + mock_exists = MagicMock(return_value=False) + mock_import = MagicMock(return_value=OrderedDict([ + ('imported', True), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.import': mock_import}), \ + 
patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.present('myzpool', config=config), ret) + + def test_present_import_fail(self): + ''' + Test zpool present with import allowed and no unimported pool or layout + ''' + ret = {'name': 'myzpool', + 'result': False, + 'comment': 'storage pool myzpool was not imported, no (valid) layout specified for creation', + 'changes': {}} + + config = { + 'import': True, + } + + mock_exists = MagicMock(return_value=False) + mock_import = MagicMock(return_value=OrderedDict([ + ('imported', False), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.import': mock_import}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.present('myzpool', config=config), ret) + + def test_present_create_success(self): + ''' + Test zpool present with non existing pool + ''' + ret = {'name': 'myzpool', + 'result': True, + 'comment': 'storage pool myzpool was created', + 'changes': {'myzpool': 'created'}} + + config = { + 'import': False, + } + layout = [ + OrderedDict([('mirror', ['disk0', 'disk1'])]), + OrderedDict([('mirror', ['disk2', 'disk3'])]), + ] + properties = { + 'autoexpand': True, + } + filesystem_properties = { + 'quota': '5G', + } + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([ + ('created', True), + ('vdevs', OrderedDict([ + ('mirror-0', ['/dev/dsk/disk0', '/dev/dsk/disk1']), + ('mirror-1', ['/dev/dsk/disk2', '/dev/dsk/disk3']), + ])), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.create': mock_create}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual( + zpool.present( + 'myzpool', + config=config, + layout=layout, + properties=properties, + filesystem_properties=filesystem_properties, + ), + ret, + ) + + def test_present_create_fail(self): + ''' + Test zpool present with non existing pool (without a 
layout) + ''' + ret = {'name': 'myzpool', + 'result': False, + 'comment': 'storage pool myzpool was not imported, no (valid) layout specified for creation', + 'changes': {}} + + config = { + 'import': False, + } + + mock_exists = MagicMock(return_value=False) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual(zpool.present('myzpool', config=config), ret) + + def test_present_create_passthrough_fail(self): + ''' + Test zpool present with non existing pool (without a layout) + ''' + ret = {'name': 'myzpool', + 'result': False, + 'comment': "\n".join([ + "invalid vdev specification", + "use 'force=True' to override the following errors:", + "/data/salt/vdisk0 is part of exported pool 'zsalt'", + "/data/salt/vdisk1 is part of exported pool 'zsalt'", + ]), + 'changes': {}} + + config = { + 'force': False, + 'import': False, + } + layout = [ + OrderedDict([('mirror', ['disk0', 'disk1'])]), + OrderedDict([('mirror', ['disk2', 'disk3'])]), + ] + properties = { + 'autoexpand': True, + } + filesystem_properties = { + 'quota': '5G', + } + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([ + ('created', False), + ('error', "\n".join([ + "invalid vdev specification", + "use 'force=True' to override the following errors:", + "/data/salt/vdisk0 is part of exported pool 'zsalt'", + "/data/salt/vdisk1 is part of exported pool 'zsalt'", + ])), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.create': mock_create}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual( + zpool.present( + 'myzpool', + config=config, + layout=layout, + properties=properties, + filesystem_properties=filesystem_properties, + ), + ret, + ) + + def test_present_update_success(self): + ''' + Test zpool present with an existing pool that needs an update + ''' + ret = {'name': 'myzpool', + 'result': True, + 
'comment': 'properties updated', + 'changes': {'myzpool': {'autoexpand': False}}} + + config = { + 'import': False, + } + layout = [ + OrderedDict([('mirror', ['disk0', 'disk1'])]), + OrderedDict([('mirror', ['disk2', 'disk3'])]), + ] + properties = { + 'autoexpand': False, + } + + mock_exists = MagicMock(return_value=True) + mock_get = MagicMock(return_value=OrderedDict([ + ('comment', 'salt managed pool'), + ('freeing', 0), + ('listsnapshots', False), + ('leaked', 0), + ('feature@obsolete_counts', 'enabled'), + ('feature@sha512', 'enabled'), + ('delegation', True), + ('dedupditto', '0'), + ('dedupratio', '1.00x'), + ('autoexpand', True), + ('feature@bookmarks', 'enabled'), + ('allocated', 115712), + ('guid', 1591906802560842214), + ('feature@large_blocks', 'enabled'), + ('size', 2113929216), + ('feature@enabled_txg', 'active'), + ('feature@hole_birth', 'active'), + ('capacity', 0), + ('feature@multi_vdev_crash_dump', 'enabled'), + ('feature@extensible_dataset', 'enabled'), + ('cachefile', '-'), + ('bootfs', '-'), + ('autoreplace', True), + ('readonly', False), + ('version', '-'), + ('health', 'ONLINE'), + ('expandsize', '-'), + ('feature@embedded_data', 'active'), + ('feature@lz4_compress', 'active'), + ('feature@async_destroy', 'enabled'), + ('feature@skein', 'enabled'), + ('feature@empty_bpobj', 'enabled'), + ('feature@spacemap_histogram', 'active'), + ('bootsize', '-'), + ('free', 2113813504), + ('feature@device_removal', 'enabled'), + ('failmode', 'wait'), + ('feature@filesystem_limits', 'enabled'), + ('feature@edonr', 'enabled'), + ('altroot', '-'), + ('fragmentation', '0%'), + ])) + mock_set = MagicMock(return_value=OrderedDict([ + ('set', True), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.get': mock_get}), \ + patch.dict(zpool.__salt__, {'zpool.set': mock_set}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual( + zpool.present( + 'myzpool', + config=config, + layout=layout, 
+ properties=properties, + ), + ret, + ) + + def test_present_update_nochange_success(self): + ''' + Test zpool present with non existing pool + ''' + ret = {'name': 'myzpool', + 'result': True, + 'comment': 'no update needed', + 'changes': {}} + + config = { + 'import': False, + } + layout = [ + OrderedDict([('mirror', ['disk0', 'disk1'])]), + OrderedDict([('mirror', ['disk2', 'disk3'])]), + ] + properties = { + 'autoexpand': True, + } + + mock_exists = MagicMock(return_value=True) + mock_get = MagicMock(return_value=OrderedDict([ + ('comment', 'salt managed pool'), + ('freeing', 0), + ('listsnapshots', False), + ('leaked', 0), + ('feature@obsolete_counts', 'enabled'), + ('feature@sha512', 'enabled'), + ('delegation', True), + ('dedupditto', '0'), + ('dedupratio', '1.00x'), + ('autoexpand', True), + ('feature@bookmarks', 'enabled'), + ('allocated', 115712), + ('guid', 1591906802560842214), + ('feature@large_blocks', 'enabled'), + ('size', 2113929216), + ('feature@enabled_txg', 'active'), + ('feature@hole_birth', 'active'), + ('capacity', 0), + ('feature@multi_vdev_crash_dump', 'enabled'), + ('feature@extensible_dataset', 'enabled'), + ('cachefile', '-'), + ('bootfs', '-'), + ('autoreplace', True), + ('readonly', False), + ('version', '-'), + ('health', 'ONLINE'), + ('expandsize', '-'), + ('feature@embedded_data', 'active'), + ('feature@lz4_compress', 'active'), + ('feature@async_destroy', 'enabled'), + ('feature@skein', 'enabled'), + ('feature@empty_bpobj', 'enabled'), + ('feature@spacemap_histogram', 'active'), + ('bootsize', '-'), + ('free', 2113813504), + ('feature@device_removal', 'enabled'), + ('failmode', 'wait'), + ('feature@filesystem_limits', 'enabled'), + ('feature@edonr', 'enabled'), + ('altroot', '-'), + ('fragmentation', '0%'), + ])) + with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \ + patch.dict(zpool.__salt__, {'zpool.get': mock_get}), \ + patch.dict(zpool.__utils__, utils_patch): + self.assertEqual( + zpool.present( + 'myzpool', + 
config=config, + layout=layout, + properties=properties, + ), + ret, + ) From ce5c9792ea602191cfaa6402b4dde186365d9629 Mon Sep 17 00:00:00 2001 From: "sjorge@acheron.be" Date: Mon, 5 Feb 2018 09:42:21 +0000 Subject: [PATCH 213/223] Phase 2 - salt.states.zpool They old way of passing the zpool layout was a pain to work with. A new layout has been introduce and the documentation updated accordingly. The legacy format is still supported. --- salt/states/zpool.py | 346 ++++++++++++++++++++++++++++--------------- 1 file changed, 223 insertions(+), 123 deletions(-) diff --git a/salt/states/zpool.py b/salt/states/zpool.py index 3261432dee..baf7fabc87 100644 --- a/salt/states/zpool.py +++ b/salt/states/zpool.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- ''' -Management zpool +States for managing zpools :maintainer: Jorge Schrauwen :maturity: new -:depends: zpool +:depends: salt.utils.zfs, salt.modules.zpool :platform: smartos, illumos, solaris, freebsd, linux .. versionadded:: 2016.3.0 +.. versionchanged:: Flourine .. 
code-block:: yaml @@ -23,12 +24,12 @@ Management zpool - properties: comment: salty storage pool - layout: - mirror-0: - /dev/disk0 - /dev/disk1 - mirror-1: - /dev/disk2 - /dev/disk3 + - mirror: + - /dev/disk0 + - /dev/disk1 + - mirror: + - /dev/disk2 + - /dev/disk3 partitionpool: zpool.present: @@ -73,7 +74,6 @@ import logging # Import Salt libs from salt.utils.odict import OrderedDict -from salt.modules.zpool import _conform_value log = logging.getLogger(__name__) @@ -85,15 +85,83 @@ def __virtual__(): ''' Provides zpool state ''' - if 'zpool.create' in __salt__: - return True + if __grains__['zfs_support']: + return __virtualname__ else: - return ( - False, - '{0} state module can only be loaded on illumos, Solaris, SmartOS, FreeBSD, Linux, ...'.format( - __virtualname__ - ) - ) + return (False, "The zpool state cannot be loaded: zfs not supported") + + +def _layout_to_vdev(layout, device_dir=None): + ''' + Turn the layout data into usable vdevs spedcification + + We need to support 2 ways of passing the layout: + + .. code:: + layout_new: + - mirror: + - disk0 + - disk1 + - mirror: + - disk2 + - disk3 + + .. 
code: + layout_legacy: + mirror-0: + disk0 + disk1 + mirror-1: + disk2 + disk3 + + ''' + vdevs = [] + + # NOTE: check device_dir exists + if device_dir and not os.path.exists(device_dir): + device_dir = None + + # NOTE: handle list of OrderedDicts (new layout) + if isinstance(layout, list): + # NOTE: parse each vdev as a tiny layout and just append + for vdev in layout: + if isinstance(vdev, OrderedDict): + vdevs.extend(_layout_to_vdev(vdev, device_dir)) + else: + if device_dir and vdev[0] != '/': + vdev = os.path.join(device_dir, vdev) + vdevs.append(vdev) + + # NOTE: handle nested OrderedDict (legacy layout) + # this is also used to parse the nested OrderedDicts + # from the new layout + elif isinstance(layout, OrderedDict): + for vdev in layout: + # NOTE: extract the vdev type and disks in the vdev + vdev_type = vdev.split('-')[0] + vdev_disk = layout[vdev] + + # NOTE: skip appending the dummy type 'disk' + if vdev_type != 'disk': + vdevs.append(vdev_type) + + # NOTE: ensure the disks are a list (legacy layout are not) + if not isinstance(vdev_disk, list): + vdev_disk = vdev_disk.split(' ') + + # NOTE: also append the actualy disks behind the type + # also prepend device_dir to disks if required + for disk in vdev_disk: + if device_dir and disk[0] != '/': + disk = os.path.join(device_dir, disk) + vdevs.append(disk) + + # NOTE: we got invalid data for layout + else: + vdevs = None + + return vdevs def present(name, properties=None, filesystem_properties=None, layout=None, config=None): @@ -115,13 +183,34 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf The following configuration properties can be toggled in the config parameter. 
- import (true) - try to import the pool before creating it if absent - - import_dirs (None) - specify additional locations to scan for devices on import - - device_dir (None, SunOS=/dev/rdsk) - specify device directory to use if not absolute path + - import_dirs (None) - specify additional locations to scan for devices on import (comma-seperated) + - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for none absolute device paths - force (false) - try to force the import or creation .. note:: - Because ID's inside the layout dict must be unique they need to have a suffix. + It is no longer needed to give a unique name to each top-level vdev, the old + layout format is still supported but no longer recommended. + + .. code-block:: yaml + + - mirror: + - /tmp/vdisk3 + - /tmp/vdisk2 + - mirror: + - /tmp/vdisk0 + - /tmp/vdisk1 + + The above yaml will always result in the following zpool create: + + .. code-block:: bash + + zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 + + .. warning:: + + The legacy format is also still supported but not recommended, + because ID's inside the layout dict must be unique they need to have a suffix. .. code-block:: yaml @@ -132,22 +221,16 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf /tmp/vdisk0 /tmp/vdisk1 - The above yaml will always result in the following zpool create: - - .. code-block:: bash - - zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 - .. warning:: Pay attention to the order of your dict! .. 
code-block:: yaml - mirror-0: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2: + - mirror: + - /tmp/vdisk0 + - /tmp/vdisk1 + - /tmp/vdisk2 The above will result in the following zpool create: @@ -163,60 +246,123 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf 'result': None, 'comment': ''} - # config defaults - state_config = config if config else {} - config = { + ## config defaults + default_config = { 'import': True, 'import_dirs': None, 'device_dir': None, 'force': False } if __grains__['kernel'] == 'SunOS': - config['device_dir'] = '/dev/rdsk' + default_config['device_dir'] = '/dev/dsk' elif __grains__['kernel'] == 'Linux': - config['device_dir'] = '/dev' - config.update(state_config) - log.debug('zpool.present::%s::config - %s', name, config) + default_config['device_dir'] = '/dev' - # parse layout - if layout: - for root_dev in layout: - if root_dev.count('-') != 1: - continue - layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ') + ## merge state config + if config: + default_config.update(config) + config = default_config - log.debug('zpool.present::%s::layout - %s', name, layout) + ## ensure properties are zfs values + if properties: + properties = __utils__['zfs.from_auto_dict'](properties) + if filesystem_properties: + filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties) - # ensure properties conform to the zfs parsable format - for prop in properties: - properties[prop] = _conform_value(properties[prop], True) + ## parse layout + vdevs = _layout_to_vdev(layout, config['device_dir']) + if vdevs: + vdevs.insert(0, name) - # ensure the pool is present + ## log configuration + log.debug('zpool.present::%s::config - %s', + name, config) + log.debug('zpool.present::%s::vdevs - %s', + name, vdevs) + log.debug('zpool.present::%s::properties - %s', + name, properties) + log.debug('zpool.present::%s::filesystem_properties - %s', + name, 
filesystem_properties) + + ## ensure the pool is present ret['result'] = False - if __salt__['zpool.exists'](name): # update + + ## NOTE: don't do anything because this is a test + if __opts__['test']: + ret['result'] = True + if __salt__['zpool.exists'](name): + ret['changes'][name] = 'uptodate' + else: + ret['changes'][name] = 'imported' if config['import'] else 'created' + ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) + + ## NOTE: import or create the pool (at least try to anyway) + elif not __salt__['zpool.exists'](name): + ## NOTE: import pool + if config['import']: + mod_res = __salt__['zpool.import']( + name, + force=config['force'], + dir=config['import_dirs'], + ) + + ret['result'] = mod_res['imported'] + if ret['result']: + ret['changes'][name] = 'imported' + ret['comment'] = 'storage pool {0} was imported'.format(name) + + ## NOTE: create pool + if not ret['result'] and vdevs: + log.debug('zpool.present::%s::creating', name) + + ## NOTE: execute zpool.create + mod_res = __salt__['zpool.create']( + *vdevs, + force=config['force'], + properties=properties, + filesystem_properties=filesystem_properties + ) + + ret['result'] = mod_res['created'] + if ret['result']: + ret['changes'][name] = 'created' + ret['comment'] = 'storage pool {0} was created'.format(name) + elif 'error' in mod_res: + ret['comment'] = mod_res['error'] + else: + ret['comment'] = 'could not create storage pool {0}'.format(name) + + ## NOTE: give up, we cannot import the pool and we do not have a layout to create it + if not ret['result'] and not vdevs: + ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name) + + ## NOTE: update pool + else: ret['result'] = True - # retrieve current properties - properties_current = __salt__['zpool.get'](name)[name] + ## NOTE: fetch current pool properties + properties_current = __salt__['zpool.get'](name, parsable=True) - # figure out if updates needed + ## NOTE: build 
list of properties to update properties_update = [] for prop in properties: + ## NOTE: skip unexisting properties if prop not in properties_current: + log.warning('zpool.present::%s::update - unknown property: %s', name, prop) continue + ## NOTE: compare current and wanted value if properties_current[prop] != properties[prop]: properties_update.append(prop) - # update properties + ## NOTE: update pool properties for prop in properties_update: res = __salt__['zpool.set'](name, prop, properties[prop]) - # check return - if name in res and prop in res[name] and res[name][prop] == properties[prop]: + if res['set']: if name not in ret['changes']: ret['changes'][name] = {} - ret['changes'][name].update(res[name]) + ret['changes'][name][prop] = properties[prop] else: ret['result'] = False if ret['comment'] == '': @@ -226,57 +372,6 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf if ret['result']: ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed' - else: # import or create - if config['import']: # try import - log.debug('zpool.present::%s::importing', name) - ret['result'] = __salt__['zpool.import']( - name, - force=config['force'], - dir=config['import_dirs'] - ) - ret['result'] = ret['result'].get(name) == 'imported' - if ret['result']: - ret['changes'][name] = 'imported' - ret['comment'] = 'storage pool {0} was imported'.format(name) - - if not ret['result']: # create - if not layout: - ret['comment'] = 'storage pool {0} was not imported, no layout specified for creation'.format(name) - else: - log.debug('zpool.present::%s::creating', name) - if __opts__['test']: - ret['result'] = True - else: - # construct *vdev parameter for zpool.create - params = [] - params.append(name) - for root_dev in layout: - if root_dev.count('-') == 1: # special device - # NOTE: accomidate non existing 'disk' vdev - if root_dev.split('-')[0] != 'disk': - params.append(root_dev.split('-')[0]) # add the type by stripping 
the ID - for sub_dev in layout[root_dev]: # add all sub devices - if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']): - sub_dev = os.path.join(config['device_dir'], sub_dev) - params.append(sub_dev) - else: # normal device - if '/' not in root_dev and config['device_dir'] and os.path.exists(config['device_dir']): - root_dev = os.path.join(config['device_dir'], root_dev) - params.append(root_dev) - - # execute zpool.create - ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties) - if ret['result'].get(name).startswith('created'): - ret['result'] = True - else: - if ret['result'].get(name): - ret['comment'] = ret['result'].get(name) - ret['result'] = False - - if ret['result']: - ret['changes'][name] = 'created' - ret['comment'] = 'storage pool {0} was created'.format(name) - return ret @@ -297,31 +392,36 @@ def absent(name, export=False, force=False): 'result': None, 'comment': ''} - # config defaults - log.debug('zpool.absent::%s::config::force = %s', name, force) - log.debug('zpool.absent::%s::config::export = %s', name, export) + ## log configuration + log.debug('zpool.absent::%s::config::force = %s', + name, force) + log.debug('zpool.absent::%s::config::export = %s', + name, export) - # ensure the pool is absent + ## ensure the pool is absent if __salt__['zpool.exists'](name): # looks like we need to do some work + mod_res = {} ret['result'] = False - if export: # try to export the zpool - if __opts__['test']: - ret['result'] = True - else: - ret['result'] = __salt__['zpool.export'](name, force=force) - ret['result'] = ret['result'].get(name) == 'exported' + # NOTE: handle test + if __opts__['test']: + ret['result'] = True - else: # try to destroy the zpool - if __opts__['test']: - ret['result'] = True - else: - ret['result'] = __salt__['zpool.destroy'](name, force=force) - ret['result'] = ret['result'].get(name) == 'destroyed' + # NOTE: try 
to export the pool + elif export: + mod_res = __salt__['zpool.export'](name, force=force) + ret['result'] = mod_res['exported'] + + # NOTE: try to destroy the pool + else: + mod_res = __salt__['zpool.destroy'](name, force=force) + ret['result'] = mod_res['destroyed'] if ret['result']: # update the changes and comment ret['changes'][name] = 'exported' if export else 'destroyed' ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) + elif 'error' in mod_res: + ret['comment'] = mod_res['error'] else: # we are looking good ret['result'] = True From 8517e876e4d8b40b9ca6c21795054474a3b763b4 Mon Sep 17 00:00:00 2001 From: Jorge Schrauwen Date: Thu, 15 Feb 2018 15:59:51 +0000 Subject: [PATCH 214/223] Phase 2 - salt.states.zfs tests --- tests/unit/states/test_zfs.py | 696 ++++++++++++++++++++++++++++++++++ 1 file changed, 696 insertions(+) create mode 100644 tests/unit/states/test_zfs.py diff --git a/tests/unit/states/test_zfs.py b/tests/unit/states/test_zfs.py new file mode 100644 index 0000000000..421db9225e --- /dev/null +++ b/tests/unit/states/test_zfs.py @@ -0,0 +1,696 @@ +# -*- coding: utf-8 -*- +''' +Tests for salt.states.zfs + +:codeauthor: Jorge Schrauwen +:maintainer: Jorge Schrauwen +:maturity: new +:depends: salt.utils.zfs, salt.modules.zfs +:platform: illumos,freebsd,linux +''' +# Import Python libs +from __future__ import absolute_import, unicode_literals, print_function + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import skipIf, TestCase +from tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON, + MagicMock, + patch) + +# Import test data from salt.utils.zfs test +from tests.unit.utils.test_zfs import utils_patch + +# Import Salt Execution module to test +import salt.utils.zfs +import salt.states.zfs as zfs + +# Import Salt Utils +import salt.loader +from salt.utils.odict import OrderedDict + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ZfsTestCase(TestCase, 
LoaderModuleMockMixin): + ''' + Test cases for salt.states.zfs + ''' + def setup_loader_modules(self): + self.opts = opts = salt.config.DEFAULT_MINION_OPTS + utils = salt.loader.utils(opts, whitelist=['zfs']) + zfs_obj = { + zfs: { + '__opts__': opts, + '__grains__': {'kernel': 'SunOS'}, + '__utils__': utils, + } + } + + return zfs_obj + + def test_filesystem_absent_nofs(self): + ''' + Test if filesystem is absent (non existing filesystem) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'filesystem myzpool/filesystem is absent', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem')) + + def test_filesystem_absent_removed(self): + ''' + Test if filesystem is absent + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'filesystem myzpool/filesystem was destroyed', + 'changes': {'myzpool/filesystem': 'destroyed'}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem')) + + def test_filesystem_absent_fail(self): + ''' + Test if filesystem is absent (with snapshots) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': False, + 'comment': "\n".join([ + "cannot destroy 'myzpool/filesystem': filesystem has children", + "use 'recursive=True' to destroy the following datasets:", + "myzpool/filesystem@snap", + ]), + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('destroyed', False), + ('error', "\n".join([ + "cannot destroy 'myzpool/filesystem': filesystem has children", + 
"use 'recursive=True' to destroy the following datasets:", + "myzpool/filesystem@snap", + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem')) + + def test_volume_absent_novol(self): + ''' + Test if volume is absent (non existing volume) + ''' + ret = {'name': 'myzpool/volume', + 'result': True, + 'comment': 'volume myzpool/volume is absent', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_absent('myzpool/volume')) + + def test_volume_absent_removed(self): + ''' + Test if volume is absent + ''' + ret = {'name': 'myzpool/volume', + 'result': True, + 'comment': 'volume myzpool/volume was destroyed', + 'changes': {'myzpool/volume': 'destroyed'}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_absent('myzpool/volume')) + + def test_volume_absent_fail(self): + ''' + Test if volume is absent (with snapshots) + ''' + ret = {'name': 'myzpool/volume', + 'result': False, + 'comment': "\n".join([ + "cannot destroy 'myzpool/volume': volume has children", + "use 'recursive=True' to destroy the following datasets:", + "myzpool/volume@snap", + ]), + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('destroyed', False), + ('error', "\n".join([ + "cannot destroy 'myzpool/volume': volume has children", + "use 'recursive=True' to destroy the following datasets:", + "myzpool/volume@snap", + ])), + 
])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_absent('myzpool/volume')) + + def test_snapshot_absent_nosnap(self): + ''' + Test if snapshot is absent (non existing snapshot) + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': True, + 'comment': 'snapshot myzpool/filesystem@snap is absent', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.snapshot_absent('myzpool/filesystem@snap')) + + def test_snapshot_absent_removed(self): + ''' + Test if snapshot is absent + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': True, + 'comment': 'snapshot myzpool/filesystem@snap was destroyed', + 'changes': {'myzpool/filesystem@snap': 'destroyed'}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.snapshot_absent('myzpool/filesystem@snap')) + + def test_snapshot_absent_fail(self): + ''' + Test if snapshot is absent (with snapshots) + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': False, + 'comment': 'cannot destroy snapshot myzpool/filesystem@snap: dataset is busy', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([ + ('destroyed', False), + ('error', 'cannot destroy snapshot myzpool/filesystem@snap: dataset is busy'), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, 
zfs.snapshot_absent('myzpool/filesystem@snap')) + + def test_bookmark_absent_nobook(self): + ''' + Test if bookmark is absent (non existing bookmark) + ''' + ret = {'name': 'myzpool/filesystem#book', + 'result': True, + 'comment': 'bookmark myzpool/filesystem#book is absent', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.bookmark_absent('myzpool/filesystem#book')) + + def test_bookmark_absent_removed(self): + ''' + Test if bookmark is absent + ''' + ret = {'name': 'myzpool/filesystem#book', + 'result': True, + 'comment': 'bookmark myzpool/filesystem#book was destroyed', + 'changes': {'myzpool/filesystem#book': 'destroyed'}} + + mock_exists = MagicMock(return_value=True) + mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.bookmark_absent('myzpool/filesystem#book')) + + def test_hold_absent_nohold(self): + ''' + Test if hold is absent (non existing hold) + ''' + ret = {'name': 'myhold', + 'result': True, + 'comment': 'hold myhold is absent', + 'changes': {}} + + mock_holds = MagicMock(return_value=OrderedDict([])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap')) + + def test_hold_absent_removed(self): + ''' + Test if hold is absent + ''' + ret = {'name': 'myhold', + 'result': True, + 'comment': 'hold myhold released', + 'changes': OrderedDict([ + ('myzpool/filesystem@snap', OrderedDict([ + ('myhold', 'released'), + ])), + ])} + + mock_holds = MagicMock(return_value=OrderedDict([('myhold', 'Thu Feb 15 16:24 2018')])) + mock_release = 
MagicMock(return_value=OrderedDict([('released', True)])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__salt__, {'zfs.release': mock_release}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap')) + + def test_hold_absent_fail(self): + ''' + Test if hold is absent (non existing snapshot) + ''' + ret = {'name': 'myhold', + 'result': False, + 'comment': "cannot open 'myzpool/filesystem@snap': dataset does not exist", + 'changes': {}} + + mock_holds = MagicMock(return_value=OrderedDict([ + ('error', "cannot open 'myzpool/filesystem@snap': dataset does not exist"), + ])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap')) + + def test_hold_present(self): + ''' + Test if hold is present (hold already present) + ''' + ret = {'name': 'myhold', + 'result': True, + 'comment': 'hold myhold is present for myzpool/filesystem@snap', + 'changes': {}} + + mock_holds = MagicMock(return_value=OrderedDict([('myhold', 'Thu Feb 15 16:24 2018')])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap')) + + def test_hold_present_new(self): + ''' + Test if hold is present (new) + ''' + ret = {'name': 'myhold', + 'result': True, + 'comment': 'hold myhold added to myzpool/filesystem@snap', + 'changes': {'myzpool/filesystem@snap': {'myhold': 'held'}}} + + mock_holds = MagicMock(return_value=OrderedDict([])) + mock_hold = MagicMock(return_value=OrderedDict([('held', True)])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__salt__, {'zfs.hold': mock_hold}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap')) + + def 
test_hold_present_fail(self): + ''' + Test if hold is present (using non existing snapshot) + ''' + ret = {'name': 'myhold', + 'result': False, + 'comment': "cannot hold snapshot 'zsalt/filesystem@snap': dataset does not exist", + 'changes': {}} + + mock_holds = MagicMock(return_value=OrderedDict([])) + mock_hold = MagicMock(return_value=OrderedDict([ + ('held', False), + ('error', "cannot hold snapshot 'zsalt/filesystem@snap': dataset does not exist"), + ])) + with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \ + patch.dict(zfs.__salt__, {'zfs.hold': mock_hold}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap')) + + def test_filesystem_present(self): + ''' + Test if filesystem is present (existing filesystem) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'filesystem myzpool/filesystem is uptodate', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_get = MagicMock(return_value=OrderedDict([ + ('myzpool/filesystem', OrderedDict([ + ('type', OrderedDict([ + ('value', 'filesystem'), + ])), + ('compression', OrderedDict([ + ('value', False), + ])), + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem')) + + def test_filesystem_present_new(self): + ''' + Test if filesystem is present (non existing filesystem) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'filesystem myzpool/filesystem was created', + 'changes': {'myzpool/filesystem': u'created'}} + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([('created', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \ + patch.dict(zfs.__utils__, utils_patch): + 
self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem')) + + def test_filesystem_present_update(self): + ''' + Test if filesystem is present (non existing filesystem) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'filesystem myzpool/filesystem was updated', + 'changes': {'myzpool/filesystem': {'compression': 'lz4'}}} + + mock_exists = MagicMock(return_value=True) + mock_set = MagicMock(return_value=OrderedDict([('set', True)])) + mock_get = MagicMock(return_value=OrderedDict([ + ('myzpool/filesystem', OrderedDict([ + ('type', OrderedDict([ + ('value', 'filesystem'), + ])), + ('compression', OrderedDict([ + ('value', False), + ])), + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__salt__, {'zfs.set': mock_set}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_present( + name='myzpool/filesystem', + properties={'compression': 'lz4'}, + )) + + def test_filesystem_present_fail(self): + ''' + Test if filesystem is present (non existing pool) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': False, + 'comment': "cannot create 'myzpool/filesystem': no such pool 'myzpool'", + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool/filesystem': no such pool 'myzpool'"), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem')) + + def test_volume_present(self): + ''' + Test if volume is present (existing volume) + ''' + ret = {'name': 'myzpool/volume', + 'result': True, + 'comment': 'volume myzpool/volume is uptodate', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_get = 
MagicMock(return_value=OrderedDict([ + ('myzpool/volume', OrderedDict([ + ('type', OrderedDict([ + ('value', 'volume'), + ])), + ('compression', OrderedDict([ + ('value', False), + ])), + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G')) + + def test_volume_present_new(self): + ''' + Test if volume is present (non existing volume) + ''' + ret = {'name': 'myzpool/volume', + 'result': True, + 'comment': 'volume myzpool/volume was created', + 'changes': {'myzpool/volume': u'created'}} + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([('created', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G')) + + def test_volume_present_update(self): + ''' + Test if volume is present (non existing volume) + ''' + ret = {'name': 'myzpool/volume', + 'result': True, + 'comment': 'volume myzpool/volume was updated', + 'changes': {'myzpool/volume': {'compression': 'lz4'}}} + + mock_exists = MagicMock(return_value=True) + mock_set = MagicMock(return_value=OrderedDict([('set', True)])) + mock_get = MagicMock(return_value=OrderedDict([ + ('myzpool/volume', OrderedDict([ + ('type', OrderedDict([ + ('value', 'volume'), + ])), + ('compression', OrderedDict([ + ('value', False), + ])), + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__salt__, {'zfs.set': mock_set}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_present( + name='myzpool/volume', + volume_size='1G', + properties={'compression': 'lz4'}, + )) + + def 
test_volume_present_fail(self): + ''' + Test if volume is present (non existing pool) + ''' + ret = {'name': 'myzpool/volume', + 'result': False, + 'comment': "cannot create 'myzpool/volume': no such pool 'myzpool'", + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + mock_create = MagicMock(return_value=OrderedDict([ + ('created', False), + ('error', "cannot create 'myzpool/volume': no such pool 'myzpool'"), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G')) + + def test_bookmark_present(self): + ''' + Test if bookmark is present (bookmark already present) + ''' + ret = {'name': 'myzpool/filesystem#mybookmark', + 'result': True, + 'comment': 'bookmark is present', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap')) + + def test_bookmark_present_new(self): + ''' + Test if bookmark is present (new) + ''' + ret = {'name': 'myzpool/filesystem#mybookmark', + 'result': True, + 'comment': 'myzpool/filesystem@snap bookmarked as myzpool/filesystem#mybookmark', + 'changes': {'myzpool/filesystem#mybookmark': 'myzpool/filesystem@snap'}} + + mock_exists = MagicMock(return_value=False) + mock_bookmark = MagicMock(return_value=OrderedDict([('bookmarked', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.bookmark': mock_bookmark}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap')) + + def test_bookmark_present_fail(self): + ''' + Test if bookmark is present (using non existing snapshot) + ''' + ret = {'name': 
'myzpool/filesystem#mybookmark', + 'result': False, + 'comment': "cannot bookmark snapshot 'zsalt/filesystem@snap': dataset does not exist", + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + mock_bookmark = MagicMock(return_value=OrderedDict([ + ('bookmarked', False), + ('error', "cannot bookmark snapshot 'zsalt/filesystem@snap': dataset does not exist"), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.bookmark': mock_bookmark}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap')) + + def test_snapshot_present(self): + ''' + Test if snapshot is present (snapshot already present) + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': True, + 'comment': 'snapshot is present', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap')) + + def test_snapshot_present_new(self): + ''' + Test if snapshot is present (new) + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': True, + 'comment': 'snapshot myzpool/filesystem@snap was created', + 'changes': {u'myzpool/filesystem@snap': u'snapshotted'}} + + mock_exists = MagicMock(return_value=False) + mock_snapshot = MagicMock(return_value=OrderedDict([('snapshotted', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.snapshot': mock_snapshot}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap')) + + def test_snapshot_present_fail(self): + ''' + Test if snapshot is present (using non existing snapshot) + ''' + ret = {'name': 'myzpool/filesystem@snap', + 'result': False, + 'comment': "cannot open 'myzpool/filesystem': dataset does not exist", + 'changes': {}} + + 
mock_exists = MagicMock(return_value=False) + mock_snapshot = MagicMock(return_value=OrderedDict([ + ('snapshotted', False), + ('error', "cannot open 'myzpool/filesystem': dataset does not exist"), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.snapshot': mock_snapshot}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap')) + + def test_propmoted(self): + ''' + Test promotion of clone (already promoted) + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'myzpool/filesystem already promoted', + 'changes': {}} + + mock_exists = MagicMock(return_value=True) + mock_get = MagicMock(return_value=OrderedDict([ + ('myzpool/filesystem', OrderedDict([ + ('origin', OrderedDict([ + ('value', '-'), + ])), + ])), + ])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.promoted('myzpool/filesystem')) + + def test_propmoted_clone(self): + ''' + Test promotion of clone + ''' + ret = {'name': 'myzpool/filesystem', + 'result': True, + 'comment': 'myzpool/filesystem promoted', + 'changes': {'myzpool/filesystem': 'promoted'}} + + mock_exists = MagicMock(return_value=True) + mock_get = MagicMock(return_value=OrderedDict([ + ('myzpool/filesystem', OrderedDict([ + ('origin', OrderedDict([ + ('value', 'myzool/filesystem_source@clean'), + ])), + ])), + ])) + mock_promote = MagicMock(return_value=OrderedDict([('promoted', True)])) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \ + patch.dict(zfs.__salt__, {'zfs.promote': mock_promote}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.promoted('myzpool/filesystem')) + + def test_propmoted_fail(self): + ''' + Test promotion of clone (unknown dataset) + ''' + ret = {'name': 
'myzpool/filesystem', + 'result': False, + 'comment': 'dataset myzpool/filesystem does not exist', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.promoted('myzpool/filesystem')) + + def test_scheduled_snapshot_fail(self): + ''' + Test scheduled_snapshot of unknown dataset + ''' + ret = {'name': 'myzpool/filesystem', + 'result': False, + 'comment': 'dataset myzpool/filesystem does not exist', + 'changes': {}} + + mock_exists = MagicMock(return_value=False) + with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \ + patch.dict(zfs.__utils__, utils_patch): + self.assertEqual(ret, zfs.scheduled_snapshot('myzpool/filesystem', 'shadow', schedule={'hour': 6})) From ba45fcca31d34972e14295b0b282655a47d2d865 Mon Sep 17 00:00:00 2001 From: Jorge Schrauwen Date: Thu, 8 Feb 2018 15:36:59 +0100 Subject: [PATCH 215/223] Phase 2 - salt.states.zfs --- salt/states/zfs.py | 1179 ++++++++++++++++++++++++------------------ salt/states/zpool.py | 78 +-- salt/utils/zfs.py | 4 +- 3 files changed, 712 insertions(+), 549 deletions(-) diff --git a/salt/states/zfs.py b/salt/states/zfs.py index 8e75ebe963..d03365c3ed 100644 --- a/salt/states/zfs.py +++ b/salt/states/zfs.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- ''' -Management zfs datasets +States for managing zfs datasets :maintainer: Jorge Schrauwen :maturity: new -:depends: zfs +:depends: salt.utils.zfs, salt.modules.zfs :platform: smartos, illumos, solaris, freebsd, linux .. versionadded:: 2016.3.0 +.. versionchanged:: Flourine .. 
code-block:: yaml @@ -44,30 +45,31 @@ from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging -from time import strftime, strptime, localtime +from datetime import datetime # Import Salt libs -from salt.modules.zfs import _conform_value +from salt.utils.odict import OrderedDict log = logging.getLogger(__name__) # Define the state's virtual name __virtualname__ = 'zfs' +# Compare modifiers for zfs.schedule_snapshot +comp_hour = {'minute': 0} +comp_day = {'minute': 0, 'hour': 0} +comp_month = {'minute': 0, 'hour': 0, 'day': 1} +comp_year = {'minute': 0, 'hour': 0, 'day': 1, 'month': 1} + def __virtual__(): ''' Provides zfs state ''' - if 'zfs.create' in __salt__: - return True + if __grains__['zfs_support']: + return __virtualname__ else: - return ( - False, - '{0} state module can only be loaded on illumos, Solaris, SmartOS, FreeBSD, Linux, ...'.format( - __virtualname__ - ) - ) + return (False, "The zfs state cannot be loaded: zfs not supported") def _absent(name, dataset_type, force=False, recursive=False): @@ -83,59 +85,48 @@ def _absent(name, dataset_type, force=False, recursive=False): recursive : boolean also destroy all the child datasets + .. 
versionchanged:: Flourine ''' - dataset_type = dataset_type.lower() ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} + ## log configuration + dataset_type = dataset_type.lower() log.debug('zfs.%s_absent::%s::config::force = %s', dataset_type, name, force) log.debug('zfs.%s_absent::%s::config::recursive = %s', dataset_type, name, recursive) - # check name and type - if dataset_type not in ['filesystem', 'volume', 'snapshot', 'bookmark']: - ret['result'] = False - ret['comment'] = 'unknown dateset type: {0}'.format(dataset_type) + ## destroy dataset if needed + if __salt__['zfs.exists'](name, **{'type': dataset_type}): + ## NOTE: dataset found with the name and dataset_type + if not __opts__['test']: + mod_res = __salt__['zfs.destroy'](name, **{'force': force, 'recursive': recursive}) + else: + mod_res = OrderedDict([('destroyed', True)]) - if ret['result'] and dataset_type in ['snapshot'] and '@' not in name: - ret['result'] = False - ret['comment'] = 'invalid snapshot name: {0}'.format(name) - - if ret['result'] and dataset_type in ['bookmark'] and '#' not in name: - ret['result'] = False - ret['comment'] = 'invalid bookmark name: {0}'.format(name) - - if ret['result'] and dataset_type in ['filesystem', 'volume']: - if '@' in name or '#' in name: - ret['result'] = False - ret['comment'] = 'invalid filesystem or volume name: {0}'.format(name) - - # check if dataset exists - if ret['result']: - if __salt__['zfs.exists'](name, **{'type': dataset_type}): # we need to destroy it - result = {name: 'destroyed'} - if not __opts__['test']: - result = __salt__['zfs.destroy'](name, **{'force': force, 'recursive': recursive}) - - ret['result'] = name in result and result[name] == 'destroyed' - ret['changes'] = result if ret['result'] else {} - if ret['result']: - ret['comment'] = '{0} {1} was destroyed'.format( - dataset_type, - name - ) - else: - ret['comment'] = 'failed to destroy {0}'.format(name) - if name in result: - ret['comment'] = result[name] - 
else: # dataset with type and name does not exist! (all good) - ret['comment'] = '{0} {1} is not present'.format( + ret['result'] = mod_res['destroyed'] + if ret['result']: + ret['changes'][name] = 'destroyed' + ret['comment'] = '{0} {1} was destroyed'.format( dataset_type, - name + name, ) + else: + ret['comment'] = 'failed to destroy {0} {1}'.format( + dataset_type, + name, + ) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + else: + ## NOTE: no dataset found with name of the dataset_type + ret['comment'] = '{0} {1} is absent'.format( + dataset_type, + name + ) return ret @@ -151,13 +142,22 @@ def filesystem_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) + .. versionchanged:: Flourine + .. warning:: If a volume with ``name`` exists, this state will succeed without destroying the volume specified by ``name``. This module is dataset type sensitive. ''' - return _absent(name, 'filesystem', force, recursive) + if not __utils__['zfs.is_dataset'](name): + ret = {'name': name, + 'changes': {}, + 'result': False, + 'comment': 'invalid dataset name: {0}'.format(name)} + else: + ret = _absent(name, 'filesystem', force, recursive) + return ret def volume_absent(name, force=False, recursive=False): @@ -171,13 +171,22 @@ def volume_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) + .. versionchanged:: Flourine + .. warning:: If a filesystem with ``name`` exists, this state will succeed without destroying the filesystem specified by ``name``. This module is dataset type sensitive. 
''' - return _absent(name, 'volume', force, recursive) + if not __utils__['zfs.is_dataset'](name): + ret = {'name': name, + 'changes': {}, + 'result': False, + 'comment': 'invalid dataset name: {0}'.format(name)} + else: + ret = _absent(name, 'volume', force, recursive) + return ret def snapshot_absent(name, force=False, recursive=False): @@ -190,8 +199,17 @@ def snapshot_absent(name, force=False, recursive=False): try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) + + .. versionchanged:: Flourine ''' - return _absent(name, 'snapshot', force, recursive) + if not __utils__['zfs.is_snapshot'](name): + ret = {'name': name, + 'changes': {}, + 'result': False, + 'comment': 'invalid snapshot name: {0}'.format(name)} + else: + ret = _absent(name, 'snapshot', force, recursive) + return ret def bookmark_absent(name, force=False, recursive=False): @@ -204,8 +222,17 @@ def bookmark_absent(name, force=False, recursive=False): try harder to destroy the dataset (zfs destroy -f) recursive : boolean also destroy all the child datasets (zfs destroy -r) + + .. versionchanged:: Flourine ''' - return _absent(name, 'bookmark', force, recursive) + if not __utils__['zfs.is_bookmark'](name): + ret = {'name': name, + 'changes': {}, + 'result': False, + 'comment': 'invalid bookmark name: {0}'.format(name)} + else: + ret = _absent(name, 'bookmark', force, recursive) + return ret def hold_absent(name, snapshot, recursive=False): @@ -213,56 +240,68 @@ def hold_absent(name, snapshot, recursive=False): ensure hold is absent on the system name : string - name of holdt + name of hold snapshot : string name of snapshot recursive : boolean recursively releases a hold with the given tag on the snapshots of all descendent file systems. + + .. 
versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} + ## log configuration log.debug('zfs.hold_absent::%s::config::snapshot = %s', name, snapshot) log.debug('zfs.hold_absent::%s::config::recursive = %s', name, recursive) - # check name and type - if '@' not in snapshot: + ## check we have a snapshot/tag name + if not __utils__['zfs.is_snapshot'](snapshot): ret['result'] = False ret['comment'] = 'invalid snapshot name: {0}'.format(snapshot) return ret - if '@' in name or '#' in name: + if __utils__['zfs.is_snapshot'](name) or \ + __utils__['zfs.is_bookmark'](name) or \ + name == 'error': ret['result'] = False ret['comment'] = 'invalid tag name: {0}'.format(name) return ret - result = __salt__['zfs.holds'](snapshot) - if snapshot not in result: - ret['result'] = False - ret['comment'] = '{0} is probably not a snapshot'.format(snapshot) - else: - if snapshot in result[snapshot]: - ret['result'] = False - ret['comment'] = result[snapshot] - elif result[snapshot] == 'no holds' or name not in result[snapshot]: - ret['comment'] = 'hold {0} not present'.format(name) + ## release hold if required + holds = __salt__['zfs.holds'](snapshot) + if name in holds: + ## NOTE: hold found for snapshot, release it + if not __opts__['test']: + mod_res = __salt__['zfs.release'](name, snapshot, **{'recursive': recursive}) else: - result = {snapshot: {name: 'released'}} - if not __opts__['test']: - result = __salt__['zfs.release'](name, snapshot, **{'recursive': recursive}) + mod_res = OrderedDict([('released', True)]) - ret['result'] = snapshot in result and name in result[snapshot] - if ret['result']: - ret['changes'] = result[snapshot] - ret['comment'] = 'hold {0} released'.format(name) - else: - ret['comment'] = 'failed to release {0}'.format(name) - if snapshot in result: - ret['comment'] = result[snapshot] + ret['result'] = mod_res['released'] + if ret['result']: + ret['changes'] = {snapshot: {name: 'released'}} + ret['comment'] = 'hold 
{0} released'.format( + name, + ) + else: + ret['comment'] = 'failed to release hold {0}'.format( + name, + ) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + elif 'error' in holds: + ## NOTE: we have an error + ret['result'] = False + ret['comment'] = holds['error'] + else: + ## NOTE: no hold found with name for snapshot + ret['comment'] = 'hold {0} is absent'.format( + name, + ) return ret @@ -277,172 +316,76 @@ def hold_present(name, snapshot, recursive=False): name of snapshot recursive : boolean recursively add hold with the given tag on the snapshots of all descendent file systems. + + .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} + ## log configuration log.debug('zfs.hold_present::%s::config::snapshot = %s', name, snapshot) log.debug('zfs.hold_present::%s::config::recursive = %s', name, recursive) - # check name and type - if '@' not in snapshot: + ## check we have a snapshot/tag name + if not __utils__['zfs.is_snapshot'](snapshot): ret['result'] = False ret['comment'] = 'invalid snapshot name: {0}'.format(snapshot) return ret - if '@' in name or '#' in name: + if __utils__['zfs.is_snapshot'](name) or \ + __utils__['zfs.is_bookmark'](name) or \ + name == 'error': ret['result'] = False ret['comment'] = 'invalid tag name: {0}'.format(name) return ret - result = __salt__['zfs.holds'](snapshot) - if snapshot not in result: - ret['result'] = False - ret['comment'] = '{0} is probably not a snapshot'.format(snapshot) + ## place hold if required + holds = __salt__['zfs.holds'](snapshot) + if name in holds: + ## NOTE: hold with name already exists for snapshot + ret['comment'] = 'hold {0} is present for {1}'.format( + name, + snapshot, + ) else: - if snapshot in result[snapshot]: - ret['result'] = False - ret['comment'] = result[snapshot] - elif result[snapshot] == 'no holds' or name not in result[snapshot]: # add hold - result = {snapshot: {name: 'held'}} - if not __opts__['test']: - result = 
__salt__['zfs.hold'](name, snapshot, **{'recursive': recursive}) - - ret['result'] = snapshot in result and name in result[snapshot] - if ret['result']: - ret['changes'] = result[snapshot] - ret['comment'] = 'hold {0} added to {1}'.format(name, snapshot) - else: - ret['comment'] = 'failed to add hold {0}'.format(name) - if snapshot in result: - ret['comment'] = result[snapshot] - else: # hold present - ret['comment'] = 'hold already exists' - - return ret - - -def filesystem_present(name, create_parent=False, properties=None, cloned_from=None): - ''' - ensure filesystem exists and has properties set - - name : string - name of filesystem - create_parent : boolean - creates all the non-existing parent datasets. - any property specified on the command line using the -o option is ignored. - cloned_from : string - name of snapshot to clone - properties : dict - additional zfs properties (-o) - - .. note:: - ``cloned_from`` is only use if the filesystem does not exist yet, - when ``cloned_from`` is set after the filesystem exists it will be ignored. - - .. note:: - Properties do not get cloned, if you specify the properties in the - state file they will be applied on a subsequent run. 
- - ''' - ret = {'name': name, - 'changes': {}, - 'result': True, - 'comment': ''} - - # check params - if not properties: - properties = {} - - log.debug('zfs.filesystem_present::%s::config::create_parent = %s', - name, create_parent) - log.debug('zfs.filesystem_present::%s::config::cloned_from = %s', - name, cloned_from) - log.debug('zfs.filesystem_present::%s::config::properties = %s', - name, properties) - - for prop in properties: - properties[prop] = _conform_value(properties[prop], True) - - if '@' in name or '#' in name: - ret['result'] = False - ret['comment'] = 'invalid filesystem or volume name: {0}'.format(name) - return ret - - if cloned_from: - if '@' not in cloned_from: - ret['result'] = False - ret['comment'] = '{0} is not a snapshot'.format(cloned_from) - return ret - - if not __salt__['zfs.exists'](cloned_from, **{'type': 'snapshot'}): - ret['result'] = False - ret['comment'] = 'snapshot {0} does not exist'.format(cloned_from) - return ret - - cloned_parent = cloned_from[:cloned_from.index('@')] - if not __salt__['zfs.exists'](cloned_parent, **{'type': 'filesystem'}): - ret['result'] = False - ret['comment'] = 'snapshot {0} is not from a filesystem'.format(cloned_from) - return ret - - if __salt__['zfs.exists'](name, **{'type': 'filesystem'}): # update properties if needed - result = {} - if len(properties) > 0: - result = __salt__['zfs.get'](name, **{'properties': ','.join(properties.keys()), 'fields': 'value', 'depth': 1, 'parsable': True}) - - for prop in properties: - if properties[prop] != result[name][prop]['value']: - if name not in ret['changes']: - ret['changes'][name] = {} - ret['changes'][name][prop] = properties[prop] - - if len(ret['changes']) > 0: - if not __opts__['test']: - result = __salt__['zfs.set'](name, **ret['changes'][name]) - if name not in result: - ret['result'] = False - else: - for prop in result[name]: - if result[name][prop] != 'set': - ret['result'] = False - - if ret['result']: - ret['comment'] = 'filesystem {0} was 
updated'.format(name) - else: - ret['changes'] = {} - ret['comment'] = 'filesystem {0} failed to be updated'.format(name) - else: - ret['comment'] = 'filesystem {0} is up to date'.format(name) - else: # create filesystem - result = {name: 'created'} + ## NOTE: no hold found with name for snapshot if not __opts__['test']: - if not cloned_from: - result = __salt__['zfs.create'](name, **{'create_parent': create_parent, 'properties': properties}) - else: - result = __salt__['zfs.clone'](cloned_from, name, **{'create_parent': create_parent, 'properties': properties}) - - ret['result'] = name in result - if ret['result']: - ret['result'] = result[name] == 'created' or result[name].startswith('cloned') - if ret['result']: - ret['changes'][name] = properties if len(properties) > 0 else result[name] - ret['comment'] = 'filesystem {0} was created'.format(name) + mod_res = __salt__['zfs.hold'](name, snapshot, **{'recursive': recursive}) else: - ret['comment'] = 'failed to create filesystem {0}'.format(name) - if name in result: - ret['comment'] = result[name] + mod_res = OrderedDict([('held', True)]) + + ret['result'] = mod_res['held'] + if ret['result']: + ret['changes'] = OrderedDict([ + (snapshot, OrderedDict([ + (name, 'held'), + ])), + ]) + ret['comment'] = 'hold {0} added to {1}'.format( + name, + snapshot, + ) + else: + ret['comment'] = 'failed to add hold {0} to {1}'.format( + name, + snapshot, + ) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + return ret -def volume_present(name, volume_size, sparse=False, create_parent=False, properties=None, cloned_from=None): +def _dataset_present(dataset_type, name, volume_size=None, sparse=False, create_parent=False, properties=None, cloned_from=None): ''' - ensure volume exists and has properties set + internal handler for filesystem_present/volume_present + dataset_type : string + volume or filesystem name : string name of volume volume_size : string @@ -457,6 +400,8 @@ def volume_present(name, volume_size, 
sparse=False, create_parent=False, propert properties : dict additional zfs properties (-o) + .. versionchanged:: Flourine + .. note:: ``cloned_from`` is only use if the volume does not exist yet, when ``cloned_from`` is set after the volume exists it will be ignored. @@ -477,100 +422,223 @@ def volume_present(name, volume_size, sparse=False, create_parent=False, propert 'result': True, 'comment': ''} - # check params - if not properties: + ## fallback dataset_type to filesystem if out of range + if dataset_type not in ['filesystem', 'volume']: + dataset_type = 'filesystem' + + ## ensure properties are zfs values + if volume_size: + volume_size = __utils__['zfs.from_size'](volume_size) + if properties: + properties = __utils__['zfs.from_auto_dict'](properties) + elif properties is None: properties = {} - log.debug('zfs.volume_present::%s::config::volume_size = %s', - name, volume_size) - log.debug('zfs.volume_present::%s::config::sparse = %s', - name, sparse) - log.debug('zfs.volume_present::%s::config::create_parent = %s', - name, create_parent) - log.debug('zfs.volume_present::%s::config::cloned_from = %s', - name, cloned_from) - log.debug('zfs.volume_present::%s::config::properties = %s', - name, properties) + ## log configuration + log.debug('zfs.%s_present::%s::config::volume_size = %s', + dataset_type, name, volume_size) + log.debug('zfs.%s_present::%s::config::sparse = %s', + dataset_type, name, sparse) + log.debug('zfs.%s_present::%s::config::create_parent = %s', + dataset_type, name, create_parent) + log.debug('zfs.%s_present::%s::config::cloned_from = %s', + dataset_type, name, cloned_from) + log.debug('zfs.%s_present::%s::config::properties = %s', + dataset_type, name, properties) - volume_size = _conform_value(volume_size, True) - for prop in properties: - properties[prop] = _conform_value(properties[prop], True) - - if '@' in name or '#' in name: + ## check we have valid filesystem name/volume name/clone snapshot + if not 
__utils__['zfs.is_dataset'](name): ret['result'] = False - ret['comment'] = 'invalid filesystem or volume name: {0}'.format(name) + ret['comment'] = 'invalid dataset name: {1}'.format(name) return ret - if cloned_from: - if '@' not in cloned_from: - ret['result'] = False - ret['comment'] = '{0} is not a snapshot'.format(cloned_from) - return ret + if cloned_from and not __utils__['zfs.is_snapshot'](cloned_from): + ret['result'] = False + ret['comment'] = '{0} is not a snapshot'.format(cloned_from) + return ret - if not __salt__['zfs.exists'](cloned_from, **{'type': 'snapshot'}): - ret['result'] = False - ret['comment'] = 'snapshot {0} does not exist'.format(cloned_from) - return ret + ## ensure dataset is in correct state + ## NOTE: update the dataset + if __salt__['zfs.exists'](name, **{'type': dataset_type}): + ## NOTE: fetch current volume properties + properties_current = __salt__['zfs.get']( + name, + fields='value', + depth=1, + parsable=True, + ).get(name, OrderedDict()) - cloned_parent = cloned_from[:cloned_from.index('@')] - if not __salt__['zfs.exists'](cloned_parent, **{'type': 'volume'}): - ret['result'] = False - ret['comment'] = 'snapshot {0} is not from a volume'.format(cloned_from) - return ret - - if __salt__['zfs.exists'](name, **{'type': 'volume'}): # update properties if needed - properties['volsize'] = volume_size # add volume_size to properties - result = __salt__['zfs.get'](name, **{'properties': ','.join(properties.keys()), 'fields': 'value', 'depth': 1, 'parsable': True}) + ## NOTE: add volsize to properties + if volume_size: + properties['volsize'] = volume_size + ## NOTE: build list of properties to update + properties_update = [] for prop in properties: - if properties[prop] != result[name][prop]['value']: + ## NOTE: skip unexisting properties + if prop not in properties_current: + log.warning('zfs.%s_present::%s::update - unknown property: %s', + dataset_type, name, prop) + continue + + ## NOTE: compare current and wanted value + if 
properties_current[prop]['value'] != properties[prop]: + properties_update.append(prop) + + ## NOTE: update pool properties + for prop in properties_update: + if not __opts__['test']: + mod_res = __salt__['zfs.set'](name, **{prop: properties[prop]}) + else: + mod_res = OrderedDict([('set', True)]) + + if mod_res['set']: if name not in ret['changes']: ret['changes'][name] = {} ret['changes'][name][prop] = properties[prop] - - if len(ret['changes']) > 0: - if not __opts__['test']: - result = __salt__['zfs.set'](name, **ret['changes'][name]) - if name not in result: - ret['result'] = False - else: - for prop in result[name]: - if result[name][prop] != 'set': - ret['result'] = False - - if ret['result']: - ret['comment'] = 'volume {0} was updated'.format(name) else: - ret['changes'] = {} - ret['comment'] = 'volume {0} failed to be updated'.format(name) - else: - ret['comment'] = 'volume {0} is up to date'.format(name) - else: # create volume - result = {name: 'created'} - if not __opts__['test']: - if not cloned_from: - result = __salt__['zfs.create'](name, **{ - 'volume_size': volume_size, - 'sparse': sparse, - 'create_parent': create_parent, - 'properties': properties - }) - else: - result = __salt__['zfs.clone'](cloned_from, name, **{'create_parent': create_parent, 'properties': properties}) + ret['result'] = False + if ret['comment'] == '': + ret['comment'] = 'The following properties were not updated:' + ret['comment'] = '{0} {1}'.format(ret['comment'], prop) - ret['result'] = name in result - if ret['result']: - ret['result'] = result[name] == 'created' or result[name].startswith('cloned') - if ret['result']: - ret['changes'][name] = properties if len(properties) > 0 else result[name] - ret['comment'] = 'volume {0} was created'.format(name) + ## NOTE: update comment + if ret['result'] and name in ret['changes']: + ret['comment'] = '{0} {1} was updated'.format(dataset_type, name) + elif ret['result']: + ret['comment'] = '{0} {1} is uptodate'.format(dataset_type, 
name) else: - ret['comment'] = 'failed to create volume {0}'.format(name) - if name in result: - ret['comment'] = result[name] + ret['comment'] = '{0} {1} failed to be updated'.format(dataset_type, name) + + ## NOTE: create or clone the dataset + else: + mod_res_action = 'cloned' if cloned_from else 'created' + if __opts__['test']: + ## NOTE: pretend to create/clone + mod_res = OrderedDict([ + (mod_res_action, True), + ]) + elif cloned_from: + ## NOTE: add volsize to properties + if volume_size: + properties['volsize'] = volume_size + + ## NOTE: clone the dataset + mod_res = __salt__['zfs.clone'](cloned_from, name, **{ + 'create_parent': create_parent, + 'properties': properties, + }) + else: + ## NOTE: create the dataset + mod_res = __salt__['zfs.create'](name, **{ + 'create_parent': create_parent, + 'properties': properties, + 'volume_size': volume_size, + 'sparse': sparse, + }) + + ret['result'] = mod_res[mod_res_action] + if ret['result']: + ret['changes'][name] = mod_res_action + if properties: + ret['changes'][name] = properties + ret['comment'] = '{0} {1} was {2}'.format( + dataset_type, + name, + mod_res_action, + ) + else: + ret['comment'] = 'failed to {0} {1} {2}'.format( + mod_res_action[:-1], + dataset_type, + name, + ) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + return ret +def filesystem_present(name, create_parent=False, properties=None, cloned_from=None): + ''' + ensure filesystem exists and has properties set + + name : string + name of filesystem + create_parent : boolean + creates all the non-existing parent datasets. + any property specified on the command line using the -o option is ignored. + cloned_from : string + name of snapshot to clone + properties : dict + additional zfs properties (-o) + + .. versionchanged:: Flourine + + .. note:: + ``cloned_from`` is only use if the filesystem does not exist yet, + when ``cloned_from`` is set after the filesystem exists it will be ignored. + + .. 
note:: + Properties do not get cloned, if you specify the properties in the + state file they will be applied on a subsequent run. + + ''' + return _dataset_present( + 'filesystem', + name, + create_parent=create_parent, + properties=properties, + cloned_from=cloned_from, + ) + + +def volume_present(name, volume_size, sparse=False, create_parent=False, properties=None, cloned_from=None): + ''' + ensure volume exists and has properties set + + name : string + name of volume + volume_size : string + size of volume + sparse : boolean + create sparse volume + create_parent : boolean + creates all the non-existing parent datasets. + any property specified on the command line using the -o option is ignored. + cloned_from : string + name of snapshot to clone + properties : dict + additional zfs properties (-o) + + .. versionchanged:: Flourine + + .. note:: + ``cloned_from`` is only use if the volume does not exist yet, + when ``cloned_from`` is set after the volume exists it will be ignored. + + .. note:: + Properties do not get cloned, if you specify the properties in the state file + they will be applied on a subsequent run. + + ``volume_size`` is considered a property, so the volume's size will be + corrected when the properties get updated if it differs from the + original volume. + + The sparse parameter is ignored when using ``cloned_from``. + + ''' + return _dataset_present( + 'volume', + name, + volume_size, + sparse=sparse, + create_parent=create_parent, + properties=properties, + cloned_from=cloned_from, + ) + + def bookmark_present(name, snapshot): ''' ensure bookmark exists @@ -580,41 +648,55 @@ def bookmark_present(name, snapshot): snapshot : string name of snapshot + .. 
versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} + ## log configuration log.debug('zfs.bookmark_present::%s::config::snapshot = %s', name, snapshot) - if '@' not in snapshot: + ## check we have valid snapshot/bookmark name + if not __utils__['zfs.is_snapshot'](snapshot): ret['result'] = False - ret['comment'] = '{0} is not a snapshot'.format(snapshot) + ret['comment'] = 'invalid snapshot name: {0}'.format(name) return ret - if '#' not in name: - if '/' not in name: - name = '{0}#{1}'.format(snapshot[:snapshot.index('@')], name) - else: - ret['result'] = False - ret['comment'] = '{0} is not a bookmark'.format(name) - return ret + if '#' not in name and '/' not in name: + ## NOTE: simple snapshot name + # take the snapshot name and replace the snapshot but with the simple name + # e.g. pool/fs@snap + bm --> pool/fs#bm + name = '{0}#{1}'.format(snapshot[:snapshot.index('@')], name) + ret['name'] = name - if __salt__['zfs.exists'](name, **{'type': 'bookmark'}): - ret['comment'] = 'bookmark already exists' - else: # create bookmark - result = {snapshot: 'bookmarked'} + if not __utils__['zfs.is_bookmark'](name): + ret['result'] = False + ret['comment'] = 'invalid bookmark name: {0}'.format(name) + return ret + + ## ensure bookmark exists + if not __salt__['zfs.exists'](name, **{'type': 'bookmark'}): + ## NOTE: bookmark the snapshot if not __opts__['test']: - result = __salt__['zfs.bookmark'](snapshot, name) - - ret['result'] = snapshot in result and result[snapshot].startswith('bookmarked') - if ret['result']: - ret['changes'] = result - ret['comment'] = 'snapshot {0} was bookmarked as {1}'.format(snapshot, name) + mod_res = __salt__['zfs.bookmark'](snapshot, name) else: - ret['comment'] = 'failed to create bookmark {0}'.format(name) + mod_res = OrderedDict([('bookmarked', True)]) + + ret['result'] = mod_res['bookmarked'] + if ret['result']: + ret['changes'][name] = snapshot + ret['comment'] = '{0} bookmarked as 
{1}'.format(snapshot, name) + else: + ret['comment'] = 'failed to bookmark {0}'.format(snapshot) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + else: + ## NOTE: bookmark already exists + ret['comment'] = 'bookmark is present' + return ret @@ -629,6 +711,8 @@ def snapshot_present(name, recursive=False, properties=None): properties : dict additional zfs properties (-o) + .. versionchanged:: Flourine + .. note: Properties are only set at creation time @@ -638,38 +722,43 @@ def snapshot_present(name, recursive=False, properties=None): 'result': True, 'comment': ''} - # check params - if not properties: - properties = {} - + ## log configuration log.debug('zfs.snapshot_present::%s::config::recursive = %s', name, recursive) log.debug('zfs.snapshot_present::%s::config::properties = %s', name, properties) - for prop in properties: - properties[prop] = _conform_value(properties[prop], True) + ## ensure properties are zfs values + if properties: + properties = __utils__['zfs.from_auto_dict'](properties) - if '@' not in name: + ## check we have valid snapshot name + if not __utils__['zfs.is_snapshot'](name): ret['result'] = False ret['comment'] = 'invalid snapshot name: {0}'.format(name) return ret - if __salt__['zfs.exists'](name, **{'type': 'snapshot'}): # we are all good - ret['comment'] = 'snapshot already exists' - else: # create snapshot - result = {name: 'snapshotted'} + ## ensure snapshot exits + if not __salt__['zfs.exists'](name, **{'type': 'snapshot'}): + ## NOTE: create the snapshot if not __opts__['test']: - result = __salt__['zfs.snapshot'](name, **{'recursive': recursive, 'properties': properties}) + mod_res = __salt__['zfs.snapshot'](name, **{'recursive': recursive, 'properties': properties}) + else: + mod_res = OrderedDict([('snapshotted', True)]) - ret['result'] = name in result and result[name] == 'snapshotted' + ret['result'] = mod_res['snapshotted'] if ret['result']: - ret['changes'][name] = properties if len(properties) > 0 else 
result[name] + ret['changes'][name] = 'snapshotted' + if properties: + ret['changes'][name] = properties ret['comment'] = 'snapshot {0} was created'.format(name) else: ret['comment'] = 'failed to create snapshot {0}'.format(name) - if name in result: - ret['comment'] = result[name] + if 'error' in mod_res: + ret['comment'] = mod_res['error'] + else: + ## NOTE: snapshot already exists + ret['comment'] = 'snapshot is present' return ret @@ -686,45 +775,147 @@ def promoted(name): only one dataset can be the origin, if you promote a clone the original will now point to the promoted dataset + .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} - if '@' in name or '#' in name: + ## check we if we have a valid dataset name + if not __utils__['zfs.is_dataset'](name): ret['result'] = False - ret['comment'] = 'invalid filesystem or volume name: {0}'.format(name) + ret['comment'] = 'invalid dataset name: {0}'.format(name) return ret - if __salt__['zfs.exists'](name): - origin = '-' - if not __opts__['test']: - origin = __salt__['zfs.get'](name, **{'properties': 'origin', 'fields': 'value', 'parsable': True})[name]['origin']['value'] - - if origin == '-': - ret['comment'] = '{0} already promoted'.format(name) - else: - result = {name: 'promoted'} - if not __opts__['test']: - result = __salt__['zfs.promote'](name) - - ret['result'] = name in result and result[name] == 'promoted' - ret['changes'] = result if ret['result'] else {} - if ret['result']: - ret['comment'] = '{0} was promoted'.format(name) - else: - ret['comment'] = 'failed to promote {0}'.format(name) - if name in result: - ret['comment'] = result[name] - - else: # we don't have the dataset + ## ensure dataset is the primary instance + if not __salt__['zfs.exists'](name, **{'type': 'filesystem,volume'}): + ## NOTE: we don't have a dataset ret['result'] = False ret['comment'] = 'dataset {0} does not exist'.format(name) + else: + ## NOTE: check if we have a blank origin 
(-) + if __salt__['zfs.get'](name, **{'properties': 'origin', 'fields': 'value', 'parsable': True})[name]['origin']['value'] == '-': + ## NOTE: we're already promoted + ret['comment'] = '{0} already promoted'.format(name) + else: + ## NOTE: promote dataset + if not __opts__['test']: + mod_res = __salt__['zfs.promote'](name) + else: + mod_res = OrderedDict([('promoted', True)]) + + ret['result'] = mod_res['promoted'] + if ret['result']: + ret['changes'][name] = 'promoted' + ret['comment'] = '{0} promoted'.format(name) + else: + ret['comment'] = 'failed to promote {0}'.format(name) + if 'error' in mod_res: + ret['comment'] = mod_res['error'] return ret +def _schedule_snapshot_retrieve(dataset, prefix, snapshots): + ''' + Update snapshots dict with current snapshots + + dataset: string + name of filesystem or volume + prefix : string + prefix for the snapshots + e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm' + snapshots : OrderedDict + preseeded OrderedDict with configuration + + ''' + ## NOTE: retrieve all snapshots for the dataset + for snap in sorted(__salt__['zfs.list'](dataset, **{'recursive': True, 'depth': 1, 'type': 'snapshot'}).keys()): + ## NOTE: we only want the actualy name + ## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248 + snap_name = snap[snap.index('@')+1:] + + ## NOTE: we only want snapshots matching our prefix + if not snap_name.startswith('{0}-'.format(prefix)): + continue + + ## NOTE: retrieve the holds for this snapshot + snap_holds = __salt__['zfs.holds'](snap) + + ## NOTE: this snapshot has no holds, eligable for pruning + if not snap_holds: + snapshots['_prunable'].append(snap) + + ## NOTE: update snapshots based on holds (if any) + ## we are only interested in the ones from our schedule + ## if we find any others we skip them + for hold in snap_holds: + if hold in snapshots['_schedule'].keys(): + snapshots[hold].append(snap) + + return snapshots + + +def _schedule_snapshot_prepare(dataset, prefix, 
snapshots): + ''' + Update snapshots dict with info for a new snapshot + + dataset: string + name of filesystem or volume + prefix : string + prefix for the snapshots + e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm' + snapshots : OrderedDict + preseeded OrderedDict with configuration + + ''' + ## NOTE: generate new snapshot name + snapshot_create_name = '{dataset}@{prefix}-{timestamp}'.format( + dataset=dataset, + prefix=prefix, + timestamp=datetime.now().strftime('%Y%m%d_%H%M%S') + ) + + ## NOTE: figure out if we need to create the snapshot + timestamp_now = datetime.now().replace(second=0, microsecond=0) + snapshots['_create'][snapshot_create_name] = [] + for hold, hold_count in snapshots['_schedule'].items(): + ## NOTE: skip hold if we don't keep snapshots for it + if hold_count == 0: + continue + + ## NOTE: figure out if we need the current hold on the new snapshot + if len(snapshots[hold]) > 0: + ## NOTE: extract datetime from snapshot name + timestamp = datetime.strptime( + snapshots[hold][-1], + '{0}@{1}-%Y%m%d_%H%M%S'.format(dataset, prefix), + ).replace(second=0, microsecond=0) + + ## NOTE: compare current timestamp to timestamp from snapshot + if hold == 'minute' and \ + timestamp_now <= timestamp: + continue + elif hold == 'hour' and \ + timestamp_now.replace(**comp_hour) <= timestamp.replace(**comp_hour): + continue + elif hold == 'day' and \ + timestamp_now.replace(**comp_day) <= timestamp.replace(**comp_day): + continue + elif hold == 'month' and \ + timestamp_now.replace(**comp_month) <= timestamp.replace(**comp_month): + continue + elif hold == 'year' and \ + timestamp_now.replace(**comp_year) <= timestamp.replace(**comp_year): + continue + + ## NOTE: add hold entry for snapshot + snapshots['_create'][snapshot_create_name].append(hold) + + return snapshots + + def scheduled_snapshot(name, prefix, recursive=True, schedule=None): ''' maintain a set of snapshots based on a schedule @@ -733,7 +924,7 @@ def scheduled_snapshot(name, 
prefix, recursive=True, schedule=None): name of filesystem or volume prefix : string prefix for the snapshots - e.g. 'test' will result in snapshots being named 'test-YYYYMMDD_HHMM' + e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm' recursive : boolean create snapshots for all children also schedule : dict @@ -751,187 +942,155 @@ def scheduled_snapshot(name, prefix, recursive=True, schedule=None): switched to localtime from gmtime so times now take into account timezones. + .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} - ## parse parameters - # update default schedule - state_schedule = schedule if schedule else {} - schedule = { - 'minute': 0, - 'hour': 0, - 'day': 0, - 'month': 0, - 'year': 0, - } - for hold in state_schedule: - if hold not in schedule: - del state_schedule[hold] - schedule.update(state_schedule) - # check name - if not __salt__['zfs.exists'](name, **{'type': 'filesystem'}) and not __salt__['zfs.exists'](name, **{'type': 'volume'}): - ret['comment'] = '{0} is not a filesystem or a volume or does not exist'.format(name) + ## initialize defaults + schedule_holds = ['minute', 'hour', 'day', 'month', 'year'] + snapshots = OrderedDict([ + ('_create', OrderedDict()), + ('_prunable', []), + ('_schedule', OrderedDict()), + ]) + + ## strict configuration validation + ## NOTE: we need a valid dataset + if not __utils__['zfs.is_dataset'](name): ret['result'] = False - # check prefix + ret['comment'] = 'invalid dataset name: {0}'.format(name) + + if not __salt__['zfs.exists'](name, **{'type': 'filesystem,volume'}): + ret['comment'] = 'dataset {0} does not exist'.format(name) + ret['result'] = False + + ## NOTE: prefix must be 4 or longer if not prefix or len(prefix) < 4: ret['comment'] = 'prefix ({0}) must be at least 4 long'.format(prefix) ret['result'] = False - # check schedule - snap_count = 0 - for hold in schedule: - if not isinstance(schedule[hold], int): - ret['comment'] = 
'schedule values must be integers' - ret['result'] = False - break - snap_count += schedule[hold] - if ret['result'] and snap_count == 0: - ret['comment'] = 'at least one snapshot must be schedule' - ret['result'] = False - # print debug info + ## NOTE: validate schedule + total_count = 0 + for hold in schedule_holds: + snapshots[hold] = [] + if hold not in schedule: + snapshots['_schedule'][hold] = 0 + elif isinstance(schedule[hold], int): + snapshots['_schedule'][hold] = schedule[hold] + else: + ret['result'] = False + ret['comment'] = 'schedule value for {0} is not an integer'.format( + hold, + ) + break + total_count += snapshots['_schedule'][hold] + if ret['result'] and total_count == 0: + ret['result'] = False + ret['comment'] = 'schedule is not valid, you need to keep atleast 1 snapshot' + + ## NOTE: return if configuration is not valid + if not ret['result']: + return ret + + ## retrieve existing snapshots + snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots) + + ## prepare snapshot + snapshots = _schedule_snapshot_prepare(name, prefix, snapshots) + + ## log configuration log.debug('zfs.scheduled_snapshot::%s::config::recursive = %s', name, recursive) log.debug('zfs.scheduled_snapshot::%s::config::prefix = %s', name, prefix) - log.debug('zfs.scheduled_snapshot::%s::config::schedule = %s', - name, schedule) + log.debug('zfs.scheduled_snapshot::%s::snapshots = %s', + name, snapshots) - ## manage snapshots - if ret['result']: - # retreive snapshots - prunable = [] - snapshots = {} - for key in schedule: - snapshots[key] = [] + ## create snapshot(s) + for snapshot_name, snapshot_holds in snapshots['_create'].items(): + ## NOTE: skip if new snapshot has no holds + if not snapshot_holds: + continue - for snap in sorted(__salt__['zfs.list'](name, **{'recursive': True, 'depth': 1, 'type': 'snapshot'}).keys()): - if '@' not in snap: - continue + ## NOTE: create snapshot + if not __opts__['test']: + mod_res = __salt__['zfs.snapshot'](snapshot_name, 
**{'recursive': recursive}) + else: + mod_res = OrderedDict([('snapshotted', True)]) - snap_name = snap[snap.index('@')+1:] - if snap_name.startswith('{0}-'.format(prefix)): - holds = __salt__['zfs.holds'](snap) - if snap not in holds or holds[snap] == 'no holds': - prunable.append(snap) - continue - for hold in holds[snap]: - hold = hold.strip() - if hold not in snapshots.keys(): - continue - snapshots[hold].append(snap) - log.debug('zfs.scheduled_snapshot::%s::snapshots = %s', - name, snapshots) - - # create snapshot - needed_holds = [] - current_timestamp = localtime() - for hold in snapshots: - # check if we need need to consider hold - if schedule[hold] == 0: - continue - - # check we need a new snapshot for hold - if len(snapshots[hold]) > 0: - snapshots[hold].sort() - timestamp = strptime(snapshots[hold][-1], '{0}@{1}-%Y%m%d_%H%M%S'.format(name, prefix)) - if hold == 'minute': - if current_timestamp.tm_min <= timestamp.tm_min and \ - current_timestamp.tm_hour <= timestamp.tm_hour and \ - current_timestamp.tm_mday <= timestamp.tm_mday and \ - current_timestamp.tm_mon <= timestamp.tm_mon and \ - current_timestamp.tm_year <= timestamp.tm_year: - continue - elif hold == 'hour': - if current_timestamp.tm_hour <= timestamp.tm_hour and \ - current_timestamp.tm_mday <= timestamp.tm_mday and \ - current_timestamp.tm_mon <= timestamp.tm_mon and \ - current_timestamp.tm_year <= timestamp.tm_year: - continue - elif hold == 'day': - if current_timestamp.tm_mday <= timestamp.tm_mday and \ - current_timestamp.tm_mon <= timestamp.tm_mon and \ - current_timestamp.tm_year <= timestamp.tm_year: - continue - elif hold == 'month': - if current_timestamp.tm_mon <= timestamp.tm_mon and \ - current_timestamp.tm_year <= timestamp.tm_year: - continue - elif hold == 'year': - if current_timestamp.tm_year <= timestamp.tm_year: - continue + if not mod_res['snapshotted']: + ret['result'] = False + ret['comment'] = 'error creating snapshot ({0})'.format(snapshot_name) + else: + ## NOTE: 
create holds (if we have a snapshot) + for hold in snapshot_holds: + if not __opts__['test']: + mod_res = __salt__['zfs.hold'](hold, snapshot_name, **{'recursive': recursive}) else: - log.debug('zfs.scheduled_snapshot::%s::hold_unknown = %s', - name, hold) + mod_res = OrderedDict([('held', True)]) - # mark snapshot for hold as needed - needed_holds.append(hold) - - snap_name = '{prefix}-{timestamp}'.format( - prefix=prefix, - timestamp=strftime('%Y%m%d_%H%M%S') - ) - log.debug('zfs.scheduled_snapshot::%s::needed_holds = %s', - name, needed_holds) - if len(needed_holds) > 0: - snap = '{dataset}@{snapshot}'.format(dataset=name, snapshot=snap_name) - res = __salt__['zfs.snapshot'](snap, **{'recursive': recursive}) - if snap not in res or res[snap] != 'snapshotted': # something went wrong! - ret['comment'] = 'error creating snapshot ({0})'.format(snap) - ret['result'] = False - - for hold in needed_holds: - if not ret['result']: - continue # skip if snapshot failed - res = __salt__['zfs.hold'](hold, snap, **{'recursive': recursive}) - if snap not in res or hold not in res[snap] or res[snap][hold] != 'held': - ret['comment'] = "{0}error adding hold ({1}) to snapshot ({2})".format( - "{0}\n".format(ret['comment']) if not ret['result'] else '', + if not mod_res['held']: + ret['result'] = False + ret['comment'] = "error adding hold ({0}) to snapshot ({1})".format( hold, - snap + snapshot_name, ) - ret['result'] = False - else: # add new snapshot to lists (for pruning) - snapshots[hold].append(snap) + break - if ret['result']: - ret['comment'] = 'scheduled snapshots were updated' - ret['changes']['created'] = [snap] - ret['changes']['pruned'] = [] + snapshots[hold].append(snapshot_name) - # prune snapshots - for hold in schedule: - if hold not in snapshots.keys(): - continue - while len(snapshots[hold]) > schedule[hold]: - # pop oldest snapshot and release hold - snap = snapshots[hold].pop(0) - __salt__['zfs.release'](hold, snap, **{'recursive': recursive}) - # check if 
snapshot is prunable - holds = __salt__['zfs.holds'](snap) - if snap not in holds or holds[snap] == 'no holds': - prunable.append(snap) + if ret['result']: + ret['comment'] = 'scheduled snapshots updated' + if 'created' not in ret['changes']: + ret['changes']['created'] = [] + ret['changes']['created'].append(snapshot_name) - if len(prunable) > 0: - for snap in prunable: # destroy if hold free - res = __salt__['zfs.destroy'](snap, **{'recursive': recursive}) - if snap not in res or res[snap] != 'destroyed': - ret['comment'] = "{0}error prunding snapshot ({1})".format( - "{0}\n".format(ret['comment']) if not ret['result'] else '', - snap - ) - ret['result'] = False - else: - ret['comment'] = 'scheduled snapshots were updated' - if 'created' not in ret['changes']: - ret['changes']['created'] = [] - if 'pruned' not in ret['changes']: - ret['changes']['pruned'] = [] - ret['changes']['pruned'].append(snap) + ## prune hold(s) + for hold, hold_count in snapshots['_schedule'].items(): + while ret['result'] and len(snapshots[hold]) > hold_count: + ## NOTE: pop oldest snapshot + snapshot_name = snapshots[hold].pop(0) - if ret['result'] and ret['comment'] == '': + ## NOTE: release hold for snapshot + if not __opts__['test']: + mod_res = __salt__['zfs.release'](hold, snapshot_name, **{'recursive': recursive}) + else: + mod_res = OrderedDict([('released', True)]) + + if not mod_res['released']: + ret['result'] = False + ret['comment'] = "error adding hold ({0}) to snapshot ({1})".format( + hold, + snapshot_name, + ) + + ## NOTE: mark as prunable + if not __salt__['zfs.holds'](snapshot_name): + snapshots['_prunable'].append(snapshot_name) + + ## prune snapshot(s) + for snapshot_name in snapshots['_prunable']: + ## NOTE: destroy snapshot + if not __opts__['test']: + mod_res = __salt__['zfs.destroy'](snapshot_name, **{'recursive': recursive}) + else: + mod_res = OrderedDict([('destroyed', True)]) + + if not mod_res['destroyed']: + ret['result'] = False + ret['comment'] = "error 
prunding snapshot ({1})".format( + snapshot_name, + ) + break + + if ret['result'] and snapshots['_prunable']: + ret['comment'] = 'scheduled snapshots updated' + ret['changes']['pruned'] = snapshots['_prunable'] + + if ret['result'] and not ret['changes']: ret['comment'] = 'scheduled snapshots are up to date' return ret diff --git a/salt/states/zpool.py b/salt/states/zpool.py index baf7fabc87..5512105d0e 100644 --- a/salt/states/zpool.py +++ b/salt/states/zpool.py @@ -266,8 +266,12 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf ## ensure properties are zfs values if properties: properties = __utils__['zfs.from_auto_dict'](properties) + elif properties is None: + properties = {} if filesystem_properties: filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties) + elif filesystem_properties is None: + filesystem_properties = {} ## parse layout vdevs = _layout_to_vdev(layout, config['device_dir']) @@ -296,8 +300,44 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf ret['changes'][name] = 'imported' if config['import'] else 'created' ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) + ## NOTE: update pool + elif __salt__['zpool.exists'](name): + ret['result'] = True + + ## NOTE: fetch current pool properties + properties_current = __salt__['zpool.get'](name, parsable=True) + + ## NOTE: build list of properties to update + properties_update = [] + for prop in properties: + ## NOTE: skip unexisting properties + if prop not in properties_current: + log.warning('zpool.present::%s::update - unknown property: %s', name, prop) + continue + + ## NOTE: compare current and wanted value + if properties_current[prop] != properties[prop]: + properties_update.append(prop) + + ## NOTE: update pool properties + for prop in properties_update: + res = __salt__['zpool.set'](name, prop, properties[prop]) + + if res['set']: + if name not in ret['changes']: + 
ret['changes'][name] = {} + ret['changes'][name][prop] = properties[prop] + else: + ret['result'] = False + if ret['comment'] == '': + ret['comment'] = 'The following properties were not updated:' + ret['comment'] = '{0} {1}'.format(ret['comment'], prop) + + if ret['result']: + ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed' + ## NOTE: import or create the pool (at least try to anyway) - elif not __salt__['zpool.exists'](name): + else: ## NOTE: import pool if config['import']: mod_res = __salt__['zpool.import']( @@ -336,42 +376,6 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf if not ret['result'] and not vdevs: ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name) - ## NOTE: update pool - else: - ret['result'] = True - - ## NOTE: fetch current pool properties - properties_current = __salt__['zpool.get'](name, parsable=True) - - ## NOTE: build list of properties to update - properties_update = [] - for prop in properties: - ## NOTE: skip unexisting properties - if prop not in properties_current: - log.warning('zpool.present::%s::update - unknown property: %s', name, prop) - continue - - ## NOTE: compare current and wanted value - if properties_current[prop] != properties[prop]: - properties_update.append(prop) - - ## NOTE: update pool properties - for prop in properties_update: - res = __salt__['zpool.set'](name, prop, properties[prop]) - - if res['set']: - if name not in ret['changes']: - ret['changes'][name] = {} - ret['changes'][name][prop] = properties[prop] - else: - ret['result'] = False - if ret['comment'] == '': - ret['comment'] = 'The following properties were not updated:' - ret['comment'] = '{0} {1}'.format(ret['comment'], prop) - - if ret['result']: - ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed' - return ret diff --git a/salt/utils/zfs.py b/salt/utils/zfs.py index 
3b5c158e22..24bf323968 100644 --- a/salt/utils/zfs.py +++ b/salt/utils/zfs.py @@ -23,7 +23,7 @@ from numbers import Number # Import salt libs from salt.utils.decorators import memoize as real_memoize from salt.utils.odict import OrderedDict -import salt.utils.stringutils +from salt.utils.stringutils import to_num as str_to_num import salt.modules.cmdmod # Import 3rd-party libs @@ -415,7 +415,7 @@ def from_numeric(value): if value == 'none': value = None elif value: - value = salt.utils.stringutils.to_num(value) + value = str_to_num(value) return value From 01da93ec1f88054dd914978b5d0b246838ad7205 Mon Sep 17 00:00:00 2001 From: Jorge Schrauwen Date: Sat, 17 Feb 2018 15:52:27 +0100 Subject: [PATCH 216/223] Phase 3 - Minor cleanups --- salt/modules/zfs.py | 35 +++++++++----- salt/modules/zpool.py | 104 ++++++++++++++++++++---------------------- salt/states/zfs.py | 24 ++-------- salt/states/zpool.py | 4 +- salt/utils/zfs.py | 8 ++-- 5 files changed, 84 insertions(+), 91 deletions(-) diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index 8129d0aab5..8b8e5ba52c 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -8,6 +8,10 @@ Module for running ZFS command :depends: salt.utils.zfs :platform: illumos,freebsd,linux +.. versionchanged:: Fluorine + Big refactor to remove duplicate code, better type converions and improved + consistancy in output. + ''' from __future__ import absolute_import, unicode_literals, print_function @@ -51,7 +55,6 @@ def exists(name, **kwargs): filesystem, snapshot, volume, bookmark, or all. .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: @@ -59,6 +62,7 @@ def exists(name, **kwargs): salt '*' zfs.exists myzpool/mydataset salt '*' zfs.exists myzpool/myvolume type=volume + ''' ## Configure command # NOTE: initialize the defaults @@ -107,7 +111,6 @@ def create(name, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2015.5.0 - .. 
versionchanged:: Fluorine CLI Example: @@ -168,13 +171,13 @@ def destroy(name, **kwargs): watch out when using recursive and recursive_all .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zfs.destroy myzpool/mydataset [force=True|False] + ''' ## Configure command # NOTE: initialize the defaults @@ -220,13 +223,13 @@ def rename(name, new_name, **kwargs): snapshots are the only dataset that can be renamed recursively. .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zfs.rename myzpool/mydataset myzpool/renameddataset + ''' ## Configure command # NOTE: initialize the defaults @@ -291,7 +294,6 @@ def list_(name=None, **kwargs): .. versionadded:: Oxygen .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: @@ -300,6 +302,7 @@ def list_(name=None, **kwargs): salt '*' zfs.list salt '*' zfs.list myzpool/mydataset [recursive=True|False] salt '*' zfs.list myzpool/mydataset properties="sharenfs,mountpoint" + ''' ret = OrderedDict() @@ -378,6 +381,7 @@ def list_mount(): .. code-block:: bash salt '*' zfs.list_mount + ''' ## List mounted filesystem res = __salt__['cmd.run_all']( @@ -423,6 +427,7 @@ def mount(name=None, **kwargs): salt '*' zfs.mount salt '*' zfs.mount myzpool/mydataset salt '*' zfs.mount myzpool/mydataset options=ro + ''' ## Configure command # NOTE: initialize the defaults @@ -480,6 +485,7 @@ def unmount(name, **kwargs): .. code-block:: bash salt '*' zfs.unmount myzpool/mydataset [force=True|False] + ''' ## Configure command # NOTE: initialize the defaults @@ -523,13 +529,13 @@ def inherit(prop, name, **kwargs): operate as if the -S option was not specified. .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. 
code-block:: bash salt '*' zfs.inherit canmount myzpool/mydataset [recursive=True|False] + ''' ## Configure command # NOTE: initialize the defaults @@ -573,13 +579,13 @@ def diff(name_a, name_b=None, **kwargs): if true we don't parse the timestamp to a more readable date (default = True) .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset + ''' ## Configure command # NOTE: initialize the defaults @@ -649,13 +655,13 @@ def rollback(name, **kwargs): must be destroyed by specifying the -r option. .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zfs.rollback myzpool/mydataset@yesterday + ''' ## Configure command # NOTE: initialize the defaults @@ -708,13 +714,13 @@ def clone(name_a, name_b, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday + ''' ## Configure command # NOTE: initialize the defaults @@ -776,6 +782,7 @@ def promote(name): .. code-block:: bash salt '*' zfs.promote myzpool/myclone + ''' ## Promote clone res = __salt__['cmd.run_all']( @@ -813,6 +820,7 @@ def bookmark(snapshot, bookmark): .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete + ''' # abort if we do not have feature flags if not __utils__['zfs.has_feature_flags'](): @@ -854,6 +862,7 @@ def holds(snapshot, **kwargs): .. 
code-block:: bash salt '*' zfs.holds myzpool/mydataset@baseline + ''' ## Configure command # NOTE: initialize the defaults @@ -923,6 +932,7 @@ def hold(tag, *snapshot, **kwargs): salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot [recursive=True] salt '*' zfs.hold mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot + ''' ## warn about tag change # NOTE: remove me 2 versions after Flourine @@ -988,6 +998,7 @@ def release(tag, *snapshot, **kwargs): salt '*' zfs.release mytag myzpool/mydataset@mysnapshot [recursive=True] salt '*' zfs.release mytag myzpool/mydataset@mysnapshot myzpool/mydataset@myothersnapshot + ''' ## warn about tag change # NOTE: remove me 2 versions after Flourine @@ -1040,7 +1051,6 @@ def snapshot(*snapshot, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2016.3.0 - .. versionchanged:: Flourine CLI Example: @@ -1048,6 +1058,7 @@ def snapshot(*snapshot, **kwargs): salt '*' zfs.snapshot myzpool/mydataset@yesterday [recursive=True] salt '*' zfs.snapshot myzpool/mydataset@yesterday myzpool/myotherdataset@yesterday [recursive=True] + ''' ## Configure command # NOTE: initialize the defaults @@ -1101,7 +1112,6 @@ def set(*dataset, **kwargs): megabytes, gigabytes, terabytes, petabytes, or exabytes respectively). .. versionadded:: 2016.3.0 - .. versionchanged:: Flourine CLI Example: @@ -1110,6 +1120,7 @@ def set(*dataset, **kwargs): salt '*' zfs.set myzpool/mydataset compression=off salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=off salt '*' zfs.set myzpool/mydataset myzpool/myotherdataset compression=lz4 canmount=off + ''' ## Configure command # NOTE: push filesystem properties @@ -1159,7 +1170,6 @@ def get(*dataset, **kwargs): for all datasets on the system. .. versionadded:: 2016.3.0 - .. 
versionchanged:: Flourine CLI Example: @@ -1169,6 +1179,7 @@ def get(*dataset, **kwargs): salt '*' zfs.get myzpool/mydataset [recursive=True|False] salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False] salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1 + ''' ## Configure command # NOTE: initialize the defaults diff --git a/salt/modules/zpool.py b/salt/modules/zpool.py index f7d1467588..d056b442b8 100644 --- a/salt/modules/zpool.py +++ b/salt/modules/zpool.py @@ -7,6 +7,11 @@ Module for running ZFS zpool command :maturity: new :depends: salt.utils.zfs :platform: illumos,freebsd,linux + +.. versionchanged:: Fluorine + Big refactor to remove duplicate code, better type converions and improved + consistancy in output. + ''' from __future__ import absolute_import, print_function, unicode_literals @@ -100,7 +105,6 @@ def status(zpool=None): optional name of storage pool .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: @@ -251,6 +255,7 @@ def iostat(zpool=None, sample_time=5, parsable=True): .. code-block:: bash salt '*' zpool.iostat myzpool + ''' ret = OrderedDict() @@ -372,6 +377,7 @@ def list_(properties='size,alloc,free,cap,frag,health', zpool=None, parsable=Tru salt '*' zpool.list zpool=tank salt '*' zpool.list 'size,free' salt '*' zpool.list 'size,free' tank + ''' ret = OrderedDict() @@ -454,6 +460,7 @@ def get(zpool, prop=None, show_source=False, parsable=True): .. code-block:: bash salt '*' zpool.get myzpool + ''' ret = OrderedDict() value_properties = ['property', 'value', 'source'] @@ -525,6 +532,7 @@ def set(zpool, prop, value): .. code-block:: bash salt '*' zpool.set myzpool readonly yes + ''' ret = OrderedDict() @@ -554,6 +562,7 @@ def exists(zpool): .. 
code-block:: bash salt '*' zpool.exists myzpool + ''' # list for zpool # NOTE: retcode > 0 if zpool does not exists @@ -578,13 +587,12 @@ def destroy(zpool, force=False): force : boolean force destroy of pool - .. versionchanged:: 2016.3.0 - CLI Example: .. code-block:: bash salt '*' zpool.destroy myzpool + ''' # destroy zpool res = __salt__['cmd.run_all']( @@ -617,13 +625,12 @@ def scrub(zpool, stop=False, pause=False): Pause support was added in this PR: https://github.com/openzfs/openzfs/pull/407 - .. versionchanged:: 2016.3.0 - CLI Example: .. code-block:: bash salt '*' zpool.scrub myzpool + ''' ## select correct action if stop: @@ -677,17 +684,6 @@ def create(zpool, *vdevs, **kwargs): create a boot partition .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine - - CLI Example: - - .. code-block:: bash - - salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False] - salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False] - salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False] - salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False] - salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False] .. note:: @@ -708,6 +704,17 @@ def create(zpool, *vdevs, **kwargs): .. code-block:: bash salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}" + + CLI Example: + + .. code-block:: bash + + salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False] + salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False] + salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] 
[force=True|False] + salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False] + salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False] + ''' ## Configure pool # NOTE: initialize the defaults @@ -767,13 +774,12 @@ def add(zpool, *vdevs, **kwargs): force : boolean forces use of device - .. versionchanged:: Fluorine - CLI Example: .. code-block:: bash salt '*' zpool.add myzpool /path/to/vdev1 /path/to/vdev2 [...] + ''' ## Configure pool # NOTE: initialize the defaults @@ -821,14 +827,12 @@ def attach(zpool, device, new_device, force=False): force : boolean forces use of device - .. versionchanged:: 2016.3.0 - .. versionchanged:: Fluorine - CLI Example: .. code-block:: bash salt '*' zpool.attach myzpool /path/to/vdev1 /path/to/vdev2 [...] + ''' ## Configure pool # NOTE: initialize the defaults @@ -873,13 +877,12 @@ def detach(zpool, device): device : string device to detach - .. versionchanged:: Fluorine - CLI Example: .. code-block:: bash salt '*' zpool.detach myzpool /path/to/vdev1 + ''' ## Update storage pool res = __salt__['cmd.run_all']( @@ -923,14 +926,6 @@ def split(zpool, newzpool, **kwargs): additional pool properties for newzpool .. versionadded:: Oxygen - .. versionchanged:: Fluorine - - CLI Example: - - .. code-block:: bash - - salt '*' zpool.split datamirror databackup - salt '*' zpool.split datamirror databackup altroot=/backup .. note:: @@ -945,6 +940,14 @@ def split(zpool, newzpool, **kwargs): .. code-block:: bash salt '*' zpool.split datamirror databackup properties="{'readonly': 'on'}" + + CLI Example: + + .. 
code-block:: bash + + salt '*' zpool.split datamirror databackup + salt '*' zpool.split datamirror databackup altroot=/backup + ''' ## Configure pool # NOTE: initialize the defaults @@ -991,13 +994,12 @@ def replace(zpool, old_device, new_device=None, force=False): force : boolean Forces use of new_device, even if its appears to be in use. - .. versionchanged:: Fluorine - CLI Example: .. code-block:: bash salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2 + ''' ## Configure pool # NOTE: initialize the defaults @@ -1041,8 +1043,6 @@ def create_file_vdev(size, *vdevs): ``*vdevs`` is a list of full paths for mkfile to create - .. versionchanged:: Fluorine - CLI Example: .. code-block:: bash @@ -1052,6 +1052,7 @@ def create_file_vdev(size, *vdevs): .. note:: Depending on file size, the above command may take a while to return. + ''' ret = OrderedDict() err = OrderedDict() @@ -1091,7 +1092,6 @@ def export(*pools, **kwargs): force export of storage pools .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: @@ -1099,6 +1099,7 @@ def export(*pools, **kwargs): salt '*' zpool.export myzpool ... [force=True|False] salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False] + ''' ## Configure pool # NOTE: initialize the defaults @@ -1170,7 +1171,6 @@ def import_(zpool=None, new_name=None, **kwargs): properties="{'property1': 'value1', 'property2': 'value2'}" .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: @@ -1179,6 +1179,7 @@ def import_(zpool=None, new_name=None, **kwargs): salt '*' zpool.import [force=True|False] salt '*' zpool.import myzpool [mynewzpool] [force=True|False] salt '*' zpool.import myzpool dir='/tmp' + ''' ## Configure pool # NOTE: initialize the defaults @@ -1251,7 +1252,6 @@ def online(zpool, *vdevs, **kwargs): expanded before the new space will become available to the pool. .. versionadded:: 2015.5.0 - .. 
versionchanged:: Fluorine CLI Example: @@ -1315,13 +1315,13 @@ def offline(zpool, *vdevs, **kwargs): enable temporarily offline .. versionadded:: 2015.5.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zpool.offline myzpool /path/to/vdev1 [...] [temporary=True|False] + ''' ## Configure pool # NOTE: initialize the defaults @@ -1353,23 +1353,19 @@ def labelclear(device, force=False): ''' Removes ZFS label information from the specified device - .. warning:: - - The device must not be part of an active pool configuration. - device : string - device + device, must not be part of an active pool configuration. force : boolean treat exported or foreign devices as inactive .. versionadded:: Oxygen - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zpool.labelclear /path/to/dev + ''' ## clear label for all specified device res = __salt__['cmd.run_all']( @@ -1405,6 +1401,7 @@ def clear(zpool, device=None): salt '*' zpool.clear mypool salt '*' zpool.clear mypool /path/to/dev + ''' ## Configure pool # NOTE: initialize the defaults @@ -1438,7 +1435,6 @@ def reguid(zpool): name of storage pool .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: @@ -1466,13 +1462,13 @@ def reopen(zpool): name of storage pool .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zpool.reopen myzpool + ''' ## reopen all devices fro pool res = __salt__['cmd.run_all']( @@ -1490,24 +1486,24 @@ def upgrade(zpool=None, version=None): ''' Enables all supported features on the given pool - .. warning:: - Once this is done, the pool will no longer be accessible on systems that do not - support feature flags. See zpool-features(5) for details on compatibility with - systems that support feature flags, but do not support all features enabled on the pool. 
- zpool : string optional storage pool, applies to all otherwize version : int version to upgrade to, if unspecified upgrade to the highest possible .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine + + .. warning:: + Once this is done, the pool will no longer be accessible on systems that do not + support feature flags. See zpool-features(5) for details on compatibility with + systems that support feature flags, but do not support all features enabled on the pool. CLI Example: .. code-block:: bash salt '*' zpool.upgrade myzpool + ''' ## Configure pool # NOTE: initialize the defaults @@ -1546,13 +1542,13 @@ def history(zpool=None, internal=False, verbose=False): toggle display of the user name, the hostname, and the zone in which the operation was performed .. versionadded:: 2016.3.0 - .. versionchanged:: Fluorine CLI Example: .. code-block:: bash salt '*' zpool.upgrade myzpool + ''' ret = OrderedDict() diff --git a/salt/states/zfs.py b/salt/states/zfs.py index d03365c3ed..cafe364b28 100644 --- a/salt/states/zfs.py +++ b/salt/states/zfs.py @@ -9,6 +9,8 @@ States for managing zfs datasets .. versionadded:: 2016.3.0 .. versionchanged:: Flourine + Big refactor to remove duplicate code, better type converions and improved + consistancy in output. .. code-block:: yaml @@ -85,7 +87,6 @@ def _absent(name, dataset_type, force=False, recursive=False): recursive : boolean also destroy all the child datasets - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, @@ -142,8 +143,6 @@ def filesystem_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) - .. versionchanged:: Flourine - .. warning:: If a volume with ``name`` exists, this state will succeed without @@ -171,8 +170,6 @@ def volume_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) - .. versionchanged:: Flourine - .. 
warning:: If a filesystem with ``name`` exists, this state will succeed without @@ -200,7 +197,6 @@ def snapshot_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) - .. versionchanged:: Flourine ''' if not __utils__['zfs.is_snapshot'](name): ret = {'name': name, @@ -223,7 +219,6 @@ def bookmark_absent(name, force=False, recursive=False): recursive : boolean also destroy all the child datasets (zfs destroy -r) - .. versionchanged:: Flourine ''' if not __utils__['zfs.is_bookmark'](name): ret = {'name': name, @@ -246,7 +241,6 @@ def hold_absent(name, snapshot, recursive=False): recursive : boolean recursively releases a hold with the given tag on the snapshots of all descendent file systems. - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, @@ -317,7 +311,6 @@ def hold_present(name, snapshot, recursive=False): recursive : boolean recursively add hold with the given tag on the snapshots of all descendent file systems. - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, @@ -400,8 +393,6 @@ def _dataset_present(dataset_type, name, volume_size=None, sparse=False, create_ properties : dict additional zfs properties (-o) - .. versionchanged:: Flourine - .. note:: ``cloned_from`` is only use if the volume does not exist yet, when ``cloned_from`` is set after the volume exists it will be ignored. @@ -573,8 +564,6 @@ def filesystem_present(name, create_parent=False, properties=None, cloned_from=N properties : dict additional zfs properties (-o) - .. versionchanged:: Flourine - .. note:: ``cloned_from`` is only use if the filesystem does not exist yet, when ``cloned_from`` is set after the filesystem exists it will be ignored. @@ -611,8 +600,6 @@ def volume_present(name, volume_size, sparse=False, create_parent=False, propert properties : dict additional zfs properties (-o) - .. versionchanged:: Flourine - .. 
note:: ``cloned_from`` is only use if the volume does not exist yet, when ``cloned_from`` is set after the volume exists it will be ignored. @@ -648,7 +635,6 @@ def bookmark_present(name, snapshot): snapshot : string name of snapshot - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, @@ -711,8 +697,6 @@ def snapshot_present(name, recursive=False, properties=None): properties : dict additional zfs properties (-o) - .. versionchanged:: Flourine - .. note: Properties are only set at creation time @@ -775,7 +759,6 @@ def promoted(name): only one dataset can be the origin, if you promote a clone the original will now point to the promoted dataset - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, @@ -886,7 +869,7 @@ def _schedule_snapshot_prepare(dataset, prefix, snapshots): continue ## NOTE: figure out if we need the current hold on the new snapshot - if len(snapshots[hold]) > 0: + if snapshots[hold]: ## NOTE: extract datetime from snapshot name timestamp = datetime.strptime( snapshots[hold][-1], @@ -942,7 +925,6 @@ def scheduled_snapshot(name, prefix, recursive=True, schedule=None): switched to localtime from gmtime so times now take into account timezones. - .. versionchanged:: Flourine ''' ret = {'name': name, 'changes': {}, diff --git a/salt/states/zpool.py b/salt/states/zpool.py index 5512105d0e..00bc0e7f75 100644 --- a/salt/states/zpool.py +++ b/salt/states/zpool.py @@ -9,6 +9,8 @@ States for managing zpools .. versionadded:: 2016.3.0 .. versionchanged:: Flourine + Big refactor to remove duplicate code, better type converions and improved + consistancy in output. .. 
code-block:: yaml @@ -334,7 +336,7 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf ret['comment'] = '{0} {1}'.format(ret['comment'], prop) if ret['result']: - ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed' + ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed' ## NOTE: import or create the pool (at least try to anyway) else: diff --git a/salt/utils/zfs.py b/salt/utils/zfs.py index 24bf323968..e383366b8a 100644 --- a/salt/utils/zfs.py +++ b/salt/utils/zfs.py @@ -10,6 +10,7 @@ These functions are for dealing with type conversion and basic execution :platform: illumos,freebsd,linux .. versionadded:: Fluorine + ''' # Import python libs @@ -126,14 +127,14 @@ def _property_parse_cmd(cmd, alias=None): prop_data = prop_data.lower().split() # NOTE: skip empty lines - if len(prop_data) == 0: + if not prop_data: continue # NOTE: parse header elif prop_data[0] == 'property': prop_hdr = prop_data continue # NOTE: skip lines after data - elif len(prop_hdr) == 0 or prop_data[1] not in ['no', 'yes']: + elif not prop_hdr or prop_data[1] not in ['no', 'yes']: continue # NOTE: create property dict @@ -580,6 +581,7 @@ def from_auto_dict(values, source='auto'): .. 
note:: The key will be passed as the name + ''' for name, value in values.items(): values[name] = from_auto(name, value, source) @@ -702,7 +704,7 @@ def parse_command_result(res, label=None): error = error.replace('-r', 'recursive=True') ret['error'].append(error) - if len(ret['error']): + if ret['error']: ret['error'] = "\n".join(ret['error']) else: del ret['error'] From dd29778ed5337919b2601a04a5d1504cf9bf7f21 Mon Sep 17 00:00:00 2001 From: Jorge Schrauwen Date: Sat, 17 Feb 2018 17:16:55 +0100 Subject: [PATCH 217/223] Phase 3 - Cleanup tests --- tests/unit/modules/test_zfs.py | 93 ++++++++++++++++---------------- tests/unit/modules/test_zpool.py | 81 ++++++++++++++-------------- 2 files changed, 87 insertions(+), 87 deletions(-) diff --git a/tests/unit/modules/test_zfs.py b/tests/unit/modules/test_zfs.py index 62ba14fe82..898fd66f69 100644 --- a/tests/unit/modules/test_zfs.py +++ b/tests/unit/modules/test_zfs.py @@ -104,7 +104,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool/mydataset'), res) + self.assertEqual(res, zfs.create('myzpool/mydataset')) def test_create_success_with_create_parent(self): ''' @@ -118,7 +118,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset', create_parent=True), res) + self.assertEqual(res, zfs.create('myzpool/mydataset/mysubdataset', create_parent=True)) def test_create_success_with_properties(self): ''' @@ -133,13 +133,14 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): self.assertEqual( + res, zfs.create( 'myzpool/mydataset', properties={ 
'mountpoint': '/export/zfs', 'sharenfs': 'on' } - ), res + ), ) def test_create_error_missing_dataset(self): @@ -157,7 +158,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool'), res) + self.assertEqual(res, zfs.create('myzpool')) def test_create_error_trailing_slash(self): ''' @@ -174,7 +175,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool/'), res) + self.assertEqual(res, zfs.create('myzpool/')) def test_create_error_no_such_pool(self): ''' @@ -191,7 +192,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool/mydataset'), res) + self.assertEqual(res, zfs.create('myzpool/mydataset')) def test_create_error_missing_parent(self): ''' @@ -208,7 +209,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset'), res) + self.assertEqual(res, zfs.create('myzpool/mydataset/mysubdataset')) def test_destroy_success(self): ''' @@ -222,7 +223,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + self.assertEqual(res, zfs.destroy('myzpool/mydataset')) def test_destroy_error_not_exists(self): ''' @@ -239,7 +240,7 @@ class ZfsTestCase(TestCase, 
LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + self.assertEqual(res, zfs.destroy('myzpool/mydataset')) def test_destroy_error_has_children(self): ''' @@ -264,7 +265,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.destroy('myzpool/mydataset'), res) + self.assertEqual(res, zfs.destroy('myzpool/mydataset')) def test_rename_success(self): ''' @@ -278,7 +279,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.rename('myzpool/mydataset', 'myzpool/newdataset'), res) + self.assertEqual(res, zfs.rename('myzpool/mydataset', 'myzpool/newdataset')) def test_rename_error_not_exists(self): ''' @@ -295,7 +296,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.rename('myzpool/mydataset', 'myzpool/newdataset'), res) + self.assertEqual(res, zfs.rename('myzpool/mydataset', 'myzpool/newdataset')) def test_list_success(self): ''' @@ -316,7 +317,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_('myzpool'), res) + self.assertEqual(res, zfs.list_('myzpool')) def test_list_parsable_success(self): ''' @@ -337,7 +338,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, 
{'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_('myzpool', parsable=False), res) + self.assertEqual(res, zfs.list_('myzpool', parsable=False)) def test_list_custom_success(self): ''' @@ -358,7 +359,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_('myzpool', properties='canmount,used,avail,compression'), res) + self.assertEqual(res, zfs.list_('myzpool', properties='canmount,used,avail,compression')) def test_list_custom_parsable_success(self): ''' @@ -379,7 +380,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_('myzpool', properties='canmount,used,avail,compression', parsable=False), res) + self.assertEqual(res, zfs.list_('myzpool', properties='canmount,used,avail,compression', parsable=False)) def test_list_error_no_dataset(self): ''' @@ -393,7 +394,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_('myzpool'), res) + self.assertEqual(res, zfs.list_('myzpool')) def test_list_mount_success(self): ''' @@ -413,7 +414,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.list_mount(), res) + self.assertEqual(res, zfs.list_mount()) def test_mount_success(self): ''' @@ -427,7 +428,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ 
patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.mount('myzpool/mydataset'), res) + self.assertEqual(res, zfs.mount('myzpool/mydataset')) def test_mount_failure(self): ''' @@ -441,7 +442,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.mount('myzpool/mydataset'), res) + self.assertEqual(res, zfs.mount('myzpool/mydataset')) def test_unmount_success(self): ''' @@ -455,7 +456,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.unmount('myzpool/mydataset'), res) + self.assertEqual(res, zfs.unmount('myzpool/mydataset')) def test_unmount_failure(self): ''' @@ -472,7 +473,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.unmount('myzpool/mydataset'), res) + self.assertEqual(res, zfs.unmount('myzpool/mydataset')) def test_inherit_success(self): ''' @@ -483,7 +484,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.inherit('compression', 'myzpool/mydataset'), res) + self.assertEqual(res, zfs.inherit('compression', 'myzpool/mydataset')) def test_inherit_failure(self): ''' @@ -497,7 +498,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.inherit('canmount', 'myzpool/mydataset'), res) + self.assertEqual(res, zfs.inherit('canmount', 
'myzpool/mydataset')) def test_diff(self): ''' @@ -519,7 +520,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.diff('myzpool/mydataset@yesterday', 'myzpool/mydataset'), res) + self.assertEqual(res, zfs.diff('myzpool/mydataset@yesterday', 'myzpool/mydataset')) def test_diff_parsed_time(self): ''' @@ -543,7 +544,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.diff('myzpool/data@yesterday', 'myzpool/data', parsable=False), res) + self.assertEqual(res, zfs.diff('myzpool/data@yesterday', 'myzpool/data', parsable=False)) def test_rollback_success(self): ''' @@ -554,7 +555,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res) + self.assertEqual(res, zfs.rollback('myzpool/mydataset@yesterday')) def test_rollback_failure(self): ''' @@ -580,7 +581,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res) + self.assertEqual(res, zfs.rollback('myzpool/mydataset@yesterday')) def test_clone_success(self): ''' @@ -591,7 +592,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/yesterday'), res) + self.assertEqual(res, 
zfs.clone('myzpool/mydataset@yesterday', 'myzpool/yesterday')) def test_clone_failure(self): ''' @@ -605,7 +606,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/archive/yesterday'), res) + self.assertEqual(res, zfs.clone('myzpool/mydataset@yesterday', 'myzpool/archive/yesterday')) def test_promote_success(self): ''' @@ -616,7 +617,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.promote('myzpool/yesterday'), res) + self.assertEqual(res, zfs.promote('myzpool/yesterday')) def test_promote_failure(self): ''' @@ -630,7 +631,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.promote('myzpool/yesterday'), res) + self.assertEqual(res, zfs.promote('myzpool/yesterday')) def test_bookmark_success(self): ''' @@ -642,7 +643,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.bookmark('myzpool/mydataset@yesterday', 'myzpool/mydataset#important'), res) + self.assertEqual(res, zfs.bookmark('myzpool/mydataset@yesterday', 'myzpool/mydataset#important')) def test_holds_success(self): ''' @@ -656,7 +657,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res) + 
self.assertEqual(res, zfs.holds('myzpool/mydataset@baseline')) def test_holds_failure(self): ''' @@ -669,7 +670,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.holds('myzpool/mydataset@baseline')) def test_hold_success(self): ''' @@ -680,7 +681,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res) + self.assertEqual(res, zfs.hold('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0')) def test_hold_failure(self): ''' @@ -694,7 +695,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.hold('important', 'myzpool/mydataset@baseline')) def test_release_success(self): ''' @@ -705,7 +706,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res) + self.assertEqual(res, zfs.release('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0')) def test_release_failure(self): ''' @@ -719,7 +720,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ 
patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.release('important', 'myzpool/mydataset@baseline')) def test_snapshot_success(self): ''' @@ -730,7 +731,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline')) def test_snapshot_failure(self): ''' @@ -744,7 +745,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline')) def test_snapshot_failure2(self): ''' @@ -758,7 +759,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res) + self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline')) def test_set_success(self): ''' @@ -769,7 +770,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.set('myzpool/mydataset', compression='lz4'), res) + self.assertEqual(res, zfs.set('myzpool/mydataset', compression='lz4')) def test_set_failure(self): ''' @@ -783,7 +784,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - 
self.assertEqual(zfs.set('myzpool/mydataset', canmount='lz4'), res) + self.assertEqual(res, zfs.set('myzpool/mydataset', canmount='lz4')) def test_get_success(self): ''' @@ -800,7 +801,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.get('myzpool', properties='used', fields='value'), res) + self.assertEqual(res, zfs.get('myzpool', properties='used', fields='value')) def test_get_parsable_success(self): ''' @@ -817,4 +818,4 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): mock_cmd = MagicMock(return_value=ret) with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zfs.__utils__, utils_patch): - self.assertEqual(zfs.get('myzpool', properties='used', fields='value', parsable=False), res) + self.assertEqual(res, zfs.get('myzpool', properties='used', fields='value', parsable=False)) diff --git a/tests/unit/modules/test_zpool.py b/tests/unit/modules/test_zpool.py index cfb16d4f6b..47d4746438 100644 --- a/tests/unit/modules/test_zpool.py +++ b/tests/unit/modules/test_zpool.py @@ -144,7 +144,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zpool.__utils__, utils_patch): ret = zpool.iostat('mypool', parsable=False) - self.assertEqual(ret['mypool']['capacity-alloc'], '46.7G') + self.assertEqual('46.7G', ret['mypool']['capacity-alloc']) def test_iostat_parsable(self): ''' @@ -172,7 +172,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \ patch.dict(zpool.__utils__, utils_patch): ret = zpool.iostat('mypool', parsable=True) - self.assertEqual(ret['mypool']['capacity-alloc'], 50143743180) + self.assertEqual(50143743180, ret['mypool']['capacity-alloc']) def test_list(self): ''' @@ -194,7 +194,7 @@ class ZpoolTestCase(TestCase, 
LoaderModuleMockMixin): ('frag', '0%'), ('health', 'ONLINE'), ]))]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_list_parsable(self): ''' @@ -216,7 +216,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('frag', '0%'), ('health', 'ONLINE'), ]))]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_get(self): ''' @@ -231,7 +231,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.get('mypool', 'size', parsable=False) res = OrderedDict(OrderedDict([('size', '1.81T')])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_get_parsable(self): ''' @@ -246,7 +246,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.get('mypool', 'size', parsable=True) res = OrderedDict(OrderedDict([('size', 1992864825344)])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_get_whitespace(self): ''' @@ -261,7 +261,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.get('mypool', 'comment') res = OrderedDict(OrderedDict([('comment', "my testing pool")])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_scrub_start(self): ''' @@ -279,7 +279,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.scrub('mypool') res = OrderedDict(OrderedDict([('scrubbing', True)])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_scrub_pause(self): ''' @@ -297,7 +297,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.scrub('mypool', pause=True) res = OrderedDict(OrderedDict([('scrubbing', False)])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_scrub_stop(self): ''' @@ -315,7 +315,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): 
ret = zpool.scrub('mypool', stop=True) res = OrderedDict(OrderedDict([('scrubbing', False)])) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_split_success(self): ''' @@ -331,7 +331,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.split('datapool', 'backuppool') res = OrderedDict([('split', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_split_exist_new(self): ''' @@ -347,7 +347,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.split('datapool', 'backuppool') res = OrderedDict([('split', False), ('error', 'Unable to split datapool: pool already exists')]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_split_missing_pool(self): ''' @@ -363,7 +363,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.split('datapool', 'backuppool') res = OrderedDict([('split', False), ('error', "cannot open 'datapool': no such pool")]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_split_not_mirror(self): ''' @@ -379,7 +379,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.split('datapool', 'backuppool') res = OrderedDict([('split', False), ('error', 'Unable to split datapool: Source pool must be composed only of mirrors')]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_labelclear_success(self): ''' @@ -395,7 +395,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False) res = OrderedDict([('labelcleared', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_labelclear_nodevice(self): ''' @@ -414,7 +414,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('labelcleared', False), ('error', 'failed to open 
/dev/rdsk/c0t0d0: No such file or directory'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_labelclear_cleared(self): ''' @@ -433,7 +433,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('labelcleared', False), ('error', 'failed to read label from /dev/rdsk/c0t0d0'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_labelclear_exported(self): ''' @@ -454,7 +454,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('labelcleared', False), ('error', 'use \'force=True\' to override the following error:\n/dev/rdsk/c0t0d0 is a member of exported pool "mypool"'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) @skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable') def test_create_file_vdev_success(self): @@ -473,7 +473,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): res = OrderedDict([ ('/vdisks/disk0', 'created'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) @skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable') def test_create_file_vdev_nospace(self): @@ -495,7 +495,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('/vdisks/disk0', ' initialized 10424320 of 67108864 bytes: No space left on device'), ])), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_export_success(self): ''' @@ -511,7 +511,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.export('mypool') res = OrderedDict([('exported', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_export_nopool(self): ''' @@ -527,7 +527,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.export('mypool') res = OrderedDict([('exported', False), ('error', "cannot open 'mypool': no such pool")]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_import_success(self): ''' @@ -543,7 +543,7 @@ 
class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.import_('mypool') res = OrderedDict([('imported', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_import_duplicate(self): ''' @@ -565,7 +565,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('imported', False), ('error', "cannot import 'mypool': a pool with that name already exists\nuse the form 'zpool import ' to give it a new name"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_import_nopool(self): ''' @@ -584,7 +584,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('imported', False), ('error', "cannot import 'mypool': no such pool available"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_online_success(self): ''' @@ -600,7 +600,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.online('mypool', '/dev/rdsk/c0t0d0') res = OrderedDict([('onlined', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_online_nodevice(self): ''' @@ -619,7 +619,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('onlined', False), ('error', 'cannot online /dev/rdsk/c0t0d1: no such device in pool'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_offline_success(self): ''' @@ -635,7 +635,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.offline('mypool', '/dev/rdsk/c0t0d0') res = OrderedDict([('offlined', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_offline_nodevice(self): ''' @@ -654,7 +654,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('offlined', False), ('error', 'cannot offline /dev/rdsk/c0t0d1: no such device in pool'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_offline_noreplica(self): ''' @@ -673,7 +673,7 @@ class 
ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('offlined', False), ('error', 'cannot offline /dev/rdsk/c0t0d1: no valid replicas'), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_reguid_success(self): ''' @@ -689,7 +689,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.reguid('mypool') res = OrderedDict([('reguided', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_reguid_nopool(self): ''' @@ -708,7 +708,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('reguided', False), ('error', "cannot open 'mypool': no such pool"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_reopen_success(self): ''' @@ -724,7 +724,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.reopen('mypool') res = OrderedDict([('reopened', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_reopen_nopool(self): ''' @@ -743,7 +743,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('reopened', False), ('error', "cannot open 'mypool': no such pool"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_upgrade_success(self): ''' @@ -759,7 +759,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.upgrade('mypool') res = OrderedDict([('upgraded', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_upgrade_nopool(self): ''' @@ -778,7 +778,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('upgraded', False), ('error', "cannot open 'mypool': no such pool"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_history_success(self): ''' @@ -803,7 +803,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('2018-01-19.16:01:55', 'zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1'), ])), ]) - self.assertEqual(res, ret) + 
self.assertEqual(ret, res) def test_history_nopool(self): ''' @@ -821,7 +821,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): res = OrderedDict([ ('error', "cannot open 'mypool': no such pool"), ]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_clear_success(self): ''' @@ -837,7 +837,7 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): patch.dict(zpool.__utils__, utils_patch): ret = zpool.clear('mypool') res = OrderedDict([('cleared', True)]) - self.assertEqual(res, ret) + self.assertEqual(ret, res) def test_clear_nopool(self): ''' @@ -874,5 +874,4 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin): ('cleared', False), ('error', "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"), ]) - self.assertEqual(res, ret) - self.assertEqual(res, ret) + self.assertEqual(ret, res) From b0403be0fa919065222575f78fa7ac208d444da7 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Mon, 12 Feb 2018 18:14:58 -0500 Subject: [PATCH 218/223] Add module for Pure Storage FlashBlade array --- salt/modules/purefb.py | 508 ++++++++++++++++++++++++++++++ tests/unit/modules/test_purefb.py | 78 +++++ 2 files changed, 586 insertions(+) create mode 100644 salt/modules/purefb.py create mode 100644 tests/unit/modules/test_purefb.py diff --git a/salt/modules/purefb.py b/salt/modules/purefb.py new file mode 100644 index 0000000000..ed9d08f67c --- /dev/null +++ b/salt/modules/purefb.py @@ -0,0 +1,508 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2018 Pure Storage Inc +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +''' + +Management of Pure Storage FlashBlade + +Installation Prerequisites +-------------------------- +- You will need the ``purity_fb`` python package in your python installation + path that is running salt. + + .. code-block:: bash + + pip install purity_fb + +- Configure Pure Storage FlashBlade authentication. Use one of the following + three methods. + + 1) From the minion config + + .. code-block:: yaml + + pure_tags: + fb: + san_ip: management vip or hostname for the FlashBlade + api_token: A valid api token for the FlashBlade being managed + + 2) From environment (PUREFB_IP and PUREFB_API) + 3) From the pillar (PUREFB_IP and PUREFB_API) + +:maintainer: Simon Dodsley (simon@purestorage.com) +:maturity: new +:requires: purestorage +:platform: all + +.. versionadded:: Flourine + +''' + +# Import Python libs +from __future__ import absolute_import, print_function, unicode_literals +import os +from datetime import datetime + +# Import Salt libs +from salt.ext import six +from salt.exceptions import CommandExecutionError + +# Import 3rd party modules +try: + from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix + from purity_fb import rest, NfsRule, ProtocolRule + HAS_PURITY_FB = True +except ImportError: + HAS_PURITY_FB = False + +__docformat__ = 'restructuredtext en' + +__virtualname__ = 'purefb' + + +def __virtual__(): + ''' + Determine whether or not to load this module + ''' + if HAS_PURITY_FB: + return __virtualname__ + return (False, 'purefb execution module not loaded: purity_fb python library not available.') + + +def _get_blade(): + ''' + Get Pure Storage FlasBlade configuration + + 1) From the minion config + pure_tags: + fb: + san_ip: management vip or hostname for the FlashBlade + api_token: A valid api token for the FlashBlade being managed + 2) From environment (PUREFB_IP and PUREFB_API) + 3) From the pillar (PUREFB_IP 
and PUREFB_API) + + ''' + + try: + blade_name = __opts__['pure_tags']['fb'].get('san_ip') + api_token = __opts__['pure_tags']['fb'].get('api_token') + if blade_name and api: + blade = PurityFb(blade_name) + blade.disable_verify_ssl() + except (KeyError, NameError, TypeError): + try: + blade_name = os.environ.get('PUREFB_IP') + api_token = os.environ.get('PUREFB_API') + if blade_name: + blade = PurityFb(blade_name) + blade.disable_verify_ssl() + except (ValueError, KeyError, NameError): + try: + api_token = __pillar__['PUREFB_API'] + blade = PurityFb(__pillar__['PUREFB_IP']) + blade.disable_verify_ssl() + except (KeyError, NameError): + raise CommandExecutionError('No Pure Storage FlashBlade credentials found.') + try: + blade.login(api_token) + except Exception: + raise CommandExecutionError('Pure Storage FlashBlade authentication failed.') + return blade + + +def _get_fs(name, blade): + ''' + Private function to + check for existance of a filesystem + ''' + _fs = [] + _fs.append(name) + try: + res = blade.file_systems.list_file_systems(names=_fs) + return res.items[0] + except rest.ApiException: + return None + + +def _get_snapshot(name, suffix, blade): + ''' + Return name of Snapshot + or None + ''' + try: + filt = 'source=\'{}\' and suffix=\'{}\''.format(name, suffix) + res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt) + return res.items[0] + except rest.ApiException: + return None + + +def _get_deleted_fs(name, blade): + ''' + Private function to check + if a file systeem has already been deleted + ''' + try: + _fs = _get_fs(name, blade) + if _fs and _fs.destroyed: + return _fs + except rest.ApiException: + return None + + +def snap_create(name, suffix=None): + ''' + + Create a filesystem snapshot on a Pure Storage FlashBlade. + + Will return False if filesystem selected to snap does not exist. + + .. versionadded:: Flourine + + name : string + name of filesystem to snapshot + suffix : string + if specificed forces snapshot name suffix. 
If not specified defaults to timestamp. + + CLI Example: + + .. code-block:: bash + + salt '*' purefb.snap_create foo + salt '*' purefb.snap_create foo suffix=bar + + ''' + blade = _get_blade() + if suffix is None: + suffix = ('snap-' + + six.text_type((datetime.utcnow() - + datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())) + suffix = suffix.replace('.', '') + if _get_fs(name, blade) is not None: + try: + source = [] + source.append(name) + blade.file_system_snapshots.create_file_system_snapshots(sources=source, + suffix=SnapshotSuffix(suffix)) + return True + except rest.ApiException: + return False + else: + return False + + +def snap_delete(name, suffix=None, eradicate=False): + ''' + + Delete a filesystem snapshot on a Pure Storage FlashBlade. + + Will return False if selected snapshot does not exist. + + .. versionadded:: Flourine + + name : string + name of filesystem + suffix : string + name of snapshot + eradicate : boolean + Eradicate snapshot after deletion if True. Default is False + + CLI Example: + + .. code-block:: bash + + salt '*' purefb.snap_delete foo suffix=snap eradicate=True + + ''' + blade = _get_blade() + if _get_snapshot(name, suffix, blade) is not None: + try: + snapname = name + '.' + suffix + new_attr = FileSystemSnapshot(destroyed=True) + blade.file_system_snapshots.update_file_system_snapshots(name=snapname, + attributes=new_attr) + except rest.ApiException: + return False + if eradicate is True: + try: + blade.file_system_snapshots.delete_file_system_snapshots(name=snapname) + return True + except rest.ApiException: + return False + else: + return True + else: + return False + + +def snap_eradicate(name, suffix=None): + ''' + + Eradicate a deleted filesystem snapshot on a Pure Storage FlashBlade. + + Will return False if snapshot is not in a deleted state. + + .. versionadded:: Flourine + + name : string + name of filesystem + suffix : string + name of snapshot + + CLI Example: + + .. 
code-block:: bash + + salt '*' purefb.snap_eradicate foo suffix=snap + + ''' + blade = _get_blade() + if _get_snapshot(name, suffix, blade) is not None: + snapname = name + '.' + suffix + try: + blade.file_system_snapshots.delete_file_system_snapshots(name=snapname) + return True + except rest.ApiException: + return False + else: + return False + + +def fs_create(name, size=None, proto='NFS', nfs_rules='*(rw,no_root_squash)', snapshot=False): + ''' + + Create a filesystem on a Pure Storage FlashBlade. + + Will return False if filesystem already exists. + + .. versionadded:: Flourine + + name : string + name of filesystem (truncated to 63 characters) + proto : string + (Optional) Sharing protocol (NFS, CIFS or HTTP). If not specified default is NFS + snapshot: boolean + (Optional) Are snapshots enabled on the filesystem. Default is False + nfs_rules : string + (Optional) export rules for NFS. If not specified default is *(rw,no_root_squash) + Refer to Pure Storage documentation for formatting rules. + size : string + if specified capacity of filesystem. If not specified default to 32G. + Refer to Pure Storage documentation for formatting rules. + + CLI Example: + + .. 
code-block:: bash + + salt '*' purefb.fs_create foo proto=CIFS + salt '*' purefb.fs_create foo size=10T + + ''' + if len(name) > 63: + name = name[0:63] + blade = _get_blade() + print(proto) + if _get_fs(name, blade) is None: + if size is None: + size = __utils__['stringutils.human_to_bytes']('32G') + else: + size = __utils__['stringutils.human_to_bytes'](size) + if proto.lower() == 'nfs': + fs_obj = FileSystem(name=name, + provisioned=size, + fast_remove_directory_enabled=True, + snapshot_directory_enabled=snapshot, + nfs=NfsRule(enabled=True, rules=nfs_rules), + ) + elif proto.lower() == 'cifs': + fs_obj = FileSystem(name=name, + provisioned=size, + fast_remove_directory_enabled=True, + snapshot_directory_enabled=snapshot, + smb=ProtocolRule(enabled=True), + ) + elif proto.lower() == 'http': + fs_obj = FileSystem(name=name, + provisioned=size, + fast_remove_directory_enabled=True, + snapshot_directory_enabled=snapshot, + http=ProtocolRule(enabled=True), + ) + else: + return False + try: + blade.file_systems.create_file_systems(fs_obj) + return True + except rest.ApiException: + return False + else: + return False + + +def fs_delete(name, eradicate=False): + ''' + + Delete a share on a Pure Storage FlashBlade. + + Will return False if filesystem doesn't exist or is already in a deleted state. + + .. versionadded:: Flourine + + name : string + name of filesystem + eradicate : boolean + (Optional) Eradicate filesystem after deletion if True. Default is False + + CLI Example: + + .. 
code-block:: bash + + salt '*' purefb.fs_delete foo eradicate=True + + ''' + blade = _get_blade() + if _get_fs(name, blade) is not None: + try: + blade.file_systems.update_file_systems(name=name, + attributes=FileSystem(nfs=NfsRule(enabled=False), + smb=ProtocolRule(enabled=False), + http=ProtocolRule(enabled=False), + destroyed=True) + ) + except rest.ApiException: + return False + if eradicate is True: + try: + blade.file_systems.delete_file_systems(name) + return True + except rest.ApiException: + return False + else: + return True + else: + return False + + +def fs_eradicate(name): + ''' + + Eradicate a deleted filesystem on a Pure Storage FlashBlade. + + Will return False is filesystem is not in a deleted state. + + .. versionadded:: Flourine + + name : string + name of filesystem + + CLI Example: + + .. code-block:: bash + + salt '*' purefb.fs_eradicate foo + + ''' + blade = _get_blade() + if _get_deleted_fs(name, blade) is not None: + try: + blade.file_systems.delete_file_systems(name) + return True + except rest.ApiException: + return False + else: + return False + + +def fs_extend(name, size): + ''' + + Resize an existing filesystem on a Pure Storage FlashBlade. + + Will return False if new size is less than or equal to existing size. + + .. versionadded:: Flourine + + name : string + name of filesystem + size : string + New capacity of filesystem. + Refer to Pure Storage documentation for formatting rules. + + CLI Example: + + .. 
code-block:: bash + + salt '*' purefb.fs_extend foo 10T + + ''' + attr = {} + blade = _get_blade() + _fs = _get_fs(name, blade) + if _fs is not None: + if __utils__['stringutils.human_to_bytes'](size) > _fs.provisioned: + try: + attr['provisioned'] = __utils__['stringutils.human_to_bytes'](size) + n_attr = FileSystem(**attr) + blade.file_systems.update_file_systems(name=name, attributes=n_attr) + return True + except rest.ApiException: + return False + else: + return False + else: + return False + + +def fs_update(name, rules, snapshot=False): + ''' + + Update filesystem on a Pure Storage FlashBlade. + + Allows for change of NFS export rules and enabling/disabled + of snapshotting capability. + + .. versionadded:: Flourine + + name : string + name of filesystem + rules : string + NFS export rules for filesystem + Refer to Pure Storage documentation for formatting rules. + snapshot: boolean + (Optional) Enable/Disable snapshots on the filesystem. Default is False + + CLI Example: + + .. code-block:: bash + + salt '*' purefb.fs_nfs_update foo rules='10.234.112.23(ro), 10.234.112.24(rw)' snapshot=True + + ''' + blade = _get_blade() + attr = {} + _fs = _get_fs(name, blade) + if _fs is not None: + try: + if _fs.nfs.enabled: + attr['nfs'] = NfsRule(rules=rules) + attr['snapshot_directory_enabled'] = snapshot + n_attr = FileSystem(**attr) + blade.file_systems.update_file_systems(name=name, attributes=n_attr) + return True + except rest.ApiException: + return False + else: + return False diff --git a/tests/unit/modules/test_purefb.py b/tests/unit/modules/test_purefb.py new file mode 100644 index 0000000000..e3d2a4e958 --- /dev/null +++ b/tests/unit/modules/test_purefb.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Simon Dodsley ` +''' + +# Import Python Libs +from __future__ import absolute_import, print_function, unicode_literals + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import 
TestCase, skipIf +from tests.support.mock import ( + patch, + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt Libs +import salt.modules.purefb as purefb + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class PureFBTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.purefb + ''' + def setup_loader_modules(self): + return {purefb: {}} + + def test_fs_create(self): + ''' + Test for creation of a filesystem + ''' + with patch.object(purefb, 'fs_create', return_value=True): + self.assertEqual(purefb.fs_create('test'), True) + + def test_fs_delete(self): + ''' + Test for deletion of a filesystem + ''' + with patch.object(purefb, 'fs_delete', return_value=True): + self.assertEqual(purefb.fs_delete('test'), True) + + def test_fs_eradicate(self): + ''' + Test for eradication of a filesystem + ''' + with patch.object(purefb, 'fs_eradicate', return_value=True): + self.assertEqual(purefb.fs_eradicate('test'), True) + + def test_fs_extend(self): + ''' + Test for size extention of a filesystem + ''' + with patch.object(purefb, 'fs_extend', return_value=True): + self.assertEqual(purefb.fs_extend('test', '33G'), True) + + def test_snap_create(self): + ''' + Test for creation of a filesystem snapshot + ''' + with patch.object(purefb, 'snap_create', return_value=True): + self.assertEqual(purefb.snap_create('test', suffix='suffix'), True) + + def test_snap_delete(self): + ''' + Test for deletion of a filesystem snapshot + ''' + with patch.object(purefb, 'snap_delete', return_value=True): + self.assertEqual(purefb.snap_delete('test', suffix='suffix'), True) + + def test_snap_eradicate(self): + ''' + Test for eradication of a deleted filesystem snapshot + ''' + with patch.object(purefb, 'snap_eradicate', return_value=True): + self.assertEqual(purefb.snap_eradicate('test', + suffix='suffix'), True) From 8c7b3e530c3e2ae285dcabdbf180ecb5231613e1 Mon Sep 17 00:00:00 2001 From: Jorge Schrauwen Date: Sat, 24 Feb 2018 13:26:14 +0100 Subject: [PATCH 219/223] Phase 3 - Minor 
cleanup based on feedback --- salt/grains/zfs.py | 8 ++++---- salt/modules/zfs.py | 41 ++++++++++++++++++++++++++++------------- salt/modules/zpool.py | 8 ++++---- salt/states/zfs.py | 2 +- 4 files changed, 37 insertions(+), 22 deletions(-) diff --git a/salt/grains/zfs.py b/salt/grains/zfs.py index 035bacdab0..85c4e9ec0f 100644 --- a/salt/grains/zfs.py +++ b/salt/grains/zfs.py @@ -16,7 +16,7 @@ from __future__ import absolute_import, print_function, unicode_literals import logging # Import salt libs -import salt.utils.dictupdate as dictupdate +import salt.utils.dictupdate import salt.utils.path import salt.utils.platform @@ -33,7 +33,7 @@ __utils__ = { 'zfs.is_supported': salt.utils.zfs.is_supported, 'zfs.has_feature_flags': salt.utils.zfs.has_feature_flags, 'zfs.zpool_command': salt.utils.zfs.zpool_command, - 'zfs.to_auto': salt.utils.zfs.to_auto, + 'zfs.to_size': salt.utils.zfs.to_size, } log = logging.getLogger(__name__) @@ -64,7 +64,7 @@ def _zfs_pool_data(): if 'zpool' not in grains: grains['zpool'] = {} zpool = zpool.split() - grains['zpool'][zpool[0]] = __utils__['zfs.to_auto'](zpool[1], True) + grains['zpool'][zpool[0]] = __utils__['zfs.to_size'](zpool[1], True) # return grain data return grains @@ -78,7 +78,7 @@ def zfs(): grains['zfs_support'] = __utils__['zfs.is_supported']() grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']() if grains['zfs_support']: - grains = dictupdate.update(grains, _zfs_pool_data(), merge_lists=True) + grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True) return grains diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index 8b8e5ba52c..4e5f7f38f6 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -21,6 +21,7 @@ import logging # Import Salt libs import salt.utils.args import salt.utils.path +import salt.utils.versions import salt.modules.cmdmod from salt.utils.odict import OrderedDict from salt.ext.six.moves import zip @@ -291,7 +292,7 @@ def list_(name=None, **kwargs): 
sort order (default = ascending) parsable : boolean display numbers in parsable (exact) values - .. versionadded:: Oxygen + .. versionadded:: 2018.3.0 .. versionadded:: 2015.5.0 @@ -313,6 +314,8 @@ def list_(name=None, **kwargs): properties = properties.split(',') # NOTE: name should be first property + # we loop here because there 'name' can be in the list + # multiple times. while 'name' in properties: properties.remove('name') properties.insert(0, 'name') @@ -329,11 +332,12 @@ def list_(name=None, **kwargs): opts['-d'] = kwargs.get('depth') if kwargs.get('type', False): opts['-t'] = kwargs.get('type') - if kwargs.get('sort', False) and kwargs.get('sort') in properties: + kwargs_sort = kwargs.get('sort', False) + if kwargs_sort and kwargs_sort in properties: if kwargs.get('order', 'ascending').startswith('a'): - opts['-s'] = kwargs.get('sort') + opts['-s'] = kwargs_sort else: - opts['-S'] = kwargs.get('sort') + opts['-S'] = kwargs_sort if isinstance(properties, list): # NOTE: There can be only one -o and it takes a comma-seperated list opts['-o'] = ','.join(properties) @@ -440,9 +444,16 @@ def mount(name=None, **kwargs): if kwargs.get('options', False): opts['-o'] = kwargs.get('options') if name in [None, '-a']: - # NOTE: still accept '-a' as name for backwards compatibility - # two versions after Flourine this should just simplify - # this to just set '-a' if name is not set. + # NOTE: the new way to mount all filesystems is to have name + # set to ```None```. We still accept the old '-a' until + # Sodium. After Sodium we can update the if statement + # to ```if not name:``` + if name == '-a': + salt.utils.versions.warn_until( + 'Sodium', + 'Passing \'-a\' as name is deprecated as of Salt Flourine. This ' + 'warning will be removed in Salt Sodium. Please pass name as ' + '\'None\' instead to mount all filesystems.') flags.append('-a') name = None @@ -923,7 +934,7 @@ def hold(tag, *snapshot, **kwargs): .. 
warning:: - As of Flourine the tag parameter no longer accepts a comma-seprated value. + As of Flourine the tag parameter no longer accepts a comma-separated value. It's is now possible to create a tag that contains a comma, this was impossible before. CLI Example: @@ -935,9 +946,11 @@ def hold(tag, *snapshot, **kwargs): ''' ## warn about tag change - # NOTE: remove me 2 versions after Flourine if ',' in tag: - log.warning('zfs.hold - on Flourine and later a comma in a tag will no longer create multiple tags!') + salt.utils.versions.warn_until( + 'Sodium', + 'A comma-separated tag is no support as of Salt Flourine. ' + 'This warning will be removed in Salt Sodium.') ## Configure command # NOTE: initialize the defaults @@ -1001,9 +1014,11 @@ def release(tag, *snapshot, **kwargs): ''' ## warn about tag change - # NOTE: remove me 2 versions after Flourine if ',' in tag: - log.warning('zfs.release - on Flourine and later a comma in a tag will no longer create multiple tags!') + salt.utils.versions.warn_until( + 'Sodium', + 'A comma-separated tag is no support as of Salt Flourine. ' + 'This warning will be removed in Salt Sodium.') ## Configure command # NOTE: initialize the defaults @@ -1162,7 +1177,7 @@ def get(*dataset, **kwargs): local, default, inherited, temporary, and none. The default value is all sources. parsable : boolean display numbers in parsable (exact) values (default = True) - .. versionadded:: Oxygen + .. versionadded:: 2018.3.0 .. note:: diff --git a/salt/modules/zpool.py b/salt/modules/zpool.py index d056b442b8..fa616f9f71 100644 --- a/salt/modules/zpool.py +++ b/salt/modules/zpool.py @@ -617,7 +617,7 @@ def scrub(zpool, stop=False, pause=False): if true, cancel ongoing scrub pause : boolean if true, pause ongoing scrub - .. versionadded:: Oxygen + .. versionadded:: 2018.3.0 .. 
note:: @@ -680,7 +680,7 @@ def create(zpool, *vdevs, **kwargs): filesystem_properties : dict additional filesystem properties createboot : boolean - ..versionadded:: Oxygen + ..versionadded:: 2018.3.0 create a boot partition .. versionadded:: 2015.5.0 @@ -925,7 +925,7 @@ def split(zpool, newzpool, **kwargs): properties : dict additional pool properties for newzpool - .. versionadded:: Oxygen + .. versionadded:: 2018.3.0 .. note:: @@ -1358,7 +1358,7 @@ def labelclear(device, force=False): force : boolean treat exported or foreign devices as inactive - .. versionadded:: Oxygen + .. versionadded:: 2018.3.0 CLI Example: diff --git a/salt/states/zfs.py b/salt/states/zfs.py index cafe364b28..da5954d249 100644 --- a/salt/states/zfs.py +++ b/salt/states/zfs.py @@ -921,7 +921,7 @@ def scheduled_snapshot(name, prefix, recursive=True, schedule=None): a schedule must be setup to automatically run the state. this means that if you run the state daily the hourly snapshot will only be made once per day! - .. versionchanged:: Oxygen + .. versionchanged:: 2018.3.0 switched to localtime from gmtime so times now take into account timezones. From 0fa4faafef584bb562b27eb53cbda3342e4908d5 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Mon, 26 Feb 2018 16:10:39 +0300 Subject: [PATCH 220/223] Format string before passing to log_callback. 
--- salt/modules/cmdmod.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index d50d266424..6ae142843a 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -456,7 +456,8 @@ def _run(cmd, env_cmd = ('su', runas, '-c', sys.executable) else: env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable) - log.debug(log_callback('env command: %s', env_cmd)) + msg = 'env command: {0}'.format(env_cmd) + log.debug(log_callback(msg)) env_bytes = salt.utils.stringutils.to_bytes(subprocess.Popen( env_cmd, stdin=subprocess.PIPE, From b15ad956817e1d67c962682d2fb984486649bc1b Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Mon, 26 Feb 2018 17:05:44 +0300 Subject: [PATCH 221/223] Fixed bad clustershell merge. --- salt/roster/clustershell.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/roster/clustershell.py b/salt/roster/clustershell.py index d192208d3a..77d5b118a8 100644 --- a/salt/roster/clustershell.py +++ b/salt/roster/clustershell.py @@ -45,7 +45,7 @@ def targets(tgt, tgt_type='glob', **kwargs): for host, addr in host_addrs.items(): addr = six.text_type(addr) - ret[addr] = copy.deepcopy(__opts__.get('roster_defaults', {})) + ret[host] = copy.deepcopy(__opts__.get('roster_defaults', {})) for port in ports: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) From 21673ea4eb1a62b4c100b3dfe20f3dedf4e6ca79 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Mon, 26 Feb 2018 21:44:18 +0300 Subject: [PATCH 222/223] Postgress module unit test fix. 
--- tests/unit/modules/test_postgres.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/modules/test_postgres.py b/tests/unit/modules/test_postgres.py index d4fff86223..7094767d0f 100644 --- a/tests/unit/modules/test_postgres.py +++ b/tests/unit/modules/test_postgres.py @@ -371,6 +371,7 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin): 'replication': None, 'password': 'test_password', 'connections': '-1', + 'groups': '', 'expiry time': '', 'defaults variables': None }])): @@ -402,6 +403,7 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin): 'can login': 't', 'replication': None, 'connections': '-1', + 'groups': '', 'expiry time': '2017-08-16 08:57:46', 'defaults variables': None }])): From 379e33cb3cad0be03b38553801770b167b9615c5 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 27 Feb 2018 10:14:35 -0500 Subject: [PATCH 223/223] Lint fix: remove unused import --- tests/unit/utils/test_parsers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index b1a68ab318..eb88197e84 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -6,7 +6,6 @@ # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os -import logging # Import Salt Testing Libs from tests.support.unit import skipIf, TestCase