From f735f0b96d60cb2371ea07e5a2bc93afca8bf918 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Thu, 12 Mar 2015 09:38:09 -0600 Subject: [PATCH 01/23] add eauth pam group tests Adds test for #17380. --- tests/integration/files/conf/master | 4 ++ tests/integration/shell/auth.py | 89 ++++++++++++++++++++--------- 2 files changed, 66 insertions(+), 27 deletions(-) diff --git a/tests/integration/files/conf/master b/tests/integration/files/conf/master index 74a3633baa..48e71697d6 100644 --- a/tests/integration/files/conf/master +++ b/tests/integration/files/conf/master @@ -36,6 +36,10 @@ external_auth: - '@wheel' - '@runner' - test.* + saltops%: + - '@wheel' + - '@runner' + - 'test.*' auto: saltdev_auto: - '@wheel' diff --git a/tests/integration/shell/auth.py b/tests/integration/shell/auth.py index 743573a740..0ee2af33a1 100644 --- a/tests/integration/shell/auth.py +++ b/tests/integration/shell/auth.py @@ -7,18 +7,37 @@ # Import python libs import os import pwd +import grp import random # Import Salt Testing libs +from salttesting import skipIf from salttesting.helpers import ( ensure_in_syspath, destructiveTest) ensure_in_syspath('../../') # Import salt libs +from salt.utils.pycrypto import gen_hash import integration -from salttesting import skipIf + +def gen_password(): + ''' + generate a password and hash it + ''' + alphabet = ('abcdefghijklmnopqrstuvwxyz' + '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ') + password = '' + # generate password + for _ in range(20): + next_index = random.randrange(len(alphabet)) + password += alphabet[next_index] + + # hash the password + hashed_pwd = gen_hash('salt', password, 'sha512') + + return (password, hashed_pwd) class AuthTest(integration.ShellCase): @@ -28,38 +47,38 @@ class AuthTest(integration.ShellCase): _call_binary_ = 'salt' - is_root = os.geteuid() != 0 + is_not_root = os.geteuid() != 0 + + userA = 'saltdev' + userB = 'saltadm' + group = 'saltops' @destructiveTest - @skipIf(is_root, 'You must be logged in as root to run this 
test') + @skipIf(is_not_root, 'You must be logged in as root to run this test') def setUp(self): # This is a little wasteful but shouldn't be a problem + for user in (self.userA, self.userB): + try: + pwd.getpwnam(user) + except KeyError: + self.run_call('user.add {0} createhome=False'.format(user)) + + # only put userB into the group for the group auth test try: - pwd.getpwnam('saltdev') + grp.getgrnam(self.group) except KeyError: - self.run_call('user.add saltdev createhome=False') + self.run_call('group.add {0}'.format(self.group)) + self.run_call('user.chgroups {0} {1} True'.format(self.userB, self.group)) def test_pam_auth_valid_user(self): ''' test pam auth mechanism is working with a valid user ''' - alphabet = ('abcdefghijklmnopqrstuvwxyz' - '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ') - self.password = '' - # generate password - for _ in range(20): - next_index = random.randrange(len(alphabet)) - self.password = self.password + alphabet[next_index] - - # hash the password - from salt.utils.pycrypto import gen_hash - - pwd = gen_hash('salt', self.password, 'sha512') - self.run_call("shadow.set_password saltdev '{0}'".format(pwd)) - cmd = ('-a pam "*"' - ' test.ping --username {0}' - ' --password {1}'.format('saltdev', self.password)) + password, hashed_pwd = gen_password() + self.run_call("shadow.set_password {0} '{1}'".format(self.userA, hashed_pwd)) + cmd = ('-a pam "*" test.ping ' + '--username {0} --password {1}'.format(self.userA, password)) resp = self.run_salt(cmd) self.assertTrue( 'minion:' in resp @@ -69,19 +88,35 @@ class AuthTest(integration.ShellCase): ''' test pam auth mechanism errors for an invalid user ''' - cmd = ('-a pam' - ' * test.ping --username nouser' - ' --password {0}'.format('abcd1234')) + cmd = ('-a pam "*" test.ping ' + '--username nouser --password {0}'.format('abcd1234')) resp = self.run_salt(cmd) self.assertTrue( 'Failed to authenticate' in ''.join(resp) ) + def test_pam_auth_valid_group(self): + ''' + test pam auth mechanism 
success for a valid group + ''' + password, hashed_pwd = gen_password() + self.run_call("shadow.set_password {0} '{1}'".format(self.userB, hashed_pwd)) + + cmd = ('-a pam "*" test.ping ' + '--username {0} --password {1}'.format(self.userB, password)) + resp = self.run_salt(cmd) + self.assertTrue( + 'minion:' in resp + ) + @destructiveTest - @skipIf(is_root, 'You must be logged in as root to run this test') + @skipIf(is_not_root, 'You must be logged in as root to run this test') def test_zzzz_tearDown(self): - if pwd.getpwnam('saltdev'): - self.run_call('user.delete saltdev') + for user in (self.userA, self.userB): + if pwd.getpwnam(user): + self.run_call('user.delete {0}'.format(user)) + if grp.getgrnam(self.group): + self.run_call('group.delete {0}'.format(self.group)) if __name__ == '__main__': From 53c9d483318964dc8f17068cbe1a905bca2c7090 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 17 Mar 2015 19:44:30 -0600 Subject: [PATCH 02/23] fix typo --- tests/unit/states/module_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/states/module_test.py b/tests/unit/states/module_test.py index 0a9edf07e0..759fd36daa 100644 --- a/tests/unit/states/module_test.py +++ b/tests/unit/states/module_test.py @@ -3,7 +3,7 @@ :codeauthor: :email:`Nicole Thomas (nicole@saltstack.com)` ''' -# Import Pyhton Libs +# Import Python Libs from inspect import ArgSpec # Import Salt Libs From f2fe1b9ed54944e1ce7e435ae465d35f1a66f836 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 17 Mar 2015 19:48:47 -0600 Subject: [PATCH 03/23] add positional arguments in order within longopts Non option arguments, arguments that do not begin with a `-` should probably be kept in the same order that they were specified in `tar_options` as they might be arguments to a specific option, like `--option option_arg`. Collecting all of these at the end of `tar_cmd` would break these options. 
The gnu tar manpage has this to say about it: A long function name must be prefixed with --. Some options take a parameter; with the single-letter form these must be given as separate arguments. Related to #20795. --- salt/states/archive.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index 8a244b10a7..8b9a9c3844 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -191,14 +191,13 @@ def extracted(name, tar_cmd = ['tar'] tar_shortopts = 'x' tar_longopts = [] - tar_afterfile = [] for position, opt in enumerate(tar_opts): if opt.startswith('-'): tar_longopts.append(opt) else: if position > 0: - tar_afterfile.append(opt) + tar_longopts.append(opt) else: append_opt = opt append_opt = append_opt.replace('x', '').replace('f', '') @@ -207,7 +206,6 @@ def extracted(name, tar_cmd.append(tar_shortopts) tar_cmd.extend(tar_longopts) tar_cmd.extend(['-f', filename]) - tar_cmd.extend(tar_afterfile) results = __salt__['cmd.run_all'](tar_cmd, cwd=name, python_shell=False) if results['retcode'] != 0: From 5f143ecb6014471969be330a8d29dc9d0cbb70ad Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 17 Mar 2015 17:24:02 -0600 Subject: [PATCH 04/23] unit tests for states.archive.extracted tar opts Implements tests for #20795. 
--- tests/unit/states/archive_test.py | 81 +++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 tests/unit/states/archive_test.py diff --git a/tests/unit/states/archive_test.py b/tests/unit/states/archive_test.py new file mode 100644 index 0000000000..5aafc37381 --- /dev/null +++ b/tests/unit/states/archive_test.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +''' +unit tests for the archive state +''' + +# Import Python Libs +import os +import tempfile + +# Import Salt Libs +from salt.states import archive + +# Import Salt Testing Libs +from salttesting import skipIf, TestCase +from salttesting.helpers import ensure_in_syspath +from salttesting.mock import ( + NO_MOCK, + NO_MOCK_REASON, + MagicMock, + patch +) + +ensure_in_syspath('../../') + +archive.__opts__ = {} +archive.__salt__ = {} +archive.__env__ = 'test' + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ArchiveTest(TestCase): + ''' + Validate the archive state + ''' + + def test_extracted_tar(self): + ''' + archive.extracted tar options + ''' + + source = 'file.tar.gz' + tmp_dir = os.path.join(tempfile.gettempdir(), 'test_archive') + test_tar_opts = [ + '--no-anchored foo', + 'v -p --opt', + '-v -p', + '--long-opt -z', + 'z -v -weird-long-opt arg', + ] + ret_tar_opts = [ + ['tar', 'x', '--no-anchored', 'foo', '-f'], + ['tar', 'xv', '-p', '--opt', '-f'], + ['tar', 'x', '-v', '-p', '-f'], + ['tar', 'x', '--long-opt', '-z', '-f'], + ['tar', 'xz', '-v', '-weird-long-opt', 'arg', '-f'], + ] + + mock_true = MagicMock(return_value=True) + mock_false = MagicMock(return_value=False) + ret = {'stdout': ['saltines', 'cheese'], 'stderr': 'biscuits', 'retcode': '31337', 'pid': '1337'} + mock_run = MagicMock(return_value=ret) + + with patch('os.path.exists', mock_true): + with patch.dict(archive.__opts__, {'test': False, + 'cachedir': tmp_dir}): + with patch.dict(archive.__salt__, {'file.directory_exists': mock_false, + 'file.file_exists': mock_false, + 'file.makedirs': mock_true, + 
'cmd.run_all': mock_run}): + for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts): + ret = archive.extracted(tmp_dir, + source, + 'tar', + tar_options=test_opts) + ret_opts.append(os.path.join(tmp_dir, 'files/test/_tmp_test_archive.tar')) + mock_run.assert_called_with(ret_opts, cwd=tmp_dir, python_shell=False) + + +if __name__ == '__main__': + from integration import run_tests + run_tests(ArchiveTest) From 234d02b2187d1a9b206c602e20e39b3c135fb838 Mon Sep 17 00:00:00 2001 From: Thomas Jackson Date: Tue, 17 Mar 2015 18:35:43 -0700 Subject: [PATCH 05/23] Add timeout to saltnado's event listener Fixes #21707 The issue is basically that the master has seen N minions-- and not all N are currently connected. The publish job returns to the salt-api that N minions got the job-- and it dutifully waits for all the returns. With this we use the same ping timeout that the CLI does while waiting for returns. So once all minions are no longer running the job we will return. --- salt/netapi/rest_tornado/saltnado.py | 42 ++++++++++++------- .../netapi/rest_tornado/test_app.py | 3 +- tests/unit/netapi/rest_tornado/test_utils.py | 17 ++++++++ 3 files changed, 45 insertions(+), 17 deletions(-) diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index 391b08bf6d..e2e9d6875b 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -259,6 +259,8 @@ class EventListener(object): # request_obj -> list of (tag, future) self.request_map = defaultdict(list) + self.timeout_map = {} # map of future -> timeout_callback + self.stream = zmqstream.ZMQStream(self.event.sub, io_loop=tornado.ioloop.IOLoop.current()) self.stream.on_recv(self._handle_event_socket_recv) @@ -270,23 +272,13 @@ class EventListener(object): if request not in self.request_map: return for tag, future in self.request_map[request]: - # TODO: log, this shouldn't happen... 
- if tag not in self.tag_map: - continue - # if the future isn't complete, mark it as a timeout - if not future.done(): - future.set_exception(TimeoutException()) - # if we marked it as a timeout, we need to remove it from tag_map - self.tag_map[tag].remove(future) - - # if that was the last of them, remove the key all together - if len(self.tag_map[tag]) == 0: - del self.tag_map[tag] + self._timeout_future(tag, future) def get_event(self, request, tag='', callback=None, + timeout=None ): ''' Get an event (async of course) return a future that will get it later @@ -294,15 +286,30 @@ class EventListener(object): future = Future() if callback is not None: def handle_future(future): - response = future.result() - tornado.ioloop.IOLoop.current().add_callback(callback, response) + tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[tag].append(future) self.request_map[request].append((tag, future)) + if timeout: + timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future) + self.timeout_map[future] = timeout_future + return future + def _timeout_future(self, tag, future): + ''' + Timeout a specific future + ''' + if tag not in self.tag_map: + return + if not future.done(): + future.set_exception(TimeoutException()) + self.tag_map[tag].remove(future) + if len(self.tag_map[tag]) == 0: + del self.tag_map[tag] + def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket @@ -316,6 +323,9 @@ class EventListener(object): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[tag_prefix].remove(future) + if future in self.timeout_map: + tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) + del self.timeout_map[future] # TODO: move to a utils function within salt-- the batching stuff is a bit tied together @@ -881,7 +891,7 @@ class 
SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): minions_remaining = pub_data['minions'] ret_event = self.application.event_listener.get_event(self, tag=ret_tag) - ping_event = self.application.event_listener.get_event(self, tag=ping_tag) + ping_event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) # while we are waiting on all the mininons while len(minions_remaining) > 0 or not self.min_syndic_wait_done(): @@ -896,7 +906,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): ping_id = event['data']['id'] if ping_id not in chunk_ret and ping_id not in minions_remaining: minions_remaining.append(ping_id) - ping_event = self.application.event_listener.get_event(self, tag=ping_tag) + ping_event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) # if it is a ret future, its just a regular return else: chunk_ret[event['data']['id']] = event['data']['return'] diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py index 7d6bd2988b..7fef66db49 100644 --- a/tests/integration/netapi/rest_tornado/test_app.py +++ b/tests/integration/netapi/rest_tornado/test_app.py @@ -536,10 +536,11 @@ class TestWebhookSaltAPIHandler(SaltnadoTestCase): return application def test_post(self): - def verify_event(event): + def verify_event(future): ''' Verify that the event fired on the master matches what we sent ''' + event = future.result() self.assertEqual(event['tag'], 'salt/netapi/hook') self.assertIn('headers', event['data']) self.assertEqual(event['data']['post'], {'foo': 'bar'}) diff --git a/tests/unit/netapi/rest_tornado/test_utils.py b/tests/unit/netapi/rest_tornado/test_utils.py index 35c527a314..ede30882e4 100644 --- a/tests/unit/netapi/rest_tornado/test_utils.py +++ b/tests/unit/netapi/rest_tornado/test_utils.py @@ -83,6 +83,23 @@ class 
TestEventListener(tornado.testing.AsyncTestCase): self.assertEqual(event_future.result()['tag'], 'evt1') self.assertEqual(event_future.result()['data']['data'], 'foo1') + def test_timeout(self): + ''' + Make sure timeouts work correctly + ''' + with eventpublisher_process(): + event_listener = saltnado.EventListener({}, # we don't use mod_opts, don't save? + {'sock_dir': SOCK_DIR, + 'transport': 'zeromq'}) + event_future = event_listener.get_event(1, + tag='evt1', + callback=self.stop, + timeout=1, + ) # get an event future + self.wait() + self.assertTrue(event_future.done()) + with self.assertRaises(saltnado.TimeoutException): + event_future.result() if __name__ == '__main__': from integration import run_tests From 70155ddd49b84243f8a8c1477d929afedb49a692 Mon Sep 17 00:00:00 2001 From: Thomas Jackson Date: Tue, 17 Mar 2015 18:47:38 -0700 Subject: [PATCH 06/23] Massive speedup to saltnado This mixin was creating 3 localclients and 1 runner client per request-- which takes ~6s. Now we build the set of clients once per process Conflicts: salt/runner.py --- salt/netapi/rest_tornado/saltnado.py | 16 ++++++++++------ salt/runner.py | 1 + 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index e2e9d6875b..3af40f88f1 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -198,18 +198,22 @@ class SaltClientsMixIn(object): ''' MixIn class to container all of the salt clients that the API needs ''' + # TODO: load this proactively, instead of waiting for a request + __saltclients = None + @property def saltclients(self): - if not hasattr(self, '__saltclients'): + if SaltClientsMixIn.__saltclients is None: + local_client = salt.client.get_local_client(mopts=self.application.opts) # TODO: refreshing clients using cachedict - self.__saltclients = { - 'local': salt.client.get_local_client(mopts=self.application.opts).run_job, + 
SaltClientsMixIn.__saltclients = { + 'local': local_client.run_job, # not the actual client we'll use.. but its what we'll use to get args - 'local_batch': salt.client.get_local_client(mopts=self.application.opts).cmd_batch, - 'local_async': salt.client.get_local_client(mopts=self.application.opts).run_job, + 'local_batch': local_client.cmd_batch, + 'local_async': local_client.run_job, 'runner': salt.runner.RunnerClient(opts=self.application.opts).async, } - return self.__saltclients + return SaltClientsMixIn.__saltclients AUTH_TOKEN_HEADER = 'X-Auth-Token' diff --git a/salt/runner.py b/salt/runner.py index 99253ea4b7..d86f763a8e 100644 --- a/salt/runner.py +++ b/salt/runner.py @@ -53,6 +53,7 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): .. code-block:: python +<<<<<<< HEAD >>> opts = salt.config.master_config('/etc/salt/master') >>> runner = salt.runner.RunnerClient(opts) >>> runner.cmd('jobs.list_jobs', []) From a95f81233cdc2561c457f282bff60972547eb0bd Mon Sep 17 00:00:00 2001 From: Thomas Jackson Date: Wed, 18 Mar 2015 08:11:38 -0700 Subject: [PATCH 07/23] Cleanup merge --- salt/runner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/runner.py b/salt/runner.py index d86f763a8e..99253ea4b7 100644 --- a/salt/runner.py +++ b/salt/runner.py @@ -53,7 +53,6 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): .. 
code-block:: python -<<<<<<< HEAD >>> opts = salt.config.master_config('/etc/salt/master') >>> runner = salt.runner.RunnerClient(opts) >>> runner.cmd('jobs.list_jobs', []) From 36841bdd82782aef640c5b3ad383146d7f7f3c16 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Wed, 18 Mar 2015 14:48:03 -0600 Subject: [PATCH 08/23] Backport #21175 to 2014.7 Addresses same syndic issues as #21175 --- salt/client/__init__.py | 38 ++++++++++++++++++++++++++++++++------ salt/daemons/masterapi.py | 5 ++++- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index d7cd65d2c6..30dae0b045 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -783,19 +783,33 @@ class LocalClient(object): def get_returns_no_block( self, jid, - event=None): + event=None, + additional_tags=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None + + Pass a list of additional tags as `additional_tags` to search + the event bus for non-return data, such as minion lists returned + from syndics. ''' + tag_search = [] + tag_search.append(jid) + if isinstance(additional_tags, str): + tag_search.append(additional_tags) + elif isinstance(additional_tags, list): + for tag in additional_tags: + tag_search.append(tag) + if event is None: event = self.event while True: if HAS_ZMQ: try: raw = event.get_event_noblock() - if raw and raw.get('tag', '').startswith(jid): + if (raw and (raw.get('tag', '').startswith(jid) or + any([raw.get('tag', '').startswith(tag) for tag in tag_search]))): yield raw else: yield None @@ -849,10 +863,15 @@ class LocalClient(object): syndic_wait = 0 last_time = False # iterator for this job's return - ret_iter = self.get_returns_no_block(jid) + if self.opts['order_masters']: + # If we are a MoM, we need to gather expected minions from downstream masters. 
+ ret_iter = self.get_returns_no_block(jid, additional_tags='syndic') + else: + ret_iter = self.get_returns_no_block(jid) # iterator for the info of this job jinfo_iter = [] timeout_at = time.time() + timeout + gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True @@ -871,9 +890,6 @@ class LocalClient(object): if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue - if 'syndic' in raw: - minions.update(raw['syndic']) - continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): @@ -892,6 +908,16 @@ class LocalClient(object): # All minions have returned, break out of the loop log.debug('jid {0} found all minions {1}'.format(jid, found)) break + elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: + if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: + # There were some minions to find and we found them + # However, this does not imply that *all* masters have yet responded with expected minion lists. + # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see + # if additional lower-level masters deliver their lists of expected + # minions. + break + # If we get here we may not have gathered the minion list yet. 
Keep waiting + # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 48884cbac8..50809f20dc 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -712,7 +712,10 @@ class RemoteFuncs(object): for event in load['events']: self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag') is not None: - self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) + if 'data' in event: + self.fire_event(event['data'], tagify(event['tag'], base=load['pretag'])) + else: + self.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) From f56cdd548969d393ec8201d62b737c68e6f9dd6b Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 18 Mar 2015 16:05:07 -0600 Subject: [PATCH 09/23] Update syndic documentation Add some docs about installing a minion on the syndic to help reduce the amount of time spent waiting for the CLI to return. --- doc/topics/topology/syndic.rst | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/doc/topics/topology/syndic.rst b/doc/topics/topology/syndic.rst index c7379b9e68..6573b16b17 100644 --- a/doc/topics/topology/syndic.rst +++ b/doc/topics/topology/syndic.rst @@ -66,6 +66,7 @@ starting the other Salt daemons. Topology ======== + The ``salt-syndic`` is little more than a command and event forwarder. When a command is issued from a higher-level master, it will be received by the configured syndics on lower-level masters, and propagated to to their minions, @@ -83,3 +84,22 @@ Nodes on the lowest points of the hierarchy (minions which do not propagate data to another level) will only have the ``salt-minion`` daemon running. There is no need for either ``salt-master`` or ``salt-syndic`` to be running on a standard minion. 
+ +Syndic and the CLI +================== + +In order for the high-level master to return information from minions that are +below the syndic(s), the CLI requires a short wait time in order to allow the +syndic(s) to gather responses from their minions. This value is defined in the +``syndic_wait` and has a default of five seconds. + +While it is possible to run a syndic without a minion installed on the same machine, +it is recommended, for a faster CLI response time, to do so. Without a minion +installed on the syndic, the timeout value of ``syndic_wait`` increases +significantly - about three-fold. With a minion installed on the syndic, the CLI +timeout resides at the value defined in ``syndic_wait``. + +.. note:: + + To reduce the amount of time the CLI waits for minions to respond, install a minion + on the syndic or tune the value of the ``syndic_wait`` configuration. From 14b367e8cef778e73495f2679b22766ee639f113 Mon Sep 17 00:00:00 2001 From: Nicolas Delaby Date: Tue, 17 Mar 2015 18:26:34 +0100 Subject: [PATCH 10/23] transmit socket parameter for inner function calls --- salt/modules/haproxyconn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/haproxyconn.py b/salt/modules/haproxyconn.py index fb8f297822..8e35cd3ebd 100644 --- a/salt/modules/haproxyconn.py +++ b/salt/modules/haproxyconn.py @@ -83,7 +83,7 @@ def enable_server(name, backend, socket='/var/run/haproxy.sock'): ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) - return list_servers(backend) + return list_servers(backend, socket=socket) def disable_server(name, backend, socket='/var/run/haproxy.sock'): @@ -106,7 +106,7 @@ def disable_server(name, backend, socket='/var/run/haproxy.sock'): ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) - return list_servers(backend) + return list_servers(backend, socket=socket) def 
get_weight(name, backend, socket='/var/run/haproxy.sock'): From eddef004a8765e2027e8414057849c3b1225f03d Mon Sep 17 00:00:00 2001 From: Nicolas Delaby Date: Tue, 17 Mar 2015 18:26:01 +0100 Subject: [PATCH 11/23] If there no containers in the response it does not mean the command failed. It could be there is no container at all. --- salt/modules/dockerio.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/salt/modules/dockerio.py b/salt/modules/dockerio.py index 2733186ed9..08b54fe412 100644 --- a/salt/modules/dockerio.py +++ b/salt/modules/dockerio.py @@ -388,29 +388,22 @@ def get_containers(all=True, status['host'] = {} status['host']['interfaces'] = __salt__['network.interfaces']() - containers = ret = client.containers(all=all, - trunc=trunc, - since=since, - before=before, - limit=limit) + containers = client.containers(all=all, + trunc=trunc, + since=since, + before=before, + limit=limit) # Optionally for each container get more granular information from them # by inspecting the container if inspect: - ret = [] for container in containers: container_id = container.get('Id') if container_id: inspect = _get_container_infos(container_id) - container['detail'] = {} - for key, value in inspect.iteritems(): - container['detail'][key] = value - ret.append(container) + container['detail'] = inspect.copy() - if ret: - _valid(status, comment='All containers in out', out=ret) - else: - _invalid(status) + _valid(status, comment='All containers in out', out=containers) return status From d40b3874c58631d7c3eb597980c09b519b5579d7 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Wed, 18 Mar 2015 14:26:14 -0600 Subject: [PATCH 12/23] Fix syndic to use master cachedir --- salt/config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/config.py b/salt/config.py index 6ad2d50b15..ab088db672 100644 --- a/salt/config.py +++ b/salt/config.py @@ -967,6 +967,7 @@ def syndic_config(master_config_path, 'sock_dir': os.path.join( 
opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir']) ), + 'cachedir': master_opts['cachedir'], } opts.update(syndic_opts) # Prepend root_dir to other paths From 8d406c1642055f3c19f84395bdd48cd860619629 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Wed, 18 Mar 2015 14:27:45 -0600 Subject: [PATCH 13/23] Fix syndic to get the load for __load__, not the jid Fixes #21495 --- salt/minion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/minion.py b/salt/minion.py index ef3d4c371c..ae62c267b2 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2309,7 +2309,7 @@ class MultiSyndic(MinionBase): jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} - fstr = '{0}.get_jid'.format(self.opts['master_job_cache']) + fstr = '{0}.get_load'.format(self.opts['master_job_cache']) jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) From e386db2c34d4e3a8508c10db5c58919df4cae2c3 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Wed, 18 Mar 2015 15:17:39 -0600 Subject: [PATCH 14/23] Update syndic_config test for new cachedir --- tests/unit/config_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/config_test.py b/tests/unit/config_test.py index 5d672a1e02..ad0491d8bc 100644 --- a/tests/unit/config_test.py +++ b/tests/unit/config_test.py @@ -355,7 +355,7 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn): self.assertEqual(syndic_opts['master_ip'], '127.0.0.1') self.assertEqual(syndic_opts['master'], 'localhost') self.assertEqual(syndic_opts['sock_dir'], os.path.join(root_dir, 'minion_sock')) - self.assertEqual(syndic_opts['cachedir'], os.path.join(root_dir, 'cachedir')) + self.assertEqual(syndic_opts['cachedir'], os.path.join(root_dir, 'cache')) self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'osyndic.log')) self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'osyndic.pid')) # Show 
that the options of localclient that repub to local master From 36192e3d40435eff7e85b28d7efbbb3f49285d81 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:25:16 -0600 Subject: [PATCH 15/23] Fix Syndic to only forward unforwarded loads --- salt/minion.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/minion.py b/salt/minion.py index ae62c267b2..c636b12781 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1825,6 +1825,7 @@ class Syndic(Minion): opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) + self.jid_forward_cache = set() def _handle_aes(self, load, sig=None): ''' @@ -2309,7 +2310,9 @@ class MultiSyndic(MinionBase): jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} - fstr = '{0}.get_load'.format(self.opts['master_job_cache']) + if event['data']['jid'] not in self.jid_forward_cache: + fstr = '{0}.get_load'.format(self.opts['master_job_cache']) + self.jid_forward_cache.add(event['data']['jid']) jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) From 46c097389cc4db39ff35d589653462b8dc40cd6d Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:27:40 -0600 Subject: [PATCH 16/23] Add syndic_jid_forward_cache_hwm configuration --- salt/config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/config.py b/salt/config.py index ab088db672..34746f2f95 100644 --- a/salt/config.py +++ b/salt/config.py @@ -242,6 +242,7 @@ VALID_OPTS = { 'random_reauth_delay': int, 'syndic_event_forward_timeout': float, 'syndic_max_event_process_time': float, + 'syndic_jid_forward_cache_hwm': int, 'ssh_passwd': str, 'ssh_port': str, 'ssh_sudo': bool, @@ -559,6 +560,7 @@ DEFAULT_MASTER_OPTS = { 'gather_job_timeout': 5, 'syndic_event_forward_timeout': 0.5, 'syndic_max_event_process_time': 0.5, + 'syndic_jid_forward_cache_hwm': 100, 'ssh_passwd': '', 'ssh_port': '22', 
'ssh_sudo': False, From 30f9d08aa8a261237f99f1e1980e21ec6344e706 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:42:06 -0600 Subject: [PATCH 17/23] Pop oldest jid if we hit the jid_forward_cache_hwm --- salt/minion.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/minion.py b/salt/minion.py index c636b12781..de09af1d56 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2313,6 +2313,11 @@ class MultiSyndic(MinionBase): if event['data']['jid'] not in self.jid_forward_cache: fstr = '{0}.get_load'.format(self.opts['master_job_cache']) self.jid_forward_cache.add(event['data']['jid']) + if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: + # Pop the oldest jid from the cache + tmp = sorted(list(self.jid_forward_cache)) + tmp.pop(0) + self.jid_forward_cache = set(tmp) jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) ) From e7258ffbcd0d9d913c68b38ba4aa84427992fa36 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:42:26 -0600 Subject: [PATCH 18/23] Add another comment --- salt/minion.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/minion.py b/salt/minion.py index de09af1d56..d13ec3e838 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2310,6 +2310,8 @@ class MultiSyndic(MinionBase): jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} + # Only need to forward each load once. Don't hit the disk + # for every minion return! if event['data']['jid'] not in self.jid_forward_cache: fstr = '{0}.get_load'.format(self.opts['master_job_cache']) self.jid_forward_cache.add(event['data']['jid']) From 9cd3438cf43cad123a9ee5bdf497fb62567575a5 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:52:50 -0600 Subject: [PATCH 19/23] Gate the correct function call. 
*facepalm* --- salt/minion.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index d13ec3e838..b3dd1a0623 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2312,17 +2312,17 @@ class MultiSyndic(MinionBase): jdict['__load__'] = {} # Only need to forward each load once. Don't hit the disk # for every minion return! + fstr = '{0}.get_load'.format(self.opts['master_job_cache']) if event['data']['jid'] not in self.jid_forward_cache: - fstr = '{0}.get_load'.format(self.opts['master_job_cache']) + jdict['__load__'].update( + self.mminion.returners[fstr](event['data']['jid']) + ) self.jid_forward_cache.add(event['data']['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) - jdict['__load__'].update( - self.mminion.returners[fstr](event['data']['jid']) - ) if 'master_id' in event['data']: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = event['data']['master_id'] From 07c354bd27adb520cce8625df38c95dbc5628df2 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 10:53:44 -0600 Subject: [PATCH 20/23] Rearrange a little --- salt/minion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/minion.py b/salt/minion.py index b3dd1a0623..8ff9a991f4 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2310,9 +2310,9 @@ class MultiSyndic(MinionBase): jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} + fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! 
- fstr = '{0}.get_load'.format(self.opts['master_job_cache']) if event['data']['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](event['data']['jid']) From 3870c666f3992ac0edee46c6ed364566d0c86183 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 13:17:52 -0600 Subject: [PATCH 21/23] Pass in the load you just checked for Should fix the remaining job cache syndic issues --- salt/master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/master.py b/salt/master.py index 5fe7724dd9..23fc49b98f 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1187,7 +1187,7 @@ class AESFuncs(object): # if we have a load, save it if load.get('load'): fstr = '{0}.save_load'.format(self.opts['master_job_cache']) - self.mminion.returners[fstr](load['jid'], load) + self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for key, item in load['return'].items(): From 06c3cf8fc1f2148416233ccb8728fbc3e8174351 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 13:54:22 -0600 Subject: [PATCH 22/23] Make changes in both MultiSyndic and Syndic --- salt/minion.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index 8ff9a991f4..6e2a660a22 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2074,9 +2074,18 @@ class Syndic(Minion): jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} fstr = '{0}.get_jid'.format(self.opts['master_job_cache']) - jdict['__load__'].update( - self.mminion.returners[fstr](event['data']['jid']) - ) + # Only need to forward each load once. Don't hit the disk + # for every minion return! 
+ if event['data']['jid'] not in self.jid_forward_cache: + jdict['__load__'].update( + self.mminion.returners[fstr](event['data']['jid']) + ) + self.jid_forward_cache.add(event['data']['jid']) + if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: + # Pop the oldest jid from the cache + tmp = sorted(list(self.jid_forward_cache)) + tmp.pop(0) + self.jid_forward_cache = set(tmp) if 'master_id' in event['data']: jdict['master_id'] = event['data']['master_id'] jdict[event['data']['id']] = event['data']['return'] @@ -2129,6 +2138,7 @@ class MultiSyndic(MinionBase): opts['loop_interval'] = 1 super(MultiSyndic, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) + self.jid_forward_cache = set() # create all of the syndics you need self.master_syndics = {} From 417e2ab79bb9d529795de12c304839be3a838c5b Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 19 Mar 2015 15:59:49 -0600 Subject: [PATCH 23/23] Stupid backport didn't get this --- salt/minion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/minion.py b/salt/minion.py index 6e2a660a22..e876ef031f 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2073,7 +2073,7 @@ class Syndic(Minion): jdict['__fun__'] = event['data'].get('fun') jdict['__jid__'] = event['data']['jid'] jdict['__load__'] = {} - fstr = '{0}.get_jid'.format(self.opts['master_job_cache']) + fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if event['data']['jid'] not in self.jid_forward_cache: