Mirror of https://github.com/valitydev/salt.git (synced 2024-11-06 08:35:21 +00:00)
Various follow-up fixes

Merge branch '2015.8.12_follow_up' into '2015.8'

Commit: b910499dbe

conf/master (14 lines changed):
@@ -357,6 +357,20 @@
 # Pass in an alternative location for the salt-ssh roster file
 #roster_file: /etc/salt/roster
 
+# Define a location for roster files so they can be chosen when using Salt API.
+# An administrator can place roster files into these locations. Then when
+# calling Salt API, parameter 'roster_file' should contain a relative path to
+# these locations. That is, "roster_file=/foo/roster" will be resolved as
+# "/etc/salt/roster.d/foo/roster" etc. This feature prevents passing insecure
+# custom rosters through the Salt API.
+#
+#rosters:
+# - /etc/salt/roster.d
+# - /opt/salt/some/more/rosters
+
 # The log file of the salt-ssh command:
 #ssh_log_file: /var/log/salt/ssh
 
 # Pass in minion option overrides that will be inserted into the SHIM for
 # salt-ssh calls. The local minion config is not used for salt-ssh. Can be
 # overridden on a per-minion basis in the roster (`minion_opts`)
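For illustration, a minimal Python sketch of the relative-path resolution this option enables (resolve_roster is a hypothetical helper; the real lookup is the get_roster_file() change further down in this commit):

    import os

    def resolve_roster(roster_file, rosters):
        '''Resolve an API-supplied roster_file against validated locations.'''
        candidate = roster_file.strip('/')
        for location in rosters:
            path = os.path.join(location, candidate)
            if os.path.isfile(path):
                return path
        return None

    # With rosters = ['/etc/salt/roster.d'], roster_file='/foo/roster'
    # resolves to '/etc/salt/roster.d/foo/roster' when that file exists.
    print(resolve_roster('/foo/roster', ['/etc/salt/roster.d']))
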
@@ -42,5 +42,5 @@ simply by creating a data structure. (And this is exactly how much of Salt's
 own internals work!)
 
 .. autoclass:: salt.netapi.NetapiClient
-    :members: local, local_async, local_batch, ssh, ssh_async, runner,
+    :members: local, local_async, ssh, ssh_async, runner,
         runner_async, wheel, wheel_async
@@ -1,3 +1,5 @@
+.. _release-2015-8-0:
+
 ================================================
 Salt 2015.8.0 Release Notes - Codename Beryllium
 ================================================
doc/topics/releases/2015.8.13.rst (new file, 31 lines):

@@ -0,0 +1,31 @@
+============================
+Salt 2015.8.13 Release Notes
+============================
+
+Version 2015.8.13 is a bugfix release for :ref:`2015.8.0 <release-2015-8-0>`.
+
+
+Changes for v2015.8.12..v2015.8.13
+----------------------------------
+
+Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
+
+*Generated at: 2017-01-09T21:17:06Z*
+
+Statistics:
+
+- Total Merges: **3**
+- Total Issue references: **3**
+- Total PR references: **5**
+
+Changes:
+
+* 3428232 Clean up tests and docs for batch execution
+* 3d8f3d1 Remove batch execution from NetapiClient and Saltnado
+* 97b0f64 Lintfix
+* d151666 Add explanation comment
+* 62f2c87 Add docstring
+* 9b0a786 Explain what it is about and how to configure that
+* 5ea3579 Pick up a specified roster file from the configured locations
+* 3a8614c Disable custom rosters in API
+* c0e5a11 Add roster disable flag
@@ -22,7 +22,8 @@ class SSHClient(object):
     '''
     def __init__(self,
                  c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
-                 mopts=None):
+                 mopts=None,
+                 disable_custom_roster=False):
         if mopts:
             self.opts = mopts
         else:
@@ -35,6 +36,9 @@ class SSHClient(object):
             )
             self.opts = salt.config.client_config(c_path)
 
+        # Salt API should never offer a custom roster!
+        self.opts['__disable_custom_roster'] = disable_custom_roster
+
     def _prep_ssh(
             self,
             tgt,
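As a quick usage sketch (assuming direct construction of the client, per the signature above), the new flag simply lands in opts where the roster loader can check it:

    import salt.client.ssh.client

    # Trusted local callers keep the default (False) and may point at any
    # roster file; Salt API constructs the client with the flag set.
    client = salt.client.ssh.client.SSHClient(disable_custom_roster=True)
    assert client.opts['__disable_custom_roster'] is True
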
@@ -97,20 +97,6 @@ class NetapiClient(object):
         local = salt.client.get_local_client(mopts=self.opts)
         return local.cmd(*args, **kwargs)
 
-    def local_batch(self, *args, **kwargs):
-        '''
-        Run :ref:`execution modules <all-salt.modules>` against batches of minions
-
-        .. versionadded:: 0.8.4
-
-        Wraps :py:meth:`salt.client.LocalClient.cmd_batch`
-
-        :return: Returns the result from the execution module for each batch of
-            returns
-        '''
-        local = salt.client.get_local_client(mopts=self.opts)
-        return local.cmd_batch(*args, **kwargs)
-
     def ssh(self, *args, **kwargs):
         '''
         Run salt-ssh commands synchronously
@@ -119,7 +105,8 @@ class NetapiClient(object):
 
         :return: Returns the result from the salt-ssh command
         '''
-        ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
+        ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts,
+                                                      disable_custom_roster=True)
         return ssh_client.cmd_sync(kwargs)
 
     def ssh_async(self, fun, timeout=None, **kwargs):
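A hedged usage sketch of the hardened path (assuming a master config at the default location; the lowstate keys match the docs above):

    import salt.config
    import salt.netapi

    opts = salt.config.client_config('/etc/salt/master')
    client = salt.netapi.NetapiClient(opts)

    # Because NetapiClient now passes disable_custom_roster=True, any
    # 'roster_file' given here is resolved relative to the configured
    # 'rosters' locations rather than taken as an arbitrary path.
    ret = client.ssh(tgt='*', fun='test.ping', roster_file='/foo/roster')
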
@@ -192,7 +192,6 @@ logger = logging.getLogger()
 # # all of these require coordinating minion stuff
 #  - "local" (done)
 #  - "local_async" (done)
-#  - "local_batch" (done)
 
 # # master side
 #  - "runner" (done)
@@ -214,7 +213,6 @@ class SaltClientsMixIn(object):
             SaltClientsMixIn.__saltclients = {
                 'local': local_client.run_job,
                 # not the actual client we'll use.. but its what we'll use to get args
-                'local_batch': local_client.cmd_batch,
                 'local_async': local_client.run_job,
                 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
                 'runner_async': None,  # empty, since we use the same client as `runner`
@@ -355,30 +353,6 @@ class EventListener(object):
             del self.timeout_map[future]
 
 
-# TODO: move to a utils function within salt-- the batching stuff is a bit tied together
-def get_batch_size(batch, num_minions):
-    '''
-    Return the batch size that you should have
-        batch: string
-        num_minions: int
-
-    '''
-    # figure out how many we can keep in flight
-    partition = lambda x: float(x) / 100.0 * num_minions
-    try:
-        if '%' in batch:
-            res = partition(float(batch.strip('%')))
-            if res < 1:
-                return int(math.ceil(res))
-            else:
-                return int(res)
-        else:
-            return int(batch)
-    except ValueError:
-        print(('Invalid batch data sent: {0}\nData must be in the form'
-               'of %10, 10% or 3').format(batch))
-
-
 class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn):  # pylint: disable=W0223
     ct_out_map = (
         ('application/json', json.dumps),
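For reference, a condensed restatement of the removed helper's behavior, with worked values (a sketch, not part of the commit):

    import math

    def get_batch_size(batch, num_minions):
        # Percentages scale against the minion count; bare integers pass through.
        if '%' in batch:
            res = float(batch.strip('%')) / 100.0 * num_minions
            return int(math.ceil(res)) if res < 1 else int(res)
        return int(batch)

    assert get_batch_size('10%', 50) == 5  # 10% of 50 minions
    assert get_batch_size('1%', 50) == 1   # fractional sizes round up to 1
    assert get_batch_size('3', 50) == 3    # plain integer is used as-is
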
@@ -718,7 +692,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W0223
             Content-Type: application/json
             Content-Length: 83
 
-            {"clients": ["local", "local_batch", "local_async", "runner", "runner_async"], "return": "Welcome"}
+            {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}
         '''
         ret = {"clients": list(self.saltclients.keys()),
                "return": "Welcome"}
@@ -836,57 +810,6 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):  # pylint: disable=W0223
         self.write(self.serialize({'return': ret}))
         self.finish()
 
-    @tornado.gen.coroutine
-    def _disbatch_local_batch(self, chunk):
-        '''
-        Disbatch local client batched commands
-        '''
-        f_call = salt.utils.format_call(self.saltclients['local_batch'], chunk)
-
-        # ping all the minions (to see who we have to talk to)
-        # Don't catch any exception, since we won't know what to do, we'll
-        # let the upper level deal with this one
-        ping_ret = yield self._disbatch_local({'tgt': chunk['tgt'],
-                                               'fun': 'test.ping',
-                                               'expr_form': f_call['kwargs']['expr_form']})
-
-        chunk_ret = {}
-
-        if not isinstance(ping_ret, dict):
-            raise tornado.gen.Return(chunk_ret)
-        minions = list(ping_ret.keys())
-
-        maxflight = get_batch_size(f_call['kwargs']['batch'], len(minions))
-        inflight_futures = []
-
-        # override the expr_form
-        f_call['kwargs']['expr_form'] = 'list'
-        # do this batch
-        while len(minions) > 0 or len(inflight_futures) > 0:
-            # if you have more to go, lets disbatch jobs
-            while len(inflight_futures) < maxflight and len(minions) > 0:
-                minion_id = minions.pop(0)
-                batch_chunk = dict(chunk)
-                batch_chunk['tgt'] = [minion_id]
-                batch_chunk['expr_form'] = 'list'
-                future = self._disbatch_local(batch_chunk)
-                inflight_futures.append(future)
-
-            # if we have nothing to wait for, don't wait
-            if len(inflight_futures) == 0:
-                continue
-
-            # wait until someone is done
-            finished_future = yield Any(inflight_futures)
-            try:
-                b_ret = finished_future.result()
-            except TimeoutException:
-                break
-            chunk_ret.update(b_ret)
-            inflight_futures.remove(finished_future)
-
-        raise tornado.gen.Return(chunk_ret)
-
     @tornado.gen.coroutine
     def _disbatch_local(self, chunk):
         '''
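The removed coroutine implemented a sliding window: ping to discover targets, size the window with get_batch_size(), then keep at most maxflight jobs in flight, replacing each as it completes. A framework-neutral sketch of that pattern (submit and wait_any are hypothetical stand-ins for the tornado future machinery):

    def run_in_batches(minions, maxflight, submit, wait_any):
        inflight, results = [], {}
        while minions or inflight:
            # top the window up to maxflight outstanding jobs
            while minions and len(inflight) < maxflight:
                inflight.append(submit(minions.pop(0)))
            # block until any one job finishes, then fold in its result
            done = wait_any(inflight)
            results.update(done.result())
            inflight.remove(done)
        return results
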
@@ -19,19 +19,43 @@ log = logging.getLogger(__name__)
 
 
 def get_roster_file(options):
-    if options.get('roster_file'):
-        template = options.get('roster_file')
-    elif 'config_dir' in options.get('__master_opts__', {}):
-        template = os.path.join(options['__master_opts__']['config_dir'],
-                                'roster')
-    elif 'config_dir' in options:
-        template = os.path.join(options['config_dir'], 'roster')
-    else:
-        template = os.path.join(salt.syspaths.CONFIG_DIR, 'roster')
+    '''
+    Find the respective roster file.
+
+    :param options:
+    :return:
+    '''
+    template = None
+    # The __disable_custom_roster option is always True if the Salt SSH
+    # client comes from the Salt API. In that case there is no way to pass
+    # a custom 'roster_file'; instead, the file has to be chosen from the
+    # already validated rosters (see the /etc/salt/master config).
+    if options.get('__disable_custom_roster') and options.get('roster_file'):
+        roster = options.get('roster_file').strip('/')
+        for roster_location in options.get('rosters'):
+            r_file = os.path.join(roster_location, roster)
+            if os.path.isfile(r_file):
+                template = r_file
+                break
+        del options['roster_file']
+
+    if not template:
+        if options.get('roster_file'):
+            template = options.get('roster_file')
+        elif 'config_dir' in options.get('__master_opts__', {}):
+            template = os.path.join(options['__master_opts__']['config_dir'],
+                                    'roster')
+        elif 'config_dir' in options:
+            template = os.path.join(options['config_dir'], 'roster')
+        else:
+            template = os.path.join(salt.syspaths.CONFIG_DIR, 'roster')
 
     if not os.path.isfile(template):
         raise IOError('No roster file found')
 
     if not os.access(template, os.R_OK):
         raise IOError('Access denied to roster "{0}"'.format(template))
 
     return template
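Under these rules, a call originating from the API resolves like this (a sketch with hypothetical option values):

    # Options as the API-constructed SSHClient would populate them:
    opts = {
        '__disable_custom_roster': True,     # set by SSHClient for API calls
        'roster_file': '/foo/roster',        # relative path supplied via the API
        'rosters': ['/etc/salt/roster.d'],   # validated locations from the master config
    }
    # get_roster_file(opts) returns '/etc/salt/roster.d/foo/roster' if that
    # file exists; otherwise it falls back to the default roster locations.
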
@@ -48,13 +48,12 @@ class TestSaltAPIHandler(SaltnadoTestCase):
                               )
         self.assertEqual(response.code, 200)
         response_obj = json.loads(response.body)
-        self.assertEqual(response_obj['clients'],
-                         ['runner',
-                          'runner_async',
-                          'local_async',
-                          'local',
-                          'local_batch']
-                         )
+        self.assertItemsEqual(response_obj['clients'],
+                              ['runner',
+                               'runner_async',
+                               'local_async',
+                               'local']
+                              )
         self.assertEqual(response_obj['return'], 'Welcome')
 
     def test_post_no_auth(self):
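Besides dropping 'local_batch', the switch from assertEqual to assertItemsEqual makes the test order-insensitive, which matches what a dict-backed client registry actually guarantees. (assertItemsEqual is the Python 2 name; Python 3 calls it assertCountEqual.) A tiny illustration:

    import unittest

    class ClientListTest(unittest.TestCase):
        def test_order_insensitive(self):
            # passes regardless of the order the keys come back in
            self.assertItemsEqual(['runner', 'local'], ['local', 'runner'])
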
@@ -117,68 +116,6 @@ class TestSaltAPIHandler(SaltnadoTestCase):
         response_obj = json.loads(response.body)
         self.assertEqual(response_obj['return'], ["No minions matched the target. No command was sent, no jid was assigned."])
 
-    # local_batch tests
-    @skipIf(True, 'to be reenabled when #23623 is merged')
-    def test_simple_local_batch_post(self):
-        '''
-        Basic post against local_batch
-        '''
-        low = [{'client': 'local_batch',
-                'tgt': '*',
-                'fun': 'test.ping',
-                }]
-        response = self.fetch('/',
-                              method='POST',
-                              body=json.dumps(low),
-                              headers={'Content-Type': self.content_type_map['json'],
-                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
-                              connect_timeout=30,
-                              request_timeout=30,
-                              )
-        response_obj = json.loads(response.body)
-        self.assertEqual(response_obj['return'], [{'minion': True, 'sub_minion': True}])
-
-    # local_batch tests
-    @skipIf(True, 'to be reenabled when #23623 is merged')
-    def test_full_local_batch_post(self):
-        '''
-        Test full parallelism of local_batch
-        '''
-        low = [{'client': 'local_batch',
-                'tgt': '*',
-                'fun': 'test.ping',
-                'batch': '100%',
-                }]
-        response = self.fetch('/',
-                              method='POST',
-                              body=json.dumps(low),
-                              headers={'Content-Type': self.content_type_map['json'],
-                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
-                              connect_timeout=30,
-                              request_timeout=30,
-                              )
-        response_obj = json.loads(response.body)
-        self.assertEqual(response_obj['return'], [{'minion': True, 'sub_minion': True}])
-
-    def test_simple_local_batch_post_no_tgt(self):
-        '''
-        Local_batch testing with no tgt
-        '''
-        low = [{'client': 'local_batch',
-                'tgt': 'minion_we_dont_have',
-                'fun': 'test.ping',
-                }]
-        response = self.fetch('/',
-                              method='POST',
-                              body=json.dumps(low),
-                              headers={'Content-Type': self.content_type_map['json'],
-                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
-                              connect_timeout=30,
-                              request_timeout=30,
-                              )
-        response_obj = json.loads(response.body)
-        self.assertEqual(response_obj['return'], [{}])
-
     # local_async tests
     def test_simple_local_async_post(self):
         low = [{'client': 'local_async',
@@ -400,7 +337,7 @@ class TestMinionSaltAPIHandler(SaltnadoTestCase):
         make sure you get an error
         '''
         # get a token for this test
-        low = [{'client': 'local_batch',
+        low = [{'client': 'local',
                 'tgt': '*',
                 'fun': 'test.ping',
                 }]