Merge branch '2016.11' into 'develop'

Conflicts:
- salt/client/ssh/__init__.py
- salt/pillar/makostack.py
- tests/integration/client/test_runner.py
- tests/integration/states/test_file.py

commit 4fc9b5484b
@@ -15,13 +15,41 @@ More information about Azure is located at `http://www.windowsazure.com/

 Dependencies
 ============
-* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc5
+* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc6
+* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
 * The python-requests library, for Python < 2.7.9.
 * A Microsoft Azure account
 * `Salt <https://github.com/saltstack/salt>`_

+Installation Tips
+=================
+Because the ``azure`` library requires the ``cryptography`` library, which is
+compiled on-the-fly by ``pip``, you may need to install the development tools
+for your operating system.
+
+Before you install ``azure`` with ``pip``, you should make sure that the
+required libraries are installed.
+
+Debian
+------
+For Debian and Ubuntu, the following command will ensure that the required
+dependencies are installed:
+
+.. code-block:: bash
+
+   sudo apt-get install build-essential libssl-dev libffi-dev python-dev
+
+Red Hat
+-------
+For Fedora and RHEL-derivatives, the following command will ensure that the
+required dependencies are installed:
+
+.. code-block:: bash
+
+   sudo yum install gcc libffi-devel python-devel openssl-devel

 Configuration
 =============
@@ -33,7 +61,7 @@ Set up the provider config at ``/etc/salt/cloud.providers.d/azurearm.conf``:

 my-azurearm-config:
   driver: azurearm
-  master: limejack.com
+  master: salt.example.com
   subscription_id: 01234567-890a-bcde-f012-34567890abdc

   # https://apps.dev.microsoft.com/#/appList
@@ -271,7 +271,8 @@ class KeyCLI(object):
                 ret = list_ret
             for minions in ret.values():
                 for minion in minions:
-                    print('Key for minion {0} {1}ed.'.format(minion, cmd))
+                    print('Key for minion {0} {1}ed.'.format(minion,
+                                                             cmd.rstrip('e')))
         elif isinstance(ret, dict):
             salt.output.display_output(ret, 'key', opts=self.opts)
         else:
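The `.rstrip('e')` in the new call avoids a doubled suffix when the command itself ends in 'e'. A quick illustrative sketch (not part of the commit; the minion name is made up):

```python
# Naive suffixing yields 'deleteed' for cmd='delete'; stripping the
# trailing 'e' first yields 'accepted', 'rejected', 'deleted' as intended.
for cmd in ('accept', 'reject', 'delete'):
    print('Key for minion {0} {1}ed.'.format('web1', cmd.rstrip('e')))
```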
salt/minion.py (476 lines changed)
@@ -24,7 +24,7 @@ from stat import S_IMODE

 # pylint: disable=import-error,no-name-in-module,redefined-builtin
 import salt.ext.six as six
 if six.PY3:
-    import ipaddress  # pylint: disable=3rd-party-module-not-gated
+    import ipaddress
 else:
     import salt.ext.ipaddress as ipaddress
 from salt.ext.six.moves import range

@@ -83,7 +83,6 @@ import salt.engines
 import salt.payload
 import salt.syspaths
 import salt.utils
 import salt.utils.dictupdate
 import salt.utils.context
 import salt.utils.jid
 import salt.pillar
@@ -285,15 +284,24 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
                 # above, that would result in a 2nd call to
                 # salt.utils.cli.yamlify_arg(), which could mangle the input.
                 _args.append(arg)
-            if string_kwarg:
-                log.critical(
-                    'String kwarg(s) %s passed to '
-                    'salt.minion.load_args_and_kwargs(). This is no longer '
-                    'supported, so the kwarg(s) will be ignored. Arguments '
-                    'passed to salt.minion.load_args_and_kwargs() should be '
-                    'passed to salt.utils.args.parse_input() first to load '
-                    'and condition them properly.', string_kwarg
-                )
+            elif string_kwarg:
+                salt.utils.warn_until(
+                    'Nitrogen',
+                    'The list of function args and kwargs should be parsed '
+                    'by salt.utils.args.parse_input() before calling '
+                    'salt.minion.load_args_and_kwargs().'
+                )
+                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
+                    # Function supports **kwargs or is a positional argument to
+                    # the function.
+                    _kwargs.update(string_kwarg)
+                else:
+                    # **kwargs not in argspec and parsed argument name not in
+                    # list of positional arguments. This keyword argument is
+                    # invalid.
+                    for key, val in six.iteritems(string_kwarg):
+                        invalid_kwargs.append('{0}={1}'.format(key, val))
+                continue

         # if the arg is a dict with __kwarg__ == True, then its a kwarg
         elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
@@ -338,15 +346,14 @@ def eval_master_func(opts):
             raise KeyError
         # we take whatever the module returns as master address
         opts['master'] = master_mod[mod_fun]()
-        if not isinstance(opts['master'], str) and \
-                not isinstance(opts['master'], list):
+        if not isinstance(opts['master'], str):
             raise TypeError
         opts['__master_func_evaluated'] = True
     except KeyError:
         log.error('Failed to load module {0}'.format(mod_fun))
         sys.exit(salt.defaults.exitcodes.EX_GENERIC)
     except TypeError:
-        log.error('{0} returned from {1} is not a string or a list'.format(opts['master'], mod_fun))
+        log.error('{0} returned from {1} is not a string'.format(opts['master'], mod_fun))
         sys.exit(salt.defaults.exitcodes.EX_GENERIC)
     log.info('Evaluated master from module: {0}'.format(mod_fun))
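For context, `eval_master_func` resolves `opts['master']` by calling a user-supplied module function; after this change the return value must be a plain string. A minimal sketch of such a module (module and function names here are hypothetical):

```python
# Hypothetical custom module, e.g. saved as _modules/master_picker.py and
# referenced from the minion config with:
#   master_type: func
#   master: master_picker.pick
def pick():
    '''Must return the master address as a string (lists are rejected).'''
    return 'salt.example.com'
```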
@@ -505,7 +512,6 @@ class MinionBase(object):

         tries = opts.get('master_tries', 1)
         attempts = 0
-        resolve_dns_fallback = opts.get('resolve_dns_fallback', False)

         # if we have a list of masters, loop through them and be
         # happy with the first one that allows us to connect
@@ -544,14 +550,7 @@ class MinionBase(object):
             for master in opts['local_masters']:
                 opts['master'] = master
                 opts.update(prep_ip_port(opts))
-                try:
-                    opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
-                except SaltClientError as exc:
-                    last_exc = exc
-                    msg = ('Master hostname: \'{0}\' not found. Trying '
-                           'next master (if any)'.format(opts['master']))
-                    log.info(msg)
-                    continue
+                opts.update(resolve_dns(opts))

                 # on first run, update self.opts with the whole master list
                 # to enable a minion to re-use old masters if they get fixed
@@ -607,8 +606,8 @@ class MinionBase(object):
                     '(infinite attempts)'.format(attempts)
                 )
             opts.update(prep_ip_port(opts))
             opts.update(resolve_dns(opts))
             try:
                 opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
                 if self.opts['transport'] == 'detect':
                     self.opts['detect_mode'] = True
                     for trans in ('zeromq', 'tcp'):
@@ -688,13 +687,14 @@ class SMinion(MinionBase):

             salt '*' sys.reload_modules
         '''
-        # Ensure that a pillar key is set in the opts, otherwise the loader
-        # will pack a newly-generated empty dict as the __pillar__ dunder, and
-        # the fact that we compile the pillar below won't matter as it won't be
-        # packed into any of the modules/functions processed by the loader.
-        # Below, when pillar data is compiled, we will update this dict with
-        # the compiled pillar data.
-        self.opts['pillar'] = {}
+        self.opts['pillar'] = salt.pillar.get_pillar(
+            self.opts,
+            self.opts['grains'],
+            self.opts['id'],
+            self.opts['environment'],
+            pillarenv=self.opts.get('pillarenv'),
+        ).compile_pillar()

         self.utils = salt.loader.utils(self.opts)
         self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
         self.serializers = salt.loader.serializers(self.opts)
@@ -711,26 +711,6 @@ class SMinion(MinionBase):
         self.functions['sys.reload_modules'] = self.gen_modules
         self.executors = salt.loader.executors(self.opts)

-        compiled_pillar = salt.pillar.get_pillar(
-            self.opts,
-            self.opts['grains'],
-            self.opts['id'],
-            self.opts['environment'],
-            pillarenv=self.opts.get('pillarenv'),
-            funcs=self.functions
-        ).compile_pillar()
-
-        # Update the existing (empty) pillar dict with the compiled pillar
-        # data. This ensures that the __pillar__ dunder packed into all of the
-        # functions processed by the loader is not empty.
-        try:
-            self.opts['pillar'].update(compiled_pillar)
-        except TypeError:
-            log.warning(
-                'Compiled Pillar data %s is not a dictionary',
-                compiled_pillar
-            )

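The removed comments explain why this pattern updated the dict in place rather than replacing it: the loader packs a reference to `opts['pillar']` into every loaded function, so rebinding the key would orphan that reference. In miniature (a sketch, not commit code):

```python
opts = {'pillar': {}}
packed_pillar = opts['pillar']      # what the loader captures as __pillar__
compiled = {'role': 'web'}          # stand-in for compile_pillar()'s result
opts['pillar'].update(compiled)     # in-place: the packed reference sees it
assert packed_pillar == {'role': 'web'}
```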

class MasterMinion(object):
    '''
@@ -870,10 +850,6 @@ class MinionManager(MinionBase):
         failed = False
         while True:
             try:
-                if minion.opts.get('beacons_before_connect', False):
-                    minion.setup_beacons()
-                if minion.opts.get('scheduler_before_connect', False):
-                    minion.setup_scheduler()
                 yield minion.connect_master(failed=failed)
                 minion.tune_in(start=False)
                 break
@@ -924,10 +900,6 @@ class MinionManager(MinionBase):
         for minion in self.minions:
             minion.destroy()

-    def reload(self):
-        for minion in self.minions:
-            minion.reload()
-

class Minion(MinionBase):
    '''
@@ -952,7 +924,6 @@ class Minion(MinionBase):
         # True means the Minion is fully functional and ready to handle events.
         self.ready = False
         self.jid_queue = jid_queue
-        self.periodic_callbacks = {}

         if io_loop is None:
             if HAS_ZMQ:
@@ -993,19 +964,6 @@ class Minion(MinionBase):
         if not salt.utils.is_proxy():
             self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                         self.process_manager)
-        else:
-            if self.opts.get('beacons_before_connect', False):
-                log.warning(
-                    '\'beacons_before_connect\' is not supported '
-                    'for proxy minions. Setting to False'
-                )
-                self.opts['beacons_before_connect'] = False
-            if self.opts.get('scheduler_before_connect', False):
-                log.warning(
-                    '\'scheduler_before_connect\' is not supported '
-                    'for proxy minions. Setting to False'
-                )
-                self.opts['scheduler_before_connect'] = False

         # Install the SIGINT/SIGTERM handlers if not done so far
         if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
@@ -1056,14 +1014,6 @@ class Minion(MinionBase):
         if timeout and self._sync_connect_master_success is False:
             raise SaltDaemonNotRunning('Failed to connect to the salt-master')

-    def reload(self):
-        log.info('Minion reloading config')
-        disk_opts = salt.config.minion_config(os.path.join(salt.syspaths.CONFIG_DIR, 'minion'))  # FIXME POC
-        self.opts = salt.utils.dictupdate.merge_overwrite(self.opts, disk_opts)
-        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
-        self.schedule.functions = self.functions
-        self.schedule.returners = self.returners
-
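For context on the deleted `reload()`: it leaned on `salt.utils.dictupdate.merge_overwrite` so that freshly read disk options win over in-memory ones. A simplified model of that behavior (assumption: the real helper merges nested dicts recursively, this sketch does not):

```python
import copy

def merge_overwrite_model(dst, src):
    # Simplified: values from src (disk opts) overwrite dst (runtime opts).
    out = copy.deepcopy(dst)
    out.update(src)
    return out

opts = merge_overwrite_model({'log_level': 'info', 'id': 'web1'},
                             {'log_level': 'debug'})
assert opts == {'log_level': 'debug', 'id': 'web1'}
```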
    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
@@ -1100,22 +1050,19 @@ class Minion(MinionBase):
             pillarenv=self.opts.get('pillarenv')
         ).compile_pillar()

-        if not self.ready:
-            self._setup_core()
-        elif self.connected and self.opts['pillar']:
-            # The pillar has changed due to the connection to the master.
-            # Reload the functions so that they can use the new pillar data.
-            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
-            if hasattr(self, 'schedule'):
-                self.schedule.functions = self.functions
-                self.schedule.returners = self.returners
+        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+        self.serial = salt.payload.Serial(self.opts)
+        self.mod_opts = self._prep_mod_opts()
+        self.matcher = Matcher(self.opts, self.functions)
+        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
+        uid = salt.utils.get_uid(user=self.opts.get('user', None))
+        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)

-        if not hasattr(self, 'schedule'):
-            self.schedule = salt.utils.schedule.Schedule(
-                self.opts,
-                self.functions,
-                self.returners,
-                cleanup=[master_event(type='alive')])
+        self.schedule = salt.utils.schedule.Schedule(
+            self.opts,
+            self.functions,
+            self.returners,
+            cleanup=[master_event(type='alive')])

         # add default scheduling jobs to the minions scheduler
         if self.opts['mine_enabled'] and 'mine.update' in self.functions:
@@ -1169,6 +1116,9 @@ class Minion(MinionBase):
             self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
             self.schedule.delete_job(master_event(type='failback'), persist=True)

+        self.grains_cache = self.opts['grains']
+        self.ready = True
+
     def _return_retry_timer(self):
         '''
         Based on the minion configuration, either return a randomized timer or
@@ -1613,24 +1563,13 @@ class Minion(MinionBase):
         minion side execution.
         '''
         salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
-        multifunc_ordered = opts.get('multifunc_ordered', False)
-        num_funcs = len(data['fun'])
-        if multifunc_ordered:
-            ret = {
-                'return': [None] * num_funcs,
-                'retcode': [None] * num_funcs,
-                'success': [False] * num_funcs
-            }
-        else:
-            ret = {
-                'return': {},
-                'retcode': {},
-                'success': {}
-            }
-
-        for ind in range(0, num_funcs):
-            if not multifunc_ordered:
-                ret['success'][data['fun'][ind]] = False
+        ret = {
+            'return': {},
+            'retcode': {},
+            'success': {}
+        }
+        for ind in range(0, len(data['fun'])):
+            ret['success'][data['fun'][ind]] = False
             try:
                 if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                     # this minion is blacked out. Only allow saltutil.refresh_pillar
@@ -1645,20 +1584,12 @@ class Minion(MinionBase):
                         data['arg'][ind],
                         data)
                 minion_instance.functions.pack['__context__']['retcode'] = 0
-                if multifunc_ordered:
-                    ret['return'][ind] = func(*args, **kwargs)
-                    ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
-                        'retcode',
-                        0
-                    )
-                    ret['success'][ind] = True
-                else:
-                    ret['return'][data['fun'][ind]] = func(*args, **kwargs)
-                    ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
-                        'retcode',
-                        0
-                    )
-                    ret['success'][data['fun'][ind]] = True
+                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
+                ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
+                    'retcode',
+                    0
+                )
+                ret['success'][data['fun'][ind]] = True
             except Exception as exc:
                 trb = traceback.format_exc()
                 log.warning(
@@ -1666,10 +1597,7 @@ class Minion(MinionBase):
                         exc
                     )
                 )
-                if multifunc_ordered:
-                    ret['return'][ind] = trb
-                else:
-                    ret['return'][data['fun'][ind]] = trb
+                ret['return'][data['fun'][ind]] = trb
             ret['jid'] = data['jid']
             ret['fun'] = data['fun']
             ret['fun_args'] = data['arg']
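For reference, the removed `multifunc_ordered` mode keyed results by position rather than by function name, which kept duplicate function names in one compound job from overwriting each other. The two result shapes, sketched side by side:

```python
funs = ['test.ping', 'test.ping']   # duplicate names in one compound job

# dict form: the second result clobbers the first, since the keys collide
ret_by_name = {'return': {}, 'retcode': {}, 'success': {}}

# ordered form: one slot per invocation, so collisions are impossible
num_funcs = len(funs)
ret_ordered = {
    'return': [None] * num_funcs,
    'retcode': [None] * num_funcs,
    'success': [False] * num_funcs,
}
```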
@@ -2023,14 +1951,8 @@ class Minion(MinionBase):
         elif tag.startswith('_minion_mine'):
             self._mine_send(tag, data)
         elif tag.startswith('fire_master'):
-            if self.connected:
-                log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
-                self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
-        elif tag.startswith('__schedule_return'):
-            # reporting current connection with master
-            if data['schedule'].startswith(master_event(type='alive', master='')):
-                if data['return']:
-                    log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1]))
+            log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
+            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
         elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
             # if the master disconnect event is for a different master, raise an exception
             if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
@@ -2128,9 +2050,9 @@ class Minion(MinionBase):
                             schedule=schedule)
                     else:
                         self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                 else:
                     self.restart = True
                     self.io_loop.stop()

         elif tag.startswith(master_event(type='connected')):
             # handle this event only once. otherwise it will pollute the log
@@ -2187,109 +2109,6 @@ class Minion(MinionBase):
                 except (ValueError, NameError):
                     pass

-    def _setup_core(self):
-        '''
-        Set up the core minion attributes.
-        This is safe to call multiple times.
-        '''
-        if not self.ready:
-            # First call. Initialize.
-            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
-            self.serial = salt.payload.Serial(self.opts)
-            self.mod_opts = self._prep_mod_opts()
-            self.matcher = Matcher(self.opts, self.functions)
-            uid = salt.utils.get_uid(user=self.opts.get('user', None))
-            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
-            self.grains_cache = self.opts['grains']
-            self.ready = True
-
-    def setup_beacons(self):
-        '''
-        Set up the beacons.
-        This is safe to call multiple times.
-        '''
-        self._setup_core()
-
-        loop_interval = self.opts['loop_interval']
-        new_periodic_callbacks = {}
-
-        if 'beacons' not in self.periodic_callbacks:
-            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
-
-            def handle_beacons():
-                # Process Beacons
-                beacons = None
-                try:
-                    beacons = self.process_beacons(self.functions)
-                except Exception:
-                    log.critical('The beacon errored: ', exc_info=True)
-                if beacons and self.connected:
-                    self._fire_master(events=beacons)
-
-            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
-
-        if 'cleanup' not in self.periodic_callbacks:
-            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
-
-        # start all the other callbacks
-        for periodic_cb in six.itervalues(new_periodic_callbacks):
-            periodic_cb.start()
-
-        self.periodic_callbacks.update(new_periodic_callbacks)
-
-    def setup_scheduler(self):
-        '''
-        Set up the scheduler.
-        This is safe to call multiple times.
-        '''
-        self._setup_core()
-
-        loop_interval = self.opts['loop_interval']
-        new_periodic_callbacks = {}
-
-        if 'schedule' not in self.periodic_callbacks:
-            if not hasattr(self, 'schedule'):
-                self.schedule = salt.utils.schedule.Schedule(
-                    self.opts,
-                    self.functions,
-                    self.returners,
-                    cleanup=[master_event(type='alive')])
-
-            try:
-                if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
-                    if self.opts['grains_refresh_every'] > 1:
-                        log.debug(
-                            'Enabling the grains refresher. Will run every {0} minutes.'.format(
-                                self.opts['grains_refresh_every'])
-                        )
-                    else:  # Clean up minute vs. minutes in log message
-                        log.debug(
-                            'Enabling the grains refresher. Will run every {0} minute.'.format(
-                                self.opts['grains_refresh_every'])
-                        )
-                    self._refresh_grains_watcher(
-                        abs(self.opts['grains_refresh_every'])
-                    )
-            except Exception as exc:
-                log.error(
-                    'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
-                        exc)
-                )
-
-            # TODO: actually listen to the return and change period
-            def handle_schedule():
-                self.process_schedule(self, loop_interval)
-            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
-
-        if 'cleanup' not in self.periodic_callbacks:
-            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
-
-        # start all the other callbacks
-        for periodic_cb in six.itervalues(new_periodic_callbacks):
-            periodic_cb.start()
-
-        self.periodic_callbacks.update(new_periodic_callbacks)
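Both the removed helpers above and the inline code in `tune_in` below drive periodic work through tornado's `PeriodicCallback`, with the interval given in milliseconds. A self-contained sketch of the pattern (classic tornado API; the 5-second interval is arbitrary):

```python
import tornado.ioloop

def handle_beacons():
    # stand-in for self.process_beacons(...) plus firing events to the master
    print('processing beacons')

cb = tornado.ioloop.PeriodicCallback(handle_beacons, 5 * 1000)
cb.start()
# tornado.ioloop.IOLoop.current().start() would block and run the callback
```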
    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
@@ -2301,10 +2120,6 @@ class Minion(MinionBase):
         log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))

         if start:
-            if self.opts.get('beacons_before_connect', False):
-                self.setup_beacons()
-            if self.opts.get('scheduler_before_connect', False):
-                self.setup_scheduler()
             self.sync_connect_master()
         if self.connected:
             self._fire_master_minion_start()
@@ -2319,9 +2134,31 @@ class Minion(MinionBase):
             # On first startup execute a state run if configured to do so
             self._state_run()

-        self.setup_beacons()
-        self.setup_scheduler()
         loop_interval = self.opts['loop_interval']

+        try:
+            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
+                if self.opts['grains_refresh_every'] > 1:
+                    log.debug(
+                        'Enabling the grains refresher. Will run every {0} minutes.'.format(
+                            self.opts['grains_refresh_every'])
+                    )
+                else:  # Clean up minute vs. minutes in log message
+                    log.debug(
+                        'Enabling the grains refresher. Will run every {0} minute.'.format(
+                            self.opts['grains_refresh_every'])
+                    )
+                self._refresh_grains_watcher(
+                    abs(self.opts['grains_refresh_every'])
+                )
+        except Exception as exc:
+            log.error(
+                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
+                    exc)
+            )
+
+        self.periodic_callbacks = {}
         # schedule the stuff that runs every interval
         ping_interval = self.opts.get('ping_interval', 0) * 60
         if ping_interval > 0 and self.connected:
@@ -2337,7 +2174,30 @@ class Minion(MinionBase):
             except Exception:
                 log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
             self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
             self.periodic_callbacks['ping'].start()

+        self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
+
+        def handle_beacons():
+            # Process Beacons
+            beacons = None
+            try:
+                beacons = self.process_beacons(self.functions)
+            except Exception:
+                log.critical('The beacon errored: ', exc_info=True)
+            if beacons and self.connected:
+                self._fire_master(events=beacons)
+
+        self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
+
+        # TODO: actually listen to the return and change period
+        def handle_schedule():
+            self.process_schedule(self, loop_interval)
+        if hasattr(self, 'schedule'):
+            self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
+
+        # start all the other callbacks
+        for periodic_cb in six.itervalues(self.periodic_callbacks):
+            periodic_cb.start()
+
         # add handler to subscriber
         if hasattr(self, 'pub_channel') and self.pub_channel is not None:
@@ -2680,7 +2540,6 @@ class SyndicManager(MinionBase):
         '''
         Wrapper to call a given func on a syndic, best effort to get the one you asked for
         '''
-        success = False
         if kwargs is None:
             kwargs = {}
         for master, syndic_future in self.iter_master_options(master_id):
@@ -2690,22 +2549,17 @@ class SyndicManager(MinionBase):

             try:
                 getattr(syndic_future.result(), func)(*args, **kwargs)
-                success = True
-                if self.opts['syndic_forward_all_events']:
-                    continue
                 return
             except SaltClientError:
                 log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
                 self._mark_master_dead(master)
                 continue
-        if not success:
-            log.critical('Unable to call {0} on any masters!'.format(func))
+        log.critical('Unable to call {0} on any masters!'.format(func))

     def _return_pub_syndic(self, values, master_id=None):
         '''
         Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
         '''
-        success = False
         func = '_return_pub_multi'
         for master, syndic_future in self.iter_master_options(master_id):
             if not syndic_future.done() or syndic_future.exception():
@@ -2731,11 +2585,9 @@ class SyndicManager(MinionBase):
                 continue
             future = getattr(syndic_future.result(), func)(values)
             self.pub_futures[master] = (future, values)
-            success = True
-            if self.opts['syndic_forward_all_events']:
-                continue
-            break
-        return success
+            return True
         # Loop done and didn't exit: wasn't sent, try again later
         return False

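The removed `syndic_forward_all_events` branch turned these loops into a forward-to-one-or-all policy: without it, the first master that accepts the payload wins. The control flow in miniature (`send` is a stand-in, not salt API):

```python
def send(master):
    return True  # stand-in for getattr(syndic_future.result(), func)(...)

def forward(masters, forward_all):
    success = False
    for master in masters:
        if send(master):
            success = True
            if not forward_all:
                break   # first healthy master wins
    return success

assert forward(['m1', 'm2'], forward_all=False) is True
```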
    def iter_master_options(self, master_id=None):
        '''
@@ -3180,6 +3032,7 @@ class ProxyMinion(Minion):
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
@@ -3227,18 +3080,16 @@ class ProxyMinion(Minion):
         # we can then sync any proxymodules down from the master
         # we do a sync_all here in case proxy code was installed by
         # SPM or was manually placed in /srv/salt/_modules etc.
-        self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
-        # Pull in the utils
-        self.utils = salt.loader.utils(self.opts)
+        self.functions['saltutil.sync_all'](saltenv='base')
+
+        # Pull in the utils
+        self.utils = salt.loader.utils(self.opts)

         # Then load the proxy module
-        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
+        self.proxy = salt.loader.proxy(self.opts)

         # And re-load the modules so the __proxy__ variable gets injected
-        self._setup_core()
+        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
         self.functions.pack['__proxy__'] = self.proxy
         self.proxy.pack['__salt__'] = self.functions
         self.proxy.pack['__ret__'] = self.returners
@@ -3267,35 +3118,29 @@ class ProxyMinion(Minion):

         self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

-        self.setup_beacons()
+        self.serial = salt.payload.Serial(self.opts)
+        self.mod_opts = self._prep_mod_opts()
+        self.matcher = Matcher(self.opts, self.functions)
+        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
+        uid = salt.utils.get_uid(user=self.opts.get('user', None))
+        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)

-        if self.connected and self.opts['pillar']:
-            # The pillar has changed due to the connection to the master.
-            # Reload the functions so that they can use the new pillar data.
-            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
-            if hasattr(self, 'schedule'):
-                self.schedule.functions = self.functions
-                self.schedule.returners = self.returners
-
-        if not hasattr(self, 'schedule'):
-            self.schedule = salt.utils.schedule.Schedule(
-                self.opts,
-                self.functions,
-                self.returners,
-                cleanup=[master_event(type='alive')],
-                proxy=self.proxy)
+        self.schedule = salt.utils.schedule.Schedule(
+            self.opts,
+            self.functions,
+            self.returners)

         # add default scheduling jobs to the minions scheduler
         if self.opts['mine_enabled'] and 'mine.update' in self.functions:
             self.schedule.add_job({
                 '__mine_interval':
                 {
                     'function': 'mine.update',
                     'minutes': self.opts['mine_interval'],
                     'jid_include': True,
                     'maxrunning': 2,
                     'return_job': self.opts.get('mine_return_job', False)
                 }
             }, persist=True)
             log.info('Added mine.update to scheduler')
         else:
@@ -3303,21 +3148,19 @@ class ProxyMinion(Minion):

         # add master_alive job if enabled
         if (self.opts['transport'] != 'tcp' and
-                self.opts['master_alive_interval'] > 0 and
-                self.connected):
+                self.opts['master_alive_interval'] > 0):
             self.schedule.add_job({
                 master_event(type='alive', master=self.opts['master']):
                 {
                     'function': 'status.master',
                     'seconds': self.opts['master_alive_interval'],
                     'jid_include': True,
                     'maxrunning': 1,
                     'return_job': False,
                     'kwargs': {'master': self.opts['master'],
                                'connected': True}
                 }
             }, persist=True)

             if self.opts['master_failback'] and \
                     'master_list' in self.opts and \
                     self.opts['master'] != self.opts['master_list'][0]:
@@ -3332,36 +3175,13 @@ class ProxyMinion(Minion):
                         'kwargs': {'master': self.opts['master_list'][0]}
                     }
                 }, persist=True)

             else:
                 self.schedule.delete_job(master_event(type='failback'), persist=True)
         else:
             self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
             self.schedule.delete_job(master_event(type='failback'), persist=True)

-        # proxy keepalive
-        proxy_alive_fn = fq_proxyname+'.alive'
-        if proxy_alive_fn in self.proxy and 'status.proxy_reconnect' in self.functions and \
-                ('proxy_keep_alive' not in self.opts or ('proxy_keep_alive' in self.opts and self.opts['proxy_keep_alive'])):
-            # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
-            self.schedule.add_job({
-                '__proxy_keepalive':
-                {
-                    'function': 'status.proxy_reconnect',
-                    'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
-                    'jid_include': True,
-                    'maxrunning': 1,
-                    'return_job': False,
-                    'kwargs': {
-                        'proxy_name': fq_proxyname
-                    }
-                }
-            }, persist=True)
-            self.schedule.enable_schedule()
-        else:
-            self.schedule.delete_job('__proxy_keepalive', persist=True)

         # Sync the grains here so the proxy can communicate them to the master
-        self.functions['saltutil.sync_grains'](saltenv=self.opts['environment'])
+        self.functions['saltutil.sync_grains'](saltenv='base')
         self.grains_cache = self.opts['grains']
         self.ready = True
@@ -3811,13 +3811,13 @@ def get_managed(
         if cached_dest and (source_hash or skip_verify):
             htype = source_sum.get('hash_type', 'sha256')
             cached_sum = get_hash(cached_dest, form=htype)
-            if cached_sum != source_sum['hsum']:
-                cache_refetch = True
-            elif skip_verify:
+            if skip_verify:
+                # prev: if skip_verify or cached_sum == source_sum['hsum']:
+                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                 sfn = cached_dest
                 source_sum = {'hsum': cached_sum, 'hash_type': htype}
+            elif cached_sum != source_sum['hsum']:
+                cache_refetch = True

         # If we didn't have the template or remote file, let's get it
         # Similarly when the file has been updated and the cache has to be refreshed
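The reorder makes `skip_verify` short-circuit before any checksum comparison, so a cached copy is reused unconditionally. The decision logic in miniature (a sketch, not salt code):

```python
def needs_refetch(skip_verify, cached_sum, source_hsum):
    if skip_verify:
        return False          # trust cached_dest, synthesize its source_sum
    return cached_sum != source_hsum

assert needs_refetch(True, 'aaa', 'bbb') is False
assert needs_refetch(False, 'aaa', 'bbb') is True
```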
@@ -31,7 +31,7 @@ import salt.state
 import salt.utils
 import salt.utils.jid
 import salt.utils.url
-from salt.exceptions import SaltInvocationError
+from salt.exceptions import CommandExecutionError, SaltInvocationError
 from salt.runners.state import orchestrate as _orchestrate

 # Import 3rd-party libs
@@ -98,14 +98,15 @@ def _set_retcode(ret):
         __context__['retcode'] = 2


-def _check_pillar(kwargs):
+def _check_pillar(kwargs, pillar=None):
     '''
     Check the pillar for errors, refuse to run the state if there are errors
     in the pillar and return the pillar errors
     '''
     if kwargs.get('force'):
         return True
-    if '_errors' in __pillar__:
+    pillar_dict = pillar if pillar is not None else __pillar__
+    if '_errors' in pillar_dict:
         return False
     return True

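Callers can now hand `_check_pillar` the freshly compiled pillar (typically `st_.opts['pillar']`) instead of relying on the `__pillar__` dunder, which may not reflect what the HighState object just rendered. The new shape, sketched standalone (the `{}` fallback stands in for `__pillar__`):

```python
def _check_pillar(kwargs, pillar=None):
    # Sketch of the new signature; in salt the fallback is __pillar__.
    if kwargs.get('force'):
        return True
    pillar_dict = pillar if pillar is not None else {}
    return '_errors' not in pillar_dict

assert _check_pillar({}, {'_errors': ['render failed']}) is False
assert _check_pillar({'force': True}, {'_errors': ['x']}) is True
```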
@@ -379,6 +380,12 @@ def template(tem, queue=False, **kwargs):
     if conflict is not None:
         return conflict
     st_ = salt.state.HighState(__opts__, context=__context__)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     if not tem.endswith('.sls'):
         tem = '{sls}.sls'.format(sls=tem)
     high_state, errors = st_.render_state(tem, saltenv, '', None, local=True)
@@ -825,6 +832,12 @@ def highstate(test=None,
                               pillar_enc=pillar_enc,
                               mocked=kwargs.get('mock', False))
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        err = ['Pillar failed to render with the following messages:']
+        err += __pillar__['_errors']
+        return err
+
     st_.push_active()
     ret = {}
     orchestration_jid = kwargs.get('orchestration_jid')
@@ -992,11 +1005,6 @@ def sls(mods,
         __context__['retcode'] = 1
         return disabled
-
-    if not _check_pillar(kwargs):
-        __context__['retcode'] = 5
-        err = ['Pillar failed to render with the following messages:']
-        err += __pillar__['_errors']
-        return err
     orig_test = __opts__.get('test', None)
     opts = _get_opts(kwargs.get('localconfig'))

@@ -1032,6 +1040,12 @@ def sls(mods,
                           pillar_enc=pillar_enc,
                           mocked=kwargs.get('mock', False))
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        err = ['Pillar failed to render with the following messages:']
+        err += __pillar__['_errors']
+        return err
+
     orchestration_jid = kwargs.get('orchestration_jid')
     umask = os.umask(0o77)
     if kwargs.get('cache'):
@@ -1138,11 +1152,6 @@ def top(topfn,
     conflict = _check_queue(queue, kwargs)
     if conflict is not None:
         return conflict
-    if not _check_pillar(kwargs):
-        __context__['retcode'] = 5
-        err = ['Pillar failed to render with the following messages:']
-        err += __pillar__['_errors']
-        return err
     orig_test = __opts__.get('test', None)
     opts = _get_opts(kwargs.get('localconfig'))
     opts['test'] = _get_test_value(test, **kwargs)
@@ -1164,6 +1173,12 @@ def top(topfn,
     )

     st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc, context=__context__)
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        err = ['Pillar failed to render with the following messages:']
+        err += __pillar__['_errors']
+        return err
+
     st_.push_active()
     st_.opts['state_top'] = salt.utils.url.create(topfn)
     ret = {}
@@ -1214,6 +1229,12 @@ def show_highstate(queue=False, **kwargs):
     )

     st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     st_.push_active()
     try:
         ret = st_.compile_highstate()
@@ -1238,6 +1259,12 @@ def show_lowstate(queue=False, **kwargs):
         assert False
         return conflict
     st_ = salt.state.HighState(__opts__)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     st_.push_active()
     try:
         ret = st_.compile_low_chunks()
@@ -1345,6 +1372,13 @@ def sls_id(
         st_ = salt.state.HighState(opts, pillar=pillar, pillar_enc=pillar_enc, proxy=__proxy__)
     except NameError:
         st_ = salt.state.HighState(opts)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        err = ['Pillar failed to render with the following messages:']
+        err += __pillar__['_errors']
+        return err
+
     if isinstance(mods, six.string_types):
         split_mods = mods.split(',')
     st_.push_active()
@@ -1419,6 +1453,12 @@ def show_low_sls(mods,
     if pillarenv is not None:
         opts['pillarenv'] = pillarenv
     st_ = salt.state.HighState(opts)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     if isinstance(mods, six.string_types):
         mods = mods.split(',')
     st_.push_active()
@@ -1495,6 +1535,12 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
         opts['pillarenv'] = kwargs['pillarenv']

     st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     if isinstance(mods, six.string_types):
         mods = mods.split(',')
     st_.push_active()
@@ -1539,6 +1585,12 @@ def show_top(queue=False, **kwargs):
     if conflict is not None:
         return conflict
     st_ = salt.state.HighState(opts)
+
+    if not _check_pillar(kwargs, st_.opts['pillar']):
+        __context__['retcode'] = 5
+        raise CommandExecutionError('Pillar failed to render',
+                                    info=st_.opts['pillar']['_errors'])
+
     errors = []
     top_ = st_.get_top()
     errors += st_.verify_tops(top_)
@@ -847,7 +847,9 @@ class Pillar(object):
         top, top_errors = self.get_top()
         if ext:
             if self.opts.get('ext_pillar_first', False):
-                self.opts['pillar'], errors = self.ext_pillar({}, pillar_dirs)
+                self.opts['pillar'], errors = self.ext_pillar(
+                    self.pillar_override,
+                    pillar_dirs)
                 self.rend = salt.loader.render(self.opts, self.functions)
                 matches = self.top_matches(top)
                 pillar, errors = self.render_pillar(matches, errors=errors)
@@ -465,7 +465,7 @@ def _process_stack_cfg(cfg, stack, minion_id, pillar, namespace):
             for sub in namespace.split(':')[::-1]:
                 obj = {sub: obj}
             stack = _merge_dict(stack, obj)
-            log.info('Stack template {0} parsed'.format(path))
+            log.info('Stack template "{0}" parsed'.format(path))
         except exceptions.TopLevelLookupException as e:
             log.info('Stack template "{0}" not found.'.format(path))
             continue
@@ -62,36 +62,33 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
         '''
         Format the low data for RunnerClient()'s master_call() function

-        The master_call function here has a different function signature than
-        on WheelClient. So extract all the eauth keys and the fun key and
-        assume everything else is a kwarg to pass along to the runner function
-        to be called.
+        This also normalizes the following low data formats to a single, common
+        low data structure.
+
+        Old-style low: ``{'fun': 'jobs.lookup_jid', 'jid': '1234'}``
+        New-style: ``{'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}``
+        CLI-style: ``{'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}``
         '''
-        auth_creds = dict([(i, low.pop(i)) for i in [
-            'username', 'password', 'eauth', 'token', 'client', 'user', 'key'
-        ] if i in low])
         fun = low.pop('fun')
-        reformatted_low = {'fun': fun}
-        reformatted_low.update(auth_creds)
-        # Support old style calls where arguments could be specified in 'low' top level
-        if not low.get('arg') and not low.get('kwarg'):  # not specified or empty
-            verify_fun(self.functions, fun)
-            merged_args_kwargs = salt.utils.args.condition_input([], low)
-            parsed_input = salt.utils.args.parse_input(merged_args_kwargs)
-            args, kwargs = salt.minion.load_args_and_kwargs(
-                self.functions[fun],
-                parsed_input,
-                self.opts,
-                ignore_invalid=True
-            )
-            low['arg'] = args
-            low['kwarg'] = kwargs
-        if 'kwarg' not in low:
-            low['kwarg'] = {}
-        if 'arg' not in low:
-            low['arg'] = []
-        reformatted_low['kwarg'] = low
-        return reformatted_low
+        verify_fun(self.functions, fun)
+
+        reserved_kwargs = dict([(i, low.pop(i)) for i in [
+            'username', 'password', 'eauth', 'token', 'client', 'user', 'key',
+        ] if i in low])
+
+        # Run name=value args through parse_input. We don't need to run kwargs
+        # through because there is no way to send name=value strings in the low
+        # dict other than by including an `arg` array.
+        arg, kwarg = salt.utils.args.parse_input(
+            low.pop('arg', []), condition=False)
+        kwarg.update(low.pop('kwarg', {}))
+
+        # If anything hasn't been pop()'ed out of low by this point it must be
+        # an old-style kwarg.
+        kwarg.update(low)
+
+        return dict(fun=fun, kwarg={'kwarg': kwarg, 'arg': arg},
+                    **reserved_kwargs)

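The docstring's three input shapes all collapse into one structure: `fun` plus a `kwarg` envelope holding the parsed `arg`/`kwarg`, with the eauth keys carried alongside. Illustrative values (hand-written, not produced by running salt):

```python
old_style = {'fun': 'jobs.lookup_jid', 'jid': '1234'}
new_style = {'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}
cli_style = {'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}

# All three normalize to roughly:
normalized = {
    'fun': 'jobs.lookup_jid',
    'kwarg': {'kwarg': {'jid': '1234'}, 'arg': []},
}
```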
    def cmd_async(self, low):
        '''
@@ -897,17 +897,6 @@ def extracted(name,
             ret['comment'] = '\n'.join([str(x) for x in file_result])
             return ret

-        # Get actual state result. The state.single return is a single-element
-        # dictionary with the state's unique ID at the top level, and its value
-        # being the state's return dictionary. next(iter(dict_name)) will give
-        # us the value of the first key, so
-        # file_result[next(iter(file_result))] will give us the results of the
-        # state.single we just ran.
-        try:
-            file_result = file_result[next(iter(file_result))]
-        except AttributeError:
-            pass
-
         try:
             if not file_result['result']:
                 log.debug('failed to download {0}'.format(source_match))
@@ -1344,10 +1333,6 @@ def extracted(name,
                                              group=group,
                                              recurse=recurse,
                                              test=__opts__['test'])
-        try:
-            dir_result = dir_result[next(iter(dir_result))]
-        except AttributeError:
-            pass
         log.debug('file.directory: %s', dir_result)

         if __opts__['test']:
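Context for the two deletions: the removed lines dug the single state's return dict out of the one-key wrapper that `state.single` returns. What that unwrap did, in miniature (the state ID key is a made-up example):

```python
file_result = {'file_|-extract_|-/tmp/out_|-managed': {'result': True}}
try:
    file_result = file_result[next(iter(file_result))]
except AttributeError:
    pass  # not a dict (e.g. a list of error strings); leave as-is
assert file_result == {'result': True}
```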
@@ -32,7 +32,7 @@ def reg(name):
         for key in event['data']['data']:
             if key in ('id', 'recv_time'):
                 continue
-            idata[key] = event['data'][key]
-        __reg__['status']['val'][event['data']['data']['id']] = idata
-        ret['changes'][event['data']['data']['id']] = True
+            idata[key] = event['data']['data'][key]
+        __reg__['status']['val'][event['data']['id']] = idata
+        ret['changes'][event['data']['id']] = True
     return ret
@@ -61,8 +61,7 @@ def parse_input(args, condition=True):
             # condition_input is called below, but this is the only way to
             # gracefully handle both CLI and API input.
             if arg.pop('__kwarg__', False) is True:
-                for key, val in six.iteritems(arg):
-                    _kwargs[key] = yamlify_arg(val)
+                _kwargs.update(arg)
             else:
                 _args.append(arg)
         else:
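After this change, a dict explicitly flagged with `__kwarg__` is merged verbatim; only bare CLI strings still pass through `yamlify_arg`. Sketched behavior (values are illustrative):

```python
_args, _kwargs = [], {}
arg = {'__kwarg__': True, 'bar': 'off'}   # API-style kwarg dict
if arg.pop('__kwarg__', False) is True:
    _kwargs.update(arg)                   # 'off' stays the string 'off'
else:
    _args.append(arg)
assert _kwargs == {'bar': 'off'}
```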
@@ -444,6 +444,9 @@ class Schedule(object):
             config_dir,
             os.path.dirname(self.opts.get('default_include',
                             salt.config.DEFAULT_MINION_OPTS['default_include'])))
+        if salt.utils.is_proxy():
+            # each proxy will have a separate _schedule.conf file
+            minion_d_dir = os.path.join(minion_d_dir, self.opts['proxyid'])

         if not os.path.isdir(minion_d_dir):
             os.makedirs(minion_d_dir)
@@ -57,7 +57,6 @@ class RunnerModuleTest(integration.TestCase, integration.AdaptedConfigurationTestCaseMixIn):
             'token': token['token'],
         })

-    @skipIf(True, 'to be reenabled when #23623 is merged')
     def test_cmd_sync(self):
         low = {
             'client': 'runner',
@@ -76,7 +75,6 @@ class RunnerModuleTest(integration.TestCase, integration.AdaptedConfigurationTestCaseMixIn):

         self.runner.cmd_async(low)

-    @skipIf(True, 'to be reenabled when #23623 is merged')
     def test_cmd_sync_w_arg(self):
         low = {
             'fun': 'test.arg',
@@ -89,7 +87,6 @@ class RunnerModuleTest(integration.TestCase, integration.AdaptedConfigurationTestCaseMixIn):
         self.assertEqual(ret['kwargs']['foo'], 'Foo!')
         self.assertEqual(ret['kwargs']['bar'], 'Bar!')

-    @skipIf(True, 'to be reenabled when #23623 is merged')
     def test_wildcard_auth(self):
         low = {
             'username': 'the_s0und_of_t3ch',
@@ -106,3 +103,33 @@ class RunnerModuleTest(integration.TestCase, integration.AdaptedConfigurationTestCaseMixIn):
         low.update(self.eauth_creds)
         ret = self.runner.cmd_sync(low, full_return=True)
         self.assertIn('success', ret['data'])
+
+    def test_cmd_sync_arg_kwarg_parsing(self):
+        low = {
+            'client': 'runner',
+            'fun': 'test.arg',
+            'arg': [
+                'foo',
+                'bar=off',
+                'baz={qux: 123}'
+            ],
+            'kwarg': {
+                'quux': 'Quux',
+            },
+            'quuz': 'on',
+        }
+        low.update(self.eauth_creds)
+
+        ret = self.runner.cmd_sync(low)
+        self.assertEqual(ret, {
+            'args': ['foo'],
+            'kwargs': {
+                'bar': False,
+                'baz': {
+                    'qux': 123,
+                },
+                'quux': 'Quux',
+                'quuz': 'on',
+            },
+        })
@@ -117,6 +117,7 @@ class TestSaltAPIHandler(SaltnadoTestCase):
         self.assertEqual(response_obj['return'], ["No minions matched the target. No command was sent, no jid was assigned."])

     # local client request body test
+    @skipIf(True, 'Undetermined race condition in test. Temporarily disabled.')
     def test_simple_local_post_only_dictionary_request(self):
         '''
         Test a basic API of /
@@ -9,15 +9,22 @@ from __future__ import absolute_import
 from distutils.version import LooseVersion
+import errno
 import glob
+import logging
 import os
 import re
 import sys
 import shutil
+import socket
 import stat
 import tempfile
 import textwrap
+import threading
+import tornado.ioloop
+import tornado.web
+import filecmp

 log = logging.getLogger(__name__)

 # Import Salt Testing libs
 import tests.integration as integration
 from tests.support.unit import skipIf
@@ -2414,3 +2421,98 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
             check_file = self.run_function('file.file_exists', [file])
             if check_file:
                 self.run_function('file.remove', [file])
+
+PORT = 9999
+FILE_SOURCE = 'http://localhost:{0}/grail/scene33'.format(PORT)
+FILE_HASH = 'd2feb3beb323c79fc7a0f44f1408b4a3'
+
+
+class RemoteFileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
+    '''
+    Uses a local tornado webserver to test http(s) file.managed states with and
+    without skip_verify
+    '''
+    @classmethod
+    def webserver(cls):
+        '''
+        method to start tornado static web app
+        '''
+        application = tornado.web.Application([
+            (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATE_DIR})
+        ])
+        application.listen(PORT)
+        tornado.ioloop.IOLoop.instance().start()
+
+    @classmethod
+    def setUpClass(cls):
+        '''
+        start tornado app on thread and wait until it is running
+        '''
+        cls.server_thread = threading.Thread(target=cls.webserver)
+        cls.server_thread.daemon = True
+        cls.server_thread.start()
+        # check if tornado app is up
+        port_closed = True
+        while port_closed:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            result = sock.connect_ex(('127.0.0.1', PORT))
+            if result == 0:
+                port_closed = False
+
+    @classmethod
+    def tearDownClass(cls):
+        tornado.ioloop.IOLoop.instance().stop()
+        cls.server_thread.join()
+
+    def setUp(self):
+        fd_, self.name = tempfile.mkstemp(dir=integration.TMP)
+        try:
+            os.close(fd_)
+        except OSError as exc:
+            if exc.errno != errno.EBADF:
+                raise exc
+        # Remove the file that mkstemp just created so that the states can test
+        # creating a new file instead of a diff from a zero-length file.
+        self.tearDown()
+
+    def tearDown(self):
+        try:
+            os.remove(self.name)
+        except OSError as exc:
+            if exc.errno != errno.ENOENT:
+                raise exc
+
+    def test_file_managed_http_source_no_hash(self):
+        '''
+        Test a remote file with no hash
+        '''
+        ret = self.run_state('file.managed',
+                             name=self.name,
+                             source=FILE_SOURCE,
+                             skip_verify=False)
+        log.debug('ret = %s', ret)
+        # This should fail because no hash was provided
+        self.assertSaltFalseReturn(ret)
+
+    def test_file_managed_http_source(self):
+        '''
+        Test a remote file with no hash
+        '''
+        ret = self.run_state('file.managed',
+                             name=self.name,
+                             source=FILE_SOURCE,
+                             source_hash=FILE_HASH,
+                             skip_verify=False)
+        log.debug('ret = %s', ret)
+        self.assertSaltTrueReturn(ret)
+
+    def test_file_managed_http_source_skip_verify(self):
+        '''
+        Test a remote file using skip_verify
+        '''
+        ret = self.run_state('file.managed',
+                             name=self.name,
+                             source=FILE_SOURCE,
+                             skip_verify=True)
+        log.debug('ret = %s', ret)
+        self.assertSaltTrueReturn(ret)
@@ -25,7 +25,7 @@ from salt.modules import state
 # Globals
 state.__salt__ = {}
 state.__context__ = {}
-state.__opts__ = {}
+state.__opts__ = {'cachedir': '/D'}
 state.__pillar__ = {}

@@ -128,7 +128,8 @@ class MockState(object):
     Mock HighState class
     '''
     flag = False
-    opts = {'state_top': ""}
+    opts = {'state_top': '',
+            'pillar': {}}

     def __init__(self, opts, pillar=None, *args, **kwargs):
         self.state = MockState.State(opts,
@@ -469,16 +470,15 @@ class StateTestCase(TestCase):
         '''
         Test to clear out cached state file
         '''
-        with patch.dict(state.__opts__, {"cachedir": "/D/"}):
-            mock = MagicMock(return_value=["A.cache.p", "B.cache.p", "C"])
-            with patch.object(os, 'listdir', mock):
-                mock = MagicMock(return_value=True)
-                with patch.object(os.path, 'isfile', mock):
-                    mock = MagicMock(return_value=True)
-                    with patch.object(os, 'remove', mock):
-                        self.assertEqual(state.clear_cache(),
-                                         ['A.cache.p',
-                                          'B.cache.p'])
+        mock = MagicMock(return_value=["A.cache.p", "B.cache.p", "C"])
+        with patch.object(os, 'listdir', mock):
+            mock = MagicMock(return_value=True)
+            with patch.object(os.path, 'isfile', mock):
+                mock = MagicMock(return_value=True)
+                with patch.object(os, 'remove', mock):
+                    self.assertEqual(state.clear_cache(),
+                                     ['A.cache.p',
+                                      'B.cache.p'])
    def test_single(self):
        '''
@@ -541,11 +541,10 @@ class StateTestCase(TestCase):

         mock = MagicMock(return_value=["True"])
         with patch.object(state, 'apply_', mock):
-            with patch.dict(state.__opts__, {"cachedir": "/D/"}):
-                mock = MagicMock(return_value="")
-                with patch.object(os, 'remove', mock):
-                    self.assertListEqual(state.run_request("name"),
-                                         ["True"])
+            mock = MagicMock(return_value="")
+            with patch.object(os, 'remove', mock):
+                self.assertListEqual(state.run_request("name"),
+                                     ["True"])

     def test_show_highstate(self):
         '''
@@ -730,57 +729,53 @@ class StateTestCase(TestCase):
                         mock = MagicMock(return_value=True)
                         with patch.object(salt.payload, 'Serial',
                                           mock):
-                            with patch.dict(state.__opts__,
-                                            {"cachedir": "D"}):
-                                with patch.object(os.path,
-                                                  'join', mock):
-                                    with patch.object(
-                                        state,
-                                        '_set'
-                                        '_retcode',
-                                        mock):
-                                        self.assertTrue(state.
-                                                        highstate
-                                                        (arg))
+                            with patch.object(os.path,
+                                              'join', mock):
+                                with patch.object(
+                                    state,
+                                    '_set'
+                                    '_retcode',
+                                    mock):
+                                    self.assertTrue(state.
+                                                    highstate
+                                                    (arg))

    def test_clear_request(self):
        '''
        Test to clear out the state execution request without executing it
        '''
-        with patch.dict(state.__opts__, {"cachedir": "D"}):
-            mock = MagicMock(return_value=True)
-            with patch.object(os.path, 'join', mock):
-                mock = MagicMock(return_value=True)
-                with patch.object(salt.payload, 'Serial', mock):
-                    mock = MagicMock(side_effect=[False, True, True])
-                    with patch.object(os.path, 'isfile', mock):
-                        self.assertTrue(state.clear_request("A"))
-
-                mock = MagicMock(return_value=True)
-                with patch.object(os, 'remove', mock):
-                    self.assertTrue(state.clear_request())
-
-            mock = MagicMock(return_value={})
-            with patch.object(state, 'check_request', mock):
-                self.assertFalse(state.clear_request("A"))
+        mock = MagicMock(return_value=True)
+        with patch.object(os.path, 'join', mock):
+            mock = MagicMock(return_value=True)
+            with patch.object(salt.payload, 'Serial', mock):
+                mock = MagicMock(side_effect=[False, True, True])
+                with patch.object(os.path, 'isfile', mock):
+                    self.assertTrue(state.clear_request("A"))
+
+            mock = MagicMock(return_value=True)
+            with patch.object(os, 'remove', mock):
+                self.assertTrue(state.clear_request())
+
+        mock = MagicMock(return_value={})
+        with patch.object(state, 'check_request', mock):
+            self.assertFalse(state.clear_request("A"))
    @patch('salt.modules.state.salt.payload', MockSerial)
    def test_check_request(self):
        '''
        Test to return the state request information
        '''
-        with patch.dict(state.__opts__, {"cachedir": "D"}):
-            mock = MagicMock(return_value=True)
-            with patch.object(os.path, 'join', mock):
-                mock = MagicMock(side_effect=[True, True, False])
-                with patch.object(os.path, 'isfile', mock):
-                    with patch('salt.utils.fopen', mock_open()):
-                        self.assertDictEqual(state.check_request(), {'A': 'B'})
-
-                    with patch('salt.utils.fopen', mock_open()):
-                        self.assertEqual(state.check_request("A"), 'B')
-
-                    self.assertDictEqual(state.check_request(), {})
+        mock = MagicMock(return_value=True)
+        with patch.object(os.path, 'join', mock):
+            mock = MagicMock(side_effect=[True, True, False])
+            with patch.object(os.path, 'isfile', mock):
+                with patch('salt.utils.fopen', mock_open()):
+                    self.assertDictEqual(state.check_request(), {'A': 'B'})
+
+                with patch('salt.utils.fopen', mock_open()):
+                    self.assertEqual(state.check_request("A"), 'B')
+
+                self.assertDictEqual(state.check_request(), {})
    def test_request(self):
        '''
@@ -788,28 +783,21 @@ class StateTestCase(TestCase):
         '''
         mock = MagicMock(return_value=True)
         with patch.object(state, 'apply_', mock):
-            with patch.dict(state.__opts__, {"cachedir": "D"}):
-                mock = MagicMock(return_value=True)
-                with patch.object(os.path, 'join', mock):
-                    mock = MagicMock(return_value=
-                                     {"test_run": "",
-                                      "mods": "",
-                                      "kwargs": ""})
-                    with patch.object(state, 'check_request', mock):
-                        mock = MagicMock(return_value=True)
-                        with patch.object(os, 'umask', mock):
-                            with patch.object(salt.utils, 'is_windows', mock):
-                                with patch.dict(state.__salt__,
-                                                {'cmd.run': mock}):
-                                    with patch('salt.utils.fopen',
-                                               mock_open()):
-                                        mock = MagicMock(
-                                            return_value=True)
-                                        with patch.object(os, 'umask',
-                                                          mock):
-                                            self.assertTrue(
-                                                state.request("A")
-                                            )
+            mock = MagicMock(return_value=True)
+            with patch.object(os.path, 'join', mock):
+                mock = MagicMock(return_value=
+                                 {"test_run": "",
+                                  "mods": "",
+                                  "kwargs": ""})
+                with patch.object(state, 'check_request', mock):
+                    mock = MagicMock(return_value=True)
+                    with patch.object(os, 'umask', mock):
+                        with patch.object(salt.utils, 'is_windows', mock):
+                            with patch.dict(state.__salt__, {'cmd.run': mock}):
+                                with patch('salt.utils.fopen', mock_open()):
+                                    mock = MagicMock(return_value=True)
+                                    with patch.object(os, 'umask', mock):
+                                        self.assertTrue(state.request("A"))

    def test_sls(self):
        '''
@@ -866,59 +854,50 @@ class StateTestCase(TestCase):
                                           True,
                                           pillar="A")

-            with patch.dict(
-                state.__opts__,
-                {"cachedir": "/D/"}):
-                mock = MagicMock(return_value=
-                                 "/D/cache.cache.p")
-                with patch.object(os.path,
-                                  'join',
-                                  mock):
-                    mock = MagicMock(return_value=True)
-                    with patch.object(os.path,
-                                      'isfile',
-                                      mock):
-                        with patch(
-                            'salt.utils.fopen',
-                            mock_open()):
-                            self.assertTrue(
-                                state.sls(arg,
-                                          None,
-                                          None,
-                                          None,
-                                          True,
-                                          cache=True))
-
-                MockState.HighState.flag = True
-                self.assertTrue(state.sls("core,edit"
-                                          ".vim dev",
-                                          None,
-                                          None,
-                                          None,
-                                          True))
-
-                MockState.HighState.flag = False
-                mock = MagicMock(return_value=True)
-                with patch.dict(state.__salt__,
-                                {'config.option':
-                                 mock}):
-                    mock = MagicMock(return_value=
-                                     True)
-                    with patch.object(
-                        state,
-                        '_filter_'
-                        'running',
-                        mock):
-                        with patch.dict(
-                            state.
-                            __opts__,
-                            {"cachedir":
-                             "/D/"}):
-                            self.sub_test_sls()
+            mock = MagicMock(return_value="/D/cache.cache.p")
+            with patch.object(os.path,
+                              'join',
+                              mock):
+                mock = MagicMock(return_value=True)
+                with patch.object(os.path,
+                                  'isfile',
+                                  mock):
+                    with patch(
+                        'salt.utils.fopen',
+                        mock_open()):
+                        self.assertTrue(
+                            state.sls(arg,
+                                      None,
+                                      None,
+                                      None,
+                                      True,
+                                      cache=True))
+
+            MockState.HighState.flag = True
+            self.assertTrue(state.sls("core,edit"
+                                      ".vim dev",
+                                      None,
+                                      None,
+                                      None,
+                                      True))
+
+            MockState.HighState.flag = False
+            mock = MagicMock(return_value=True)
+            with patch.dict(state.__salt__,
+                            {'config.option':
+                             mock}):
+                mock = MagicMock(return_value=
+                                 True)
+                with patch.object(
+                    state,
+                    '_filter_'
+                    'running',
+                    mock):
+                    self.sub_test_sls()

    def sub_test_sls(self):
        '''