Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 00:55:19 +00:00)

Merge branch 'develop' of github.com:saltstack/salt into reserve_public_ip

Commit 01cf2aef62
@@ -38,7 +38,7 @@ Set up the provider cloud configuration file at ``/etc/salt/cloud.providers`` or
Profile Configuration
=====================
Linode profiles require a ``provider``, ``size``, ``image``, and ``location``. Set up an initial profile
at ``/etc/salt/cloud.profiles`` or in the ``/etc/salt/cloud.profiles.d/`` directory:
at ``/etc/salt/cloud.profiles`` or ``/etc/salt/cloud.profiles.d/*.conf``:

.. code-block:: yaml
@@ -30,6 +30,21 @@ Deprecations
Module Deprecations
===================

The ``napalm_network`` module had the following changes:

- Support for the ``template_path`` argument has been removed in the ``load_template``
  function. This is because support for NAPALM native templates has been
  dropped.

The ``trafficserver`` module had the following changes:

- Support for the ``match_var`` function was removed. Please use the
  ``match_metric`` function instead.
- Support for the ``read_var`` function was removed. Please use the
  ``read_config`` function instead.
- Support for the ``set_var`` function was removed. Please use the
  ``set_config`` function instead.

The ``win_update`` module has been removed. It has been replaced by the
``win_wua`` module.

@@ -114,4 +129,14 @@ instead:
- The ``k8s.label_folder_absent`` function was removed. Please update applicable
  SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.

The ``netconfig`` state had the following changes:

- Support for the ``template_path`` option in the ``managed`` state has been
  removed. This is because support for NAPALM native templates has been dropped.

The ``trafficserver`` state had the following changes:

- Support for the ``set_var`` function was removed. Please use the ``config``
  function instead.

The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.
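For readers migrating, a hedged sketch of the one-to-one replacements implied by the notes above (names come from the notes themselves; that the replacements are argument-compatible drop-ins is an assumption):

.. code-block:: python

    # Hedged migration map, derived only from the deprecation notes above.
    OLD_TO_NEW = {
        'trafficserver.match_var': 'trafficserver.match_metric',  # or match_config
        'trafficserver.read_var': 'trafficserver.read_config',    # or read_metric
        'trafficserver.set_var': 'trafficserver.set_config',
        'win_update': 'win_wua',  # the whole module was replaced
    }

    def migrate(call):
        '''Rewrite a deprecated function reference to its replacement.'''
        return OLD_TO_NEW.get(call, call)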
@@ -533,10 +533,10 @@ def list_nodes_full(call=None):
image_ref['sku'],
image_ref['version'],
])
except TypeError:
except (TypeError, KeyError):
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except TypeError:
except (TypeError, KeyError):
node['image'] = None
try:
netifaces = node['network_profile']['network_interfaces']
@@ -69,11 +69,11 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
Execute the given engine in a new process
'''
def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
def __init__(self, opts, fun, config, funcs, runners, proxy, **kwargs):
'''
Set up the process executor
'''
super(Engine, self).__init__(log_queue=log_queue)
super(Engine, self).__init__(**kwargs)
self.opts = opts
self.config = config
self.fun = fun
@@ -93,17 +93,21 @@ class Engine(SignalHandlingMultiprocessingProcess):
state['funcs'],
state['runners'],
state['proxy'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'fun': self.fun,
'config': self.config,
'funcs': self.funcs,
'runners': self.runners,
'proxy': self.proxy,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'fun': self.fun,
'config': self.config,
'funcs': self.funcs,
'runners': self.runners,
'proxy': self.proxy,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def run(self):
'''
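The pattern behind these ``__getstate__``/``__setstate__`` edits, as a minimal standalone sketch (class and field names here are illustrative, not Salt's): on Windows there is no ``fork()``, so a child process is rebuilt from a pickled state dict, and any logging setting not round-tripped through that dict is silently lost.

.. code-block:: python

    import multiprocessing

    class Worker(multiprocessing.Process):
        def __init__(self, opts, log_queue=None, log_queue_level=None):
            super(Worker, self).__init__()
            self.opts = opts
            self.log_queue = log_queue
            self.log_queue_level = log_queue_level

        def __getstate__(self):
            # Everything the spawned child needs must be listed here.
            return {'opts': self.opts,
                    'log_queue': self.log_queue,
                    'log_queue_level': self.log_queue_level}

        def __setstate__(self, state):
            self.__init__(state['opts'],
                          log_queue=state['log_queue'],
                          log_queue_level=state['log_queue_level'])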
@@ -117,6 +117,7 @@ __EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
__MP_LOGGING_LEVEL = GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
@@ -820,6 +821,37 @@ def set_multiprocessing_logging_queue(queue):
__MP_LOGGING_QUEUE = queue


def get_multiprocessing_logging_level():
return __MP_LOGGING_LEVEL


def set_multiprocessing_logging_level(log_level):
global __MP_LOGGING_LEVEL
__MP_LOGGING_LEVEL = log_level


def set_multiprocessing_logging_level_by_opts(opts):
'''
This will set the multiprocessing logging level to the lowest
logging level of all the types of logging that are configured.
'''
global __MP_LOGGING_LEVEL

log_levels = []
log_levels.append(
LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR)
)
log_levels.append(
LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
)
for level in six.itervalues(opts.get('log_granular_levels', {})):
log_levels.append(
LOG_LEVELS.get(level.lower(), logging.ERROR)
)

__MP_LOGGING_LEVEL = min(log_levels)


def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
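How the new helper picks a level, worked through with an assumed, abridged stand-in for Salt's ``LOG_LEVELS`` mapping: the multiprocessing listener must run at the most verbose level any configured output wants, hence ``min()``.

.. code-block:: python

    import logging

    # Abridged stand-in for salt.log's LOG_LEVELS mapping (assumption).
    LOG_LEVELS = {'garbage': 1, 'trace': 5, 'debug': 10,
                  'info': 20, 'warning': 30, 'error': 40}

    opts = {'log_level': 'warning',
            'log_level_logfile': 'debug',
            'log_granular_levels': {'salt.transport': 'trace'}}

    levels = [LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR),
              LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)]
    for lvl in opts.get('log_granular_levels', {}).values():
        levels.append(LOG_LEVELS.get(lvl.lower(), logging.ERROR))

    print(min(levels))  # 5: 'trace' wins, so nothing is filtered too early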
@@ -883,11 +915,13 @@ def setup_multiprocessing_logging(queue=None):
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest to get all messages
logging.root.setLevel(logging.GARBAGE)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
'under PID: %s', os.getpid()
'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal
@@ -139,13 +139,13 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance

:param dict opts: The salt options
'''
super(Maintenance, self).__init__(log_queue=log_queue)
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
@@ -159,11 +159,18 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def _post_fork_init(self):
'''
@@ -708,6 +715,7 @@ class Master(SMaster):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets

self.process_manager.add_process(
@@ -757,13 +765,13 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, log_queue=None):
def __init__(self, hopts, **kwargs):
'''
Create a halite instance

:param dict hopts: The halite options
'''
super(Halite, self).__init__(log_queue=log_queue)
super(Halite, self).__init__(**kwargs)
self.hopts = hopts

# __setstate__ and __getstate__ are only used on Windows.
@@ -771,11 +779,18 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['hopts'], log_queue=state['log_queue'])
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'hopts': self.hopts,
'log_queue': self.log_queue}
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def run(self):
'''
@@ -790,7 +805,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server

@@ -801,7 +816,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(log_queue=log_queue)
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
@@ -813,15 +828,24 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], state['key'], state['mkey'],
log_queue=state['log_queue'], secrets=state['secrets'])
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'log_queue': self.log_queue,
'secrets': self.secrets}
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
self.destroy(signum)
@@ -833,6 +857,8 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
@@ -863,6 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
@@ -944,7 +971,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(log_queue=state['log_queue'])
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
@@ -953,13 +983,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
SMaster.secrets = state['secrets']

def __getstate__(self):
return {'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'log_queue': self.log_queue,
'secrets': SMaster.secrets}
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
@@ -530,16 +530,19 @@ def _run(cmd,
if python_shell is None:
python_shell = False

kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env if six.PY3 else salt.utils.data.encode(run_env),
'stdin': six.text_type(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr,
'with_communicate': with_communicate,
'timeout': timeout,
'bg': bg,
}
new_kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env if six.PY3 else salt.utils.data.encode(run_env),
'stdin': six.text_type(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr,
'with_communicate': with_communicate,
'timeout': timeout,
'bg': bg,
}

if 'stdin_raw_newlines' in kwargs:
new_kwargs['stdin_raw_newlines'] = kwargs['stdin_raw_newlines']

if umask is not None:
_umask = six.text_type(umask).lstrip('0')
@@ -556,18 +559,18 @@ def _run(cmd,
_umask = None

if runas or group or umask:
kwargs['preexec_fn'] = functools.partial(
salt.utils.user.chugid_and_umask,
runas,
_umask,
group)
new_kwargs['preexec_fn'] = functools.partial(
salt.utils.user.chugid_and_umask,
runas,
_umask,
group)

if not salt.utils.platform.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if kwargs['shell'] is True:
kwargs['executable'] = shell
kwargs['close_fds'] = True
if new_kwargs['shell'] is True:
new_kwargs['executable'] = shell
new_kwargs['close_fds'] = True

if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
@@ -595,14 +598,13 @@ def _run(cmd,
if not use_vt:
# This is where the magic happens
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
proc = salt.utils.timed_subprocess.TimedProc(cmd, **new_kwargs)
except (OSError, IOError) as exc:
msg = (
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: '.format(
cmd if output_loglevel is not None
else 'REDACTED',
kwargs
cmd if output_loglevel is not None else 'REDACTED',
new_kwargs
)
)
try:
@@ -678,11 +680,11 @@ def _run(cmd,
ret['stdout'] = out
ret['stderr'] = err
else:
to = ''
formatted_timeout = ''
if timeout:
to = ' (timeout: {0}s)'.format(timeout)
formatted_timeout = ' (timeout: {0}s)'.format(timeout)
if output_loglevel is not None:
msg = 'Running {0} in VT{1}'.format(cmd, to)
msg = 'Running {0} in VT{1}'.format(cmd, formatted_timeout)
log.debug(log_callback(msg))
stdout, stderr = '', ''
now = time.time()
@@ -691,18 +693,20 @@ def _run(cmd,
else:
will_timeout = -1
try:
proc = salt.utils.vt.Terminal(cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True)
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=new_kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True
)
ret['pid'] = proc.pid
while proc.has_unread_data:
try:
@@ -1028,6 +1032,14 @@ def run(cmd,

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
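What the default un-escaping means in practice, as a tiny illustration of the docstring above (the ``replace`` call stands in for Salt's internal handling):

.. code-block:: python

    stdin = 'line1\\nline2'            # a literal backslash-n, as typed on a CLI
    print(stdin.replace('\\n', '\n'))  # default behavior: becomes a real newline
    # With stdin_raw_newlines=True the value would be passed through untouched.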
@@ -1261,6 +1273,15 @@ def shell(cmd,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -1467,6 +1488,15 @@ def run_stdout(cmd,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -1656,6 +1686,15 @@ def run_stderr(cmd,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -1869,6 +1908,15 @@ def run_all(cmd,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -2049,6 +2097,15 @@ def retcode(cmd,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -2288,6 +2345,15 @@ def script(source,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -2522,6 +2588,15 @@ def script_retcode(source,
the return code will be overridden with zero.

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -3320,6 +3395,14 @@ def powershell(cmd,

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

:returns:
:dict: A dictionary of data returned by the powershell command.
@@ -3618,6 +3701,14 @@ def powershell_all(cmd,

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

:return: A dictionary with the following entries:

result
@@ -3869,6 +3960,14 @@ def run_bg(cmd,

.. versionadded:: Fluorine

:param bool stdin_raw_newlines: False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.

.. versionadded:: Fluorine

CLI Example:

.. code-block:: bash
@@ -19,17 +19,16 @@ Dependencies
.. versionchanged:: 2017.7.0
'''

# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function

# Import Python lib
import logging

log = logging.getLogger(__name__)

# Import Salt libs
import salt.utils.files
import salt.utils.napalm
import salt.utils.templates
import salt.utils.versions

# Import 3rd-party libs
from salt.ext import six
@@ -1314,7 +1313,6 @@ def load_config(filename=None,
@salt.utils.napalm.proxy_napalm_wrap
def load_template(template_name,
template_source=None,
template_path=None,
template_hash=None,
template_hash_name=None,
template_user='root',
@@ -1350,10 +1348,6 @@ def load_template(template_name,

To replace the config, set ``replace`` to ``True``.

.. warning::
The support for native NAPALM templates will be dropped in Salt Fluorine.
Implicitly, the ``template_path`` argument will be removed.

template_name
Identifies path to the template source.
The template can be stored either on the local machine or remotely.
@@ -1383,16 +1377,6 @@ def load_template(template_name,
template_source: None
Inline config template to be rendered and loaded on the device.

template_path: None
Required only in case the argument ``template_name`` provides only the file basename
when referencing a local template using the absolute path.
E.g.: if ``template_name`` is specified as ``my_template.jinja``,
in order to find the template, this argument must be provided:
``template_path: /absolute/path/to/``.

.. note::
This argument will be deprecated beginning with release codename ``Fluorine``.

template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
@@ -1528,15 +1512,15 @@ def load_template(template_name,
# inline template using pillar data:
salt -G 'os:junos' net.load_template set_hostname template_source='system { host-name {{pillar.proxy.host}}; }'

salt '*' net.load_template my_template template_path='/tmp/tpl/' my_param='aaa' # will commit
salt '*' net.load_template my_template template_path='/tmp/tpl/' my_param='aaa' test=True # dry run
salt '*' net.load_template my_template my_param='aaa' # will commit
salt '*' net.load_template my_template my_param='aaa' test=True # dry run

salt '*' net.load_template salt://templates/my_stuff.jinja debug=True # equivalent of the next command
salt '*' net.load_template my_stuff.jinja template_path=salt://templates/ debug=True
salt '*' net.load_template my_stuff.jinja debug=True

# in case the template needs to include files that are not under the same path (e.g. http://),
# to help the templating engine find it, you will need to specify the `saltenv` argument:
salt '*' net.load_template my_stuff.jinja template_path=salt://templates saltenv=/path/to/includes debug=True
salt '*' net.load_template my_stuff.jinja saltenv=/path/to/includes debug=True

# render a mako template:
salt '*' net.load_template salt://templates/my_stuff.mako template_engine=mako debug=True
@@ -1564,11 +1548,6 @@ def load_template(template_name,
'out': None
}
loaded_config = None
if template_path:
salt.utils.versions.warn_until(
'Fluorine',
'Use of `template_path` detected. This argument will be removed in Salt Fluorine.'
)
# prechecks
if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY:
_loaded.update({
@@ -1584,11 +1563,10 @@ def load_template(template_name,
salt_render = False
for salt_render_prefix in salt_render_prefixes:
if not salt_render:
salt_render = salt_render or template_name.startswith(salt_render_prefix) or \
(template_path and template_path.startswith(salt_render_prefix))
salt_render = salt_render or template_name.startswith(salt_render_prefix)
file_exists = __salt__['file.file_exists'](template_name)

if template_source or template_path or file_exists or salt_render:
if template_source or file_exists or salt_render:
# either inline template
# either template in a custom path
# either abs path send
@@ -1599,7 +1577,7 @@ def load_template(template_name,
if template_source:
# render the content
if not saltenv:
saltenv = template_path if template_path else 'base' # either use the env from the path, either base
saltenv = 'base'
_rendered = __salt__['file.apply_template_on_contents'](
contents=template_source,
template=template_engine,
@@ -1618,16 +1596,15 @@ def load_template(template_name,
_loaded['comment'] = 'Error while rendering the template.'
return _loaded
else:
if template_path and not file_exists:
template_name = __salt__['file.join'](template_path, template_name)
if not saltenv:
# no saltenv overridden
# use the custom template path
saltenv = template_path if not salt_render else 'base'
if not file_exists and not saltenv:
# no saltenv overridden
# use the custom template path
saltenv = 'base'
elif salt_render and not saltenv:
# if saltenv not overrided and path specified as salt:// or http:// etc.
# if saltenv not overridden and path specified as salt:// or http:// etc.
# will use the default environment, from the base
saltenv = template_path if template_path else 'base'
saltenv = 'base'

if not saltenv:
# still not specified, default to `base`
saltenv = 'base'
@@ -1700,7 +1677,6 @@ def load_template(template_name,
{
'template_name': template_name,
'template_source': template_source, # inline template
'template_path': template_path,
'pillar': __pillar__, # inject pillar content
'grains': __grains__, # inject grains content
'opts': __opts__ # inject opts content
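The net effect of dropping ``template_path`` on environment selection, summarized as a sketch (the helper name is illustrative): the environment is whatever the caller passes, else ``base``; it is no longer derived from a template path.

.. code-block:: python

    def pick_saltenv(saltenv=None):
        # Post-removal rule: an explicit saltenv wins, otherwise 'base'.
        return saltenv if saltenv else 'base'

    assert pick_saltenv() == 'base'
    assert pick_saltenv('dev') == 'dev'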
@@ -16,7 +16,6 @@ import subprocess
# Import salt libs
import salt.utils.path
import salt.utils.stringutils
import salt.utils.versions

__virtualname__ = 'trafficserver'

@@ -203,28 +202,6 @@ def restart_local(drain=False):
return _subprocess(cmd)


def match_var(regex):
'''
Display the current values of all performance statistics or configuration
variables whose names match the given regular expression.

.. deprecated:: Fluorine
Use ``match_metric`` or ``match_config`` instead.

.. code-block:: bash

salt '*' trafficserver.match_var regex
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'match_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'match_metric\' or \'match_config\' instead.'
)
cmd = _traffic_line('-m', regex)
log.debug('Running: %s', cmd)
return _subprocess(cmd)


def match_metric(regex):
'''
Display the current values of all metrics whose names match the
@@ -345,55 +322,6 @@ def set_config(variable, value):
return _subprocess(cmd)


def read_var(*args):
'''
Read variable definitions from the traffic_line command.

.. deprecated:: Fluorine
Use ``read_metric`` or ``read_config`` instead. Note that this
function does not work for Traffic Server versions >= 7.0.

.. code-block:: bash

salt '*' trafficserver.read_var proxy.process.http.tcp_hit_count_stat
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'read_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'read_metric\' or \'read_config\' instead.'
)

ret = {}

try:
for arg in args:
log.debug('Querying: %s', arg)
cmd = '{0} {1} {2}'.format(_TRAFFICLINE, '-r', arg)
ret[arg] = _subprocess(cmd)
except KeyError:
pass

return ret


def set_var(variable, value):
'''
.. code-block:: bash

.. deprecated:: Fluorine
Use ``set_config`` instead. Note that this function does
not work for Traffic Server versions >= 7.0.

salt '*' trafficserver.set_var proxy.config.http.server_ports
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'set_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'set_config\' instead.'
)
return set_config(variable, value)


def shutdown():
'''
Shut down Traffic Server on the local node.
salt/pillar/netbox.py (new file, 129 lines)
@@ -0,0 +1,129 @@
# -*- coding: utf-8 -*-
'''
A module that adds data to the Pillar structure from a NetBox API.


Configuring the NetBox ext_pillar
====================================

.. code-block:: yaml

ext_pillar:
- netbox:
api_url: http://netbox_url.com/api/

The following are optional, and determine whether or not the module will
attempt to configure the ``proxy`` pillar data for use with the napalm
proxy-minion:

.. code-block:: yaml

proxy_return: True
proxy_username: admin
api_token: 123abc

Create a token in your NetBox instance at
http://netbox_url.com/user/api-tokens/

By default, this module will query the NetBox API for the platform associated
with the device, and use the 'NAPALM driver' field to set the napalm
proxy-minion driver. (Currently only 'napalm' is supported for drivertype.)

This module assumes you will use SSH keys to authenticate to the network device.
If password authentication is desired, it is recommended to create another
``proxy`` key in pillar_roots (or git_pillar) with just the ``passwd`` key and
use :py:func:`salt.renderers.gpg <salt.renderers.gpg>` to encrypt the value.
If any additional options for the proxy setup are needed they should also be
configured in pillar_roots.
'''


from __future__ import absolute_import, print_function, unicode_literals
import logging

try:
import requests
import ipaddress
_HAS_DEPENDENCIES = True
except ImportError:
_HAS_DEPENDENCIES = False

log = logging.getLogger(__name__)


def __virtual__():
return _HAS_DEPENDENCIES


def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Query NetBox API for minion data
'''

# Pull settings from kwargs
api_url = kwargs['api_url'].rstrip('/')

api_token = kwargs.get('api_token', None)
proxy_username = kwargs.get('proxy_username', None)
proxy_return = kwargs.get('proxy_return', True)

ret = {}

headers = {}
if api_token:
headers['Authorization'] = 'Token ' + api_token

# Fetch device from API
device_results = requests.get(
api_url + '/dcim/devices/',
params={'name': minion_id, },
headers=headers,
)

# Check status code for API call
if device_results.status_code != requests.codes.ok:
log.warn('API query failed for "%s", status code: %d',
minion_id, device_results.status_code)

# Assign results from API call to "netbox" key
try:
devices = device_results.json()['results']
if len(devices) == 1:
ret['netbox'] = devices[0]
elif len(devices) > 1:
log.error('More than one device found for "%s"', minion_id)
except Exception:
log.error('Device not found for "%s"', minion_id)
if proxy_return:
# Attempt to add "proxy" key, based on platform API call
try:
# Fetch device from API
platform_results = requests.get(
ret['netbox']['platform']['url'],
headers=headers,
)

# Check status code for API call
if platform_results.status_code != requests.codes.ok:
log.info('API query failed for "%s", status code: %d',
minion_id, platform_results.status_code)

# Assign results from API call to "proxy" key if the platform has a
# napalm_driver defined.
napalm_driver = platform_results.json().get('napalm_driver')
if napalm_driver:
ret['proxy'] = {
'host': str(ipaddress.IPv4Interface(
ret['netbox']['primary_ip4']['address']).ip),
'driver': napalm_driver,
'proxytype': 'napalm',
}

if proxy_username:
ret['proxy']['username'] = proxy_username

except Exception:
log.debug(
'Could not create proxy config data for "%s"', minion_id)

return ret
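A hypothetical smoke test for the new ext_pillar, calling it the way the pillar system would (the device name, URL, and token are placeholders taken from the docstring's examples):

.. code-block:: python

    import salt.pillar.netbox as netbox

    pillar = netbox.ext_pillar(
        'edge-router-1',                        # minion_id
        {},                                     # existing pillar
        api_url='http://netbox_url.com/api/',
        api_token='123abc',
        proxy_return=True,
        proxy_username='admin',
    )
    # Expect pillar['netbox'] with the device record and, if the platform
    # defines a NAPALM driver, pillar['proxy'] with host/driver/proxytype.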
@@ -263,13 +263,12 @@ def _get_key_dir():
return gpg_keydir


def _decrypt_ciphertext(matchobj):
def _decrypt_ciphertext(cipher):
'''
Given a block of ciphertext as a string, and a gpg object, try to decrypt
the cipher and return the decrypted string. If the cipher cannot be
decrypted, log the error, and return the ciphertext back out.
'''
cipher = matchobj.group()
if six.PY3:
cipher = cipher.encode(__salt_system_encoding__)
cmd = [_get_gpg_exec(), '--homedir', _get_key_dir(), '--status-fd', '2',
@@ -294,7 +293,14 @@ def _decrypt_ciphertext(matchobj):
def _decrypt_ciphertexts(cipher, translate_newlines=False):
if translate_newlines:
cipher = cipher.replace(r'\n', '\n')
return GPG_CIPHERTEXT.sub(_decrypt_ciphertext, cipher)
ret, num = GPG_CIPHERTEXT.subn(lambda m: _decrypt_ciphertext(m.group()), cipher)
if num > 0:
# Remove trailing newlines. Without this, a value that was encrypted from
# a YAML multiline string will contain an unexpected trailing newline.
return ret.rstrip('\n')
else:
# Possibly just encrypted data without begin/end marks
return _decrypt_ciphertext(cipher)


def _decrypt_object(obj, translate_newlines=False):
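The dispatch this rewrite introduces, isolated with a stand-in pattern and decryptor (both are assumptions, not Salt's real internals) so the two paths are visible: armored blocks are substituted and the YAML-multiline trailing newline stripped; anything else is treated as one bare ciphertext.

.. code-block:: python

    import re

    GPG_CIPHERTEXT = re.compile(
        r'-----BEGIN PGP MESSAGE-----.*?-----END PGP MESSAGE-----', re.DOTALL)

    def _decrypt_ciphertext(cipher):
        return 'SECRET'  # stand-in for the real gpg subprocess call

    def _decrypt_ciphertexts(cipher):
        ret, num = GPG_CIPHERTEXT.subn(
            lambda m: _decrypt_ciphertext(m.group()), cipher)
        if num > 0:
            return ret.rstrip('\n')
        return _decrypt_ciphertext(cipher)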
@@ -18,14 +18,14 @@ Dependencies
.. versionadded:: 2017.7.0
'''

# Import Salt libs
from __future__ import absolute_import, print_function, unicode_literals

import logging

log = logging.getLogger(__name__)

# import NAPALM utils
# import Salt libs
import salt.utils.napalm
import salt.utils.versions

# ----------------------------------------------------------------------------------------------------------------------
# state properties
@@ -55,7 +55,6 @@ def __virtual__():

def _update_config(template_name,
template_source=None,
template_path=None,
template_hash=None,
template_hash_name=None,
template_user='root',
@@ -79,7 +78,6 @@ def _update_config(template_name,

return __salt__['net.load_template'](template_name,
template_source=template_source,
template_path=template_path,
template_hash=template_hash,
template_hash_name=template_hash_name,
template_user=template_user,
@@ -104,7 +102,6 @@ def _update_config(template_name,
def managed(name,
template_name,
template_source=None,
template_path=None,
template_hash=None,
template_hash_name=None,
template_user='root',
@@ -137,10 +134,6 @@ def managed(name,

To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!

.. warning::
The support for NAPALM native templates will be dropped beginning with Salt Fluorine.
Implicitly, the ``template_path`` argument will be deprecated and removed.

template_name
Identifies path to the template source. The template can be stored either on the local machine
or remotely.
@@ -169,11 +162,6 @@ def managed(name,
template_source: None
Inline config template to be rendered and loaded on the device.

template_path: None
Required only in case the argument ``template_name`` provides only the file basename.
E.g.: if ``template_name`` is specified as ``my_template.jinja``, in order to find the
template, this argument must be provided: ``template_path: /absolute/path/to/``.

template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``

@@ -252,7 +240,6 @@ def managed(name,
prefix_lists_example:
netconfig.managed:
- template_name: prefix_lists.cheetah
- template_path: /absolute/path/to/
- debug: True
- template_engine: cheetah
ntp_peers_example:
@@ -333,11 +320,6 @@ def managed(name,
}
}
'''
if template_path:
salt.utils.versions.warn_until(
'Fluorine',
'Use of `template_path` detected. This argument will be removed in Salt Fluorine.'
)
ret = salt.utils.napalm.default_ret(name)

# the user can override the flags with the equivalent CLI args
@@ -350,7 +332,6 @@ def managed(name,

config_update_ret = _update_config(template_name,
template_source=template_source,
template_path=template_path,
template_hash=template_hash,
template_hash_name=template_hash_name,
template_user=template_user,
@@ -9,8 +9,10 @@ Enforce state for SSL/TLS
from __future__ import absolute_import, unicode_literals, print_function
import time
import datetime
import logging

__virtualname__ = 'tls'
log = logging.getLogger(__name__)


def __virtual__():
@@ -20,14 +22,12 @@ def __virtual__():
return __virtualname__


def valid_certificate(
name,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0,
):
def valid_certificate(name,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0):
'''
Verify that a TLS certificate is valid now and (optionally) will be valid
for the time specified through weeks, days, hours, minutes, and seconds.
@@ -38,7 +38,13 @@ def valid_certificate(
'comment': ''}

now = time.time()
cert_info = __salt__['tls.cert_info'](name)
try:
cert_info = __salt__['tls.cert_info'](name)
except IOError as exc:
ret['comment'] = '{}'.format(exc)
ret['result'] = False
log.error(ret['comment'])
return ret

# verify that the cert is valid *now*
if now < cert_info['not_before']:
@@ -9,9 +9,6 @@ Control Apache Traffic Server
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function

# Import Salt libs
import salt.utils.versions


def __virtual__():
'''
@@ -239,35 +236,6 @@ def config(name, value):
return ret


def set_var(name, value):
'''
Set Traffic Server configuration variable values.

.. deprecated:: Fluorine
Use ``trafficserver.config`` instead.

.. code-block:: yaml

proxy.config.proxy_name:
trafficserver.set_var:
- value: cdn.site.domain.tld

OR

traffic_server_setting:
trafficserver.set_var:
- name: proxy.config.proxy_name
- value: cdn.site.domain.tld

'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'set_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'trafficserver.config\' instead.'
)
return config(name, value)


def shutdown(name):
'''
Shut down Traffic Server on the local node.
@@ -145,8 +145,8 @@ if USE_LOAD_BALANCER:
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128

def __init__(self, opts, socket_queue, log_queue=None):
super(LoadBalancerServer, self).__init__(log_queue=log_queue)
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
@@ -160,13 +160,17 @@ if USE_LOAD_BALANCER:
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def close(self):
if self._socket is not None:
@@ -1348,14 +1352,18 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}

def _publish_daemon(self, log_queue=None):
def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)

log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get('log_queue_level')
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)

# Check if io_loop was set outside
@@ -1407,6 +1415,9 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue()
)
kwargs['log_queue_level'] = (
salt.log.setup.get_multiprocessing_logging_level()
)

process_manager.add_process(self._publish_daemon, kwargs=kwargs)
||||
|
@ -216,22 +216,13 @@ class _DeprecationDecorator(object):
|
||||
|
||||
def _get_args(self, kwargs):
|
||||
'''
|
||||
Extract function-specific keywords from all of the kwargs.
|
||||
Discard all keywords which aren't function-specific from the kwargs.
|
||||
|
||||
:param kwargs:
|
||||
:return:
|
||||
'''
|
||||
_args = list()
|
||||
_kwargs = dict()
|
||||
|
||||
if '__pub_arg' in kwargs: # For modules
|
||||
for arg_item in kwargs.get('__pub_arg', list()):
|
||||
if type(arg_item) == dict:
|
||||
_kwargs.update(arg_item.copy())
|
||||
else:
|
||||
_args.append(arg_item)
|
||||
else:
|
||||
_kwargs = kwargs.copy() # For states
|
||||
_kwargs = salt.utils.args.clean_kwargs(**kwargs)
|
||||
|
||||
return _args, _kwargs
|
||||
|
||||
|
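What ``clean_kwargs`` contributes on the state path, approximated in one line (Salt's real helper lives in ``salt.utils.args``; this body is an assumption about its observable effect): it strips the dunder bookkeeping keys the state compiler injects.

.. code-block:: python

    def clean_kwargs(**kwargs):
        # Approximation: drop injected bookkeeping like __pub_fun, __pub_jid.
        return {k: v for k, v in kwargs.items() if not k.startswith('__')}

    print(clean_kwargs(name='web', __pub_fun='state.apply'))  # {'name': 'web'}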
@@ -172,9 +172,9 @@ def fire_args(opts, jid, tag_data, prefix=''):
except NameError:
pass
else:
tag = tagify(tag_suffix, prefix)
try:
_event = get_master_event(opts, opts['sock_dir'], listen=False)
tag = tagify(tag_suffix, prefix)
_event.fire_event(tag_data, tag=tag)
except Exception as exc:
# Don't let a problem here hold up the rest of the orchestration
@@ -201,6 +201,12 @@ def tagify(suffix='', prefix='', base=SALT):
parts.extend(suffix)
else:  # string so append
parts.append(suffix)

for index, _ in enumerate(parts):
try:
parts[index] = salt.utils.stringutils.to_str(parts[index])
except TypeError:
parts[index] = str(parts[index])
return TAGPARTER.join([part for part in parts if part])


@@ -1054,8 +1060,8 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
The interface that takes master events and republishes them out to anyone
who wants to listen
'''
def __init__(self, opts, log_queue=None):
super(EventPublisher, self).__init__(log_queue=log_queue)
def __init__(self, opts, **kwargs):
super(EventPublisher, self).__init__(**kwargs)
self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
self.opts.update(opts)
self._closing = False
@@ -1065,11 +1071,18 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def run(self):
'''
@@ -1166,13 +1179,13 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
instance = super(EventReturn, cls).__new__(cls, *args, **kwargs)
return instance

def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Initialize the EventReturn system

Return an EventReturn instance
'''
super(EventReturn, self).__init__(log_queue=log_queue)
super(EventReturn, self).__init__(**kwargs)

self.opts = opts
self.event_return_queue = self.opts['event_return_queue']
@@ -1187,11 +1200,18 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def _handle_signals(self, signum, sigframe):
# Flush and terminate
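Why the coercion loop was added to ``tagify``, in miniature (the values are illustrative): tag parts may arrive as non-strings, and ``join`` requires ``str``.

.. code-block:: python

    parts = ['salt', 'job', 20180213120000]   # an int sneaks in
    parts = [str(p) for p in parts]           # real code prefers to_str() first
    print('/'.join(p for p in parts if p))    # salt/job/20180213120000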
@@ -438,11 +438,11 @@ class CacheWorker(MultiprocessingProcess):
main-loop when refreshing minion-list
'''

def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Sets up the zmq-connection to the ConCache
'''
super(CacheWorker, self).__init__(log_queue=log_queue)
super(CacheWorker, self).__init__(**kwargs)
self.opts = opts

# __setstate__ and __getstate__ are only used on Windows.
@@ -450,11 +450,18 @@ class CacheWorker(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def run(self):
'''
@@ -475,11 +482,11 @@ class ConnectedCache(MultiprocessingProcess):
the master publisher port.
'''

def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
starts the timer and inits the cache itself
'''
super(ConnectedCache, self).__init__(log_queue=log_queue)
super(ConnectedCache, self).__init__(**kwargs)
log.debug('ConCache initializing...')

# the possible settings for the cache
@@ -506,11 +513,18 @@ class ConnectedCache(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)

def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}

def signal_handler(self, sig, frame):
'''
@@ -861,21 +861,30 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
)

def _setup_mp_logging_client(self, *args):  # pylint: disable=unused-argument
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows, all logging including console and
# log file logging will go through the multiprocessing
# logging listener if it exists.
# This will allow log file rotation on Windows
# since only one process can own the log file
# for log file rotation to work.
log.setup_multiprocessing_logging(
self._get_mp_logging_listener_queue()
)
# Remove the temp logger and any other configured loggers since all of
# our logging is going through the multiprocessing logging listener.
log.shutdown_temp_logging()
log.shutdown_console_logging()
log.shutdown_logfile_logging()
if self._setup_mp_logging_listener_:
# Set multiprocessing logging level even in non-Windows
# environments. In non-Windows environments, this setting will
# propagate from process to process via fork behavior and will be
# used by child processes if they invoke the multiprocessing
# logging client.
log.set_multiprocessing_logging_level_by_opts(self.config)

if salt.utils.platform.is_windows():
# On Windows, all logging including console and
# log file logging will go through the multiprocessing
# logging listener if it exists.
# This will allow log file rotation on Windows
# since only one process can own the log file
# for log file rotation to work.
log.setup_multiprocessing_logging(
self._get_mp_logging_listener_queue()
)
# Remove the temp logger and any other configured loggers since
# all of our logging is going through the multiprocessing
# logging listener.
log.shutdown_temp_logging()
log.shutdown_console_logging()
log.shutdown_logfile_logging()

def __setup_console_logger_config(self, *args):  # pylint: disable=unused-argument
# Since we're not going to be a daemon, setup the console logger
@ -376,20 +376,30 @@ class ProcessManager(object):
            kwargs = {}

        if salt.utils.platform.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
            # processes that inherit from 'MultiprocessingProcess'.
            # Need to ensure that 'log_queue' and 'log_queue_level' are
            # correctly transferred to processes that inherit from
            # 'MultiprocessingProcess'.
            if type(MultiprocessingProcess) is type(tgt) and (
                    issubclass(tgt, MultiprocessingProcess)):
                need_log_queue = True
            else:
                need_log_queue = False

            if need_log_queue and 'log_queue' not in kwargs:
                if hasattr(self, 'log_queue'):
                    kwargs['log_queue'] = self.log_queue
                else:
                    kwargs['log_queue'] = (
                        salt.log.setup.get_multiprocessing_logging_queue())
            if need_log_queue:
                if 'log_queue' not in kwargs:
                    if hasattr(self, 'log_queue'):
                        kwargs['log_queue'] = self.log_queue
                    else:
                        kwargs['log_queue'] = (
                            salt.log.setup.get_multiprocessing_logging_queue()
                        )
                if 'log_queue_level' not in kwargs:
                    if hasattr(self, 'log_queue_level'):
                        kwargs['log_queue_level'] = self.log_queue_level
                    else:
                        kwargs['log_queue_level'] = (
                            salt.log.setup.get_multiprocessing_logging_level()
                        )

        # create a nicer name for the debug log
        if name is None:
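Read as a whole, the new block just fills in two defaults. A condensed restatement as a helper, assuming a Salt environment (the function name is hypothetical; the ``salt.log.setup`` getters are the ones used above):

.. code-block:: python

    import salt.log.setup


    def _default_log_kwargs(owner, kwargs):
        # Prefer the owner's settings; fall back to the global
        # multiprocessing logging queue and level.
        if 'log_queue' not in kwargs:
            if hasattr(owner, 'log_queue'):
                kwargs['log_queue'] = owner.log_queue
            else:
                kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
        if 'log_queue_level' not in kwargs:
            if hasattr(owner, 'log_queue_level'):
                kwargs['log_queue_level'] = owner.log_queue_level
            else:
                kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
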
@ -686,8 +696,14 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
            # salt.log.setup.get_multiprocessing_logging_queue().
            salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)

        self.log_queue_level = kwargs.pop('log_queue_level', None)
        if self.log_queue_level is None:
            self.log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
        else:
            salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)

        # Call __init__ from 'multiprocessing.Process' only after removing
        # 'log_queue' from kwargs.
        # 'log_queue' and 'log_queue_level' from kwargs.
        super(MultiprocessingProcess, self).__init__(*args, **kwargs)

        if salt.utils.platform.is_windows():
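The ordering constraint called out in the comment is load-bearing: ``multiprocessing.Process.__init__`` raises ``TypeError`` on keywords it does not recognize, so the custom ones must be popped first. In isolation (names hypothetical):

.. code-block:: python

    import multiprocessing


    class DemoProcess(multiprocessing.Process):
        def __init__(self, *args, **kwargs):
            # Pop the custom keywords first ...
            self.log_queue = kwargs.pop('log_queue', None)
            self.log_queue_level = kwargs.pop('log_queue_level', None)
            # ... so only keywords Process knows (target, name, ...) remain.
            super(DemoProcess, self).__init__(*args, **kwargs)


    proc = DemoProcess(target=print, args=('hello',), log_queue_level=10)
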
@ -732,6 +748,8 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
            kwargs = self._kwargs_for_getstate
        if 'log_queue' not in kwargs:
            kwargs['log_queue'] = self.log_queue
        if 'log_queue_level' not in kwargs:
            kwargs['log_queue_level'] = self.log_queue_level
        # Remove the version of these in the parent process since
        # they are no longer needed.
        del self._args_for_getstate

@ -50,8 +50,8 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
        'cmd': 'local',
    }

    def __init__(self, opts, log_queue=None):
        super(Reactor, self).__init__(log_queue=log_queue)
    def __init__(self, opts, **kwargs):
        super(Reactor, self).__init__(**kwargs)
        local_minion_opts = opts.copy()
        local_minion_opts['file_client'] = 'local'
        self.minion = salt.minion.MasterMinion(local_minion_opts)
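This is the same signature change made to ``Engine`` earlier in the commit: forwarding ``**kwargs`` means the subclass no longer has to be edited each time the base class grows a new logging keyword. In miniature (names hypothetical):

.. code-block:: python

    class Base(object):
        def __init__(self, log_queue=None, log_queue_level=None):
            self.log_queue = log_queue
            self.log_queue_level = log_queue_level


    class Worker(Base):
        def __init__(self, opts, **kwargs):
            # Any current or future Base keyword (log_queue, log_queue_level,
            # ...) passes straight through without Worker naming it.
            super(Worker, self).__init__(**kwargs)
            self.opts = opts


    w = Worker({'id': 'minion1'}, log_queue_level=10)
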
@ -66,11 +66,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
        self._is_child = True
        Reactor.__init__(
            self, state['opts'],
            log_queue=state['log_queue'])
            log_queue=state['log_queue'],
            log_queue_level=state['log_queue_level']
        )

    def __getstate__(self):
        return {'opts': self.opts,
                'log_queue': self.log_queue}
        return {
            'opts': self.opts,
            'log_queue': self.log_queue,
            'log_queue_level': self.log_queue_level
        }

    def render_reaction(self, glob_ref, tag, data):
        '''

@ -911,40 +911,30 @@ class Schedule(object):
                                              'must be a dict. '
                                              'Ignoring job {0}.'.format(job))
                            log.error(data['_error'])
                            __when = self.opts['pillar']['whens'][i]
                            try:
                                when__ = dateutil_parser.parse(__when)
                            except ValueError:
                                data['_error'] = ('Invalid date string. '
                                                  'Ignoring job {0}.'.format(job))
                                log.error(data['_error'])
                                return data
                            when_ = self.opts['pillar']['whens'][i]
                        elif ('whens' in self.opts['grains'] and
                                i in self.opts['grains']['whens']):
                            if not isinstance(self.opts['grains']['whens'],
                                              dict):
                                data['_error'] = ('Grain "whens" must be dict.'
                                                  'Ignoring job {0}.'.format(job))
                                log.error(data['_error'])
                                return data
                            __when = self.opts['grains']['whens'][i]
                            try:
                                when__ = dateutil_parser.parse(__when)
                            except ValueError:
                                data['_error'] = ('Invalid date string. '
                                data['_error'] = ('Grain "whens" must be a dict.'
                                                  'Ignoring job {0}.'.format(job))
                                log.error(data['_error'])
                                return data
                            when_ = self.opts['grains']['whens'][i]
                        else:
                            when_ = i

                        if not isinstance(when_, datetime.datetime):
                            try:
                                when__ = dateutil_parser.parse(i)
                                when_ = dateutil_parser.parse(when_)
                            except ValueError:
                                data['_error'] = ('Invalid date string {0}. '
                                                  'Ignoring job {1}.'.format(i, job))
                                log.error(data['_error'])
                                return data

                        _when.append(when__)
                        _when.append(when_)

                    if data['_splay']:
                        _when.append(data['_splay'])
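All of these hunks converge on one rule: a ``when`` value may now arrive as either a ``datetime`` or a string, and parsing happens only for strings, after the pillar/grains lookup. Distilled into a hypothetical helper:

.. code-block:: python

    import datetime

    import dateutil.parser as dateutil_parser


    def coerce_when(value):
        # Accept either a datetime or a parseable string; parse only strings.
        if isinstance(value, datetime.datetime):
            return value
        return dateutil_parser.parse(value)  # raises ValueError on bad input


    print(coerce_when('11/29/2017 4:00pm'))
    print(coerce_when(datetime.datetime(2017, 11, 29, 16, 0)))
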
@ -992,32 +982,21 @@ class Schedule(object):
                                          'Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    _when = self.opts['pillar']['whens'][data['when']]
                    try:
                        when = dateutil_parser.parse(_when)
                    except ValueError:
                        data['_error'] = ('Invalid date string. '
                                          'Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    when = self.opts['pillar']['whens'][data['when']]
                elif ('whens' in self.opts['grains'] and
                        data['when'] in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'], dict):
                        data['_error'] = ('Grain "whens" must be dict. '
                                          'Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    _when = self.opts['grains']['whens'][data['when']]
                    try:
                        when = dateutil_parser.parse(_when)
                    except ValueError:
                        data['_error'] = ('Invalid date string. '
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    when = self.opts['grains']['whens'][data['when']]
                else:
                    when = data['when']

                if not isinstance(when, datetime.datetime):
                    try:
                        when = dateutil_parser.parse(data['when'])
                        when = dateutil_parser.parse(when)
                    except ValueError:
                        data['_error'] = ('Invalid date string. '
                                          'Ignoring job {0}.'.format(job))
@ -1146,22 +1125,26 @@ class Schedule(object):
                        return data
                else:
                    if isinstance(data['skip_during_range'], dict):
                        try:
                            start = dateutil_parser.parse(data['skip_during_range']['start'])
                        except ValueError:
                            data['_error'] = ('Invalid date string for start in '
                                              'skip_during_range. Ignoring '
                                              'job {0}.'.format(job))
                            log.error(data['_error'])
                            return data
                        try:
                            end = dateutil_parser.parse(data['skip_during_range']['end'])
                        except ValueError:
                            data['_error'] = ('Invalid date string for end in '
                                              'skip_during_range. Ignoring '
                                              'job {0}.'.format(job))
                            log.error(data['_error'])
                            return data
                        start = data['skip_during_range']['start']
                        end = data['skip_during_range']['end']
                        if not isinstance(start, datetime.datetime):
                            try:
                                start = dateutil_parser.parse(start)
                            except ValueError:
                                data['_error'] = ('Invalid date string for start in '
                                                  'skip_during_range. Ignoring '
                                                  'job {0}.'.format(job))
                                log.error(data['_error'])
                                return data
                        if not isinstance(end, datetime.datetime):
                            try:
                                end = dateutil_parser.parse(end)
                            except ValueError:
                                data['_error'] = ('Invalid date string for end in '
                                                  'skip_during_range. Ignoring '
                                                  'job {0}.'.format(job))
                                log.error(data['_error'])
                                return data

                        # Check to see if we should run the job immediately
                        # after the skip_during_range is over
@ -1196,7 +1179,7 @@ class Schedule(object):
                    return data
            else:
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(job))
                log.error(data['_error'])
                return data
@ -1213,20 +1196,24 @@ class Schedule(object):
                    return data
            else:
                if isinstance(data['range'], dict):
                    try:
                        start = dateutil_parser.parse(data['range']['start'])
                    except ValueError:
                        data['_error'] = ('Invalid date string for start. '
                                          'Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    try:
                        end = dateutil_parser.parse(data['range']['end'])
                    except ValueError:
                        data['_error'] = ('Invalid date string for end.'
                                          ' Ignoring job {0}.'.format(job))
                        log.error(data['_error'])
                        return data
                    start = data['range']['start']
                    end = data['range']['end']
                    if not isinstance(start, datetime.datetime):
                        try:
                            start = dateutil_parser.parse(start)
                        except ValueError:
                            data['_error'] = ('Invalid date string for start. '
                                              'Ignoring job {0}.'.format(job))
                            log.error(data['_error'])
                            return data
                    if not isinstance(end, datetime.datetime):
                        try:
                            end = dateutil_parser.parse(end)
                        except ValueError:
                            data['_error'] = ('Invalid date string for end.'
                                              ' Ignoring job {0}.'.format(job))
                            log.error(data['_error'])
                            return data
                    if end > start:
                        if 'invert' in data['range'] and data['range']['invert']:
                            if now <= start or now >= end:
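The ``range`` handling reduces to the same coercion plus an ordering check and the ``invert`` switch. A distilled, hypothetical helper with the semantics shown above:

.. code-block:: python

    import datetime

    import dateutil.parser as dateutil_parser


    def should_run(now, start, end, invert=False):
        # Coerce strings to datetimes, mirroring the hunk above.
        if not isinstance(start, datetime.datetime):
            start = dateutil_parser.parse(start)
        if not isinstance(end, datetime.datetime):
            end = dateutil_parser.parse(end)
        if end <= start:
            raise ValueError('end must be larger than start')
        if invert:
            # Inverted range: run only outside the window.
            return now <= start or now >= end
        return start <= now <= end


    now = dateutil_parser.parse('11/29/2017 4:00pm')
    print(should_run(now, '11/29/2017 1:00pm', '11/29/2017 5:00pm'))               # True
    print(should_run(now, '11/29/2017 1:00pm', '11/29/2017 5:00pm', invert=True))  # False
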
@ -1266,7 +1253,9 @@ class Schedule(object):
                                  'Ignoring job {0}'.format(job))
                log.error(data['_error'])
            else:
                after = dateutil_parser.parse(data['after'])
                after = data['after']
                if not isinstance(after, datetime.datetime):
                    after = dateutil_parser.parse(after)

                if after >= now:
                    log.debug(
@ -1290,7 +1279,9 @@ class Schedule(object):
                                  'Ignoring job {0}'.format(job))
                log.error(data['_error'])
            else:
                until = dateutil_parser.parse(data['until'])
                until = data['until']
                if not isinstance(until, datetime.datetime):
                    until = dateutil_parser.parse(until)

                if until <= now:
                    log.debug(

@ -22,6 +22,7 @@ class TimedProc(object):
        self.stdin = kwargs.pop('stdin', None)
        self.with_communicate = kwargs.pop('with_communicate', self.wait)
        self.timeout = kwargs.pop('timeout', None)
        self.stdin_raw_newlines = kwargs.pop('stdin_raw_newlines', False)

        # If you're not willing to wait for the process
        # you can't define any stdin, stdout or stderr
@ -29,9 +30,10 @@
            self.stdin = kwargs['stdin'] = None
            self.with_communicate = False
        elif self.stdin is not None:
            # Translate a newline submitted as '\n' on the CLI to an actual
            # newline character.
            self.stdin = self.stdin.replace('\\n', '\n').encode(__salt_system_encoding__)
            if not self.stdin_raw_newlines:
                # Translate a newline submitted as '\n' on the CLI to an actual
                # newline character.
                self.stdin = self.stdin.replace('\\n', '\n').encode(__salt_system_encoding__)
            kwargs['stdin'] = subprocess.PIPE

        if not self.with_communicate:
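What the new ``stdin_raw_newlines`` option toggles, in isolation: by default a literal ``\n`` typed on the CLI is translated to a real newline before encoding; with the option set, stdin passes through untouched. A runnable sketch (``__salt_system_encoding__`` is replaced by an explicit ``'utf-8'`` here):

.. code-block:: python

    def prepare_stdin(stdin, stdin_raw_newlines=False, encoding='utf-8'):
        if not stdin_raw_newlines:
            # Turn the two-character sequence backslash-n into a newline.
            stdin = stdin.replace('\\n', '\n')
        return stdin.encode(encoding)


    print(prepare_stdin('line1\\nline2'))                           # b'line1\nline2'
    print(prepare_stdin('line1\\nline2', stdin_raw_newlines=True))  # b'line1\\nline2'
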
261 tests/integration/scheduler/test_error.py Normal file
@ -0,0 +1,261 @@
# -*- coding: utf-8 -*-

# Import Python libs
from __future__ import absolute_import
import copy
import logging
import os

import dateutil.parser as dateutil_parser

# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin

# Import Salt Testing Libs
from tests.support.mock import MagicMock, patch
from tests.support.unit import skipIf
import tests.integration as integration

# Import Salt libs
import salt.config
import salt.utils.schedule

from salt.modules.test import ping as ping

try:
    import croniter  # pylint: disable=W0611
    HAS_CRONITER = True
except ImportError:
    HAS_CRONITER = False

log = logging.getLogger(__name__)
ROOT_DIR = os.path.join(integration.TMP, 'schedule-unit-tests')
SOCK_DIR = os.path.join(ROOT_DIR, 'test-socks')

DEFAULT_CONFIG = salt.config.minion_config(None)
DEFAULT_CONFIG['conf_dir'] = ROOT_DIR
DEFAULT_CONFIG['root_dir'] = ROOT_DIR
DEFAULT_CONFIG['sock_dir'] = SOCK_DIR
DEFAULT_CONFIG['pki_dir'] = os.path.join(ROOT_DIR, 'pki')
DEFAULT_CONFIG['cachedir'] = os.path.join(ROOT_DIR, 'cache')


class SchedulerErrorTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the scheduler's error handling
    '''
    def setUp(self):
        with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
            functions = {'test.ping': ping}
            self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
            self.schedule.opts['loop_interval'] = 1

        self.schedule.opts['grains']['whens'] = {'tea time': '11/29/2017 12:00pm'}

    def tearDown(self):
        self.schedule.reset()

    @skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
    def test_eval_cron_invalid(self):
        '''
        verify that an invalid cron string does not run
        and returns the right error
        '''
        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'cron': '0 16 29 13 *'
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        run_time = dateutil_parser.parse('11/29/2017 4:00pm')
        with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
            self.schedule.eval(now=run_time)

        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_error'],
                         'Invalid cron string. Ignoring job job1.')

    def test_eval_when_invalid_date(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'when': '13/29/2017 1:00pm',
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # Evaluate at the run time
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_error'],
                         'Invalid date string. Ignoring job job1.')

    def test_eval_whens_grain_not_dict(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'when': 'tea time',
                }
            }
        }

        self.schedule.opts['grains']['whens'] = ['tea time']

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # Evaluate at the run time
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_error'],
                         'Grain "whens" must be a dict. Ignoring job job1.')

    def test_eval_once_invalid_datestring(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''
        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'once': '2017-13-13T13:00:00',
                }
            }
        }
        run_time = dateutil_parser.parse('12/13/2017 1:00pm')

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # Evaluate at the run time
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        _expected = ('Date string could not be parsed: '
                     '2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
                     'Ignoring job job1.')
        self.assertEqual(ret['_error'], _expected)

    def test_eval_skip_during_range_invalid_date(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'hours': 1,
                    'skip_during_range': {'start': '1:00pm', 'end': '25:00pm'}
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # eval at 3:00pm to prime, simulate minion start up.
        run_time = dateutil_parser.parse('11/29/2017 3:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')

        # eval at 4:00pm; the job should not run and the error should be set
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        _expected = ('Invalid date string for end in '
                     'skip_during_range. Ignoring '
                     'job job1.')
        self.assertEqual(ret['_error'], _expected)

    def test_eval_skip_during_range_end_before_start(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'hours': 1,
                    'skip_during_range': {'start': '1:00pm', 'end': '12:00pm'}
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # eval at 3:00pm to prime, simulate minion start up.
        run_time = dateutil_parser.parse('11/29/2017 3:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')

        # eval at 4:00pm; the job should not run and the error should be set
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        _expected = ('schedule.handle_func: Invalid '
                     'range, end must be larger than '
                     'start. Ignoring job job1.')
        self.assertEqual(ret['_error'], _expected)

    def test_eval_skip_during_range_not_dict(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'hours': 1,
                    'skip_during_range': ['start', '1:00pm', 'end', '12:00pm']
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # eval at 3:00pm to prime, simulate minion start up.
        run_time = dateutil_parser.parse('11/29/2017 3:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')

        # eval at 4:00pm; the job should not run and the error should be set
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        _expected = ('schedule.handle_func: Invalid, '
                     'range must be specified as a '
                     'dictionary. Ignoring job job1.')
        self.assertEqual(ret['_error'], _expected)

@ -274,30 +274,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_last_run'], run_time)

    @skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
    def test_eval_cron_invalid(self):
        '''
        verify that scheduled job runs
        '''
        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'cron': '0 16 29 13 *'
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        run_time = dateutil_parser.parse('11/29/2017 4:00pm')
        with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
            self.schedule.eval(now=run_time)

        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_error'], 'Invalid cron string. Ignoring job job1.')

    @skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
    def test_eval_cron_loop_interval(self):
        '''
@ -325,56 +301,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_last_run'], run_time)

    def test_eval_when_invalid_date(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''
        run_time = dateutil_parser.parse('11/29/2017 4:00pm')

        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'when': '13/29/2017 1:00pm',
                }
            }
        }

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # Evaluate 1 second before the run time
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_error'], 'Invalid date string. Ignoring job job1.')

    def test_eval_once_invalid_datestring(self):
        '''
        verify that scheduled job does not run
        and returns the right error
        '''
        job = {
            'schedule': {
                'job1': {
                    'function': 'test.ping',
                    'once': '2017-13-13T13:00:00',
                }
            }
        }
        run_time = dateutil_parser.parse('12/13/2017 1:00pm')

        # Add the job to the scheduler
        self.schedule.opts.update(job)

        # Evaluate 1 second at the run time
        self.schedule.eval(now=run_time)
        ret = self.schedule.job_status('job1')
        _expected = ('Date string could not be parsed: '
                     '2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
                     'Ignoring job job1.')
        self.assertEqual(ret['_error'], _expected)

    def test_eval_until(self):
        '''
        verify that scheduled job is skipped once the current

@ -20,11 +20,9 @@ from salt.exceptions import SaltRenderError

@skipIf(NO_MOCK, NO_MOCK_REASON)
class GPGTestCase(TestCase, LoaderModuleMockMixin):

    '''
    unit test GPG renderer
    '''

    def setup_loader_modules(self):
        return {gpg: {}}

@ -46,29 +44,30 @@ class GPGTestCase(TestCase, LoaderModuleMockMixin):
        '''
        key_dir = '/etc/salt/gpgkeys'
        secret = 'Use more salt.'
        crypted = '-----BEGIN PGP MESSAGE-----!@#$%^&*()_+-----END PGP MESSAGE-----'
        crypted_long = '-----BEGIN PGP MESSAGE-----!@#$%^&*()_+-----END PGP MESSAGE-----'
        crypted_short = '!@#$%^&*()_+'

        multisecret = 'password is {0} and salt is {0}'.format(secret)
        multicrypted = 'password is {0} and salt is {0}'.format(crypted)
        multicrypted = 'password is {0} and salt is {0}'.format(crypted_long)

        class GPGDecrypt(object):

            def communicate(self, *args, **kwargs):
                return [secret, None]

        class GPGNotDecrypt(object):

            def communicate(self, *args, **kwargs):
                return [None, 'decrypt error']

        with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)), \
                patch('salt.utils.path.which', MagicMock()):
            with patch('salt.renderers.gpg.Popen', MagicMock(return_value=GPGDecrypt())):
                self.assertEqual(gpg._decrypt_ciphertexts(crypted), secret)
                self.assertEqual(gpg._decrypt_ciphertexts(crypted_short), secret)
                self.assertEqual(gpg._decrypt_ciphertexts(crypted_long), secret)
                self.assertEqual(
                    gpg._decrypt_ciphertexts(multicrypted), multisecret)
            with patch('salt.renderers.gpg.Popen', MagicMock(return_value=GPGNotDecrypt())):
                self.assertEqual(gpg._decrypt_ciphertexts(crypted), crypted)
                self.assertEqual(gpg._decrypt_ciphertexts(crypted_short), crypted_short)
                self.assertEqual(gpg._decrypt_ciphertexts(crypted_long), crypted_long)
                self.assertEqual(
                    gpg._decrypt_ciphertexts(multicrypted), multicrypted)

@ -76,7 +75,6 @@ class GPGTestCase(TestCase, LoaderModuleMockMixin):
        '''
        test _decrypt_object
        '''

        secret = 'Use more salt.'
        crypted = '-----BEGIN PGP MESSAGE-----!@#$%^&*()_+-----END PGP MESSAGE-----'

@ -97,7 +95,6 @@ class GPGTestCase(TestCase, LoaderModuleMockMixin):
        '''
        test render
        '''

        key_dir = '/etc/salt/gpgkeys'
        secret = 'Use more salt.'
        crypted = '-----BEGIN PGP MESSAGE-----!@#$%^&*()_+'
