MANY dunder variable fixes for proxies + proxy keepalive from @mirceaulinic (#38829)

* Add proxy keep alive options

* Documentation for the alive function

* MANY dunder variable fixes for proxies.

* Lint.

* Lint.

* More Lint.
C. R. Oldham 2017-02-02 11:35:40 -07:00 committed by Nicole Thomas
parent 8cd74af1e4
commit 8e6c155ead
10 changed files with 154 additions and 41 deletions


@@ -351,6 +351,13 @@ the keyword ``pass`` if there is no shutdown logic required.
be defined in the proxymodule. The code for ``ping`` should contact the
controlled device and make sure it is really available.
``alive(opts)``: Another optional function, used together with the
``proxy_keep_alive`` option (default: ``True``). This function should
return a boolean value corresponding to the state of the connection.
If the connection is down, the proxy minion will try to restart it
(``shutdown`` followed by ``init``). The polling frequency is controlled
by the ``proxy_keep_alive_interval`` option, in minutes.
``grains()``: Rather than including grains in /srv/salt/_grains or in
the standard install directories for grains, grains can be computed and
returned by this function. This function will be called automatically
@@ -404,6 +411,9 @@ and status; "package" installation, and a ping.
return True
def _complicated_function_that_determines_if_alive():
return True
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
@@ -419,6 +429,16 @@ and status; "package" installation, and a ping.
if not DETAILS['url'].endswith('/'):
DETAILS['url'] += '/'
def alive(opts):
'''
Return a flag reflecting the connection state.
Particularly useful when the proxy minion communicates over a channel
that requires a more elaborate keep-alive mechanism, e.g. NETCONF
over SSH.
'''
log.debug('rest_sample proxy alive() called...')
return _complicated_function_that_determines_if_alive()
def initialized():
'''

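Putting the documented pieces together, a proxy module implementing the full contract is small. The following skeleton is a hedged sketch only: the module name and the device-connection placeholders are hypothetical, not part of this commit or of rest_sample.

# skeleton.py -- illustrative proxy module; the device-connection logic
# is a placeholder, only the function contract matches the docs above.
import logging

log = logging.getLogger(__name__)

__proxyenabled__ = ['skeleton']
DETAILS = {}


def __virtual__():
    return True


def init(opts):
    # open the connection to the controlled device here (placeholder)
    DETAILS['initialized'] = True


def initialized():
    return DETAILS.get('initialized', False)


def ping():
    # contact the controlled device and report reachability (placeholder)
    return True


def alive(opts):
    # polled every `proxy_keep_alive_interval` minutes while
    # `proxy_keep_alive` is True; returning False triggers shutdown()
    # followed by init()
    return ping()


def shutdown(opts):
    # close the connection to the controlled device (placeholder)
    DETAILS['initialized'] = False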

@@ -548,6 +548,14 @@ VALID_OPTS = {
# but this was the default pre 2015.8.2. This should default to
# False in 2016.3.0
'add_proxymodule_to_opts': bool,
# Poll the connection state of the proxy minion
# If enabled, this option requires the function `alive`
# to be implemented in the proxy module
'proxy_keep_alive': bool,
# Frequency of the proxy_keep_alive, in minutes
'proxy_keep_alive_interval': int,
'git_pillar_base': str,
'git_pillar_branch': str,
'git_pillar_env': str,
@@ -1501,6 +1509,8 @@ DEFAULT_PROXY_MINION_OPTS = {
'proxy_merge_grains_in_module': True,
'append_minionid_config_dirs': ['cachedir', 'pidfile'],
'default_include': 'proxy.d/*.conf',
'proxy_keep_alive': True,  # by default, try to keep the connection alive
'proxy_keep_alive_interval': 1  # frequency of the proxy keepalive, in minutes
}
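How these defaults are consumed, as a minimal sketch (assuming ``opts`` is the proxy minion's opts dictionary; the variable names are illustrative):

keep_alive = opts.get('proxy_keep_alive', True)      # poll the connection at all?
interval = opts.get('proxy_keep_alive_interval', 1)  # polling period, in minutes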
# ----- Salt Cloud Configuration Defaults ----------------------------------->


@@ -21,12 +21,12 @@ def start_engines(opts, proc_mgr, proxy=None):
'''
Fire up the configured engines!
'''
utils = salt.loader.utils(opts)
utils = salt.loader.utils(opts, proxy=proxy)
if opts['__role'] == 'master':
runners = salt.loader.runner(opts, utils=utils)
else:
runners = []
funcs = salt.loader.minion_mods(opts, utils=utils)
funcs = salt.loader.minion_mods(opts, utils=utils, proxy=proxy)
engines = salt.loader.engines(opts, funcs, runners, proxy=proxy)
engines_opt = opts.get('engines', [])

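Because ``utils`` and ``minion_mods`` are now built with ``proxy=proxy``, engines running on a proxy minion receive proxy-aware functions. A hedged sketch of a custom engine relying on this (the engine name, interval, and the use of ``status.proxy_reconnect`` are illustrative, not part of this commit):

# my_proxy_watch.py -- illustrative custom engine
import time


def start(interval=60):
    '''
    Periodically run the keepalive check through the proxy-aware __salt__.
    '''
    while True:
        # available because minion_mods() above was called with proxy=proxy
        __salt__['status.proxy_reconnect']('rest_sample')
        time.sleep(interval)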

@@ -497,8 +497,8 @@ def beacons(opts, functions, context=None, proxy=None):
_module_dirs(opts, 'beacons'),
opts,
tag='beacons',
virtual_funcs=['__validate__'],
pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}},
virtual_funcs=['__validate__'],
)

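With ``__proxy__`` packed into the beacons loader, a custom beacon can query the proxied connection directly. A minimal sketch (the beacon name, event tag, and the use of ``rest_sample.alive`` are assumptions for illustration):

# proxy_conn.py -- illustrative beacon
__virtualname__ = 'proxy_conn'


def validate(config):
    return True, 'Valid beacon configuration'


def beacon(config):
    '''
    Fire an event when the proxied device stops answering.
    '''
    ret = []
    # __proxy__ is packed by salt.loader.beacons() above; it is an
    # empty dict on regular (non-proxy) minions
    if __proxy__ and not __proxy__['rest_sample.alive'](__opts__):
        ret.append({'tag': 'connection_lost'})
    return ret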

@@ -3221,15 +3221,17 @@ class ProxyMinion(Minion):
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self._setup_core()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
@@ -3258,29 +3260,35 @@ class ProxyMinion(Minion):
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.setup_beacons()
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
@@ -3288,19 +3296,21 @@ class ProxyMinion(Minion):
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
@@ -3315,12 +3325,35 @@ class ProxyMinion(Minion):
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if proxy_alive_fn in self.proxy and 'status.proxy_reconnect' in self.functions and \
('proxy_keep_alive' not in self.opts or ('proxy_keep_alive' in self.opts and self.opts['proxy_keep_alive'])):
# `proxy_keep_alive` defaults to True when not specified; when
# explicitly set to False, the minion does not retry reconnecting
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv=self.opts['environment'])
self.grains_cache = self.opts['grains']

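The keepalive scheduling condition above is dense; an equivalent, simplified reading (a sketch, not how the commit writes it):

# equivalent logic, sketched for readability
keep_alive = self.opts.get('proxy_keep_alive', True)
if (proxy_alive_fn in self.proxy
        and 'status.proxy_reconnect' in self.functions
        and keep_alive):
    # schedule the '__proxy_keepalive' job, which calls
    # status.proxy_reconnect every `proxy_keep_alive_interval`
    # minutes (default: 1)
    ...
else:
    self.schedule.delete_job('__proxy_keepalive', persist=True)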

@@ -30,6 +30,5 @@ def get_test_string():
.. code-block:: bash
salt 'rest-sample-proxy' rest_sample.get_test_string
'''
return __proxy__['rest_sample.test_from_state']()


@@ -14,6 +14,7 @@ import fnmatch
import collections
import copy
import time
import logging
# Import 3rd-party libs
import salt.ext.six as six
@@ -29,6 +30,8 @@ from salt.utils.network import host_to_ips as _host_to_ips
from salt.ext.six.moves import zip
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__file__)
__virtualname__ = 'status'
__opts__ = {}
@@ -1115,6 +1118,46 @@ def ping_master(master):
return result
def proxy_reconnect(proxy_name, opts=None):
'''
Force the proxy minion to reconnect when the connection is not alive.
proxy_name
The virtual name of the proxy module.
opts: None
Opts dictionary.
'''
if not opts:
opts = __opts__
if 'proxy' not in opts:
return False # fail
proxy_keepalive_fn = proxy_name+'.alive'
if proxy_keepalive_fn not in __proxy__:
return False # fail
is_alive = __proxy__[proxy_keepalive_fn](opts)
if not is_alive:
minion_id = opts.get('proxyid', '') or opts.get('id', '')
log.info('{minion_id} ({proxy_name} proxy) is down. Restarting.'.format(
minion_id=minion_id,
proxy_name=proxy_name
)
)
__proxy__[proxy_name+'.shutdown'](opts) # safely close connection
__proxy__[proxy_name+'.init'](opts) # reopen connection
log.debug('Restarted {minion_id} ({proxy_name} proxy)!'.format(
minion_id=minion_id,
proxy_name=proxy_name
)
)
return True # success
def time_(format='%A, %d. %B %Y %I:%M%p'):
'''
.. versionadded:: 2016.3.0

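For reference, a hedged usage sketch of the new function, called from another module or a scheduled job; 'rest_sample' matches the sample proxy module used elsewhere in this commit:

# returns True when the device answered or the connection was restarted;
# False when no proxy configuration or matching alive() function exists
alive_or_restarted = __salt__['status.proxy_reconnect']('rest_sample')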

@@ -33,11 +33,11 @@ def __virtual__():
log.debug('rest_sample proxy __virtual__() called...')
return True
# Every proxy module needs an 'init', though you can
# just put DETAILS['initialized'] = True here if nothing
# else needs to be done.
def init(opts):
log.debug('rest_sample proxy init() called...')
DETAILS['initialized'] = True
@@ -59,6 +59,13 @@ def initialized():
return DETAILS.get('initialized', False)
def alive(opts):
log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
log.debug('rest_sample proxy alive() fn called')
log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
return False
def id(opts):
'''
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.

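The sample ``alive()`` above always returns ``False``, so the keepalive restart path runs on every poll, which is useful for exercising the machinery. A more realistic variant would report the actual connection state, e.g. by delegating to the module's existing ``ping()``; a sketch, not part of the commit:

def alive(opts):
    '''
    Report the real connection state instead of forcing a restart.
    '''
    log.debug('rest_sample proxy alive() called...')
    return ping()  # ping() contacts the REST endpoint, per the docs above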

@@ -861,7 +861,7 @@ class State(object):
Read the state loader value and loadup the correct states subsystem
'''
if self.states_loader == 'thorium':
self.states = salt.loader.thorium(self.opts, self.functions, {}) # TODO: Add runners
self.states = salt.loader.thorium(self.opts, self.functions, {}) # TODO: Add runners, proxy?
else:
self.states = salt.loader.states(self.opts, self.functions, self.utils,
self.serializers, proxy=self.proxy)


@@ -378,7 +378,7 @@ class Schedule(object):
'''
instance = None
def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None):
def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None):
'''
Only create one instance of Schedule
'''
@@ -388,18 +388,19 @@
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
cls.instance = object.__new__(cls)
cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup)
cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy)
else:
log.debug('Re-using Schedule')
return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None):
def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None):
def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None):
self.opts = opts
self.proxy = proxy
self.functions = functions
if isinstance(intervals, dict):
self.intervals = intervals
@@ -742,8 +743,8 @@ class Schedule(object):
# This is also needed for the ZeroMQ transport to reset all functions'
# context data that could keep parent connections open. ZeroMQ will
# hang on polling parent connections from the child process.
self.functions = salt.loader.minion_mods(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
ret = {'id': self.opts.get('id', 'master'),
'fun': func,
'fun_args': [],
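For reference, the proxy-aware singleton is obtained exactly as in the minion.py hunk earlier in this commit; the stored ``proxy`` is then forwarded to the loaders when the schedule reloads functions in a child process:

# mirrors the ProxyMinion call shown earlier in this commit
schedule = salt.utils.schedule.Schedule(
    self.opts,
    self.functions,
    self.returners,
    cleanup=[master_event(type='alive')],
    proxy=self.proxy)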