Merge branch '2016.3' into 'develop'

Conflicts:
    - salt/states/pkg.py

Commit 72a116b731
doc/topics/releases/2015.8.12.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
============================
Salt 2015.8.12 Release Notes
============================

Version 2015.8.12 is a bugfix release for :doc:`2015.8.0
</topics/releases/2015.8.0>`.
@@ -1008,6 +1008,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_alive_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master'],
                          'connected': True}
           }
@@ -1022,6 +1023,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_failback_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master_list'][0]}
           }
       }, persist=True)
@@ -1855,6 +1857,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_alive_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master'],
                          'connected': False}
           }
@@ -1902,6 +1905,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_alive_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master'],
                          'connected': True}
           }
@@ -1915,6 +1919,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_failback_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master_list'][0]}
           }
       self.schedule.modify_job(name='__master_failback',
@@ -1938,6 +1943,7 @@ class Minion(MinionBase):
               'seconds': self.opts['master_alive_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master'],
                          'connected': True}
           }
@@ -2972,6 +2978,7 @@ class ProxyMinion(Minion):
               'seconds': self.opts['master_alive_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master'],
                          'connected': True}
           }
@@ -2986,6 +2993,7 @@ class ProxyMinion(Minion):
               'seconds': self.opts['master_failback_interval'],
               'jid_include': True,
               'maxrunning': 1,
               'return_job': False,
               'kwargs': {'master': self.opts['master_list'][0]}
           }
       }, persist=True)
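The scheduled ``__master_alive`` and ``__master_failback`` jobs above are driven by the
minion's multi-master failover settings. As a rough sketch (not part of this commit;
the hostnames and interval values below are placeholders), the relevant minion config
looks roughly like:

    # /etc/salt/minion -- illustrative values only
    master:
      - master1.example.com
      - master2.example.com
    master_type: failover            # enables the __master_alive schedule job
    master_alive_interval: 30        # seconds between master liveness checks
    master_failback: True            # fail back to the first master when it returns
    master_failback_interval: 90     # seconds between failback checks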
@@ -40,6 +40,17 @@ the repo's URL. Configuration details can be found below.
      'dev-*':
        - bar

Additionally, while git_pillar allows for the branch/tag to be overridden
(see :ref:`here <git-pillar-env-remap>`, or :ref:`here
<git-pillar-env-remap-legacy>` for Salt releases before 2015.8.0), keep in
mind that the top file must reference the actual environment name. It is
common practice to make the environment in a git_pillar top file match the
branch/tag name, but when remapping, the environment of course no longer
matches the branch/tag, and the top file needs to be adjusted accordingly.
When expected Pillar values configured in git_pillar are missing, this is a
common misconfiguration that may be to blame, and is a good first step in
troubleshooting.

.. _git-pillar-pre-2015-8-0:

Configuring git_pillar for Salt releases before 2015.8.0
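To illustrate the remapping caveat described in that hunk: if a branch named ``dev``
were remapped to a Pillar environment named ``testing`` (hypothetical names), the top
file in that branch would have to target ``testing`` rather than ``dev``:

    # sketch only -- environment and target names are placeholders
    testing:
      'dev-*':
        - bar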
@@ -69,6 +80,8 @@ specified under :conf_master:`ext_pillar`:
    - git: master https://gitserver/git-pillar.git
    - git: dev https://gitserver/git-pillar.git

.. _git-pillar-env-remap-legacy:

To remap a specific branch to a specific Pillar environment, use the format
``<branch>:<env>``:
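For illustration, a sketch of that legacy ``<branch>:<env>`` form, remapping a
hypothetical ``develop`` branch to the ``dev`` environment (branch, environment, and
URL are placeholders):

    ext_pillar:
      - git: develop:dev https://gitserver/git-pillar.git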
@@ -179,6 +192,18 @@ Per-remote configuration parameters are supported (similar to :ref:`gitfs
<gitfs-per-remote-config>`), and global versions of the git_pillar
configuration parameters can also be set.

.. _git-pillar-env-remap:

To remap a specific branch to a specific Pillar environment, use the ``env``
per-remote parameter:

.. code-block:: yaml

    ext_pillar:
      - git:
        - production https://gitserver/git-pillar.git:
          - env: prod

With the addition of pygit2_ support, git_pillar can now interact with
authenticated remotes. Authentication works just like in gitfs (as outlined in
the :ref:`Git Fileserver Backend Walkthrough <gitfs-authentication>`), only
@@ -187,10 +212,9 @@ instead of ``gitfs`` (e.g. :conf_master:`git_pillar_pubkey`,
:conf_master:`git_pillar_privkey`, :conf_master:`git_pillar_passphrase`, etc.).

.. note::

    The ``name`` parameter can be used to further differentiate between two
    remotes with the same URL. If you're using two remotes with the same URL,
    the ``name`` option is required.
    remotes with the same URL and branch. When using two remotes with the same
    URL, the ``name`` option is required.

.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
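A sketch of the ``name`` parameter mentioned in the note above, assuming two
hypothetical remotes that share the same URL and branch and therefore need distinct
names (the second remote's ``root`` is only an example of why one might duplicate a
URL; all names and paths are placeholders):

    ext_pillar:
      - git:
        - master https://gitserver/git-pillar.git
        - master https://gitserver/git-pillar.git:
          - name: second_remote
          - root: other/subdir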
@@ -1195,8 +1195,24 @@ def installed(
    if not isinstance(version, six.string_types) and version is not None:
        version = str(version)

    was_refreshed = False

    if version is not None and version == 'latest':
        version = __salt__['pkg.latest_version'](name, **kwargs)
        try:
            version = __salt__['pkg.latest_version'](name,
                                                     fromrepo=fromrepo,
                                                     refresh=refresh)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while checking the '
                               'newest available version of package(s): {0}'
                               .format(exc)}

        was_refreshed = refresh
        refresh = False

        # If version is empty, it means the latest version is installed
        # so we grab that version to avoid passing an empty string
        if not version:
@@ -1209,6 +1225,13 @@ def installed(
                    'comment': exc.strerror}

    kwargs['allow_updates'] = allow_updates

    # if windows and a refresh
    # is required, we will have to do a refresh when _find_install_targets
    # calls pkg.list_pkgs
    if salt.utils.is_windows():
        kwargs['refresh'] = refresh

    result = _find_install_targets(name, version, pkgs, sources,
                                   fromrepo=fromrepo,
                                   skip_suggestions=skip_suggestions,
@@ -1218,6 +1241,11 @@ def installed(
                                   reinstall=reinstall,
                                   **kwargs)

    if salt.utils.is_windows():
        was_refreshed = was_refreshed or refresh
        kwargs.pop('refresh')
        refresh = False

    try:
        (desired, targets, to_unpurge,
         to_reinstall, altered_files, warnings) = result
@@ -1364,9 +1392,6 @@ def installed(
                                            normalize=normalize,
                                            update_holds=update_holds,
                                            **kwargs)

            if os.path.isfile(rtag) and refresh:
                os.remove(rtag)
        except CommandExecutionError as exc:
            ret = {'name': name, 'result': False}
            if exc.info:
@@ -1381,6 +1406,8 @@ def installed(
                ret['comment'] += '\n\n' + '. '.join(warnings) + '.'
            return ret

        was_refreshed = was_refreshed or refresh

        if isinstance(pkg_ret, dict):
            changes['installed'].update(pkg_ret)
        elif isinstance(pkg_ret, six.string_types):
@@ -1432,6 +1459,9 @@ def installed(
    failed_hold = [hold_ret[x] for x in hold_ret
                   if not hold_ret[x]['result']]

    if os.path.isfile(rtag) and was_refreshed:
        os.remove(rtag)

    if to_unpurge:
        changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)
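For context, a minimal SLS sketch of the ``pkg.installed`` usage that the refresh
handling above affects (the package and repository names are placeholders):

    vim-enhanced:
      pkg.installed:
        - version: latest
        - fromrepo: updates
        - refresh: True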
@@ -281,13 +281,14 @@ class IPCClient(object):
        else:
            future = tornado.concurrent.Future()
            self._connecting_future = future
            self.io_loop.add_callback(self._connect, timeout=timeout)
            self._connect(timeout=timeout)

        if callback is not None:
            def handle_future(future):
                response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)

        return future

    @tornado.gen.coroutine
@@ -674,14 +675,6 @@ class IPCMessageSubscriber(IPCClient):

    @tornado.gen.coroutine
    def _read_async(self, callback):
        while not self.connected():
            try:
                yield self.connect()
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path))
            except Exception as exc:
                log.error('Exception occurred while Subscriber connecting: {0}'.format(exc))

        while not self.stream.closed():
            try:
                self._read_stream_future = self.stream.read_bytes(4096, partial=True)
@@ -703,6 +696,14 @@ class IPCMessageSubscriber(IPCClient):

        :param callback: A callback with the received data
        '''
        while not self.connected():
            try:
                self.connect()
            except tornado.iostream.StreamClosedError:
                log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path))
            except Exception as exc:
                log.error('Exception occurred while Subscriber connecting: {0}'.format(exc))

        self.io_loop.spawn_callback(self._read_async, callback)

    def close(self):
@@ -328,6 +328,8 @@ class ProcessManager(object):
        '''
        Create new process (assuming this one is dead), then remove the old one
        '''
        if self._restart_processes is False:
            return
        log.info('Process {0} ({1}) died with exit status {2},'
                 ' restarting...'.format(self._process_map[pid]['tgt'],
                                         pid,
@@ -397,7 +399,8 @@ class ProcessManager(object):
                log.debug('Process of pid {0} died, not a known'
                          ' process, will not restart'.format(pid))
                continue
            self.restart_process(pid)
            if self._restart_processes is True:
                self.restart_process(pid)
        elif async is True:
            yield gen.sleep(10)
        elif async is False:
@@ -103,7 +103,8 @@ class TestEventListener(AsyncTestCase):
            event_listener = saltnado.EventListener({},  # we don't use mod_opts, don't save?
                                                    {'sock_dir': SOCK_DIR,
                                                     'transport': 'zeromq'})
            event_future = event_listener.get_event(1, 'evt1', self.stop)  # get an event future
            self._finished = False  # fit to event_listener's behavior
            event_future = event_listener.get_event(self, 'evt1', self.stop)  # get an event future
            me.fire_event({'data': 'foo2'}, 'evt2')  # fire an event we don't want
            me.fire_event({'data': 'foo1'}, 'evt1')  # fire an event we do want
            self.wait()  # wait for the future
@@ -113,6 +114,27 @@ class TestEventListener(AsyncTestCase):
            self.assertEqual(event_future.result()['tag'], 'evt1')
            self.assertEqual(event_future.result()['data']['data'], 'foo1')

    def test_set_event_handler(self):
        '''
        Test subscribing events using set_event_handler
        '''
        with eventpublisher_process():
            me = event.MasterEvent(SOCK_DIR)
            event_listener = saltnado.EventListener({},  # we don't use mod_opts, don't save?
                                                    {'sock_dir': SOCK_DIR,
                                                     'transport': 'zeromq'})
            self._finished = False  # fit to event_listener's behavior
            event_future = event_listener.get_event(self,
                                                    tag='evt',
                                                    callback=self.stop,
                                                    timeout=1,
                                                    )  # get an event future
            me.fire_event({'data': 'foo'}, 'evt')  # fire an event we do want
            self.wait()

            # check that we subscribed the event we wanted
            self.assertEqual(len(event_listener.timeout_map), 0)

    def test_timeout(self):
        '''
        Make sure timeouts work correctly
@@ -121,7 +143,8 @@ class TestEventListener(AsyncTestCase):
            event_listener = saltnado.EventListener({},  # we don't use mod_opts, don't save?
                                                    {'sock_dir': SOCK_DIR,
                                                     'transport': 'zeromq'})
            event_future = event_listener.get_event(1,
            self._finished = False  # fit to event_listener's behavior
            event_future = event_listener.get_event(self,
                                                    tag='evt1',
                                                    callback=self.stop,
                                                    timeout=1,