Mirror of https://github.com/valitydev/salt.git, synced 2024-11-06 16:45:27 +00:00
Merge branch '2016.11' into '2017.7'
Conflicts:
- salt/config/__init__.py
- salt/master.py
- salt/modules/zypper.py
- tests/unit/modules/timezone_test.py
commit 58262608cd

conf/master: 14 changed lines
@@ -405,6 +405,20 @@
 # will cause minion to throw an exception and drop the message.
 # sign_pub_messages: False
 
+# Signature verification on messages published from minions
+# This requires that minions cryptographically sign the messages they
+# publish to the master. If minions are not signing, then log this information
+# at loglevel 'INFO' and drop the message without acting on it.
+# require_minion_sign_messages: False
+
+# The below will drop messages when their signatures do not validate.
+# Note that when this option is False but `require_minion_sign_messages` is True
+# minions MUST sign their messages but the validity of their signatures
+# is ignored.
+# These two config options exist so a Salt infrastructure can be moved
+# to signing minion messages gradually.
+# drop_messages_signature_fail: False
+
 # Use TLS/SSL encrypted connection between master and minion.
 # Can be set to a dictionary containing keyword arguments corresponding to Python's
 # 'ssl.wrap_socket' method.
|
File diff suppressed because it is too large
Load Diff
@@ -9,3 +9,18 @@ controls whether a minion can request that the master revoke its key. When True
 can request a key revocation and the master will comply. If it is False, the key will not
 be revoked by the msater.
 
+New master configuration option `require_minion_sign_messages`
+This requires that minions cryptographically sign the messages they
+publish to the master. If minions are not signing, then log this information
+at loglevel 'INFO' and drop the message without acting on it.
+
+New master configuration option `drop_messages_signature_fail`
+Drop messages from minions when their signatures do not validate.
+Note that when this option is False but `require_minion_sign_messages` is True
+minions MUST sign their messages but the validity of their signatures
+is ignored.
+
+New minion configuration option `minion_sign_messages`
+Causes the minion to cryptographically sign the payload of messages it places
+on the event bus for the master. The payloads are signed with the minion's
+private key so the master can verify the signature with its public key.
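Taken together, the two master options trade strictness against a gradual rollout. Below is a minimal Python sketch of the acceptance rule they describe; accept_minion_payload and signature_valid are illustrative names, not part of Salt:

def accept_minion_payload(opts, load, signature_valid):
    # Unsigned payload while signatures are required: drop it.
    if opts.get('require_minion_sign_messages') and 'sig' not in load:
        return False
    # Bad signature: drop only when drop_messages_signature_fail is set,
    # otherwise log and accept so an infrastructure can move to signing gradually.
    if 'sig' in load and not signature_valid:
        return not opts.get('drop_messages_signature_fail', False)
    return True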
@@ -1044,6 +1044,19 @@ VALID_OPTS = {
 
     # File chunk size for salt-cp
     'salt_cp_chunk_size': int,
+
+    # Require that the minion sign messages it posts to the master on the event
+    # bus
+    'minion_sign_messages': bool,
+
+    # Have master drop messages from minions for which their signatures do
+    # not verify
+    'drop_messages_signature_fail': bool,
+
+    # Require that payloads from minions have a 'sig' entry
+    # (in other words, require that minions have 'minion_sign_messages'
+    # turned on)
+    'require_minion_sign_messages': bool,
 }
 
 # default configurations
@@ -1307,6 +1320,7 @@ DEFAULT_MINION_OPTS = {
     'salt_cp_chunk_size': 65536,
     'extmod_whitelist': {},
     'extmod_blacklist': {},
+    'minion_sign_messages': False,
 }
 
 DEFAULT_MASTER_OPTS = {
@@ -1602,6 +1616,8 @@ DEFAULT_MASTER_OPTS = {
     'django_auth_settings': '',
     'allow_minion_key_revoke': True,
     'salt_cp_chunk_size': 98304,
+    'require_minion_sign_messages': False,
+    'drop_messages_signature_fail': False,
 }
 
 
@@ -47,6 +47,7 @@ if not CDOME:
 # Import salt libs
 import salt.defaults.exitcodes
 import salt.utils
+import salt.utils.decorators
 import salt.payload
 import salt.transport.client
 import salt.transport.frame
@@ -138,13 +139,41 @@ def gen_keys(keydir, keyname, keysize, user=None):
     return priv
 
 
+@salt.utils.decorators.memoize
+def _get_key_with_evict(path, timestamp):
+    '''
+    Load a key from disk. `timestamp` above is intended to be the timestamp
+    of the file's last modification. This fn is memoized so if it is called with the
+    same path and timestamp (the file's last modified time) the second time
+    the result is returned from the memoiziation. If the file gets modified
+    then the params are different and the key is loaded from disk.
+    '''
+    log.debug('salt.crypt._get_key_with_evict: Loading private key')
+    with salt.utils.fopen(path) as f:
+        key = RSA.importKey(f.read())
+    return key
+
+
+def _get_rsa_key(path):
+    '''
+    Read a key off the disk. Poor man's simple cache in effect here,
+    we memoize the result of calling _get_rsa_with_evict. This means
+    the first time _get_key_with_evict is called with a path and a timestamp
+    the result is cached. If the file (the private key) does not change
+    then its timestamp will not change and the next time the result is returned
+    from the cache. If the key DOES change the next time _get_rsa_with_evict
+    is called it is called with different parameters and the fn is run fully to
+    retrieve the key from disk.
+    '''
+    log.debug('salt.crypt._get_rsa_key: Loading private key')
+    return _get_key_with_evict(path, os.path.getmtime(path))
+
+
 def sign_message(privkey_path, message):
     '''
     Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature.
     '''
-    log.debug('salt.crypt.sign_message: Loading private key')
-    with salt.utils.fopen(privkey_path) as f:
-        key = RSA.importKey(f.read())
+    key = _get_rsa_key(privkey_path)
     log.debug('salt.crypt.sign_message: Signing message.')
     signer = PKCS1_v1_5.new(key)
     return signer.sign(SHA.new(message))
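The _get_key_with_evict/_get_rsa_key pair above caches the parsed key but still reloads it when the file changes, because the file's mtime is part of the memoization key. A standalone sketch of the same idea follows, using functools.lru_cache as a stand-in for salt.utils.decorators.memoize and illustrative names:

import functools
import os

@functools.lru_cache(maxsize=None)
def _load_with_evict(path, mtime):
    # mtime is only here to key the cache: when the file changes, the key
    # changes, so the stale cached entry is simply never hit again.
    with open(path) as handle:
        return handle.read()

def load_cached(path):
    return _load_with_evict(path, os.path.getmtime(path))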
@@ -256,27 +256,12 @@ def access_keys(opts):
     acl_users.add(opts['user'])
     acl_users.add(salt.utils.get_user())
     if opts['client_acl_verify'] and HAS_PWD:
-        log.profile('Beginning pwd.getpwall() call in masterarpi acess_keys function')
+        log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function')
         for user in pwd.getpwall():
             users.append(user.pw_name)
-        log.profile('End pwd.getpwall() call in masterarpi acess_keys function')
+        log.profile('End pwd.getpwall() call in masterarpi access_keys function')
     for user in acl_users:
-        log.info(
-            'Preparing the {0} key for local communication'.format(
-                user
-            )
-        )
-
-        if opts['client_acl_verify'] and HAS_PWD:
-            if user not in users:
-                try:
-                    log.profile('Beginning pwd.getpnam() call in masterarpi acess_keys function')
-                    user = pwd.getpwnam(user).pw_name
-                    log.profile('Beginning pwd.getpwnam() call in masterarpi acess_keys function')
-                except KeyError:
-                    log.error('ACL user {0} is not available'.format(user))
-                    continue
-
+        log.info('Preparing the %s key for local communication', user)
         keys[user] = mk_key(opts, user)
 
     # Check other users matching ACL patterns
@@ -773,6 +758,7 @@ class RemoteFuncs(object):
         # If the return data is invalid, just ignore it
         if any(key not in load for key in ('return', 'jid', 'id')):
             return False
+
         if load['jid'] == 'req':
             # The minion is returning a standalone job, request a jobid
             prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
@@ -17,6 +17,7 @@ import signal
 import stat
 import logging
 import multiprocessing
+import salt.serializers.msgpack
 
 # Import third party libs
 try:
@@ -1121,8 +1122,10 @@ class AESFuncs(object):
                 )
             )
             return False
+
         if 'tok' in load:
             load.pop('tok')
+
         return load
 
     def _ext_nodes(self, load):
@@ -1408,6 +1411,24 @@ class AESFuncs(object):
 
         :param dict load: The minion payload
         '''
+        if self.opts['require_minion_sign_messages'] and 'sig' not in load:
+            log.critical('_return: Master is requiring minions to sign their messages, but there is no signature in this payload from {0}.'.format(load['id']))
+            return False
+
+        if 'sig' in load:
+            log.trace('Verifying signed event publish from minion')
+            sig = load.pop('sig')
+            this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
+            serialized_load = salt.serializers.msgpack.serialize(load)
+            if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
+                log.info('Failed to verify event signature from minion {0}.'.format(load['id']))
+                if self.opts['drop_messages_signature_fail']:
+                    log.critical('Drop_messages_signature_fail is enabled, dropping message from {0}'.format(load['id']))
+                    return False
+                else:
+                    log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
+            load['sig'] = sig
+
         try:
             salt.utils.job.store_job(
                 self.opts, load, event=self.event, mminion=self.mminion)
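The verification path above pops 'sig' off the payload, re-serializes the remainder, and checks it against the minion's accepted public key. A self-contained round trip of the PyCrypto primitives the diff relies on (pycrypto assumed installed; this mirrors sign_message/verify_signature but is standalone, not Salt's API):

from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5

key = RSA.generate(2048)                  # stands in for minion.pem
payload = b'serialized minion payload'

signature = PKCS1_v1_5.new(key).sign(SHA.new(payload))                       # minion side
assert PKCS1_v1_5.new(key.publickey()).verify(SHA.new(payload), signature)   # master side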
@@ -1451,6 +1472,9 @@ class AESFuncs(object):
             ret['fun_args'] = load['arg']
         if 'out' in load:
             ret['out'] = load['out']
+        if 'sig' in load:
+            ret['sig'] = load['sig']
+
         self._return(ret)
 
     def minion_runner(self, clear_load):
@@ -20,6 +20,7 @@ import contextlib
 import multiprocessing
 from random import randint, shuffle
 from stat import S_IMODE
+import salt.serializers.msgpack
 
 # Import Salt Libs
 # pylint: disable=import-error,no-name-in-module,redefined-builtin
@@ -1225,11 +1226,25 @@ class Minion(MinionBase):
         return functions, returners, errors, executors
 
     def _send_req_sync(self, load, timeout):
+
+        if self.opts['minion_sign_messages']:
+            log.trace('Signing event to be published onto the bus.')
+            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
+            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
+            load['sig'] = sig
+
         channel = salt.transport.Channel.factory(self.opts)
         return channel.send(load, timeout=timeout)
 
     @tornado.gen.coroutine
     def _send_req_async(self, load, timeout):
+
+        if self.opts['minion_sign_messages']:
+            log.trace('Signing event to be published onto the bus.')
+            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
+            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
+            load['sig'] = sig
+
         channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
         ret = yield channel.send(load, timeout=timeout)
         raise tornado.gen.Return(ret)
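Note the ordering both sides depend on: the minion serializes and signs the load before attaching 'sig', and the master pops 'sig' back off before re-serializing, so both ends hash identical bytes. A hedged sketch with stand-in serialize, sign and verify callables (not Salt or msgpack APIs):

def sign_load(load, serialize, sign):
    # serialize(load) is evaluated before 'sig' is attached
    load['sig'] = sign(serialize(load))
    return load

def verify_load(load, serialize, verify):
    sig = load.pop('sig')
    ok = verify(serialize(load), sig)
    load['sig'] = sig          # keep the signature on the stored return
    return ok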
@@ -592,9 +592,7 @@ def install(name=None,
     # Handle version kwarg for a single package target
     if pkgs is None and sources is None:
         version_num = kwargs.get('version')
-        if version_num:
-            pkg_params = {name: version_num}
-        else:
+        if not version_num:
             version_num = ''
         if slot is not None:
             version_num += ':{0}'.format(slot)
@@ -529,15 +529,6 @@ def install(name=None,
     if pkg_params is None or len(pkg_params) == 0:
         return {}
 
-    version_num = kwargs.get('version')
-    if version_num:
-        if pkgs is None and sources is None:
-            # Allow 'version' to work for single package target
-            pkg_params = {name: version_num}
-        else:
-            log.warning('\'version\' parameter will be ignored for multiple '
-                        'package targets')
-
     if 'root' in kwargs:
         pkg_params['-r'] = kwargs['root']
 
@@ -115,11 +115,16 @@ def parse_targets(name=None,
     if __grains__['os'] == 'MacOS' and sources:
         log.warning('Parameter "sources" ignored on MacOS hosts.')
 
+    version = kwargs.get('version')
+
     if pkgs and sources:
         log.error('Only one of "pkgs" and "sources" can be used.')
         return None, None
 
     elif pkgs:
+        if version is not None:
+            log.warning('\'version\' argument will be ignored for multiple '
+                        'package targets')
         pkgs = _repack_pkgs(pkgs, normalize=normalize)
         if not pkgs:
             return None, None
@@ -127,6 +132,9 @@ def parse_targets(name=None,
         return pkgs, 'repository'
 
     elif sources and __grains__['os'] != 'MacOS':
+        if version is not None:
+            log.warning('\'version\' argument will be ignored for multiple '
+                        'package targets')
         sources = pack_sources(sources, normalize=normalize)
         if not sources:
             return None, None
@@ -153,9 +161,9 @@ def parse_targets(name=None,
         if normalize:
             _normalize_name = \
                 __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
-            packed = dict([(_normalize_name(x), None) for x in name.split(',')])
+            packed = dict([(_normalize_name(x), version) for x in name.split(',')])
         else:
-            packed = dict([(x, None) for x in name.split(',')])
+            packed = dict([(x, version) for x in name.split(',')])
         return packed, 'repository'
 
     else:
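The effect of the parse_targets change is that a 'version' kwarg now travels with a plain name target instead of being discarded, which is why the per-module version_num blocks removed below become redundant. A toy illustration (values are examples only):

name, version = 'vim', '8.0'
packed = dict([(x, version) for x in name.split(',')])
# packed == {'vim': '8.0'}; before this change the value was None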
@@ -1199,15 +1199,6 @@ def install(name=None,
     if pkg_params is None or len(pkg_params) == 0:
         return {}
 
-    version_num = kwargs.get('version')
-    if version_num:
-        if pkgs is None and sources is None:
-            # Allow "version" to work for single package target
-            pkg_params = {name: version_num}
-        else:
-            log.warning('"version" parameter will be ignored for multiple '
-                        'package targets')
-
     old = list_pkgs(versions_as_list=False)
     # Use of __context__ means no duplicate work here, just accessing
     # information already in __context__ from the previous call to list_pkgs()
@@ -1085,13 +1085,6 @@ def install(name=None,
         return {}
 
     version_num = Wildcard(__zypper__)(name, version)
-    if version_num:
-        if pkgs is None and sources is None:
-            # Allow "version" to work for single package target
-            pkg_params = {name: version_num}
-        else:
-            log.warning("'version' parameter will be ignored for multiple package targets")
-
     if pkg_type == 'repository':
         targets = []
         problems = []
@@ -95,13 +95,29 @@ def latest(name,
             'The path "{0}" exists and is not '
             'a directory.'.format(target)
         )
 
     if __opts__['test']:
+        if rev:
+            new_rev = str(rev)
+        else:
+            new_rev = 'HEAD'
+
         if not os.path.exists(target):
             return _neutral_test(
                 ret,
-                ('{0} doesn\'t exist and is set to be checked out.').format(target))
-        svn_cmd = 'svn.diff'
-        opts += ('-r', 'HEAD')
+                ('{0} doesn\'t exist and is set to be checked out at revision ' + new_rev + '.').format(target))
+
+        try:
+            current_info = __salt__['svn.info'](cwd, target, user=user, username=username, password=password, fmt='dict')
+            svn_cmd = 'svn.diff'
+        except exceptions.CommandExecutionError:
+            return _fail(
+                ret,
+                ('{0} exists but is not a svn working copy.').format(target))
+
+        current_rev = current_info[0]['Revision']
+
+        opts += ('-r', current_rev + ':' + new_rev)
+
         if trust:
             opts += ('--trust-server-cert',)
@@ -324,6 +324,7 @@ from __future__ import absolute_import, with_statement
 import os
 import sys
 import time
+import copy
 import signal
 import datetime
 import itertools
@@ -827,7 +828,7 @@ class Schedule(object):
             kwargs = {}
             if 'kwargs' in data:
                 kwargs = data['kwargs']
-                ret['fun_args'].append(data['kwargs'])
+                ret['fun_args'].append(copy.deepcopy(kwargs))
 
             if func not in self.functions:
                 ret['return'] = self.functions.missing_fun_string(func)
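The switch to copy.deepcopy matters because appending data['kwargs'] stores a reference to a mutable dict, so a later mutation would silently rewrite the recorded fun_args. A minimal illustration:

import copy

data = {'kwargs': {'refresh': True}}
aliased = [data['kwargs']]
snapshot = [copy.deepcopy(data['kwargs'])]

data['kwargs']['refresh'] = False
assert aliased[0]['refresh'] is False    # changed along with the source dict
assert snapshot[0]['refresh'] is True    # preserved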
@@ -884,9 +885,9 @@ class Schedule(object):
                 ret['success'] = False
                 ret['retcode'] = 254
             finally:
-                try:
-                    # Only attempt to return data to the master
-                    # if the scheduled job is running on a minion.
+                # Only attempt to return data to the master
+                # if the scheduled job is running on a minion.
+                if '__role' in self.opts and self.opts['__role'] == 'minion':
                     if 'return_job' in data and not data['return_job']:
                         pass
                     else:
@@ -908,9 +909,13 @@ class Schedule(object):
                 elif '__role' in self.opts and self.opts['__role'] == 'master':
                     event = salt.utils.event.get_master_event(self.opts,
                                                               self.opts['sock_dir'])
-                    event.fire_event(load, '__schedule_return')
+                    try:
+                        event.fire_event(load, '__schedule_return')
+                    except Exception as exc:
+                        log.exception("Unhandled exception firing event: {0}".format(exc))
 
             log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
+            try:
                 os.unlink(proc_fn)
             except OSError as exc:
                 if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
@@ -53,7 +53,8 @@ class SvnTestCase(TestCase, LoaderModuleMockMixin):
         mock = MagicMock(side_effect=[False, True])
         with patch.object(os.path, 'exists', mock):
             mock = MagicMock(return_value=True)
-            with patch.dict(svn.__salt__, {'svn.diff': mock}):
+            info_mock = MagicMock(return_value=[{'Revision': 'mocked'}])
+            with patch.dict(svn.__salt__, {'svn.diff': mock, 'svn.info': info_mock}):
                 mock = MagicMock(return_value=["Dude"])
                 with patch.object(svn, '_neutral_test', mock):
                     self.assertListEqual(svn.latest("salt",
@@ -97,8 +97,9 @@ class CryptTestCase(TestCase):
         salt.utils.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True)
 
     def test_sign_message(self):
-        with patch('salt.utils.fopen', mock_open(read_data=PRIVKEY_DATA)):
-            self.assertEqual(SIG, crypt.sign_message('/keydir/keyname.pem', MSG))
+        key = Crypto.PublicKey.RSA.importKey(PRIVKEY_DATA)
+        with patch('salt.crypt._get_rsa_key', return_value=key):
+            self.assertEqual(SIG, salt.crypt.sign_message('/keydir/keyname.pem', MSG))
 
     def test_verify_signature(self):
         with patch('salt.utils.fopen', mock_open(read_data=PUBKEY_DATA)):