commit a3e37cc2d0
Merge branch 'develop' of github.com:saltstack/salt into develop
@@ -20,6 +20,13 @@ Features
  SSL can be enabled by setting ``ssl_options`` for the returner.
  Also added support for specifying ``protocol_version`` when establishing
  cluster connection.
- The ``mode`` parameter in the :py:mod:`file.managed
  <salt.states.file.managed>` state, and the ``file_mode`` parameter in the
  :py:mod:`file.recurse <salt.states.file.recurse>` state, can both now be set
  to ``keep`` and the minion will keep the mode of the file from the Salt
  fileserver. This works only with files coming from sources prefixed with
  ``salt://``, or files local to the minion (i.e. those which are absolute
  paths, or are prefixed with ``file://``).

Config Changes
==============
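As a quick illustration of the new ``keep`` mode described above, a minimal
sketch of states using it might look like the following; the state IDs and
``salt://`` paths are placeholders, not part of this commit:

.. code-block:: yaml

    /etc/app/app.conf:
      file.managed:
        - source: salt://app/app.conf
        - mode: keep

    /etc/app/conf.d:
      file.recurse:
        - source: salt://app/conf.d
        - file_mode: keep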
117 salt/beacons/avahi_announce.py Normal file
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
'''
Beacon to announce via avahi (zeroconf)

'''
# Import Python libs
from __future__ import absolute_import
import logging

# Import 3rd Party libs
try:
    import avahi
    HAS_PYAVAHI = True
except ImportError:
    HAS_PYAVAHI = False
import dbus

log = logging.getLogger(__name__)

__virtualname__ = 'avahi_announce'

LAST_GRAINS = {}
BUS = dbus.SystemBus()
SERVER = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
                        avahi.DBUS_INTERFACE_SERVER)
GROUP = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, SERVER.EntryGroupNew()),
                       avahi.DBUS_INTERFACE_ENTRY_GROUP)


def __virtual__():
    if HAS_PYAVAHI:
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration
    '''
    if not isinstance(config, dict):
        return False, ('Configuration for avahi_announcement '
                       'beacon must be a dictionary')
    elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')):
        return False, ('Configuration for avahi_announce beacon '
                       'must contain servicetype, port and txt items')
    return True, 'Valid beacon configuration'


def beacon(config):
    '''
    Broadcast values via zeroconf

    If the announced values are static, it is advised to set run_once: True
    (do not poll) on the beacon configuration. Grains can be used to define
    txt values using the syntax: grains.<grain_name>

    The default servicename is the hostname grain value.

    Example Config

    .. code-block:: yaml

       beacons:
         avahi_announce:
           run_once: True
           servicetype: _demo._tcp
           txt:
             ProdName: grains.productname
             SerialNo: grains.serialnumber
             Comments: 'this is a test'
    '''
    ret = []
    changes = {}
    txt = {}

    global LAST_GRAINS

    _validate = validate(config)
    if not _validate[0]:
        log.warning('Beacon {0} configuration invalid, '
                    'not adding. {1}'.format(__virtualname__, _validate[1]))
        return ret

    if 'servicename' in config:
        servicename = config['servicename']
    else:
        servicename = __grains__['host']

    for item in config['txt']:
        if config['txt'][item].startswith('grains.'):
            grain = config['txt'][item][7:]
            txt[item] = __grains__[grain]
            if LAST_GRAINS and (LAST_GRAINS[grain] != __grains__[grain]):
                changes[str('txt.' + item)] = txt[item]
        else:
            txt[item] = config['txt'][item]

        if not LAST_GRAINS:
            changes[str('txt.' + item)] = txt[item]

    if changes:
        if not LAST_GRAINS:
            changes['servicename'] = servicename
            changes['servicetype'] = config['servicetype']
            changes['port'] = config['port']
        else:
            GROUP.Reset()
        GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                         servicename, config['servicetype'], '', '',
                         dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
        GROUP.Commit()

        ret.append({'tag': 'result', 'changes': changes})

    LAST_GRAINS = __grains__

    return ret
@@ -46,7 +46,6 @@ from salt.utils import kinds
try:
    from salt.utils import parsers, ip_bracket
    from salt.utils.verify import check_user, verify_env, verify_socket
    from salt.utils.verify import verify_files
except ImportError as exc:
    if exc.args[0] != 'No module named _msgpack':
        raise
@@ -162,12 +161,6 @@ class Master(parsers.MasterOptionParser, DaemonsMixin):  # pylint: disable=no-in
                    permissive=self.config['permissive_pki_access'],
                    pki_dir=self.config['pki_dir'],
                )
                logfile = self.config['log_file']
                if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                    # Logfile is not using Syslog, verify
                    current_umask = os.umask(0o027)
                    verify_files([logfile], self.config['user'])
                    os.umask(current_umask)
                # Clear out syndics from cachedir
                for syndic_file in os.listdir(self.config['syndic_dir']):
                    os.remove(os.path.join(self.config['syndic_dir'], syndic_file))
@@ -288,12 +281,6 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin):  # pylint: disable=no-in
                    permissive=self.config['permissive_pki_access'],
                    pki_dir=self.config['pki_dir'],
                )
                logfile = self.config['log_file']
                if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                    # Logfile is not using Syslog, verify
                    current_umask = os.umask(0o027)
                    verify_files([logfile], self.config['user'])
                    os.umask(current_umask)
            except OSError as error:
                self.environment_failure(error)

@@ -464,14 +451,6 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin):  # pylint: dis
                    permissive=self.config['permissive_pki_access'],
                    pki_dir=self.config['pki_dir'],
                )

                logfile = self.config.get('proxy_log') or self.config['log_file']
                if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                    # Logfile is not using Syslog, verify
                    current_umask = os.umask(0o027)
                    verify_files([logfile], self.config['user'])
                    os.umask(current_umask)

            except OSError as error:
                self.environment_failure(error)

@@ -569,12 +548,6 @@ class Syndic(parsers.SyndicOptionParser, DaemonsMixin):  # pylint: disable=no-in
                    permissive=self.config['permissive_pki_access'],
                    pki_dir=self.config['pki_dir'],
                )
                logfile = self.config['log_file']
                if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                    # Logfile is not using Syslog, verify
                    current_umask = os.umask(0o027)
                    verify_files([logfile], self.config['user'])
                    os.umask(current_umask)
            except OSError as error:
                self.environment_failure(error)

@@ -1299,6 +1299,10 @@ def mod_data(fsclient):
        ret[ref] = mods_data
    if not ret:
        return {}

    if six.PY3:
        ver_base = salt.utils.to_bytes(ver_base)

    ver = hashlib.sha1(ver_base).hexdigest()
    ext_tar_path = os.path.join(
        fsclient.opts['cachedir'],
@@ -78,12 +78,10 @@ def manage_mode(mode):

        salt '*' config.manage_mode
    '''
    if mode is None:
        return None
    ret = str(mode).lstrip('0').zfill(4)
    if ret[0] != '0':
        return '0{0}'.format(ret)
    return ret
    # config.manage_mode should no longer be invoked from the __salt__ dunder
    # in Salt code, this function is only being left here for backwards
    # compatibility.
    return salt.utils.normalize_mode(mode)


def valid_fileproto(uri):
@@ -391,7 +391,11 @@ def create(vm_):
        )
    if dns_hostname and dns_domain:
        log.info('create_dns_record: using dns_hostname="{0}", dns_domain="{1}"'.format(dns_hostname, dns_domain))
        __add_dns_addr__ = lambda t, d: post_dns_record(dns_domain, dns_hostname, t, d)
        __add_dns_addr__ = lambda t, d: post_dns_record(dns_domain=dns_domain,
                                                        name=dns_hostname,
                                                        record_type=t,
                                                        record_data=d)

        log.debug('create_dns_record: {0}'.format(__add_dns_addr__))
    else:
        log.error('create_dns_record: could not determine dns_hostname and/or dns_domain')
@@ -815,18 +819,30 @@ def destroy(name, call=None):
    return node


def post_dns_record(dns_domain, name, record_type, record_data):
def post_dns_record(**kwargs):
    '''
    Creates or updates a DNS record for the given name if the domain is managed with DO.
    Creates a DNS record for the given name if the domain is managed with DO.
    '''
    domain = query(method='domains', droplet_id=dns_domain)
    if 'kwargs' in kwargs:  # flatten kwargs if called via salt-cloud -f
        f_kwargs = kwargs['kwargs']
        del kwargs['kwargs']
        kwargs.update(f_kwargs)
    mandatory_kwargs = ('dns_domain', 'name', 'record_type', 'record_data')
    for i in mandatory_kwargs:
        if kwargs[i]:
            pass
        else:
            error = '{0}="{1}" ## all mandatory args must be provided: {2}'.format(i, kwargs[i], str(mandatory_kwargs))
            raise salt.exceptions.SaltInvocationError(error)

    domain = query(method='domains', droplet_id=kwargs['dns_domain'])

    if domain:
        result = query(
            method='domains',
            droplet_id=dns_domain,
            droplet_id=kwargs['dns_domain'],
            command='records',
            args={'type': record_type, 'name': name, 'data': record_data},
            args={'type': kwargs['record_type'], 'name': kwargs['name'], 'data': kwargs['record_data']},
            http_method='post'
        )
        return result
@@ -475,7 +475,8 @@ class AsyncAuth(object):
        while True:
            try:
                creds = yield self.sign_in(channel=channel)
            except SaltClientError as error:
            except SaltClientError as exc:
                error = exc
                break
            if creds == 'retry':
                if self.opts.get('caller'):
@@ -451,6 +451,7 @@ class RemoteFuncs(object):
        '''
        fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = fs_.serve_file
        self._file_find = fs_._find_file
        self._file_hash = fs_.file_hash
        self._file_list = fs_.file_list
        self._file_list_emptydirs = fs_.file_list_emptydirs
@@ -7,7 +7,6 @@ from __future__ import absolute_import
# Import python libs
import contextlib
import logging
import hashlib
import os
import shutil
import ftplib
@@ -749,9 +748,27 @@ class LocalClient(Client):
        '''
        path = self._check_proto(path)
        fnd = self._find_file(path, saltenv)
        if not fnd['path']:
        fnd_path = fnd.get('path')
        if not fnd_path:
            return ''
        return fnd['path']

        try:
            fnd_mode = fnd.get('stat', [])[0]
        except (IndexError, TypeError):
            fnd_mode = None

        if not salt.utils.is_windows():
            if fnd_mode is not None:
                try:
                    if os.stat(dest).st_mode != fnd_mode:
                        try:
                            os.chmod(dest, fnd_mode)
                        except OSError as exc:
                            log.warning('Failed to chmod %s: %s', dest, exc)
                except Exception:
                    pass

        return fnd_path

    def file_list(self, saltenv='base', prefix=''):
        '''
@@ -804,6 +821,22 @@ class LocalClient(Client):
                ret.append(sdecode(os.path.relpath(root, path)))
        return ret

    def __get_file_path(self, path, saltenv='base'):
        '''
        Return either a file path or the result of a remote find_file call.
        '''
        try:
            path = self._check_proto(path)
        except MinionError as err:
            # Local file path
            if not os.path.isfile(path):
                msg = 'specified file {0} is not present to generate hash: {1}'
                log.warning(msg.format(path, err))
                return None
            else:
                return path
        return self._find_file(path, saltenv)

    def hash_file(self, path, saltenv='base'):
        '''
        Return the hash of a file, to get the hash of a file in the file_roots
@@ -811,26 +844,51 @@ class LocalClient(Client):
        file with / for a local file.
        '''
        ret = {}
        fnd = self.__get_file_path(path, saltenv)
        if fnd is None:
            return ret

        try:
            path = self._check_proto(path)
        except MinionError as err:
            if not os.path.isfile(path):
                msg = 'specified file {0} is not present to generate hash: {1}'
                log.warning(msg.format(path, err))
                return ret
            else:
                opts_hash_type = self.opts.get('hash_type', 'md5')
                hash_type = getattr(hashlib, opts_hash_type)
                ret['hsum'] = salt.utils.get_hash(
                    path, form=hash_type)
                ret['hash_type'] = opts_hash_type
                return ret
        path = self._find_file(path, saltenv)['path']
        if not path:
            return {}
            # Remote file path (self._find_file() invoked)
            fnd_path = fnd['path']
        except TypeError:
            # Local file path
            fnd_path = fnd

        hash_type = self.opts.get('hash_type', 'md5')
        ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type)
        ret['hash_type'] = hash_type
        return ret

    def hash_and_stat_file(self, path, saltenv='base'):
        '''
        Return the hash of a file, to get the hash of a file in the file_roots
        prepend the path with salt://<file on server> otherwise, prepend the
        file with / for a local file.

        Additionally, return the stat result of the file, or None if no stat
        results were found.
        '''
        ret = {}
        ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type'])
        ret['hash_type'] = self.opts['hash_type']
        fnd = self.__get_file_path(path, saltenv)
        if fnd is None:
            return ret, None

        try:
            # Remote file path (self._find_file() invoked)
            fnd_path = fnd['path']
            fnd_stat = fnd.get('stat')
        except TypeError:
            # Local file path
            fnd_path = fnd
            try:
                fnd_stat = list(os.stat(fnd_path))
            except Exception:
                fnd_stat = None

        hash_type = self.opts.get('hash_type', 'md5')
        ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type)
        ret['hash_type'] = hash_type
        return ret

    def list_env(self, saltenv='base'):
@@ -906,14 +964,22 @@ class RemoteClient(Client):
        if senv:
            saltenv = senv

        if not salt.utils.is_windows():
            hash_server, stat_server = self.hash_and_stat_file(path, saltenv)
            try:
                mode_server = stat_server[0]
            except (IndexError, TypeError):
                mode_server = None
        else:
            hash_server = self.hash_file(path, saltenv)
            mode_server = None

        # Check if file exists on server, before creating files and
        # directories
        hash_server = self.hash_file(path, saltenv)
        if hash_server == '':
            log.debug(
                'Could not find file from saltenv \'{0}\', \'{1}\''.format(
                    saltenv, path
                )
                'Could not find file \'%s\' in saltenv \'%s\'',
                path, saltenv
            )
            return False

@@ -924,32 +990,76 @@ class RemoteClient(Client):
            rel_path = self._check_proto(path)

            log.debug(
                'In saltenv \'{0}\', looking at rel_path \'{1}\' to resolve '
                '\'{2}\''.format(saltenv, rel_path, path)
                'In saltenv \'%s\', looking at rel_path \'%s\' to resolve '
                '\'%s\'', saltenv, rel_path, path
            )
            with self._cache_loc(
                    rel_path, saltenv, cachedir=cachedir) as cache_dest:
                dest2check = cache_dest

        log.debug(
            'In saltenv \'{0}\', ** considering ** path \'{1}\' to resolve '
            '\'{2}\''.format(saltenv, dest2check, path)
            'In saltenv \'%s\', ** considering ** path \'%s\' to resolve '
            '\'%s\'', saltenv, dest2check, path
        )

        if dest2check and os.path.isfile(dest2check):
            hash_local = self.hash_file(dest2check, saltenv)
            if not salt.utils.is_windows():
                hash_local, stat_local = \
                    self.hash_and_stat_file(dest2check, saltenv)
                try:
                    mode_local = stat_local[0]
                except IndexError:
                    mode_local = None
            else:
                hash_local = self.hash_file(dest2check, saltenv)
                mode_local = None

            if hash_local == hash_server:
                log.info(
                    'Fetching file from saltenv \'{0}\', ** skipped ** '
                    'latest already in cache \'{1}\''.format(
                        saltenv, path
                if not salt.utils.is_windows():
                    if mode_server is None:
                        log.debug('No file mode available for \'%s\'', path)
                    elif mode_local is None:
                        log.debug(
                            'No file mode available for \'%s\'',
                            dest2check
                        )
                    else:
                        if mode_server == mode_local:
                            log.info(
                                'Fetching file from saltenv \'%s\', '
                                '** skipped ** latest already in cache '
                                '\'%s\', mode up-to-date', saltenv, path
                            )
                        else:
                            try:
                                os.chmod(dest2check, mode_server)
                                log.info(
                                    'Fetching file from saltenv \'%s\', '
                                    '** updated ** latest already in cache, '
                                    '\'%s\', mode updated from %s to %s',
                                    saltenv,
                                    path,
                                    salt.utils.st_mode_to_octal(mode_local),
                                    salt.utils.st_mode_to_octal(mode_server)
                                )
                            except OSError as exc:
                                log.warning(
                                    'Failed to chmod %s: %s', dest2check, exc
                                )
                    # We may not have been able to check/set the mode, but we
                    # don't want to re-download the file because of a failure
                    # in mode checking. Return the cached path.
                    return dest2check
                else:
                    log.info(
                        'Fetching file from saltenv \'%s\', ** skipped ** '
                        'latest already in cache \'%s\'', saltenv, path
                    )
                )
                return dest2check
                    return dest2check

        log.debug(
            'Fetching file from saltenv \'{0}\', ** attempting ** '
            '\'{1}\''.format(saltenv, path)
            'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'',
            saltenv, path
        )
        d_tries = 0
        transport_tries = 0
@@ -971,7 +1081,7 @@ class RemoteClient(Client):
                    return False
                fn_ = salt.utils.fopen(dest, 'wb+')
            else:
                log.debug('No dest file found {0}'.format(dest))
                log.debug('No dest file found')

        while True:
            if not fn_:
@@ -1003,8 +1113,10 @@ class RemoteClient(Client):
                    d_tries += 1
                    hsum = salt.utils.get_hash(dest, salt.utils.to_str(data.get('hash_type', b'md5')))
                    if hsum != data['hsum']:
                        log.warning('Bad download of file {0}, attempt {1} '
                                    'of 3'.format(path, d_tries))
                        log.warning(
                            'Bad download of file %s, attempt %d of 3',
                            path, d_tries
                        )
                        continue
                break
            if not fn_:
@@ -1023,33 +1135,56 @@ class RemoteClient(Client):
                else:
                    data = data['data']
                    fn_.write(data)
            except (TypeError, KeyError) as e:
            except (TypeError, KeyError) as exc:
                try:
                    data_type = type(data).__name__
                except AttributeError:
                    # Shouldn't happen, but don't let this cause a traceback.
                    data_type = str(type(data))
                transport_tries += 1
                log.warning('Data transport is broken, got: {0}, type: {1}, '
                            'exception: {2}, attempt {3} of 3'.format(
                                data, type(data), e, transport_tries)
                )
                log.warning(
                    'Data transport is broken, got: %s, type: %s, '
                    'exception: %s, attempt %d of 3',
                    data, data_type, exc, transport_tries
                )
                self._refresh_channel()
                if transport_tries > 3:
                    log.error('Data transport is broken, got: {0}, type: {1}, '
                              'exception: {2}, '
                              'Retry attempts exhausted'.format(
                                  data, type(data), e)
                    )
                    log.error(
                        'Data transport is broken, got: %s, type: %s, '
                        'exception: %s, retry attempts exhausted',
                        data, data_type, exc
                    )
                    break

        if fn_:
            fn_.close()
            log.info(
                'Fetching file from saltenv \'{0}\', ** done ** '
                '\'{1}\''.format(saltenv, path)
                'Fetching file from saltenv \'%s\', ** done ** \'%s\'',
                saltenv, path
            )
        else:
            log.debug(
                'In saltenv \'{0}\', we are ** missing ** the file '
                '\'{1}\''.format(saltenv, path)
                'In saltenv \'%s\', we are ** missing ** the file \'%s\'',
                saltenv, path
            )

        if not salt.utils.is_windows():
            if mode_server is not None:
                try:
                    if os.stat(dest).st_mode != mode_server:
                        try:
                            os.chmod(dest, mode_server)
                            log.info(
                                'Fetching file from saltenv \'%s\', '
                                '** done ** \'%s\', mode set to %s',
                                saltenv,
                                path,
                                salt.utils.st_mode_to_octal(mode_server)
                            )
                        except OSError:
                            log.warning('Failed to chmod %s: %s', dest, exc)
                except OSError:
                    pass
        return dest

    def file_list(self, saltenv='base', prefix=''):
@@ -1089,11 +1224,9 @@ class RemoteClient(Client):
                'cmd': '_symlink_list'}
        return self.channel.send(load)

    def hash_file(self, path, saltenv='base'):
    def __hash_and_stat_file(self, path, saltenv='base'):
        '''
        Return the hash of a file, to get the hash of a file on the salt
        master file server prepend the path with salt://<file on server>
        otherwise, prepend the file with / for a local file.
        Common code for hashing and stating files
        '''
        try:
            path = self._check_proto(path)
@@ -1105,8 +1238,7 @@ class RemoteClient(Client):
            else:
                ret = {}
                hash_type = self.opts.get('hash_type', 'md5')
                ret['hsum'] = salt.utils.get_hash(
                    path, form=hash_type)
                ret['hsum'] = salt.utils.get_hash(path, form=hash_type)
                ret['hash_type'] = hash_type
                return ret
        load = {'path': path,
@@ -1114,6 +1246,37 @@ class RemoteClient(Client):
                'cmd': '_file_hash'}
        return self.channel.send(load)

    def hash_file(self, path, saltenv='base'):
        '''
        Return the hash of a file, to get the hash of a file on the salt
        master file server prepend the path with salt://<file on server>
        otherwise, prepend the file with / for a local file.
        '''
        return self.__hash_and_stat_file(path, saltenv)

    def hash_and_stat_file(self, path, saltenv='base'):
        '''
        The same as hash_file, but also return the file's mode, or None if no
        mode data is present.
        '''
        hash_result = self.hash_file(path, saltenv)
        try:
            path = self._check_proto(path)
        except MinionError as err:
            if not os.path.isfile(path):
                return hash_result, None
            else:
                try:
                    return hash_result, list(os.stat(path))
                except Exception:
                    return hash_result, None
        load = {'path': path,
                'saltenv': saltenv,
                'cmd': '_file_find'}
        fnd = self.channel.send(load)
        stat_result = fnd.get('stat')
        return hash_result, stat_result

    def list_env(self, saltenv='base'):
        '''
        Return a list of the files in the file server's specified environment
@@ -480,6 +480,28 @@ class Fileserver(object):
        if fstr in self.servers:
            self.servers[fstr]()

    def _find_file(self, load):
        '''
        Convenience function for calls made using the RemoteClient
        '''
        path = load.get('path')
        if not path:
            return {'path': '',
                    'rel': ''}
        tgt_env = load.get('saltenv', 'base')
        return self.find_file(path, tgt_env)

    def file_find(self, load):
        '''
        Convenience function for calls made using the LocalClient
        '''
        path = load.get('path')
        if not path:
            return {'path': '',
                    'rel': ''}
        tgt_env = load.get('saltenv', 'base')
        return self.find_file(path, tgt_env)

    def find_file(self, path, saltenv, back=None):
        '''
        Find the path and return the fnd structure, this structure is passed
@@ -560,32 +582,52 @@ class Fileserver(object):
                return self.servers[fstr](load, fnd)
        return ret

    def file_hash(self, load):
    def __file_hash_and_stat(self, load):
        '''
        Return the hash of a given file
        Common code for hashing and stating files
        '''
        if 'env' in load:
            salt.utils.warn_until(
                'Oxygen',
                'Parameter \'env\' has been detected in the argument list. This '
                'parameter is no longer used and has been replaced by \'saltenv\' '
                'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
            )
                'Parameter \'env\' has been detected in the argument list. '
                'This parameter is no longer used and has been replaced by '
                '\'saltenv\' as of Salt Carbon. This warning will be removed '
                'in Salt Oxygen.'
            )
            load.pop('env')

        if 'path' not in load or 'saltenv' not in load:
            return ''
            return '', None
        if not isinstance(load['saltenv'], six.string_types):
            load['saltenv'] = six.text_type(load['saltenv'])

        fnd = self.find_file(salt.utils.locales.sdecode(load['path']),
                             load['saltenv'])
        if not fnd.get('back'):
            return ''
            return '', None
        stat_result = fnd.get('stat', None)
        fstr = '{0}.file_hash'.format(fnd['back'])
        if fstr in self.servers:
            return self.servers[fstr](load, fnd)
        return ''
            return self.servers[fstr](load, fnd), stat_result
        return '', None

    def file_hash(self, load):
        '''
        Return the hash of a given file
        '''
        try:
            return self.__file_hash_and_stat(load)[0]
        except (IndexError, TypeError):
            return ''

    def file_hash_and_stat(self, load):
        '''
        Return the hash and stat result of a given file
        '''
        try:
            return self.__file_hash_and_stat(load)
        except (IndexError, TypeError):
            return '', None

    def file_list(self, load):
        '''
@@ -86,18 +86,30 @@ def find_file(path, saltenv='base', **kwargs):
           'rel': ''}
    try:
        root = os.path.join(salt.syspaths.CACHE_DIR, 'azure')
    except IndexError:
        # An invalid index was passed
        return fnd
    except ValueError:
        # An invalid index option was passed
    except (IndexError, ValueError):
        # An invalid index or index option was passed
        return fnd
    full = os.path.join(root, path)
    if os.path.isfile(full) and not salt.fileserver.is_file_ignored(
            __opts__, full):
        fnd['path'] = full
        fnd['rel'] = path
        fnd['stat'] = list(os.stat(full))
        try:
            # Converting the stat result to a list, the elements of the
            # list correspond to the following stat_result params:
            # 0 => st_mode=33188
            # 1 => st_ino=10227377
            # 2 => st_dev=65026
            # 3 => st_nlink=1
            # 4 => st_uid=1000
            # 5 => st_gid=1000
            # 6 => st_size=1056233
            # 7 => st_atime=1468284229
            # 8 => st_mtime=1456338235
            # 9 => st_ctime=1456338235
            fnd['stat'] = list(os.stat(full))
        except Exception:
            pass
    return fnd

@@ -687,7 +687,22 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
            pass
        fnd['rel'] = path
        fnd['path'] = dest
        fnd['stat'] = list(os.stat(dest))
        try:
            # Converting the stat result to a list, the elements of the
            # list correspond to the following stat_result params:
            # 0 => st_mode=33188
            # 1 => st_ino=10227377
            # 2 => st_dev=65026
            # 3 => st_nlink=1
            # 4 => st_uid=1000
            # 5 => st_gid=1000
            # 6 => st_size=1056233
            # 7 => st_atime=1468284229
            # 8 => st_mtime=1456338235
            # 9 => st_ctime=1456338235
            fnd['stat'] = list(os.stat(dest))
        except Exception:
            pass
        repo['repo'].close()
        return fnd
    return fnd
@@ -33,7 +33,7 @@ log = logging.getLogger(__name__)

def find_file(path, saltenv='base', **kwargs):
    '''
    Search the environment for the relative path
    Search the environment for the relative path.
    '''
    if 'env' in kwargs:
        salt.utils.warn_until(
@@ -51,6 +51,32 @@ def find_file(path, saltenv='base', **kwargs):
        return fnd
    if saltenv not in __opts__['file_roots']:
        return fnd

    def _add_file_stat(fnd):
        '''
        Stat the file and, assuming no errors were found, convert the stat
        result to a list of values and add to the return dict.

        Converting the stat result to a list, the elements of the list
        correspond to the following stat_result params:

        0 => st_mode=33188
        1 => st_ino=10227377
        2 => st_dev=65026
        3 => st_nlink=1
        4 => st_uid=1000
        5 => st_gid=1000
        6 => st_size=1056233
        7 => st_atime=1468284229
        8 => st_mtime=1456338235
        9 => st_ctime=1456338235
        '''
        try:
            fnd['stat'] = list(os.stat(fnd['path']))
        except Exception:
            pass
        return fnd

    if 'index' in kwargs:
        try:
            root = __opts__['file_roots'][saltenv][int(kwargs['index'])]
@@ -64,15 +90,14 @@ def find_file(path, saltenv='base', **kwargs):
        if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
            fnd['path'] = full
            fnd['rel'] = path
            fnd['stat'] = list(os.stat(full))
            return _add_file_stat(fnd)
        return fnd
    for root in __opts__['file_roots'][saltenv]:
        full = os.path.join(root, path)
        if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
            fnd['path'] = full
            fnd['rel'] = path
            fnd['stat'] = list(os.stat(full))
            return fnd
            return _add_file_stat(fnd)
    return fnd

@@ -583,6 +583,22 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
    if os.path.isfile(full):
        fnd['rel'] = path
        fnd['path'] = full
        try:
            # Converting the stat result to a list, the elements of the
            # list correspond to the following stat_result params:
            # 0 => st_mode=33188
            # 1 => st_ino=10227377
            # 2 => st_dev=65026
            # 3 => st_nlink=1
            # 4 => st_uid=1000
            # 5 => st_gid=1000
            # 6 => st_size=1056233
            # 7 => st_atime=1468284229
            # 8 => st_mtime=1456338235
            # 9 => st_ctime=1456338235
            fnd['stat'] = list(os.stat(full))
        except Exception:
            pass
        return fnd
    return fnd

@@ -920,9 +920,20 @@ def __process_multiprocessing_logging_queue(opts, queue):
    salt.utils.appendproctitle('MultiprocessingLoggingQueue')
    if salt.utils.is_windows():
        # On Windows, creating a new process doesn't fork (copy the parent
        # process image). Due to this, we need to setup extended logging
        # process image). Due to this, we need to setup all of our logging
        # inside this process.
        setup_temp_logger()
        setup_console_logger(
            log_level=opts.get('log_level'),
            log_format=opts.get('log_fmt_console'),
            date_format=opts.get('log_datefmt_console')
        )
        setup_logfile_logger(
            opts.get('log_file'),
            log_level=opts.get('log_level_logfile'),
            log_format=opts.get('log_fmt_logfile'),
            date_format=opts.get('log_datefmt_logfile')
        )
        setup_extended_logging(opts)
    while True:
        try:
@@ -936,6 +936,7 @@ class AESFuncs(object):
        '''
        self.fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = self.fs_.serve_file
        self._file_find = self.fs_._find_file
        self._file_hash = self.fs_.file_hash
        self._file_list = self.fs_.file_list
        self._file_list_emptydirs = self.fs_.file_list_emptydirs
@@ -1270,15 +1270,6 @@ class Minion(MinionBase):
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if sys.platform.startswith('win') and \
                opts['multiprocessing'] and \
                not salt.log.setup.is_logging_configured():
            # We have to re-init the logging system for Windows
            salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
            if opts.get('log_file'):
                salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.is_windows():
@@ -1457,15 +1448,6 @@ class Minion(MinionBase):
        minion side execution.
        '''
        salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if sys.platform.startswith('win') and \
                opts['multiprocessing'] and \
                not salt.log.is_logging_configured():
            # We have to re-init the logging system for Windows
            salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
            if opts.get('log_file'):
                salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
        ret = {
            'return': {},
            'success': {},
@@ -97,17 +97,10 @@ def manage_mode(mode):

        salt '*' config.manage_mode
    '''
    if mode is None:
        return None
    if not isinstance(mode, six.string_types):
        # Make it a string in case it's not
        mode = str(mode)
    # Strip any quotes and initial 0, though zero-pad it up to 4
    ret = mode.strip('"').strip('\'').lstrip('0').zfill(4)
    if ret[0] != '0':
        # Always include a leading zero
        return '0{0}'.format(ret)
    return ret
    # config.manage_mode should no longer be invoked from the __salt__ dunder
    # in Salt code, this function is only being left here for backwards
    # compatibility.
    return salt.utils.normalize_mode(mode)


def valid_fileproto(uri):
@@ -265,22 +265,24 @@ def raw_cron(user):
            cmd = 'crontab -l'
        else:
            cmd = 'crontab -l {0}'.format(user)
        # Preserve line endings
        lines = __salt__['cmd.run_stdout'](cmd,
                                           runas=user,
                                           rstrip=False,
                                           python_shell=False).splitlines()
                                           python_shell=False).splitlines(True)
    else:
        if appUser == user:
            cmd = 'crontab -l'
        else:
            cmd = 'crontab -l -u {0}'.format(user)
        # Preserve line endings
        lines = __salt__['cmd.run_stdout'](cmd,
                                           ignore_retcode=True,
                                           rstrip=False,
                                           python_shell=False).splitlines()
                                           python_shell=False).splitlines(True)
    if len(lines) != 0 and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
        del lines[0:3]
    return '\n'.join(lines)
    return ''.join(lines)


def list_tab(user):
@@ -1149,7 +1149,7 @@ def comment_line(path,
    if not salt.utils.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = __salt__['config.manage_mode'](get_mode(path))
        pre_mode = salt.utils.normalize_mode(get_mode(path))

    # Create a copy to read from and to use as a backup later
    try:
@@ -1827,7 +1827,7 @@ def replace(path,
    if not salt.utils.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = __salt__['config.manage_mode'](get_mode(path))
        pre_mode = salt.utils.normalize_mode(get_mode(path))

    # Avoid TypeErrors by forcing repl to be a string
    repl = str(repl)
@@ -2195,7 +2195,7 @@ def blockreplace(path,
        perms = {}
        perms['user'] = get_user(path)
        perms['group'] = get_group(path)
        perms['mode'] = __salt__['config.manage_mode'](get_mode(path))
        perms['mode'] = salt.utils.normalize_mode(get_mode(path))

        # write new content in the file while avoiding partial reads
        try:
@@ -2899,7 +2899,7 @@ def copy(src, dst, recurse=False, remove_existing=False):
    if not salt.utils.is_windows():
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = __salt__['config.manage_mode'](get_mode(src))
        pre_mode = salt.utils.normalize_mode(get_mode(src))

    try:
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
@@ -3734,7 +3734,7 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = __salt__['config.manage_mode'](cur['mode'])
    perms['lmode'] = salt.utils.normalize_mode(cur['mode'])

    # Mode changes if needed
    if mode is not None:
@@ -3743,13 +3743,13 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = __salt__['config.manage_mode'](mode)
            mode = salt.utils.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    if mode != __salt__['config.manage_mode'](get_mode(name)):
                    if mode != salt.utils.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
@@ -3920,6 +3920,7 @@ def check_managed_changes(
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file
@@ -3956,6 +3957,13 @@ def check_managed_changes(
        if comments:
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)
        changes = check_file_meta(name, sfn, source, source_sum, user,
                                  group, mode, saltenv, contents)
        __clean_tmp(sfn)
@@ -4078,8 +4086,8 @@ def check_file_meta(
            and group != lstats['gid']):
        changes['group'] = group
    # Normalize the file mode
    smode = __salt__['config.manage_mode'](lstats['mode'])
    mode = __salt__['config.manage_mode'](mode)
    smode = salt.utils.normalize_mode(lstats['mode'])
    mode = salt.utils.normalize_mode(mode)
    if mode is not None and mode != smode:
        changes['mode'] = mode
    return changes
@@ -4142,7 +4150,8 @@ def manage_file(name,
                contents=None,
                dir_mode=None,
                follow_symlinks=True,
                skip_verify=False):
                skip_verify=False,
                keep_mode=False):
    '''
    Checks the destination against what was retrieved with get_managed and
    makes the appropriate modifications (if necessary).
@@ -4203,6 +4212,11 @@ def manage_file(name,

        .. versionadded:: 2016.3.0

    keep_mode : False
        If ``True``, and the ``source`` is a file from the Salt fileserver (or
        a local file on the minion), the mode of the destination file will be
        set to the mode of the source file.

    CLI Example:

    .. code-block:: bash
@@ -4233,6 +4247,13 @@ def manage_file(name,
                'hash_type': htype,
                'hsum': get_hash(sfn, form=htype)
            }
        if keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)

    # Check changes if the target file exists
    if os.path.isfile(name) or os.path.islink(name):
@@ -25,7 +25,7 @@ def __virtual__():
    '''
    if salt.utils.is_darwin() or salt.utils.is_windows():
        return True
    return False
    return (False, 'Module proxy: module only works on Windows or MacOS systems')


def _get_proxy_osx(function, network_service):
@@ -68,7 +68,7 @@ def __virtual__():
    except Exception:
        return (False, "Module yumpkg: no yum based system detected")

    enabled = ('amazon', 'xcp', 'xenserver')
    enabled = ('amazon', 'xcp', 'xenserver', 'virtuozzolinux')

    if os_family == 'redhat' or os_grain in enabled:
        return __virtualname__
@@ -205,6 +205,7 @@ import hashlib
import os

# Import salt libs
import salt.utils
import salt.utils.gitfs
import salt.utils.dictupdate
from salt.exceptions import FileserverConfigError
@@ -332,7 +333,7 @@ class _LegacyGitPillar(object):

        hash_type = getattr(hashlib, opts.get('hash_type', 'md5'))
        hash_str = '{0} {1}'.format(self.branch, self.rp_location)
        repo_hash = hash_type(hash_str).hexdigest()
        repo_hash = hash_type(salt.utils.to_bytes(hash_str)).hexdigest()
        rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash)

        if not os.path.isdir(rp_):
@@ -12,7 +12,7 @@ This module is a concrete implementation of the sql_base
ext_pillar for SQLCipher.

:maturity: new
:depends: pysqlcipher
:depends: pysqlcipher (for py2) or pysqlcipher3 (for py3)
:platform: all

Configuring the sqlcipher ext_pillar
@@ -23,6 +23,7 @@ import salt.utils.jid
import salt.exceptions

# Import 3rd-party libs
import msgpack
import salt.ext.six as six


@@ -478,3 +479,47 @@ def get_endtime(jid):
    with salt.utils.fopen(etpath, 'r') as etfile:
        endtime = etfile.read().strip('\n')
    return endtime


def _reg_dir():
    '''
    Return the reg_dir for the given job id
    '''
    return os.path.join(__opts__['cachedir'], 'thorium')


def save_reg(data):
    '''
    Save the register to msgpack files
    '''
    reg_dir = _reg_dir()
    regfile = os.path.join(reg_dir, 'register')
    try:
        if not os.path.exists(reg_dir):
            os.makedirs(reg_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
    try:
        with salt.utils.fopen(regfile, 'a') as fh_:
            msgpack.dump(data, fh_)
            fh_.close()
    except:
        log.error('Could not write to msgpack file {0}'.format(__opts__['outdir']))
        raise


def load_reg():
    '''
    Load the register from msgpack files
    '''
    reg_dir = _reg_dir()
    regfile = os.path.join(reg_dir, 'register')
    try:
        with salt.utils.fopen(regfile, 'r') as fh_:
            return msgpack.load(fh_)
    except:
        log.error('Could not read from msgpack file {0}'.format(__opts__['outdir']))
        raise
@@ -469,7 +469,7 @@ def file(name,
        Overrides the default backup mode for the user's crontab.
    '''
    # Initial set up
    mode = __salt__['config.manage_mode']('0600')
    mode = salt.utils.normalize_mode('0600')
    owner, group, crontab_dir = _get_cron_info()

    cron_path = salt.utils.mkstemp()
@@ -575,8 +575,8 @@ def _check_dir_meta(name,
            and group != stats.get('gid')):
        changes['group'] = group
    # Normalize the dir mode
    smode = __salt__['config.manage_mode'](stats['mode'])
    mode = __salt__['config.manage_mode'](mode)
    smode = salt.utils.normalize_mode(stats['mode'])
    mode = salt.utils.normalize_mode(mode)
    if mode is not None and mode != smode:
        changes['mode'] = mode
    return changes
@@ -839,7 +839,7 @@ def symlink(
    name = os.path.expanduser(name)

    # Make sure that leading zeros stripped by YAML loader are added back
    mode = __salt__['config.manage_mode'](mode)
    mode = salt.utils.normalize_mode(mode)

    user = _test_owner(kwargs, user=user)
    ret = {'name': name,
@@ -1254,8 +1254,18 @@ def managed(name,
        is running as on the minion On Windows, this is ignored

    mode
        The permissions to set on this file, aka 644, 0775, 4664. Not supported
        on Windows
        The mode to set on this file, e.g. ``644``, ``0775``, or ``4664``.

        .. note::
            This option is **not** supported on Windows.

        .. versionchanged:: Carbon
            This option can be set to ``keep``, and Salt will keep the mode
            from the Salt fileserver. This is only supported when the
            ``source`` URL begins with ``salt://``, or for files local to the
            minion. Because the ``source`` option cannot be used with any of
            the ``contents`` options, setting the ``mode`` to ``keep`` is also
            incompatible with the ``contents`` options.

    template
        If this setting is applied then the named templating engine will be
@@ -1270,7 +1280,8 @@ def managed(name,
    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions from the 'mode' argument.
        will be assigned permissions by adding the execute bit to the mode of
        the files.

    replace : True
        If set to ``False`` and the file already exists, the file will not be
@@ -1478,9 +1489,23 @@ def managed(name,
           'name': name,
           'result': True}

    content_sources = (contents, contents_pillar, contents_grains)
    if mode is not None and salt.utils.is_windows():
        return _error(ret, 'The \'mode\' option is not supported on Windows')

    try:
        keep_mode = mode.lower() == 'keep'
        if keep_mode:
            # We're not hard-coding the mode, so set it to None
            mode = None
    except AttributeError:
        keep_mode = False

    # Make sure that any leading zeros stripped by YAML loader are added back
    mode = salt.utils.normalize_mode(mode)

    contents_count = len(
        [x for x in content_sources if x is not None]
        [x for x in (contents, contents_pillar, contents_grains)
         if x is not None]
    )

    if source and contents_count > 0:
@@ -1489,6 +1514,12 @@ def managed(name,
            '\'source\' cannot be used in combination with \'contents\', '
            '\'contents_pillar\', or \'contents_grains\''
        )
    elif (mode or keep_mode) and contents_count > 0:
        return _error(
            ret,
            'Mode management cannot be used in combination with \'contents\', '
            '\'contents_pillar\', or \'contents_grains\''
        )
    elif contents_count > 1:
        return _error(
            ret,
@@ -1608,9 +1639,6 @@ def managed(name,
        ret['comment'] = 'Error while applying template on contents'
        return ret

    # Make sure that leading zeros stripped by YAML loader are added back
    mode = __salt__['config.manage_mode'](mode)

    if not name:
        return _error(ret, 'Must provide name to file.exists')
    user = _test_owner(kwargs, user=user)
@@ -1679,6 +1707,7 @@ def managed(name,
            __env__,
            contents,
            skip_verify,
            keep_mode,
            **kwargs
        )
    else:
@@ -1766,7 +1795,8 @@ def managed(name,
                contents,
                dir_mode,
                follow_symlinks,
                skip_verify)
                skip_verify,
                keep_mode)
        except Exception as exc:
            ret['changes'] = {}
            log.debug(traceback.format_exc())
@@ -1823,7 +1853,8 @@ def managed(name,
                contents,
                dir_mode,
                follow_symlinks,
                skip_verify)
                skip_verify,
                keep_mode)
        except Exception as exc:
            ret['changes'] = {}
            log.debug(traceback.format_exc())
@@ -2044,8 +2075,8 @@ def directory(name,
        file_mode = dir_mode

    # Make sure that leading zeros stripped by YAML loader are added back
    dir_mode = __salt__['config.manage_mode'](dir_mode)
    file_mode = __salt__['config.manage_mode'](file_mode)
    dir_mode = salt.utils.normalize_mode(dir_mode)
    file_mode = salt.utils.normalize_mode(file_mode)

    u_check = _check_user(user, group)
    if u_check:
@@ -2291,16 +2322,31 @@ def recurse(name,
        salt is running as on the minion. On Windows, this is ignored

    dir_mode
        The permissions mode to set on any directories created. Not supported on
        Windows
        The mode to set on any directories created.

        .. note::
            This option is **not** supported on Windows.

    file_mode
        The permissions mode to set on any files created. Not supported on
        The mode to set on any files created.
        Windows

        .. note::
            This option is **not** supported on Windows.

        .. versionchanged:: Carbon
            This option can be set to ``keep``, and Salt will keep the mode
            from the Salt fileserver. This is only supported when the
            ``source`` URL begins with ``salt://``, or for files local to the
            minion. Because the ``source`` option cannot be used with any of
            the ``contents`` options, setting the ``mode`` to ``keep`` is also
            incompatible with the ``contents`` options.

    sym_mode
        The permissions mode to set on any symlink created. Not supported on
        Windows
        The mode to set on any symlink created.

        .. note::
            This option is **not** supported on Windows.

    template
        If this setting is applied then the named templating engine will be
@@ -2408,9 +2454,22 @@ def recurse(name,
        )
        return ret

    if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \
            and salt.utils.is_windows():
        return _error(ret, 'mode management is not supported on Windows')

    # Make sure that leading zeros stripped by YAML loader are added back
    dir_mode = __salt__['config.manage_mode'](dir_mode)
    file_mode = __salt__['config.manage_mode'](file_mode)
    dir_mode = salt.utils.normalize_mode(dir_mode)

    try:
        keep_mode = file_mode.lower() == 'keep'
        if keep_mode:
            # We're not hard-coding the mode, so set it to None
            file_mode = None
    except AttributeError:
        keep_mode = False

    file_mode = salt.utils.normalize_mode(file_mode)

    u_check = _check_user(user, group)
    if u_check:
@@ -2509,7 +2568,7 @@ def recurse(name,
            source=source,
            user=user,
            group=group,
            mode=file_mode,
            mode='keep' if keep_mode else file_mode,
            template=template,
            makedirs=True,
            context=context,
@@ -4777,7 +4836,11 @@ def serialize(name,
        salt is running as on the minion

    mode
        The permissions to set on this file, aka 644, 0775, 4664
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.

        .. note::
            This option is **not** supported on Windows.

    backup
        Overrides the default backup mode for this specific file.
@@ -4914,6 +4977,9 @@ def serialize(name,

        contents += '\n'

    # Make sure that any leading zeros stripped by YAML loader are added back
    mode = salt.utils.normalize_mode(mode)

    if __opts__['test']:
        ret['changes'] = __salt__['file.check_managed_changes'](
            name=name,
@@ -18,6 +18,7 @@ import traceback

# Import Salt libs
import salt.state
import salt.loader
import salt.payload
from salt.exceptions import SaltRenderError

@@ -43,7 +44,17 @@ class ThorState(salt.state.HighState):
        opts['file_client'] = 'local'
        self.opts = opts
        salt.state.HighState.__init__(self, self.opts, loader='thorium')
        self.state.inject_globals = {'__reg__': {}}

        self.returners = salt.loader.returners(self.opts, {})
        self.reg_ret = self.opts.get('register_returner', None)
        if self.reg_ret is not None:
            try:
                regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
            except Exception as exc:
                log.error(exc)
                regdata = {}

            self.state.inject_globals = {'__reg__': regdata}
        self.event = salt.utils.event.get_master_event(
            self.opts,
            self.opts['sock_dir'])
@@ -174,4 +185,6 @@ class ThorState(salt.state.HighState):
            if (start - r_start) > recompile:
                cache = self.gather_cache()
                chunks = self.get_chunks()
                if self.reg_ret is not None:
                    self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
                r_start = time.time()

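A minimal configuration sketch for the ``register_returner`` option added
above; the returner name is an assumption, as any returner exposing
``save_reg``/``load_reg`` (such as the local cache returner extended earlier
in this commit) should work:

.. code-block:: yaml

    # Master config sketch: persist the Thorium register between runs
    # (the returner name 'local_cache' is assumed, not part of this diff).
    register_returner: local_cache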
@@ -1805,6 +1805,34 @@ def check_state_result(running, recurse=False):
    return ret


def st_mode_to_octal(mode):
    '''
    Convert the st_mode value from a stat(2) call (as returned from os.stat())
    to an octal mode.
    '''
    try:
        return oct(mode)[-4:]
    except (TypeError, IndexError):
        return ''


def normalize_mode(mode):
    '''
    Return a mode value, normalized to a string and containing a leading zero
    if it does not have one.

    Allow "keep" as a valid mode (used by file state/module to preserve mode
    from the Salt fileserver in file states).
    '''
    if mode is None:
        return None
    if not isinstance(mode, six.string_types):
        mode = str(mode)
    # Strip any quotes and initial zeroes, then zero-pad it up to 4.
    # This ensures that something like '00644' is normalized to '0644'
    return mode.strip('"').strip('\'').lstrip('0').zfill(4)


def test_mode(**kwargs):
    '''
    Examines the kwargs passed and returns True if any kwarg which matching
@ -940,7 +940,7 @@ class GitPython(GitProvider):
|
||||
tree = self.get_tree(tgt_env)
|
||||
if not tree:
|
||||
# Branch/tag/SHA not found
|
||||
return None, None
|
||||
return None, None, None
|
||||
blob = None
|
||||
depth = 0
|
||||
while True:
|
||||
@ -968,7 +968,9 @@ class GitPython(GitProvider):
|
||||
except KeyError:
|
||||
# File not found or repo_path points to a directory
|
||||
break
|
||||
return blob, blob.hexsha if blob is not None else blob
|
||||
if isinstance(blob, git.Blob):
|
||||
return blob, blob.hexsha, blob.mode
|
||||
return None, None, None
|
||||
|
||||
def get_tree(self, tgt_env):
|
||||
'''
|
||||
@ -1480,29 +1482,33 @@ class Pygit2(GitProvider):
|
||||
tree = self.get_tree(tgt_env)
|
||||
if not tree:
|
||||
# Branch/tag/SHA not found in repo
|
||||
return None, None
|
||||
return None, None, None
|
||||
blob = None
|
||||
mode = None
|
||||
depth = 0
|
||||
while True:
|
||||
depth += 1
|
||||
if depth > SYMLINK_RECURSE_DEPTH:
|
||||
break
|
||||
try:
|
||||
if stat.S_ISLNK(tree[path].filemode):
|
||||
entry = tree[path]
|
||||
mode = entry.filemode
|
||||
if stat.S_ISLNK(mode):
|
||||
# Path is a symlink. The blob data corresponding to this
|
||||
# path's object ID will be the target of the symlink. Follow
|
||||
# the symlink and set path to the location indicated
|
||||
# in the blob data.
|
||||
link_tgt = self.repo[tree[path].oid].data
|
||||
link_tgt = self.repo[entry.oid].data
|
||||
path = os.path.normpath(
|
||||
os.path.join(os.path.dirname(path), link_tgt)
|
||||
)
|
||||
else:
|
||||
oid = tree[path].oid
|
||||
blob = self.repo[oid]
|
||||
blob = self.repo[entry.oid]
|
||||
except KeyError:
|
||||
break
|
||||
return blob, blob.hex if blob is not None else blob
|
||||
if isinstance(blob, pygit2.Blob):
|
||||
return blob, blob.hex, mode
|
||||
return None, None, None
|
||||
|
||||
def get_tree(self, tgt_env):
|
||||
'''
|
||||
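For context on the symlink check above: git tree entries carry the same mode bits that stat(2) uses, so stat.S_ISLNK() can be applied directly to an entry's filemode. A tiny illustration (the literal mode values are standard git/POSIX constants, not taken from this diff):

import stat

assert stat.S_ISLNK(0o120000)      # symlink entry in a git tree
assert not stat.S_ISLNK(0o100644)  # regular, non-executable blob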
@ -1827,8 +1833,9 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
|
||||
tree = self.get_tree(tgt_env)
|
||||
if not tree:
|
||||
# Branch/tag/SHA not found
|
||||
return None, None
|
||||
return None, None, None
|
||||
blob = None
|
||||
mode = None
|
||||
depth = 0
|
||||
while True:
|
||||
depth += 1
|
||||
@ -1855,7 +1862,9 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
|
||||
break
|
||||
except KeyError:
|
||||
break
|
||||
return blob, blob.sha().hexdigest() if blob is not None else blob
|
||||
if isinstance(blob, dulwich.objects.Blob):
|
||||
return blob, blob.sha().hexdigest(), mode
|
||||
return None, None, None
|
||||
|
||||
def get_conf(self):
|
||||
'''
|
||||
@ -2697,10 +2706,24 @@ class GitFS(GitBase):
|
||||
if repo.root(tgt_env):
|
||||
repo_path = os.path.join(repo.root(tgt_env), repo_path)
|
||||
|
||||
blob, blob_hexsha = repo.find_file(repo_path, tgt_env)
|
||||
blob, blob_hexsha, blob_mode = repo.find_file(repo_path, tgt_env)
|
||||
if blob is None:
|
||||
continue
|
||||
|
||||
def _add_file_stat(fnd, mode):
|
||||
'''
|
||||
Add the mode to the return dict. In other fileserver backends
|
||||
we stat the file to get its mode, and add the stat result
|
||||
(passed through list() for better serialization) to the 'stat'
|
||||
key in the return dict. However, since we aren't using the
|
||||
stat result for anything but the mode at this time, we can
|
||||
avoid unnecessary work by just manually creating the list and
|
||||
not running an os.stat() on all files in the repo.
|
||||
'''
|
||||
if mode is not None:
|
||||
fnd['stat'] = [mode]
|
||||
return fnd
|
||||
|
||||
salt.fileserver.wait_lock(lk_fn, dest)
|
||||
if os.path.isfile(blobshadest) and os.path.isfile(dest):
|
||||
with salt.utils.fopen(blobshadest, 'r') as fp_:
|
||||
@ -2708,7 +2731,7 @@ class GitFS(GitBase):
|
||||
if sha == blob_hexsha:
|
||||
fnd['rel'] = path
|
||||
fnd['path'] = dest
|
||||
return fnd
|
||||
return _add_file_stat(fnd, blob_mode)
|
||||
with salt.utils.fopen(lk_fn, 'w+') as fp_:
|
||||
fp_.write('')
|
||||
for filename in glob.glob(hashes_glob):
|
||||
@ -2726,7 +2749,7 @@ class GitFS(GitBase):
|
||||
pass
|
||||
fnd['rel'] = path
|
||||
fnd['path'] = dest
|
||||
return fnd
|
||||
return _add_file_stat(fnd, blob_mode)
|
||||
|
||||
# No matching file was found in tgt_env. Return a dict with empty paths
|
||||
# so the calling function knows the file could not be found.
|
||||
@ -2783,7 +2806,7 @@ class GitFS(GitBase):
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
return ''
|
||||
return '', None
|
||||
ret = {'hash_type': self.opts['hash_type']}
|
||||
relpath = fnd['rel']
|
||||
path = fnd['path']
|
||||
|
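A minimal sketch of the 'stat' key contract gitfs now follows: only the blob mode is stored, in a one-element list, mirroring the shape other backends get from list(os.stat(path)) where index 0 is st_mode. The dict contents and mode value below are hypothetical:

def _add_file_stat(fnd, mode):
    # Mirrors the helper added above: record the mode as a one-element list
    # so callers that read fnd['stat'][0] keep working unchanged.
    if mode is not None:
        fnd['stat'] = [mode]
    return fnd

fnd = _add_file_stat({'rel': 'foo/bar', 'path': '/tmp/cache/foo/bar'}, 0o100644)
assert fnd['stat'][0] == 0o100644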
@ -93,7 +93,7 @@ def _generate_minion_id():
|
||||
'::1.*', 'ipv6-.*', 'fe00::.*', 'fe02::.*', '1.0.0.*.ip6.arpa']
|
||||
|
||||
def append(self, p_object):
|
||||
if p_object not in self and not self.filter(p_object):
|
||||
if p_object and p_object not in self and not self.filter(p_object):
|
||||
super(self.__class__, self).append(p_object)
|
||||
return self
|
||||
|
||||
@ -111,7 +111,7 @@ def _generate_minion_id():
|
||||
def first(self):
|
||||
return self and self[0] or None
|
||||
|
||||
hosts = DistinctList().append(platform.node()).append(socket.gethostname()).append(socket.getfqdn())
|
||||
hosts = DistinctList().append(socket.getfqdn()).append(platform.node()).append(socket.gethostname())
|
||||
if not hosts:
|
||||
try:
|
||||
for a_nfo in socket.getaddrinfo(hosts.first(), None, socket.AF_INET,
|
||||
|
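A self-contained sketch of the hostname-collection change above; the filter() body is a placeholder, the point is the new falsey-value guard in append() and trying socket.getfqdn() first:

import platform
import socket

class DistinctList(list):
    def filter(self, item):
        # Placeholder: the real helper also drops localhost-style names and
        # reverse-DNS artifacts.
        return item in ('localhost', 'ip6-localhost', 'ip6-loopback')

    def append(self, item):
        # The added 'item and' guard skips empty strings, e.g. when
        # socket.getfqdn() returns ''.
        if item and item not in self and not self.filter(item):
            super(DistinctList, self).append(item)
        return self

    def first(self):
        return self[0] if self else None

# getfqdn() is now tried first, so a fully-qualified name wins over the bare
# node name when both are available.
hosts = DistinctList().append(socket.getfqdn()).append(platform.node()).append(socket.gethostname())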
@ -38,6 +38,7 @@ import salt.utils.jid
|
||||
from salt.utils import kinds
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.utils.validate.path import is_writeable
|
||||
from salt.utils.verify import verify_files
|
||||
import salt.exceptions
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -595,6 +596,10 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
|
||||
# Setup extended logging right before the last step
|
||||
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
|
||||
# Setup the console and log file configuration before the MP logging
|
||||
# listener because the MP logging listener may need that config.
|
||||
self._mixin_after_parsed_funcs.append(self.__setup_logfile_logger_config)
|
||||
self._mixin_after_parsed_funcs.append(self.__setup_console_logger_config)
|
||||
# Setup the multiprocessing log queue listener if enabled
|
||||
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_listener)
|
||||
# Setup the console as the last _mixin_after_parsed_func to run
|
||||
@ -640,7 +645,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
# defined default
|
||||
self.options.log_level = self._default_logging_level_
|
||||
|
||||
def setup_logfile_logger(self):
|
||||
def __setup_logfile_logger_config(self, *args): # pylint: disable=unused-argument
|
||||
if self._logfile_loglevel_config_setting_name_ in self.config and not \
|
||||
self.config.get(self._logfile_loglevel_config_setting_name_):
|
||||
# Remove it from config so it inherits from log_level
|
||||
@ -673,12 +678,23 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
cli_log_path,
|
||||
self.config.get(
|
||||
# From the config setting
|
||||
self._logfile_config_setting_name_,
|
||||
# From the default setting
|
||||
self._default_logging_logfile_
|
||||
self._logfile_config_setting_name_
|
||||
)
|
||||
)
|
||||
|
||||
if self.config['verify_env']:
|
||||
# Verify the logfile if it was explicitly set but do not try to
|
||||
# verify the default
|
||||
if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
|
||||
# Logfile is not using Syslog, verify
|
||||
current_umask = os.umask(0o027)
|
||||
verify_files([logfile], self.config['user'])
|
||||
os.umask(current_umask)
|
||||
|
||||
if logfile is None:
|
||||
# Use the default setting if the logfile wasn't explicitly set
|
||||
logfile = self._default_logging_logfile_
|
||||
|
||||
cli_log_file_fmt = 'cli_{0}_log_file_fmt'.format(
|
||||
self.get_prog_name().replace('-', '_')
|
||||
)
|
||||
@ -782,6 +798,18 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
# If we haven't changed the logfile path and it's not writeable,
|
||||
# salt will fail once we try to setup the logfile logging.
|
||||
|
||||
# Save the settings back to the configuration
|
||||
self.config[self._logfile_config_setting_name_] = logfile
|
||||
self.config[self._logfile_loglevel_config_setting_name_] = loglevel
|
||||
self.config['log_fmt_logfile'] = log_file_fmt
|
||||
self.config['log_datefmt_logfile'] = log_file_datefmt
|
||||
|
||||
def setup_logfile_logger(self):
|
||||
logfile = self.config[self._logfile_config_setting_name_]
|
||||
loglevel = self.config[self._logfile_loglevel_config_setting_name_]
|
||||
log_file_fmt = self.config['log_fmt_logfile']
|
||||
log_file_datefmt = self.config['log_datefmt_logfile']
|
||||
|
||||
log.setup_logfile_logger(
|
||||
logfile,
|
||||
loglevel,
|
||||
@ -804,11 +832,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
self._get_mp_logging_listener_queue()
|
||||
)
|
||||
|
||||
def __setup_console_logger(self, *args): # pylint: disable=unused-argument
|
||||
# If daemon is set force console logger to quiet
|
||||
if getattr(self.options, 'daemon', False) is True:
|
||||
return
|
||||
|
||||
def __setup_console_logger_config(self, *args): # pylint: disable=unused-argument
|
||||
# Since we're not going to be a daemon, setup the console logger
|
||||
cli_log_fmt = 'cli_{0}_log_fmt'.format(
|
||||
self.get_prog_name().replace('-', '_')
|
||||
@ -849,8 +873,20 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
# Save the settings back to the configuration
|
||||
self.config['log_fmt_console'] = logfmt
|
||||
self.config['log_datefmt_console'] = datefmt
|
||||
|
||||
def __setup_console_logger(self, *args): # pylint: disable=unused-argument
|
||||
# If daemon is set force console logger to quiet
|
||||
if getattr(self.options, 'daemon', False) is True:
|
||||
return
|
||||
|
||||
log.setup_console_logger(
|
||||
self.config['log_level'], log_format=logfmt, date_format=datefmt
|
||||
self.config['log_level'],
|
||||
log_format=self.config['log_fmt_console'],
|
||||
date_format=self.config['log_datefmt_console']
|
||||
)
|
||||
for name, level in six.iteritems(self.config['log_granular_levels']):
|
||||
log.set_logger_level(name, level)
|
||||
|
@ -4,12 +4,27 @@ from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import logging
|
||||
import pythoncom
|
||||
import threading
|
||||
|
||||
try:
|
||||
import pythoncom
|
||||
HAS_LIBS = True
|
||||
except ImportError:
|
||||
HAS_LIBS = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if required libraries exist
|
||||
'''
|
||||
if not HAS_LIBS:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
class Com(object):
|
||||
def __init__(self):
|
||||
self.need_com_init = not self._is_main_thread()
|
||||
|
@ -62,7 +62,6 @@ import salt.utils.process
|
||||
import salt.log.setup as salt_log_setup
|
||||
from salt.utils.verify import verify_env
|
||||
from salt.utils.immutabletypes import freeze
|
||||
from salt.utils.process import SignalHandlingMultiprocessingProcess
|
||||
from salt.utils.nb_popen import NonBlockingPopen
|
||||
from salt.exceptions import SaltClientError
|
||||
|
||||
@ -76,7 +75,11 @@ except ImportError:
|
||||
import yaml
|
||||
import msgpack
|
||||
import salt.ext.six as six
|
||||
import salt.ext.six.moves.socketserver as socketserver # pylint: disable=no-name-in-module
|
||||
|
||||
try:
|
||||
import salt.ext.six.moves.socketserver as socketserver
|
||||
except ImportError:
|
||||
import socketserver
|
||||
|
||||
if salt.utils.is_windows():
|
||||
import win32api
|
||||
@ -165,7 +168,7 @@ def get_unused_localhost_port():
|
||||
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
usock.bind(('127.0.0.1', 0))
|
||||
port = usock.getsockname()[1]
|
||||
if port in (54505, 54506, 64505, 64506, 64510, 64511):
|
||||
if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
|
||||
# These ports are hardcoded in the test configuration
|
||||
port = get_unused_localhost_port()
|
||||
usock.close()
|
||||
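A standalone sketch of the port-picking approach shown above, with the truncated parts of the function filled in by assumption:

import socket

# Ports hardcoded in the test configuration; a random pick that collides with
# one of them is discarded and the lookup retried.
RESERVED_TEST_PORTS = (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521)

def get_unused_localhost_port():
    usock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    usock.bind(('127.0.0.1', 0))  # port 0 asks the OS for any free port
    port = usock.getsockname()[1]
    usock.close()
    if port in RESERVED_TEST_PORTS:
        return get_unused_localhost_port()
    return port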
@ -401,7 +404,7 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
|
||||
'''
|
||||
Start the daemon subprocess
|
||||
'''
|
||||
self._process = SignalHandlingMultiprocessingProcess(
|
||||
self._process = salt.utils.process.SignalHandlingMultiprocessingProcess(
|
||||
target=self._start, args=(self._running,))
|
||||
self._process.start()
|
||||
self._running.set()
|
||||
@ -436,23 +439,24 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
|
||||
pass
|
||||
|
||||
# Let's begin the shutdown routines
|
||||
if terminal.poll() is None:
|
||||
try:
|
||||
log.info('Sending SIGINT to %s %s DAEMON', self.display_name, self.__class__.__name__)
|
||||
terminal.send_signal(signal.SIGINT)
|
||||
except OSError as exc:
|
||||
if exc.errno not in (errno.ESRCH, errno.EACCES):
|
||||
raise
|
||||
timeout = 15
|
||||
log.info('Waiting %s seconds for %s %s DAEMON to respond to SIGINT',
|
||||
timeout,
|
||||
self.display_name,
|
||||
self.__class__.__name__)
|
||||
while timeout > 0:
|
||||
if terminal.poll() is not None:
|
||||
break
|
||||
timeout -= 0.0125
|
||||
time.sleep(0.0125)
|
||||
if not sys.platform.startswith('win'):
|
||||
if terminal.poll() is None:
|
||||
try:
|
||||
log.info('Sending SIGINT to %s %s DAEMON', self.display_name, self.__class__.__name__)
|
||||
terminal.send_signal(signal.SIGINT)
|
||||
except OSError as exc:
|
||||
if exc.errno not in (errno.ESRCH, errno.EACCES):
|
||||
raise
|
||||
timeout = 15
|
||||
log.info('Waiting %s seconds for %s %s DAEMON to respond to SIGINT',
|
||||
timeout,
|
||||
self.display_name,
|
||||
self.__class__.__name__)
|
||||
while timeout > 0:
|
||||
if terminal.poll() is not None:
|
||||
break
|
||||
timeout -= 0.0125
|
||||
time.sleep(0.0125)
|
||||
if terminal.poll() is None:
|
||||
try:
|
||||
log.info('Sending SIGTERM to %s %s DAEMON', self.display_name, self.__class__.__name__)
|
||||
@ -505,7 +509,10 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
|
||||
# Let's log and kill any child processes which salt left behind
|
||||
for child in children[:]:
|
||||
try:
|
||||
child.send_signal(signal.SIGKILL)
|
||||
if sys.platform.startswith('win'):
|
||||
child.kill()
|
||||
else:
|
||||
child.send_signal(signal.SIGKILL)
|
||||
log.info('Salt left behind the following child process: %s', child.as_dict())
|
||||
try:
|
||||
child.wait(timeout=5)
|
||||
@ -580,7 +587,11 @@ class SaltMinion(SaltDaemonScriptBase):
|
||||
return script_args
|
||||
|
||||
def get_check_ports(self):
|
||||
return set([self.config['id']])
|
||||
if salt.utils.is_windows():
|
||||
return set([self.config['tcp_pub_port'],
|
||||
self.config['tcp_pull_port']])
|
||||
else:
|
||||
return set([self.config['id']])
|
||||
|
||||
|
||||
class SaltMaster(SaltDaemonScriptBase):
|
||||
@ -978,40 +989,51 @@ class TestDaemon(object):
|
||||
running_tests_user = win32api.GetUserName()
|
||||
else:
|
||||
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
|
||||
master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
|
||||
master_opts['user'] = running_tests_user
|
||||
|
||||
tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
|
||||
with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
|
||||
known_hosts.write('')
|
||||
|
||||
# This master connects to syndic_master via a syndic
|
||||
master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
|
||||
master_opts['known_hosts_file'] = tests_known_hosts_file
|
||||
master_opts['conf_dir'] = TMP_CONF_DIR
|
||||
master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
|
||||
master_opts['user'] = running_tests_user
|
||||
master_opts['config_dir'] = TMP_CONF_DIR
|
||||
master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
|
||||
master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')
|
||||
|
||||
minion_config_path = os.path.join(CONF_DIR, 'minion')
|
||||
minion_opts = salt.config._read_conf_file(minion_config_path)
|
||||
minion_opts['user'] = running_tests_user
|
||||
minion_opts['conf_dir'] = TMP_CONF_DIR
|
||||
|
||||
minion_opts['root_dir'] = master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
|
||||
|
||||
sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
|
||||
sub_minion_opts['user'] = running_tests_user
|
||||
sub_minion_opts['conf_dir'] = TMP_SUB_MINION_CONF_DIR
|
||||
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
|
||||
|
||||
syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
|
||||
syndic_master_opts['user'] = running_tests_user
|
||||
syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
|
||||
syndic_master_opts['conf_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
|
||||
|
||||
# The syndic config file has an include setting to include the master configuration
|
||||
# This is the syndic for master
|
||||
# Let's start with a copy of the syndic master configuration
|
||||
syndic_opts = copy.deepcopy(master_opts)
|
||||
# Let's update with the syndic configuration
|
||||
syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
|
||||
# Let's remove the include setting
|
||||
syndic_opts.pop('include')
|
||||
syndic_opts['user'] = running_tests_user
|
||||
syndic_opts['conf_dir'] = TMP_SYNDIC_MINION_CONF_DIR
|
||||
syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
|
||||
syndic_opts['config_dir'] = TMP_SYNDIC_MINION_CONF_DIR
|
||||
|
||||
# This minion connects to master
|
||||
minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'minion'))
|
||||
minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
|
||||
minion_opts['user'] = running_tests_user
|
||||
minion_opts['config_dir'] = TMP_CONF_DIR
|
||||
minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
|
||||
minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
|
||||
|
||||
# This sub_minion also connects to master
|
||||
sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
|
||||
sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
|
||||
sub_minion_opts['user'] = running_tests_user
|
||||
sub_minion_opts['config_dir'] = TMP_SUB_MINION_CONF_DIR
|
||||
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
|
||||
sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
|
||||
|
||||
# This is the master of masters
|
||||
syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
|
||||
syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
|
||||
syndic_master_opts['user'] = running_tests_user
|
||||
syndic_master_opts['config_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
|
||||
syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
|
||||
syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
|
||||
|
||||
if transport == 'raet':
|
||||
master_opts['transport'] = 'raet'
|
||||
@ -1776,12 +1798,12 @@ class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixi
|
||||
except OSError:
|
||||
os.chdir(INTEGRATION_TEST_DIR)
|
||||
|
||||
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False):
|
||||
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=15): # pylint: disable=W0221
|
||||
'''
|
||||
Execute salt
|
||||
'''
|
||||
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
|
||||
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
|
||||
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
|
||||
|
||||
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False):
|
||||
'''
|
||||
|
@ -5,14 +5,15 @@
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt Libs
|
||||
import integration
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Import Salt Libs
|
||||
import integration
|
||||
import salt.ext.six as six
|
||||
|
||||
|
||||
class BatchTest(integration.ShellCase):
|
||||
'''
|
||||
@ -37,9 +38,11 @@ class BatchTest(integration.ShellCase):
|
||||
'retcode:',
|
||||
' 0',
|
||||
' batch testing']
|
||||
ret = sorted(ret)
|
||||
cmd = sorted(self.run_salt('\'*\' test.echo \'batch testing\' -b 50%'))
|
||||
self.assertListEqual(cmd, ret)
|
||||
cmd = self.run_salt('\'*\' test.echo \'batch testing\' -b 50%')
|
||||
if six.PY3:
|
||||
self.assertCountEqual(cmd, ret)
|
||||
else:
|
||||
self.assertListEqual(sorted(cmd), sorted(ret))
|
||||
|
||||
def test_batch_run_number(self):
|
||||
'''
|
||||
@ -57,8 +60,11 @@ class BatchTest(integration.ShellCase):
|
||||
' True',
|
||||
'retcode:',
|
||||
' 0']
|
||||
cmd = sorted(self.run_salt('\'*\' test.ping --batch-size 2'))
|
||||
self.assertListEqual(cmd, sorted(ret))
|
||||
cmd = self.run_salt('\'*\' test.ping --batch-size 2')
|
||||
if six.PY3:
|
||||
self.assertCountEqual(cmd, ret)
|
||||
else:
|
||||
self.assertListEqual(sorted(cmd), sorted(ret))
|
||||
|
||||
def test_batch_run_grains_targeting(self):
|
||||
'''
|
||||
@ -86,8 +92,11 @@ class BatchTest(integration.ShellCase):
|
||||
os_grain = item
|
||||
|
||||
os_grain = os_grain.strip()
|
||||
cmd = sorted(self.run_salt('-G \'os:{0}\' -b 25% test.ping'.format(os_grain)))
|
||||
self.assertListEqual(cmd, sorted(ret))
|
||||
cmd = self.run_salt('-G \'os:{0}\' -b 25% test.ping'.format(os_grain))
|
||||
if six.PY3:
|
||||
self.assertCountEqual(cmd, ret)
|
||||
else:
|
||||
self.assertListEqual(sorted(cmd), sorted(ret))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
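The batch test changes above replace sorted-list comparison with assertCountEqual on Python 3, which compares collections element-by-element regardless of order. A minimal illustration with made-up data:

import sys
import unittest

class OrderInsensitiveExample(unittest.TestCase):
    def test_unordered(self):
        ret = ['retcode:', ' 0', ' batch testing']
        cmd = [' batch testing', 'retcode:', ' 0']
        if sys.version_info[0] >= 3:
            # Same elements, any order.
            self.assertCountEqual(cmd, ret)
        else:
            self.assertListEqual(sorted(cmd), sorted(ret))

if __name__ == '__main__':
    unittest.main()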
@ -1,28 +1,32 @@
|
||||
# Master Settings
|
||||
# Connects to syndic_master via syndic
|
||||
id: master
|
||||
user: ubuntu
|
||||
interface: 127.0.0.1
|
||||
publish_port: 64505
|
||||
ret_port: 64506
|
||||
worker_threads: 3
|
||||
root_dir: /tmp/salttest
|
||||
pidfile: masterpid
|
||||
pki_dir: pki
|
||||
cachedir: cache
|
||||
pidfile: master.pid
|
||||
sock_dir: master_sock
|
||||
timeout: 3
|
||||
sock_dir: .salt-unix
|
||||
open_mode: True
|
||||
syndic_master: localhost
|
||||
fileserver_list_cache_time: 0
|
||||
file_buffer_size: 8192
|
||||
pillar_opts: True
|
||||
log_file: master.log
|
||||
log_level_logfile: debug
|
||||
key_logfile: key.log
|
||||
token_file: /tmp/ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk
|
||||
|
||||
# These settings are needed for tests on Windows, which defaults
|
||||
# to ipc_mode: tcp
|
||||
tcp_master_pub_port: 64512
|
||||
tcp_master_pull_port: 64513
|
||||
tcp_master_publish_pull: 64514
|
||||
tcp_master_workers: 64515
|
||||
|
||||
peer:
|
||||
'.*':
|
||||
- 'test.*'
|
||||
log_file: master
|
||||
log_level_logfile: debug
|
||||
key_logfile: key
|
||||
token_file: /tmp/ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk
|
||||
|
||||
file_buffer_size: 8192
|
||||
|
||||
ext_pillar:
|
||||
- git: master https://github.com/saltstack/pillar1.git
|
||||
|
@ -1,17 +1,16 @@
|
||||
# basic config
|
||||
# Connects to master
|
||||
master: localhost
|
||||
master_port: 64506
|
||||
root_dir: /tmp/salttest
|
||||
pki_dir: pki
|
||||
id: minion
|
||||
cachedir: cachedir
|
||||
interface: 127.0.0.1
|
||||
tcp_pub_port: 64510
|
||||
tcp_pull_port: 64511
|
||||
sock_dir: minion_sock
|
||||
#acceptance_wait_time: = 1
|
||||
id: minion
|
||||
open_mode: True
|
||||
log_file: minion
|
||||
log_file: minion.log
|
||||
log_level_logfile: debug
|
||||
#loop_interval: 0.05
|
||||
config_dir: /tmp/salt-tests-tmpdir
|
||||
pidfile: minion.pid
|
||||
|
||||
# module extension
|
||||
test.foo: baz
|
||||
|
@ -1,15 +1,16 @@
|
||||
# basic config
|
||||
# Connects to master
|
||||
master: localhost
|
||||
interface: 127.0.0.1
|
||||
master_port: 64506
|
||||
root_dir: /tmp/subsalttest
|
||||
pki_dir: pki
|
||||
id: sub_minion
|
||||
cachedir: cachedir
|
||||
tcp_pub_port: 64520
|
||||
tcp_pull_port: 64521
|
||||
sock_dir: sub_minion_sock
|
||||
#acceptance_wait_time: 1
|
||||
id: sub_minion
|
||||
open_mode: True
|
||||
log_file: sub_minion
|
||||
log_file: sub_minion.log
|
||||
log_level_logfile: debug
|
||||
pidfile: sub_minion.pid
|
||||
|
||||
# module extension
|
||||
test.foo: baz
|
||||
@ -33,6 +34,3 @@ grains:
|
||||
- jamie
|
||||
- zoe
|
||||
|
||||
ipc_mode: tcp
|
||||
tcp_pub_port: 64510
|
||||
tcp_pull_port: 64511
|
||||
|
@ -1,13 +1,9 @@
|
||||
include: master
|
||||
# syndic basic config
|
||||
# same config as master, except the syndic bits
|
||||
# in the TestCase we add at the top of the configfile the content of ./master
|
||||
# to avoid duplication
|
||||
order_masters: True
|
||||
acceptance_wait_time: 1
|
||||
syndic_log_file: osyndic.log
|
||||
log_level_logfile: debug
|
||||
syndic_pidfile: osyndic.pid
|
||||
# Syndic Settings
|
||||
id: syndic
|
||||
interface: 127.0.0.1
|
||||
syndic_master: localhost
|
||||
syndic_master_port: 54506
|
||||
id: syndic
|
||||
syndic_log_file: syndic.log
|
||||
syndic_pidfile: syndic.pid
|
||||
tcp_pub_port: 64510
|
||||
tcp_pull_port: 64511
|
||||
|
@ -1,19 +1,25 @@
|
||||
# Master Settings
|
||||
# This is the Master of Masters
|
||||
id: syndic_master
|
||||
interface: 127.0.0.1
|
||||
publish_port: 54505
|
||||
ret_port: 54506
|
||||
worker_threads: 3
|
||||
root_dir: /tmp/saltsyndictest
|
||||
pidfile: syndicmasterpid
|
||||
pki_dir: pki
|
||||
cachedir: cache
|
||||
pidfile: syndic_master.pid
|
||||
sock_dir: syndic_master_sock
|
||||
timeout: 1
|
||||
sock_dir: .salt-unix-syndic
|
||||
open_mode: True
|
||||
order_masters: True
|
||||
fileserver_list_cache_time: 0
|
||||
pillar_opts: True
|
||||
tcp_master_publish_pull: 33305
|
||||
tcp_master_workers: 33306
|
||||
log_file: syndic_master
|
||||
log_file: syndic_master.log
|
||||
log_level_logfile: debug
|
||||
|
||||
# These settings are needed for tests on Windows, which defaults
|
||||
# to ipc_mode: tcp
|
||||
tcp_master_pub_port: 54512
|
||||
tcp_master_pull_port: 54513
|
||||
tcp_master_publish_pull: 54514
|
||||
tcp_master_workers: 54515
|
||||
|
||||
# Syndic Settings
|
||||
order_masters: True
|
||||
|
@ -24,7 +24,7 @@ class MinionTimeoutTestCase(integration.ShellCase):
|
||||
'''
|
||||
# Launch the command
|
||||
sleep_length = 30
|
||||
ret = self.run_salt('minion test.sleep {0}'.format(sleep_length))
|
||||
ret = self.run_salt('minion test.sleep {0}'.format(sleep_length), timeout=45)
|
||||
self.assertTrue(isinstance(ret, list), 'Return is not a list. Minion'
|
||||
' may have returned error: {0}'.format(ret))
|
||||
self.assertTrue('True' in ret[1], 'Minion did not return True after '
|
||||
|
@ -50,17 +50,17 @@ class ConfigTest(integration.ModuleCase):
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['"775"']), '0775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['"1775"']), '01775')
|
||||
self.run_function('config.manage_mode', ['"1775"']), '1775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['"0775"']), '0775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['"01775"']), '01775')
|
||||
self.run_function('config.manage_mode', ['"01775"']), '1775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['"0"']), '0000')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['775']), '0775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['1775']), '01775')
|
||||
self.run_function('config.manage_mode', ['1775']), '1775')
|
||||
self.assertEqual(
|
||||
self.run_function('config.manage_mode', ['0']), '0000')
|
||||
|
||||
@ -74,12 +74,6 @@ class ConfigTest(integration.ModuleCase):
|
||||
'config.option',
|
||||
['master_port']),
|
||||
64506)
|
||||
# Master conf opt
|
||||
self.assertEqual(
|
||||
self.run_function(
|
||||
'config.option',
|
||||
['syndic_master']),
|
||||
'localhost')
|
||||
# pillar conf opt
|
||||
self.assertEqual(
|
||||
self.run_function(
|
||||
|
@ -56,6 +56,66 @@ FILEPILLARDEF = '/tmp/filepillar-defaultvalue'
|
||||
FILEPILLARGIT = '/tmp/filepillar-bar'
|
||||
|
||||
|
||||
def _test_managed_file_mode_keep_helper(testcase, local=False):
|
||||
'''
|
||||
DRY helper function to run the same test with a local or remote path
|
||||
'''
|
||||
rel_path = 'grail/scene33'
|
||||
name = os.path.join(integration.TMP, os.path.basename(rel_path))
|
||||
grail_fs_path = os.path.join(integration.FILES, 'file', 'base', rel_path)
|
||||
grail = 'salt://' + rel_path if not local else grail_fs_path
|
||||
|
||||
# Get the current mode so that we can put the file back the way we
|
||||
# found it when we're done.
|
||||
grail_fs_mode = os.stat(grail_fs_path).st_mode
|
||||
initial_mode = 504 # 0770 octal
|
||||
new_mode_1 = 384 # 0600 octal
|
||||
new_mode_2 = 420 # 0644 octal
|
||||
|
||||
# Set the initial mode, so we can be assured that when we set the mode
|
||||
# to "keep", we're actually changing the permissions of the file to the
|
||||
# new mode.
|
||||
ret = testcase.run_state(
|
||||
'file.managed',
|
||||
name=name,
|
||||
mode=oct(initial_mode),
|
||||
source=grail,
|
||||
)
|
||||
testcase.assertSaltTrueReturn(ret)
|
||||
try:
|
||||
# Update the mode on the fileserver (pass 1)
|
||||
os.chmod(grail_fs_path, new_mode_1)
|
||||
ret = testcase.run_state(
|
||||
'file.managed',
|
||||
name=name,
|
||||
mode='keep',
|
||||
source=grail,
|
||||
)
|
||||
testcase.assertSaltTrueReturn(ret)
|
||||
managed_mode = stat.S_IMODE(os.stat(name).st_mode)
|
||||
testcase.assertEqual(oct(managed_mode), oct(new_mode_1))
|
||||
# Update the mode on the fileserver (pass 2)
|
||||
# This assures us that if the file in file_roots was originally set
|
||||
# to the same mode as new_mode_1, we definitely get an updated mode
|
||||
# this time.
|
||||
os.chmod(grail_fs_path, new_mode_2)
|
||||
ret = testcase.run_state(
|
||||
'file.managed',
|
||||
name=name,
|
||||
mode='keep',
|
||||
source=grail,
|
||||
)
|
||||
testcase.assertSaltTrueReturn(ret)
|
||||
managed_mode = stat.S_IMODE(os.stat(name).st_mode)
|
||||
testcase.assertEqual(oct(managed_mode), oct(new_mode_2))
|
||||
except Exception:
|
||||
raise
|
||||
finally:
|
||||
# Set the mode of the file in the file_roots back to what it
|
||||
# originally was.
|
||||
os.chmod(grail_fs_path, grail_fs_mode)
|
||||
|
||||
|
||||
class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
|
||||
'''
|
||||
Validate the file state
|
||||
@ -165,6 +225,19 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
|
||||
self.assertEqual(oct(desired_mode), oct(resulting_mode))
|
||||
self.assertSaltTrueReturn(ret)
|
||||
|
||||
def test_managed_file_mode_keep(self):
|
||||
'''
|
||||
Test using "mode: keep" in a file.managed state
|
||||
'''
|
||||
_test_managed_file_mode_keep_helper(self, local=False)
|
||||
|
||||
def test_managed_file_mode_keep_local_source(self):
|
||||
'''
|
||||
Test using "mode: keep" in a file.managed state, with a local file path
|
||||
as the source.
|
||||
'''
|
||||
_test_managed_file_mode_keep_helper(self, local=True)
|
||||
|
||||
def test_managed_file_mode_file_exists_replace(self):
|
||||
'''
|
||||
file.managed, existing file with replace=True, change permissions
|
||||
|
@ -37,6 +37,7 @@ _PKG_TARGETS = {
|
||||
'FreeBSD': ['aalib', 'pth'],
|
||||
'SUSE': ['aalib', 'python-pssh'],
|
||||
'MacOS': ['libpng', 'jpeg'],
|
||||
'Windows': ['firefox', '7zip'],
|
||||
}
|
||||
|
||||
_PKG_TARGETS_32 = {
|
||||
|
@ -389,15 +389,22 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
|
||||
print_header(' * Salt daemons started')
|
||||
master_conf = TestDaemon.config('master')
|
||||
minion_conf = TestDaemon.config('minion')
|
||||
sub_minion_conf = TestDaemon.config('sub_minion')
|
||||
syndic_conf = TestDaemon.config('syndic')
|
||||
syndic_master_conf = TestDaemon.config('syndic_master')
|
||||
|
||||
print_header(' * Syndic master configuration values', top=False)
|
||||
print_header(' * Syndic master configuration values (MoM)', top=False)
|
||||
print('interface: {0}'.format(syndic_master_conf['interface']))
|
||||
print('publish port: {0}'.format(syndic_master_conf['publish_port']))
|
||||
print('return port: {0}'.format(syndic_master_conf['ret_port']))
|
||||
print('\n')
|
||||
|
||||
print_header(' * Syndic configuration values', top=True)
|
||||
print('interface: {0}'.format(syndic_conf['interface']))
|
||||
print('syndic master: {0}'.format(syndic_conf['syndic_master']))
|
||||
print('syndic master port: {0}'.format(syndic_conf['syndic_master_port']))
|
||||
print('\n')
|
||||
|
||||
print_header(' * Master configuration values', top=True)
|
||||
print('interface: {0}'.format(master_conf['interface']))
|
||||
print('publish port: {0}'.format(master_conf['publish_port']))
|
||||
@ -406,15 +413,24 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
|
||||
|
||||
print_header(' * Minion configuration values', top=True)
|
||||
print('interface: {0}'.format(minion_conf['interface']))
|
||||
print('master: {0}'.format(minion_conf['master']))
|
||||
print('master port: {0}'.format(minion_conf['master_port']))
|
||||
if minion_conf['ipc_mode'] == 'tcp':
|
||||
print('tcp pub port: {0}'.format(minion_conf['tcp_pub_port']))
|
||||
print('tcp pull port: {0}'.format(minion_conf['tcp_pull_port']))
|
||||
print('\n')
|
||||
|
||||
print_header(' * Syndic configuration values', top=True)
|
||||
print('interface: {0}'.format(syndic_conf['interface']))
|
||||
print('syndic master port: {0}'.format(syndic_conf['syndic_master']))
|
||||
print_header(' * Sub Minion configuration values', top=True)
|
||||
print('interface: {0}'.format(sub_minion_conf['interface']))
|
||||
print('master: {0}'.format(sub_minion_conf['master']))
|
||||
print('master port: {0}'.format(sub_minion_conf['master_port']))
|
||||
if sub_minion_conf['ipc_mode'] == 'tcp':
|
||||
print('tcp pub port: {0}'.format(sub_minion_conf['tcp_pub_port']))
|
||||
print('tcp pull port: {0}'.format(sub_minion_conf['tcp_pull_port']))
|
||||
print('\n')
|
||||
|
||||
print_header(' Your client configuration is at {0}'.format(TestDaemon.config_location()))
|
||||
print('To access the minion: `salt -c {0} minion test.ping'.format(TestDaemon.config_location()))
|
||||
print('To access the minion: salt -c {0} minion test.ping'.format(TestDaemon.config_location()))
|
||||
|
||||
while True:
|
||||
time.sleep(1)
|
||||
|
@ -149,22 +149,6 @@ class DimensionDataTestCase(ExtendedTestCase):
|
||||
'default'
|
||||
)
|
||||
|
||||
@patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_nodes', MagicMock(return_value=[]))
|
||||
def test_list_nodes(self):
|
||||
nodes = dimensiondata.list_nodes()
|
||||
self.assertEqual(
|
||||
nodes,
|
||||
{}
|
||||
)
|
||||
|
||||
@patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_locations', MagicMock(return_value=[]))
|
||||
def test_list_locations(self):
|
||||
locations = dimensiondata.avail_locations()
|
||||
self.assertEqual(
|
||||
locations,
|
||||
{}
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
|
@ -324,8 +324,8 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
|
||||
self.assertEqual(syndic_opts['master'], 'localhost')
|
||||
self.assertEqual(syndic_opts['sock_dir'], os.path.join(root_dir, 'minion_sock'))
|
||||
self.assertEqual(syndic_opts['cachedir'], os.path.join(root_dir, 'cache'))
|
||||
self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'osyndic.log'))
|
||||
self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'osyndic.pid'))
|
||||
self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'syndic.log'))
|
||||
self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'syndic.pid'))
|
||||
# Show that the options of localclient that repub to local master
|
||||
# are not merged with syndic ones
|
||||
self.assertEqual(syndic_opts['_master_conf_file'], minion_conf_path)
|
||||
|
@ -161,7 +161,7 @@ class CronTestCase(TestCase):
|
||||
'salt.modules.cron.raw_cron',
|
||||
new=MagicMock(side_effect=get_crontab)
|
||||
):
|
||||
set_crontab(L + '* * * * * ls\n')
|
||||
set_crontab(L + '* * * * * ls\n\n')
|
||||
cron.set_job(
|
||||
user='root',
|
||||
minute='*',
|
||||
@ -179,6 +179,7 @@ class CronTestCase(TestCase):
|
||||
c1,
|
||||
'# Lines below here are managed by Salt, do not edit\n'
|
||||
'* * * * * ls\n'
|
||||
'\n'
|
||||
)
|
||||
cron.set_job(
|
||||
user='root',
|
||||
|
@ -12,9 +12,11 @@ ensure_in_syspath('../../')
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.pillar import mysql
|
||||
from salt.ext.six import PY3
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
@skipIf(PY3, 'MySQL-python is not compatible with python3')
|
||||
class MysqlPillarTestCase(TestCase):
|
||||
maxDiff = None
|
||||
|
||||
|
@ -4,8 +4,7 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import TestCase, skipIf
|
||||
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, call, patch
|
||||
from salttesting import TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
@ -16,31 +15,24 @@ from salt.pillar import nodegroups
|
||||
fake_minion_id = 'fake_id'
|
||||
fake_pillar = {}
|
||||
fake_nodegroups = {
|
||||
'a': 'nodegroup_a',
|
||||
'a': fake_minion_id,
|
||||
'b': 'nodegroup_b',
|
||||
}
|
||||
fake_opts = {'nodegroups': fake_nodegroups, }
|
||||
fake_opts = {'nodegroups': fake_nodegroups, 'id': fake_minion_id}
|
||||
fake_pillar_name = 'fake_pillar_name'
|
||||
|
||||
nodegroups.__opts__ = fake_opts
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
class NodegroupsPillarTestCase(TestCase):
|
||||
'''
|
||||
Tests for salt.pillar.nodegroups
|
||||
'''
|
||||
|
||||
def _runner(self, expected_ret, pillar_name=None, nodegroup_matches=None):
|
||||
def _runner(self, expected_ret, pillar_name=None):
|
||||
pillar_name = pillar_name or fake_pillar_name
|
||||
nodegroup_matches = nodegroup_matches or [True, False, ]
|
||||
mock_nodegroup_match = MagicMock(side_effect=nodegroup_matches)
|
||||
with patch.object(nodegroups.Matcher, 'nodegroup_match', mock_nodegroup_match):
|
||||
actual_ret = nodegroups.ext_pillar(fake_minion_id, fake_pillar, pillar_name=pillar_name)
|
||||
actual_ret = nodegroups.ext_pillar(fake_minion_id, fake_pillar, pillar_name=pillar_name)
|
||||
self.assertDictEqual(actual_ret, expected_ret)
|
||||
fake_nodegroup_count = len(fake_nodegroups)
|
||||
self.assertEqual(mock_nodegroup_match.call_count, fake_nodegroup_count)
|
||||
mock_nodegroup_match.assert_has_calls([call(x, fake_nodegroups) for x in fake_nodegroups.keys()])
|
||||
|
||||
def test_succeeds(self):
|
||||
ret = {fake_pillar_name: ['a', ]}
|
||||
|
@ -131,7 +131,7 @@ class TestFileState(TestCase):
|
||||
# If the test is failing, check the position of the "contents" param
|
||||
# in the manage_file() function in salt/modules/file.py, the fix is
|
||||
# likely as simple as updating the 2nd index below.
|
||||
self.assertEqual(expected, returner.call_args[0][-4])
|
||||
self.assertEqual(expected, returner.call_args[0][-5])
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
|
@ -251,7 +251,7 @@ class NetworkTestCase(TestCase):
|
||||
:return:
|
||||
'''
|
||||
self.assertEqual(network._generate_minion_id(),
|
||||
['nodename', 'hostname', 'hostname.domainname.blank', '1.2.3.4', '5.6.7.8'])
|
||||
['hostname.domainname.blank', 'nodename', 'hostname', '1.2.3.4', '5.6.7.8'])
|
||||
|
||||
@patch('platform.node', MagicMock(return_value='hostname'))
|
||||
@patch('socket.gethostname', MagicMock(return_value='hostname'))
|
||||
@ -270,7 +270,7 @@ class NetworkTestCase(TestCase):
|
||||
|
||||
@patch('platform.node', MagicMock(return_value='very.long.and.complex.domain.name'))
|
||||
@patch('socket.gethostname', MagicMock(return_value='hostname'))
|
||||
@patch('socket.getfqdn', MagicMock(return_value='hostname'))
|
||||
@patch('socket.getfqdn', MagicMock(return_value=''))
|
||||
@patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'hostname', ('127.0.1.1', 0))]))
|
||||
@patch('salt.utils.fopen', MagicMock(return_value=False))
|
||||
@patch('os.path.exists', MagicMock(return_value=False))
|
||||
@ -286,7 +286,7 @@ class NetworkTestCase(TestCase):
|
||||
|
||||
@patch('platform.node', MagicMock(return_value='localhost'))
|
||||
@patch('socket.gethostname', MagicMock(return_value='pick.me'))
|
||||
@patch('socket.getfqdn', MagicMock(return_value='hostname'))
|
||||
@patch('socket.getfqdn', MagicMock(return_value='hostname.domainname.blank'))
|
||||
@patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'hostname', ('127.0.1.1', 0))]))
|
||||
@patch('salt.utils.fopen', MagicMock(return_value=False))
|
||||
@patch('os.path.exists', MagicMock(return_value=False))
|
||||
@ -297,7 +297,7 @@ class NetworkTestCase(TestCase):
|
||||
|
||||
:return:
|
||||
'''
|
||||
self.assertEqual(network.generate_minion_id(), 'pick.me')
|
||||
self.assertEqual(network.generate_minion_id(), 'hostname.domainname.blank')
|
||||
|
||||
@patch('platform.node', MagicMock(return_value='localhost'))
|
||||
@patch('socket.gethostname', MagicMock(return_value='ip6-loopback'))
|
||||
|