Merge branch 'develop' of github.com:saltstack/salt into develop
commit a3e37cc2d0
@@ -20,6 +20,13 @@ Features
   SSL can be enabled by setting ``ssl_options`` for the returner.
   Also added support for specifying ``protocol_version`` when establishing
   cluster connection.
+- The ``mode`` parameter in the :py:mod:`file.managed
+  <salt.states.file.managed>` state, and the ``file_mode`` parameter in the
+  :py:mod:`file.recurse <salt.states.file.recurse>` state, can both now be set
+  to ``keep`` and the minion will keep the mode of the file from the Salt
+  fileserver. This works only with files coming from sources prefixed with
+  ``salt://``, or files local to the minion (i.e. those which are absolute
+  paths, or are prefixed with ``file://``).

 Config Changes
 ==============
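A minimal state snippet showing the new ``keep`` value in practice (the state ID, path, and source below are illustrative, not taken from this changeset):

.. code-block:: yaml

    deploy_script:
      file.managed:
        - name: /usr/local/bin/deploy.sh
        - source: salt://scripts/deploy.sh
        - mode: keep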
salt/beacons/avahi_announce.py | 117 lines (new file)
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
'''
Beacon to announce via avahi (zeroconf)

'''
# Import Python libs
from __future__ import absolute_import
import logging

# Import 3rd Party libs
try:
    import avahi
    HAS_PYAVAHI = True
except ImportError:
    HAS_PYAVAHI = False
import dbus

log = logging.getLogger(__name__)

__virtualname__ = 'avahi_announce'

LAST_GRAINS = {}
BUS = dbus.SystemBus()
SERVER = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
                        avahi.DBUS_INTERFACE_SERVER)
GROUP = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, SERVER.EntryGroupNew()),
                       avahi.DBUS_INTERFACE_ENTRY_GROUP)


def __virtual__():
    if HAS_PYAVAHI:
        return __virtualname__
    return False


def validate(config):
    '''
    Validate the beacon configuration
    '''
    if not isinstance(config, dict):
        return False, ('Configuration for avahi_announce '
                       'beacon must be a dictionary')
    elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')):
        return False, ('Configuration for avahi_announce beacon '
                       'must contain servicetype, port and txt items')
    return True, 'Valid beacon configuration'


def beacon(config):
    '''
    Broadcast values via zeroconf

    If the announced values are static, it is advised to set run_once: True
    (do not poll) on the beacon configuration. Grains can be used to define
    txt values using the syntax: grains.<grain_name>

    The default servicename is the hostname grain value.

    Example Config

    .. code-block:: yaml

       beacons:
         avahi_announce:
           run_once: True
           servicetype: _demo._tcp
           txt:
             ProdName: grains.productname
             SerialNo: grains.serialnumber
             Comments: 'this is a test'
    '''
    ret = []
    changes = {}
    txt = {}

    global LAST_GRAINS

    _validate = validate(config)
    if not _validate[0]:
        log.warning('Beacon {0} configuration invalid, '
                    'not adding. {1}'.format(__virtualname__, _validate[1]))
        return ret

    if 'servicename' in config:
        servicename = config['servicename']
    else:
        servicename = __grains__['host']

    for item in config['txt']:
        if config['txt'][item].startswith('grains.'):
            grain = config['txt'][item][7:]
            txt[item] = __grains__[grain]
            if LAST_GRAINS and (LAST_GRAINS[grain] != __grains__[grain]):
                changes[str('txt.' + item)] = txt[item]
        else:
            txt[item] = config['txt'][item]

        if not LAST_GRAINS:
            changes[str('txt.' + item)] = txt[item]

    if changes:
        if not LAST_GRAINS:
            changes['servicename'] = servicename
            changes['servicetype'] = config['servicetype']
            changes['port'] = config['port']
        else:
            GROUP.Reset()
        GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                         servicename, config['servicetype'], '', '',
                         dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
        GROUP.Commit()

        ret.append({'tag': 'result', 'changes': changes})

    LAST_GRAINS = __grains__

    return ret
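For reference, a sketch of what ``beacon()`` returns on its first run, assuming the example config above plus a ``port: 1234`` entry (``validate()`` requires ``port``); the grain values are invented for illustration:

.. code-block:: python

    # First run: LAST_GRAINS is empty, so every txt item plus the service
    # identity lands in 'changes'. Later runs only report changed grains.
    [{
        'tag': 'result',
        'changes': {
            'servicename': 'minion1',            # falls back to __grains__['host']
            'servicetype': '_demo._tcp',
            'port': 1234,
            'txt.ProdName': 'PowerEdge R630',    # resolved from grains.productname
            'txt.SerialNo': 'ABC1234',           # resolved from grains.serialnumber
            'txt.Comments': 'this is a test',
        },
    }]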
@@ -46,7 +46,6 @@ from salt.utils import kinds
 try:
     from salt.utils import parsers, ip_bracket
     from salt.utils.verify import check_user, verify_env, verify_socket
-    from salt.utils.verify import verify_files
 except ImportError as exc:
     if exc.args[0] != 'No module named _msgpack':
         raise

@@ -162,12 +161,6 @@ class Master(parsers.MasterOptionParser, DaemonsMixin):  # pylint: disable=no-init
                 permissive=self.config['permissive_pki_access'],
                 pki_dir=self.config['pki_dir'],
             )
-            logfile = self.config['log_file']
-            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
-                # Logfile is not using Syslog, verify
-                current_umask = os.umask(0o027)
-                verify_files([logfile], self.config['user'])
-                os.umask(current_umask)
             # Clear out syndics from cachedir
             for syndic_file in os.listdir(self.config['syndic_dir']):
                 os.remove(os.path.join(self.config['syndic_dir'], syndic_file))

@@ -288,12 +281,6 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin):  # pylint: disable=no-init
                 permissive=self.config['permissive_pki_access'],
                 pki_dir=self.config['pki_dir'],
             )
-            logfile = self.config['log_file']
-            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
-                # Logfile is not using Syslog, verify
-                current_umask = os.umask(0o027)
-                verify_files([logfile], self.config['user'])
-                os.umask(current_umask)
         except OSError as error:
             self.environment_failure(error)

@@ -464,14 +451,6 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin):  # pylint: disable=no-init
                 permissive=self.config['permissive_pki_access'],
                 pki_dir=self.config['pki_dir'],
             )
-
-            logfile = self.config.get('proxy_log') or self.config['log_file']
-            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
-                # Logfile is not using Syslog, verify
-                current_umask = os.umask(0o027)
-                verify_files([logfile], self.config['user'])
-                os.umask(current_umask)
-
         except OSError as error:
             self.environment_failure(error)

@@ -569,12 +548,6 @@ class Syndic(parsers.SyndicOptionParser, DaemonsMixin):  # pylint: disable=no-init
                 permissive=self.config['permissive_pki_access'],
                 pki_dir=self.config['pki_dir'],
             )
-            logfile = self.config['log_file']
-            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
-                # Logfile is not using Syslog, verify
-                current_umask = os.umask(0o027)
-                verify_files([logfile], self.config['user'])
-                os.umask(current_umask)
         except OSError as error:
             self.environment_failure(error)
@@ -1299,6 +1299,10 @@ def mod_data(fsclient):
             ret[ref] = mods_data
     if not ret:
         return {}
+
+    if six.PY3:
+        ver_base = salt.utils.to_bytes(ver_base)
+
     ver = hashlib.sha1(ver_base).hexdigest()
     ext_tar_path = os.path.join(
         fsclient.opts['cachedir'],
@@ -78,12 +78,10 @@ def manage_mode(mode):

         salt '*' config.manage_mode
     '''
-    if mode is None:
-        return None
-    ret = str(mode).lstrip('0').zfill(4)
-    if ret[0] != '0':
-        return '0{0}'.format(ret)
-    return ret
+    # config.manage_mode should no longer be invoked from the __salt__ dunder
+    # in Salt code, this function is only being left here for backwards
+    # compatibility.
+    return salt.utils.normalize_mode(mode)


 def valid_fileproto(uri):
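The removed logic above is what ``salt.utils.normalize_mode`` is expected to encapsulate; a rough, hedged re-implementation for illustration (not the actual ``salt.utils`` source):

.. code-block:: python

    def normalize_mode_sketch(mode):
        # YAML may hand us an int (644) or a quoted string ('"0644"'); either
        # way, return a zero-padded octal string with a leading zero, and pass
        # None straight through, mirroring the old manage_mode behaviour.
        if mode is None:
            return None
        mode = str(mode).strip('"').strip("'").lstrip('0').zfill(4)
        return mode if mode[0] == '0' else '0{0}'.format(mode)

    assert normalize_mode_sketch(644) == '0644'
    assert normalize_mode_sketch('0775') == '0775'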
@@ -391,7 +391,11 @@ def create(vm_):
             )
         if dns_hostname and dns_domain:
             log.info('create_dns_record: using dns_hostname="{0}", dns_domain="{1}"'.format(dns_hostname, dns_domain))
-            __add_dns_addr__ = lambda t, d: post_dns_record(dns_domain, dns_hostname, t, d)
+            __add_dns_addr__ = lambda t, d: post_dns_record(dns_domain=dns_domain,
+                                                            name=dns_hostname,
+                                                            record_type=t,
+                                                            record_data=d)
+
             log.debug('create_dns_record: {0}'.format(__add_dns_addr__))
         else:
             log.error('create_dns_record: could not determine dns_hostname and/or dns_domain')
@@ -815,18 +819,30 @@ def destroy(name, call=None):
     return node


-def post_dns_record(dns_domain, name, record_type, record_data):
+def post_dns_record(**kwargs):
     '''
-    Creates or updates a DNS record for the given name if the domain is managed with DO.
+    Creates a DNS record for the given name if the domain is managed with DO.
     '''
-    domain = query(method='domains', droplet_id=dns_domain)
+    if 'kwargs' in kwargs:  # flatten kwargs if called via salt-cloud -f
+        f_kwargs = kwargs['kwargs']
+        del kwargs['kwargs']
+        kwargs.update(f_kwargs)
+    mandatory_kwargs = ('dns_domain', 'name', 'record_type', 'record_data')
+    for i in mandatory_kwargs:
+        if kwargs[i]:
+            pass
+        else:
+            error = '{0}="{1}" ## all mandatory args must be provided: {2}'.format(i, kwargs[i], str(mandatory_kwargs))
+            raise salt.exceptions.SaltInvocationError(error)
+
+    domain = query(method='domains', droplet_id=kwargs['dns_domain'])
+
     if domain:
         result = query(
             method='domains',
-            droplet_id=dns_domain,
+            droplet_id=kwargs['dns_domain'],
             command='records',
-            args={'type': record_type, 'name': name, 'data': record_data},
+            args={'type': kwargs['record_type'], 'name': kwargs['name'], 'data': kwargs['record_data']},
             http_method='post'
         )
         return result
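With ``post_dns_record`` now keyword-only, it can be driven either from ``create()`` (as above) or directly; a hedged example call with made-up domain and record values. When invoked via ``salt-cloud -f post_dns_record <provider>``, the arguments arrive nested under ``kwargs['kwargs']`` and are flattened by the block above.

.. code-block:: python

    post_dns_record(dns_domain='example.com',
                    name='web01.example.com',
                    record_type='A',
                    record_data='203.0.113.10')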
@@ -475,7 +475,8 @@ class AsyncAuth(object):
         while True:
             try:
                 creds = yield self.sign_in(channel=channel)
-            except SaltClientError as error:
+            except SaltClientError as exc:
+                error = exc
                 break
             if creds == 'retry':
                 if self.opts.get('caller'):
@@ -451,6 +451,7 @@ class RemoteFuncs(object):
         '''
         fs_ = salt.fileserver.Fileserver(self.opts)
         self._serve_file = fs_.serve_file
+        self._file_find = fs_._find_file
         self._file_hash = fs_.file_hash
         self._file_list = fs_.file_list
         self._file_list_emptydirs = fs_.file_list_emptydirs
@@ -7,7 +7,6 @@ from __future__ import absolute_import
 # Import python libs
 import contextlib
 import logging
-import hashlib
 import os
 import shutil
 import ftplib
@@ -749,9 +748,27 @@ class LocalClient(Client):
         '''
         path = self._check_proto(path)
         fnd = self._find_file(path, saltenv)
-        if not fnd['path']:
+        fnd_path = fnd.get('path')
+        if not fnd_path:
             return ''
-        return fnd['path']
+
+        try:
+            fnd_mode = fnd.get('stat', [])[0]
+        except (IndexError, TypeError):
+            fnd_mode = None
+
+        if not salt.utils.is_windows():
+            if fnd_mode is not None:
+                try:
+                    if os.stat(dest).st_mode != fnd_mode:
+                        try:
+                            os.chmod(dest, fnd_mode)
+                        except OSError as exc:
+                            log.warning('Failed to chmod %s: %s', dest, exc)
+                except Exception:
+                    pass
+
+        return fnd_path

     def file_list(self, saltenv='base', prefix=''):
         '''
@@ -804,6 +821,22 @@ class LocalClient(Client):
             ret.append(sdecode(os.path.relpath(root, path)))
         return ret

+    def __get_file_path(self, path, saltenv='base'):
+        '''
+        Return either a file path or the result of a remote find_file call.
+        '''
+        try:
+            path = self._check_proto(path)
+        except MinionError as err:
+            # Local file path
+            if not os.path.isfile(path):
+                msg = 'specified file {0} is not present to generate hash: {1}'
+                log.warning(msg.format(path, err))
+                return None
+            else:
+                return path
+        return self._find_file(path, saltenv)
+
     def hash_file(self, path, saltenv='base'):
         '''
         Return the hash of a file, to get the hash of a file in the file_roots

@@ -811,26 +844,51 @@ class LocalClient(Client):
         file with / for a local file.
         '''
         ret = {}
+        fnd = self.__get_file_path(path, saltenv)
+        if fnd is None:
+            return ret
+
         try:
-            path = self._check_proto(path)
-        except MinionError as err:
-            if not os.path.isfile(path):
-                msg = 'specified file {0} is not present to generate hash: {1}'
-                log.warning(msg.format(path, err))
-                return ret
-            else:
-                opts_hash_type = self.opts.get('hash_type', 'md5')
-                hash_type = getattr(hashlib, opts_hash_type)
-                ret['hsum'] = salt.utils.get_hash(
-                    path, form=hash_type)
-                ret['hash_type'] = opts_hash_type
-                return ret
-        path = self._find_file(path, saltenv)['path']
-        if not path:
-            return {}
-        ret = {}
-        ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type'])
-        ret['hash_type'] = self.opts['hash_type']
+            # Remote file path (self._find_file() invoked)
+            fnd_path = fnd['path']
+        except TypeError:
+            # Local file path
+            fnd_path = fnd
+
+        hash_type = self.opts.get('hash_type', 'md5')
+        ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type)
+        ret['hash_type'] = hash_type
+        return ret
+
+    def hash_and_stat_file(self, path, saltenv='base'):
+        '''
+        Return the hash of a file, to get the hash of a file in the file_roots
+        prepend the path with salt://<file on server> otherwise, prepend the
+        file with / for a local file.
+
+        Additionally, return the stat result of the file, or None if no stat
+        results were found.
+        '''
+        ret = {}
+        fnd = self.__get_file_path(path, saltenv)
+        if fnd is None:
+            return ret, None
+
+        try:
+            # Remote file path (self._find_file() invoked)
+            fnd_path = fnd['path']
+            fnd_stat = fnd.get('stat')
+        except TypeError:
+            # Local file path
+            fnd_path = fnd
+            try:
+                fnd_stat = list(os.stat(fnd_path))
+            except Exception:
+                fnd_stat = None
+
+        hash_type = self.opts.get('hash_type', 'md5')
+        ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type)
+        ret['hash_type'] = hash_type
         return ret

     def list_env(self, saltenv='base'):
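The ``stat`` entry consumed above is the ``list(os.stat(...))`` that the fileserver backends attach to ``fnd`` later in this diff; a small standalone sketch of pulling the mode back out of such a list:

.. code-block:: python

    import os
    import stat

    # The stat result is carried as a plain list so it serializes cleanly;
    # index 0 is st_mode, which is what fnd.get('stat', [])[0] reads above.
    st = list(os.stat('/etc/hosts'))
    fnd_mode = st[0]
    print(oct(stat.S_IMODE(fnd_mode)))   # permission bits only, e.g. 0o644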
@@ -906,14 +964,22 @@ class RemoteClient(Client):
             if senv:
                 saltenv = senv

+        if not salt.utils.is_windows():
+            hash_server, stat_server = self.hash_and_stat_file(path, saltenv)
+            try:
+                mode_server = stat_server[0]
+            except (IndexError, TypeError):
+                mode_server = None
+        else:
+            hash_server = self.hash_file(path, saltenv)
+            mode_server = None
+
         # Check if file exists on server, before creating files and
         # directories
-        hash_server = self.hash_file(path, saltenv)
         if hash_server == '':
             log.debug(
-                'Could not find file from saltenv \'{0}\', \'{1}\''.format(
-                    saltenv, path
-                )
+                'Could not find file \'%s\' in saltenv \'%s\'',
+                path, saltenv
             )
             return False

@@ -924,32 +990,76 @@ class RemoteClient(Client):
             rel_path = self._check_proto(path)

             log.debug(
-                'In saltenv \'{0}\', looking at rel_path \'{1}\' to resolve '
-                '\'{2}\''.format(saltenv, rel_path, path)
+                'In saltenv \'%s\', looking at rel_path \'%s\' to resolve '
+                '\'%s\'', saltenv, rel_path, path
             )
             with self._cache_loc(
                     rel_path, saltenv, cachedir=cachedir) as cache_dest:
                 dest2check = cache_dest

         log.debug(
-            'In saltenv \'{0}\', ** considering ** path \'{1}\' to resolve '
-            '\'{2}\''.format(saltenv, dest2check, path)
+            'In saltenv \'%s\', ** considering ** path \'%s\' to resolve '
+            '\'%s\'', saltenv, dest2check, path
         )

         if dest2check and os.path.isfile(dest2check):
-            hash_local = self.hash_file(dest2check, saltenv)
-            if hash_local == hash_server:
-                log.info(
-                    'Fetching file from saltenv \'{0}\', ** skipped ** '
-                    'latest already in cache \'{1}\''.format(
-                        saltenv, path
-                    )
-                )
-                return dest2check
+            if not salt.utils.is_windows():
+                hash_local, stat_local = \
+                    self.hash_and_stat_file(dest2check, saltenv)
+                try:
+                    mode_local = stat_local[0]
+                except IndexError:
+                    mode_local = None
+            else:
+                hash_local = self.hash_file(dest2check, saltenv)
+                mode_local = None
+
+            if hash_local == hash_server:
+                if not salt.utils.is_windows():
+                    if mode_server is None:
+                        log.debug('No file mode available for \'%s\'', path)
+                    elif mode_local is None:
+                        log.debug(
+                            'No file mode available for \'%s\'',
+                            dest2check
+                        )
+                    else:
+                        if mode_server == mode_local:
+                            log.info(
+                                'Fetching file from saltenv \'%s\', '
+                                '** skipped ** latest already in cache '
+                                '\'%s\', mode up-to-date', saltenv, path
+                            )
+                        else:
+                            try:
+                                os.chmod(dest2check, mode_server)
+                                log.info(
+                                    'Fetching file from saltenv \'%s\', '
+                                    '** updated ** latest already in cache, '
+                                    '\'%s\', mode updated from %s to %s',
+                                    saltenv,
+                                    path,
+                                    salt.utils.st_mode_to_octal(mode_local),
+                                    salt.utils.st_mode_to_octal(mode_server)
+                                )
+                            except OSError as exc:
+                                log.warning(
+                                    'Failed to chmod %s: %s', dest2check, exc
+                                )
+                    # We may not have been able to check/set the mode, but we
+                    # don't want to re-download the file because of a failure
+                    # in mode checking. Return the cached path.
+                    return dest2check
+                else:
+                    log.info(
+                        'Fetching file from saltenv \'%s\', ** skipped ** '
+                        'latest already in cache \'%s\'', saltenv, path
+                    )
+                    return dest2check

         log.debug(
-            'Fetching file from saltenv \'{0}\', ** attempting ** '
-            '\'{1}\''.format(saltenv, path)
+            'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'',
+            saltenv, path
         )
         d_tries = 0
         transport_tries = 0

@@ -971,7 +1081,7 @@ class RemoteClient(Client):
                 return False
             fn_ = salt.utils.fopen(dest, 'wb+')
         else:
-            log.debug('No dest file found {0}'.format(dest))
+            log.debug('No dest file found')

         while True:
             if not fn_:

@@ -1003,8 +1113,10 @@ class RemoteClient(Client):
                     d_tries += 1
                     hsum = salt.utils.get_hash(dest, salt.utils.to_str(data.get('hash_type', b'md5')))
                     if hsum != data['hsum']:
-                        log.warning('Bad download of file {0}, attempt {1} '
-                                    'of 3'.format(path, d_tries))
+                        log.warning(
+                            'Bad download of file %s, attempt %d of 3',
+                            path, d_tries
+                        )
                         continue
             break
         if not fn_:

@@ -1023,33 +1135,56 @@ class RemoteClient(Client):
             else:
                 data = data['data']
                 fn_.write(data)
-        except (TypeError, KeyError) as e:
+        except (TypeError, KeyError) as exc:
+            try:
+                data_type = type(data).__name__
+            except AttributeError:
+                # Shouldn't happen, but don't let this cause a traceback.
+                data_type = str(type(data))
             transport_tries += 1
-            log.warning('Data transport is broken, got: {0}, type: {1}, '
-                        'exception: {2}, attempt {3} of 3'.format(
-                            data, type(data), e, transport_tries)
-                        )
+            log.warning(
+                'Data transport is broken, got: %s, type: %s, '
+                'exception: %s, attempt %d of 3',
+                data, data_type, exc, transport_tries
+            )
             self._refresh_channel()
             if transport_tries > 3:
-                log.error('Data transport is broken, got: {0}, type: {1}, '
-                          'exception: {2}, '
-                          'Retry attempts exhausted'.format(
-                              data, type(data), e)
-                          )
+                log.error(
+                    'Data transport is broken, got: %s, type: %s, '
+                    'exception: %s, retry attempts exhausted',
+                    data, data_type, exc
+                )
                 break

         if fn_:
             fn_.close()
             log.info(
-                'Fetching file from saltenv \'{0}\', ** done ** '
-                '\'{1}\''.format(saltenv, path)
+                'Fetching file from saltenv \'%s\', ** done ** \'%s\'',
+                saltenv, path
             )
         else:
             log.debug(
-                'In saltenv \'{0}\', we are ** missing ** the file '
-                '\'{1}\''.format(saltenv, path)
+                'In saltenv \'%s\', we are ** missing ** the file \'%s\'',
+                saltenv, path
             )
+
+        if not salt.utils.is_windows():
+            if mode_server is not None:
+                try:
+                    if os.stat(dest).st_mode != mode_server:
+                        try:
+                            os.chmod(dest, mode_server)
+                            log.info(
+                                'Fetching file from saltenv \'%s\', '
+                                '** done ** \'%s\', mode set to %s',
+                                saltenv,
+                                path,
+                                salt.utils.st_mode_to_octal(mode_server)
+                            )
+                        except OSError as exc:
+                            log.warning('Failed to chmod %s: %s', dest, exc)
+                except OSError:
+                    pass
         return dest

     def file_list(self, saltenv='base', prefix=''):
@@ -1089,11 +1224,9 @@ class RemoteClient(Client):
                 'cmd': '_symlink_list'}
         return self.channel.send(load)

-    def hash_file(self, path, saltenv='base'):
+    def __hash_and_stat_file(self, path, saltenv='base'):
         '''
-        Return the hash of a file, to get the hash of a file on the salt
-        master file server prepend the path with salt://<file on server>
-        otherwise, prepend the file with / for a local file.
+        Common code for hashing and stating files
         '''
         try:
             path = self._check_proto(path)

@@ -1105,8 +1238,7 @@ class RemoteClient(Client):
             else:
                 ret = {}
                 hash_type = self.opts.get('hash_type', 'md5')
-                ret['hsum'] = salt.utils.get_hash(
-                    path, form=hash_type)
+                ret['hsum'] = salt.utils.get_hash(path, form=hash_type)
                 ret['hash_type'] = hash_type
                 return ret
         load = {'path': path,

@@ -1114,6 +1246,37 @@ class RemoteClient(Client):
                 'cmd': '_file_hash'}
         return self.channel.send(load)

+    def hash_file(self, path, saltenv='base'):
+        '''
+        Return the hash of a file, to get the hash of a file on the salt
+        master file server prepend the path with salt://<file on server>
+        otherwise, prepend the file with / for a local file.
+        '''
+        return self.__hash_and_stat_file(path, saltenv)
+
+    def hash_and_stat_file(self, path, saltenv='base'):
+        '''
+        The same as hash_file, but also return the file's mode, or None if no
+        mode data is present.
+        '''
+        hash_result = self.hash_file(path, saltenv)
+        try:
+            path = self._check_proto(path)
+        except MinionError as err:
+            if not os.path.isfile(path):
+                return hash_result, None
+            else:
+                try:
+                    return hash_result, list(os.stat(path))
+                except Exception:
+                    return hash_result, None
+        load = {'path': path,
+                'saltenv': saltenv,
+                'cmd': '_file_find'}
+        fnd = self.channel.send(load)
+        stat_result = fnd.get('stat')
+        return hash_result, stat_result
+
     def list_env(self, saltenv='base'):
         '''
         Return a list of the files in the file server's specified environment
@@ -480,6 +480,28 @@ class Fileserver(object):
             if fstr in self.servers:
                 self.servers[fstr]()

+    def _find_file(self, load):
+        '''
+        Convenience function for calls made using the RemoteClient
+        '''
+        path = load.get('path')
+        if not path:
+            return {'path': '',
+                    'rel': ''}
+        tgt_env = load.get('saltenv', 'base')
+        return self.find_file(path, tgt_env)
+
+    def file_find(self, load):
+        '''
+        Convenience function for calls made using the LocalClient
+        '''
+        path = load.get('path')
+        if not path:
+            return {'path': '',
+                    'rel': ''}
+        tgt_env = load.get('saltenv', 'base')
+        return self.find_file(path, tgt_env)
+
     def find_file(self, path, saltenv, back=None):
         '''
         Find the path and return the fnd structure, this structure is passed
@@ -560,33 +582,53 @@ class Fileserver(object):
             return self.servers[fstr](load, fnd)
         return ret

-    def file_hash(self, load):
+    def __file_hash_and_stat(self, load):
         '''
-        Return the hash of a given file
+        Common code for hashing and stating files
         '''
         if 'env' in load:
             salt.utils.warn_until(
                 'Oxygen',
-                'Parameter \'env\' has been detected in the argument list. This '
-                'parameter is no longer used and has been replaced by \'saltenv\' '
-                'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
+                'Parameter \'env\' has been detected in the argument list. '
+                'This parameter is no longer used and has been replaced by '
+                '\'saltenv\' as of Salt Carbon. This warning will be removed '
+                'in Salt Oxygen.'
             )
             load.pop('env')

         if 'path' not in load or 'saltenv' not in load:
-            return ''
+            return '', None
         if not isinstance(load['saltenv'], six.string_types):
             load['saltenv'] = six.text_type(load['saltenv'])

         fnd = self.find_file(salt.utils.locales.sdecode(load['path']),
                              load['saltenv'])
         if not fnd.get('back'):
-            return ''
+            return '', None
+        stat_result = fnd.get('stat', None)
         fstr = '{0}.file_hash'.format(fnd['back'])
         if fstr in self.servers:
-            return self.servers[fstr](load, fnd)
-        return ''
+            return self.servers[fstr](load, fnd), stat_result
+        return '', None
+
+    def file_hash(self, load):
+        '''
+        Return the hash of a given file
+        '''
+        try:
+            return self.__file_hash_and_stat(load)[0]
+        except (IndexError, TypeError):
+            return ''
+
+    def file_hash_and_stat(self, load):
+        '''
+        Return the hash and stat result of a given file
+        '''
+        try:
+            return self.__file_hash_and_stat(load)
+        except (IndexError, TypeError):
+            return '', None

     def file_list(self, load):
         '''
         Return a list of files from the dominant environment
@@ -86,18 +86,30 @@ def find_file(path, saltenv='base', **kwargs):
            'rel': ''}
     try:
         root = os.path.join(salt.syspaths.CACHE_DIR, 'azure')
-    except IndexError:
-        # An invalid index was passed
-        return fnd
-    except ValueError:
-        # An invalid index option was passed
+    except (IndexError, ValueError):
+        # An invalid index or index option was passed
         return fnd
     full = os.path.join(root, path)
     if os.path.isfile(full) and not salt.fileserver.is_file_ignored(
             __opts__, full):
         fnd['path'] = full
         fnd['rel'] = path
-        fnd['stat'] = list(os.stat(full))
+        try:
+            # Converting the stat result to a list, the elements of the
+            # list correspond to the following stat_result params:
+            # 0 => st_mode=33188
+            # 1 => st_ino=10227377
+            # 2 => st_dev=65026
+            # 3 => st_nlink=1
+            # 4 => st_uid=1000
+            # 5 => st_gid=1000
+            # 6 => st_size=1056233
+            # 7 => st_atime=1468284229
+            # 8 => st_mtime=1456338235
+            # 9 => st_ctime=1456338235
+            fnd['stat'] = list(os.stat(full))
+        except Exception:
+            pass
     return fnd
@@ -687,7 +687,22 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
                 pass
             fnd['rel'] = path
             fnd['path'] = dest
-            fnd['stat'] = list(os.stat(dest))
+            try:
+                # Converting the stat result to a list, the elements of the
+                # list correspond to the following stat_result params:
+                # 0 => st_mode=33188
+                # 1 => st_ino=10227377
+                # 2 => st_dev=65026
+                # 3 => st_nlink=1
+                # 4 => st_uid=1000
+                # 5 => st_gid=1000
+                # 6 => st_size=1056233
+                # 7 => st_atime=1468284229
+                # 8 => st_mtime=1456338235
+                # 9 => st_ctime=1456338235
+                fnd['stat'] = list(os.stat(dest))
+            except Exception:
+                pass
             repo['repo'].close()
             return fnd
     return fnd
@@ -33,7 +33,7 @@ log = logging.getLogger(__name__)

 def find_file(path, saltenv='base', **kwargs):
     '''
-    Search the environment for the relative path
+    Search the environment for the relative path.
     '''
     if 'env' in kwargs:
         salt.utils.warn_until(
@@ -51,6 +51,32 @@ def find_file(path, saltenv='base', **kwargs):
         return fnd
     if saltenv not in __opts__['file_roots']:
         return fnd
+
+    def _add_file_stat(fnd):
+        '''
+        Stat the file and, assuming no errors were found, convert the stat
+        result to a list of values and add to the return dict.
+
+        Converting the stat result to a list, the elements of the list
+        correspond to the following stat_result params:
+
+        0 => st_mode=33188
+        1 => st_ino=10227377
+        2 => st_dev=65026
+        3 => st_nlink=1
+        4 => st_uid=1000
+        5 => st_gid=1000
+        6 => st_size=1056233
+        7 => st_atime=1468284229
+        8 => st_mtime=1456338235
+        9 => st_ctime=1456338235
+        '''
+        try:
+            fnd['stat'] = list(os.stat(fnd['path']))
+        except Exception:
+            pass
+        return fnd
+
     if 'index' in kwargs:
         try:
             root = __opts__['file_roots'][saltenv][int(kwargs['index'])]

@@ -64,15 +90,14 @@ def find_file(path, saltenv='base', **kwargs):
         if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
             fnd['path'] = full
             fnd['rel'] = path
-            fnd['stat'] = list(os.stat(full))
+            return _add_file_stat(fnd)
         return fnd
     for root in __opts__['file_roots'][saltenv]:
         full = os.path.join(root, path)
         if os.path.isfile(full) and not salt.fileserver.is_file_ignored(__opts__, full):
             fnd['path'] = full
             fnd['rel'] = path
-            fnd['stat'] = list(os.stat(full))
-            return fnd
+            return _add_file_stat(fnd)
     return fnd

@@ -583,6 +583,22 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
         if os.path.isfile(full):
             fnd['rel'] = path
             fnd['path'] = full
-            fnd['stat'] = list(os.stat(full))
+            try:
+                # Converting the stat result to a list, the elements of the
+                # list correspond to the following stat_result params:
+                # 0 => st_mode=33188
+                # 1 => st_ino=10227377
+                # 2 => st_dev=65026
+                # 3 => st_nlink=1
+                # 4 => st_uid=1000
+                # 5 => st_gid=1000
+                # 6 => st_size=1056233
+                # 7 => st_atime=1468284229
+                # 8 => st_mtime=1456338235
+                # 9 => st_ctime=1456338235
+                fnd['stat'] = list(os.stat(full))
+            except Exception:
+                pass
             return fnd
     return fnd
||||||
|
@ -920,9 +920,20 @@ def __process_multiprocessing_logging_queue(opts, queue):
|
|||||||
salt.utils.appendproctitle('MultiprocessingLoggingQueue')
|
salt.utils.appendproctitle('MultiprocessingLoggingQueue')
|
||||||
if salt.utils.is_windows():
|
if salt.utils.is_windows():
|
||||||
# On Windows, creating a new process doesn't fork (copy the parent
|
# On Windows, creating a new process doesn't fork (copy the parent
|
||||||
# process image). Due to this, we need to setup extended logging
|
# process image). Due to this, we need to setup all of our logging
|
||||||
# inside this process.
|
# inside this process.
|
||||||
setup_temp_logger()
|
setup_temp_logger()
|
||||||
|
setup_console_logger(
|
||||||
|
log_level=opts.get('log_level'),
|
||||||
|
log_format=opts.get('log_fmt_console'),
|
||||||
|
date_format=opts.get('log_datefmt_console')
|
||||||
|
)
|
||||||
|
setup_logfile_logger(
|
||||||
|
opts.get('log_file'),
|
||||||
|
log_level=opts.get('log_level_logfile'),
|
||||||
|
log_format=opts.get('log_fmt_logfile'),
|
||||||
|
date_format=opts.get('log_datefmt_logfile')
|
||||||
|
)
|
||||||
setup_extended_logging(opts)
|
setup_extended_logging(opts)
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
|
@@ -936,6 +936,7 @@ class AESFuncs(object):
         '''
         self.fs_ = salt.fileserver.Fileserver(self.opts)
         self._serve_file = self.fs_.serve_file
+        self._file_find = self.fs_._find_file
         self._file_hash = self.fs_.file_hash
         self._file_list = self.fs_.file_list
         self._file_list_emptydirs = self.fs_.file_list_emptydirs
@@ -1270,15 +1270,6 @@ class Minion(MinionBase):
         This method should be used as a threading target, start the actual
         minion side execution.
         '''
-        # this seems awkward at first, but it's a workaround for Windows
-        # multiprocessing communication.
-        if sys.platform.startswith('win') and \
-                opts['multiprocessing'] and \
-                not salt.log.setup.is_logging_configured():
-            # We have to re-init the logging system for Windows
-            salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
-            if opts.get('log_file'):
-                salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
         fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

         if opts['multiprocessing'] and not salt.utils.is_windows():
@@ -1457,15 +1448,6 @@ class Minion(MinionBase):
         minion side execution.
         '''
         salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
-        # this seems awkward at first, but it's a workaround for Windows
-        # multiprocessing communication.
-        if sys.platform.startswith('win') and \
-                opts['multiprocessing'] and \
-                not salt.log.is_logging_configured():
-            # We have to re-init the logging system for Windows
-            salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
-            if opts.get('log_file'):
-                salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
         ret = {
             'return': {},
             'success': {},
@@ -97,17 +97,10 @@ def manage_mode(mode):

         salt '*' config.manage_mode
     '''
-    if mode is None:
-        return None
-    if not isinstance(mode, six.string_types):
-        # Make it a string in case it's not
-        mode = str(mode)
-    # Strip any quotes and initial 0, though zero-pad it up to 4
-    ret = mode.strip('"').strip('\'').lstrip('0').zfill(4)
-    if ret[0] != '0':
-        # Always include a leading zero
-        return '0{0}'.format(ret)
-    return ret
+    # config.manage_mode should no longer be invoked from the __salt__ dunder
+    # in Salt code, this function is only being left here for backwards
+    # compatibility.
+    return salt.utils.normalize_mode(mode)


 def valid_fileproto(uri):
@@ -265,22 +265,24 @@ def raw_cron(user):
             cmd = 'crontab -l'
         else:
             cmd = 'crontab -l {0}'.format(user)
+        # Preserve line endings
         lines = __salt__['cmd.run_stdout'](cmd,
                                            runas=user,
                                            rstrip=False,
-                                           python_shell=False).splitlines()
+                                           python_shell=False).splitlines(True)
     else:
         if appUser == user:
             cmd = 'crontab -l'
         else:
             cmd = 'crontab -l -u {0}'.format(user)
+        # Preserve line endings
         lines = __salt__['cmd.run_stdout'](cmd,
                                            ignore_retcode=True,
                                            rstrip=False,
-                                           python_shell=False).splitlines()
+                                           python_shell=False).splitlines(True)
     if len(lines) != 0 and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
         del lines[0:3]
-    return '\n'.join(lines)
+    return ''.join(lines)


 def list_tab(user):
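``splitlines(True)`` plus ``''.join`` keeps the crontab's original line endings (including any trailing newline) instead of normalizing everything to ``\n``; a quick illustration with made-up crontab content:

.. code-block:: python

    crontab_text = '* * * * * /usr/local/bin/job.sh\r\n@reboot /usr/local/bin/boot.sh\n'

    # Old behaviour: endings stripped, re-joined with '\n'; the trailing
    # newline and any '\r\n' endings are lost.
    old = '\n'.join(crontab_text.splitlines())

    # New behaviour: keepends=True preserves each line's own ending, and
    # ''.join reassembles the text byte-for-byte.
    new = ''.join(crontab_text.splitlines(True))

    assert new == crontab_text
    assert old != crontab_text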
@@ -1149,7 +1149,7 @@ def comment_line(path,
     if not salt.utils.is_windows():
         pre_user = get_user(path)
         pre_group = get_group(path)
-        pre_mode = __salt__['config.manage_mode'](get_mode(path))
+        pre_mode = salt.utils.normalize_mode(get_mode(path))

     # Create a copy to read from and to use as a backup later
     try:

@@ -1827,7 +1827,7 @@ def replace(path,
     if not salt.utils.is_windows():
         pre_user = get_user(path)
         pre_group = get_group(path)
-        pre_mode = __salt__['config.manage_mode'](get_mode(path))
+        pre_mode = salt.utils.normalize_mode(get_mode(path))

     # Avoid TypeErrors by forcing repl to be a string
     repl = str(repl)

@@ -2195,7 +2195,7 @@ def blockreplace(path,
     perms = {}
     perms['user'] = get_user(path)
     perms['group'] = get_group(path)
-    perms['mode'] = __salt__['config.manage_mode'](get_mode(path))
+    perms['mode'] = salt.utils.normalize_mode(get_mode(path))

     # write new content in the file while avoiding partial reads
     try:

@@ -2899,7 +2899,7 @@ def copy(src, dst, recurse=False, remove_existing=False):
     if not salt.utils.is_windows():
         pre_user = get_user(src)
         pre_group = get_group(src)
-        pre_mode = __salt__['config.manage_mode'](get_mode(src))
+        pre_mode = salt.utils.normalize_mode(get_mode(src))

     try:
         if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):

@@ -3734,7 +3734,7 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
         raise CommandExecutionError('{0} does not exist'.format(name))
     perms['luser'] = cur['user']
     perms['lgroup'] = cur['group']
-    perms['lmode'] = __salt__['config.manage_mode'](cur['mode'])
+    perms['lmode'] = salt.utils.normalize_mode(cur['mode'])

     # Mode changes if needed
     if mode is not None:

@@ -3743,13 +3743,13 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
         if os.path.islink(name) and not follow_symlinks:
             pass
         else:
-            mode = __salt__['config.manage_mode'](mode)
+            mode = salt.utils.normalize_mode(mode)
             if mode != perms['lmode']:
                 if __opts__['test'] is True:
                     ret['changes']['mode'] = mode
                 else:
                     set_mode(name, mode)
-                    if mode != __salt__['config.manage_mode'](get_mode(name)):
+                    if mode != salt.utils.normalize_mode(get_mode(name)):
                         ret['result'] = False
                         ret['comment'].append(
                             'Failed to change mode to {0}'.format(mode)
@@ -3920,6 +3920,7 @@ def check_managed_changes(
         saltenv,
         contents=None,
         skip_verify=False,
+        keep_mode=False,
         **kwargs):
     '''
     Return a dictionary of what changes need to be made for a file

@@ -3956,6 +3957,13 @@ def check_managed_changes(
         if comments:
             __clean_tmp(sfn)
             return False, comments
+        if sfn and source and keep_mode:
+            if _urlparse(source).scheme in ('salt', 'file') \
+                    or source.startswith('/'):
+                try:
+                    mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode)
+                except Exception as exc:
+                    log.warning('Unable to stat %s: %s', sfn, exc)
     changes = check_file_meta(name, sfn, source, source_sum, user,
                               group, mode, saltenv, contents)
     __clean_tmp(sfn)
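``salt.utils.st_mode_to_octal`` is used here (and in the fileclient hunks above) to turn a raw ``st_mode`` into the octal form the rest of the mode-handling code expects; a rough equivalent for illustration (an assumption, not the actual ``salt.utils`` source):

.. code-block:: python

    import os

    def st_mode_to_octal_sketch(mode):
        # Keep only the permission bits (including setuid/setgid/sticky) and
        # render them in octal, e.g. 33188 (0o100644) -> 0o644.
        return oct(mode & 0o7777)

    print(st_mode_to_octal_sketch(os.stat('/etc/hosts').st_mode))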
@@ -4078,8 +4086,8 @@ def check_file_meta(
             and group != lstats['gid']):
         changes['group'] = group
     # Normalize the file mode
-    smode = __salt__['config.manage_mode'](lstats['mode'])
-    mode = __salt__['config.manage_mode'](mode)
+    smode = salt.utils.normalize_mode(lstats['mode'])
+    mode = salt.utils.normalize_mode(mode)
     if mode is not None and mode != smode:
         changes['mode'] = mode
     return changes
@@ -4142,7 +4150,8 @@ def manage_file(name,
                 contents=None,
                 dir_mode=None,
                 follow_symlinks=True,
-                skip_verify=False):
+                skip_verify=False,
+                keep_mode=False):
     '''
     Checks the destination against what was retrieved with get_managed and
     makes the appropriate modifications (if necessary).

@@ -4203,6 +4212,11 @@ def manage_file(name,

         .. versionadded:: 2016.3.0

+    keep_mode : False
+        If ``True``, and the ``source`` is a file from the Salt fileserver (or
+        a local file on the minion), the mode of the destination file will be
+        set to the mode of the source file.
+
     CLI Example:

     .. code-block:: bash

@@ -4233,6 +4247,13 @@ def manage_file(name,
                 'hash_type': htype,
                 'hsum': get_hash(sfn, form=htype)
             }
+        if keep_mode:
+            if _urlparse(source).scheme in ('salt', 'file') \
+                    or source.startswith('/'):
+                try:
+                    mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode)
+                except Exception as exc:
+                    log.warning('Unable to stat %s: %s', sfn, exc)

     # Check changes if the target file exists
     if os.path.isfile(name) or os.path.islink(name):
@@ -25,7 +25,7 @@ def __virtual__():
     '''
     if salt.utils.is_darwin() or salt.utils.is_windows():
         return True
-    return False
+    return (False, 'Module proxy: module only works on Windows or MacOS systems')


 def _get_proxy_osx(function, network_service):

@@ -68,7 +68,7 @@ def __virtual__():
     except Exception:
         return (False, "Module yumpkg: no yum based system detected")

-    enabled = ('amazon', 'xcp', 'xenserver')
+    enabled = ('amazon', 'xcp', 'xenserver', 'virtuozzolinux')

     if os_family == 'redhat' or os_grain in enabled:
         return __virtualname__
@@ -205,6 +205,7 @@ import hashlib
 import os

 # Import salt libs
+import salt.utils
 import salt.utils.gitfs
 import salt.utils.dictupdate
 from salt.exceptions import FileserverConfigError

@@ -332,7 +333,7 @@ class _LegacyGitPillar(object):

         hash_type = getattr(hashlib, opts.get('hash_type', 'md5'))
         hash_str = '{0} {1}'.format(self.branch, self.rp_location)
-        repo_hash = hash_type(hash_str).hexdigest()
+        repo_hash = hash_type(salt.utils.to_bytes(hash_str)).hexdigest()
         rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash)

         if not os.path.isdir(rp_):
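The ``to_bytes`` wrapper matters because Python 3's ``hashlib`` refuses ``str`` input; a quick check (the branch/location string below is made up):

.. code-block:: python

    import hashlib

    hash_str = 'develop /srv/pillar/repo.git'

    hashlib.md5(hash_str.encode('utf-8')).hexdigest()   # fine on py2 and py3
    # hashlib.md5(hash_str).hexdigest()                 # TypeError on Python 3:
    #                                                   # str must be encoded before hashing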
@@ -12,7 +12,7 @@ This module is a concrete implementation of the sql_base
 ext_pillar for SQLCipher.

 :maturity: new
-:depends: pysqlcipher
+:depends: pysqlcipher (for py2) or pysqlcipher3 (for py3)
 :platform: all

 Configuring the sqlcipher ext_pillar
@ -23,6 +23,7 @@ import salt.utils.jid
|
|||||||
import salt.exceptions
|
import salt.exceptions
|
||||||
|
|
||||||
# Import 3rd-party libs
|
# Import 3rd-party libs
|
||||||
|
import msgpack
|
||||||
import salt.ext.six as six
|
import salt.ext.six as six
|
||||||
|
|
||||||
|
|
||||||
@@ -478,3 +479,47 @@ def get_endtime(jid):
with salt.utils.fopen(etpath, 'r') as etfile:
endtime = etfile.read().strip('\n')
return endtime
+
+
+def _reg_dir():
+    '''
+    Return the reg_dir for the given job id
+    '''
+    return os.path.join(__opts__['cachedir'], 'thorium')
+
+
+def save_reg(data):
+    '''
+    Save the register to msgpack files
+    '''
+    reg_dir = _reg_dir()
+    regfile = os.path.join(reg_dir, 'register')
+    try:
+        if not os.path.exists(reg_dir):
+            os.makedirs(reg_dir)
+    except OSError as exc:
+        if exc.errno == errno.EEXIST:
+            pass
+        else:
+            raise
+    try:
+        with salt.utils.fopen(regfile, 'a') as fh_:
+            msgpack.dump(data, fh_)
+            fh_.close()
+    except:
+        log.error('Could not write to msgpack file {0}'.format(__opts__['outdir']))
+        raise
+
+
+def load_reg():
+    '''
+    Load the register from msgpack files
+    '''
+    reg_dir = _reg_dir()
+    regfile = os.path.join(reg_dir, 'register')
+    try:
+        with salt.utils.fopen(regfile, 'r') as fh_:
+            return msgpack.load(fh_)
+    except:
+        log.error('Could not write to msgpack file {0}'.format(__opts__['outdir']))
+        raise
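For context on the two returner functions added above, here is a minimal stand-alone sketch of the msgpack round-trip that ``save_reg``/``load_reg`` perform; the file path and payload are invented for illustration and are not part of the commit.

.. code-block:: python

    # Minimal sketch (assumed path and data) of the msgpack round-trip
    # performed by save_reg()/load_reg() above.
    import msgpack

    register = {'foo': {'bar': 'baz'}}

    # save_reg() packs the register dict into the cache file
    with open('/tmp/thorium_register.mp', 'wb') as fh_:
        msgpack.dump(register, fh_)

    # load_reg() unpacks it again on the next run
    with open('/tmp/thorium_register.mp', 'rb') as fh_:
        data = msgpack.load(fh_)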
@@ -469,7 +469,7 @@ def file(name,
Overrides the default backup mode for the user's crontab.
'''
# Initial set up
-mode = __salt__['config.manage_mode']('0600')
+mode = salt.utils.normalize_mode('0600')
owner, group, crontab_dir = _get_cron_info()

cron_path = salt.utils.mkstemp()
@@ -575,8 +575,8 @@ def _check_dir_meta(name,
and group != stats.get('gid')):
changes['group'] = group
# Normalize the dir mode
-smode = __salt__['config.manage_mode'](stats['mode'])
-mode = __salt__['config.manage_mode'](mode)
+smode = salt.utils.normalize_mode(stats['mode'])
+mode = salt.utils.normalize_mode(mode)
if mode is not None and mode != smode:
changes['mode'] = mode
return changes
@@ -839,7 +839,7 @@ def symlink(
name = os.path.expanduser(name)

# Make sure that leading zeros stripped by YAML loader are added back
-mode = __salt__['config.manage_mode'](mode)
+mode = salt.utils.normalize_mode(mode)

user = _test_owner(kwargs, user=user)
ret = {'name': name,
@@ -1254,8 +1254,18 @@ def managed(name,
is running as on the minion On Windows, this is ignored

mode
-    The permissions to set on this file, aka 644, 0775, 4664. Not supported
-    on Windows
+    The mode to set on this file, e.g. ``644``, ``0775``, or ``4664``.
+
+    .. note::
+        This option is **not** supported on Windows.
+
+    .. versionchanged:: Carbon
+        This option can be set to ``keep``, and Salt will keep the mode
+        from the Salt fileserver. This is only supported when the
+        ``source`` URL begins with ``salt://``, or for files local to the
+        minion. Because the ``source`` option cannot be used with any of
+        the ``contents`` options, setting the ``mode`` to ``keep`` is also
+        incompatible with the ``contents`` options.

template
If this setting is applied then the named templating engine will be
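As background for the ``keep`` behaviour documented above (illustration only, not part of the diff): the mode that gets preserved is simply the permission bits of the source file, which for a local source can be read with the standard library.

.. code-block:: python

    # Hypothetical local source path, used only for illustration.
    import os
    import stat

    src = '/srv/salt/files/app.conf'
    source_mode = oct(stat.S_IMODE(os.stat(src).st_mode))  # e.g. '0644'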
@@ -1270,7 +1280,8 @@ def managed(name,
dir_mode
    If directories are to be created, passing this option specifies the
    permissions for those directories. If this is not set, directories
-    will be assigned permissions from the 'mode' argument.
+    will be assigned permissions by adding the execute bit to the mode of
+    the files.

replace : True
    If set to ``False`` and the file already exists, the file will not be
@@ -1478,9 +1489,23 @@ def managed(name,
'name': name,
'result': True}

-content_sources = (contents, contents_pillar, contents_grains)
+if mode is not None and salt.utils.is_windows():
+    return _error(ret, 'The \'mode\' option is not supported on Windows')
+
+try:
+    keep_mode = mode.lower() == 'keep'
+    if keep_mode:
+        # We're not hard-coding the mode, so set it to None
+        mode = None
+except AttributeError:
+    keep_mode = False
+
+# Make sure that any leading zeros stripped by YAML loader are added back
+mode = salt.utils.normalize_mode(mode)
+
contents_count = len(
-    [x for x in content_sources if x is not None]
+    [x for x in (contents, contents_pillar, contents_grains)
+     if x is not None]
)

if source and contents_count > 0:
@@ -1489,6 +1514,12 @@ def managed(name,
'\'source\' cannot be used in combination with \'contents\', '
'\'contents_pillar\', or \'contents_grains\''
)
+elif (mode or keep_mode) and contents_count > 0:
+    return _error(
+        ret,
+        'Mode management cannot be used in combination with \'contents\', '
+        '\'contents_pillar\', or \'contents_grains\''
+    )
elif contents_count > 1:
return _error(
ret,
@@ -1608,9 +1639,6 @@ def managed(name,
ret['comment'] = 'Error while applying template on contents'
return ret

-# Make sure that leading zeros stripped by YAML loader are added back
-mode = __salt__['config.manage_mode'](mode)
-
if not name:
return _error(ret, 'Must provide name to file.exists')
user = _test_owner(kwargs, user=user)
@@ -1679,6 +1707,7 @@ def managed(name,
__env__,
contents,
skip_verify,
+keep_mode,
**kwargs
)
else:
@@ -1766,7 +1795,8 @@ def managed(name,
contents,
dir_mode,
follow_symlinks,
-skip_verify)
+skip_verify,
+keep_mode)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
@@ -1823,7 +1853,8 @@ def managed(name,
contents,
dir_mode,
follow_symlinks,
-skip_verify)
+skip_verify,
+keep_mode)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
@@ -2044,8 +2075,8 @@ def directory(name,
file_mode = dir_mode

# Make sure that leading zeros stripped by YAML loader are added back
-dir_mode = __salt__['config.manage_mode'](dir_mode)
-file_mode = __salt__['config.manage_mode'](file_mode)
+dir_mode = salt.utils.normalize_mode(dir_mode)
+file_mode = salt.utils.normalize_mode(file_mode)

u_check = _check_user(user, group)
if u_check:
@@ -2291,16 +2322,31 @@ def recurse(name,
salt is running as on the minion. On Windows, this is ignored

dir_mode
-    The permissions mode to set on any directories created. Not supported on
-    Windows
+    The mode to set on any directories created.
+
+    .. note::
+        This option is **not** supported on Windows.

file_mode
-    The permissions mode to set on any files created. Not supported on
-    Windows
+    The mode to set on any files created.
+
+    .. note::
+        This option is **not** supported on Windows.
+
+    .. versionchanged:: Carbon
+        This option can be set to ``keep``, and Salt will keep the mode
+        from the Salt fileserver. This is only supported when the
+        ``source`` URL begins with ``salt://``, or for files local to the
+        minion. Because the ``source`` option cannot be used with any of
+        the ``contents`` options, setting the ``mode`` to ``keep`` is also
+        incompatible with the ``contents`` options.

sym_mode
-    The permissions mode to set on any symlink created. Not supported on
-    Windows
+    The mode to set on any symlink created.
+
+    .. note::
+        This option is **not** supported on Windows.

template
If this setting is applied then the named templating engine will be
@@ -2408,9 +2454,22 @@ def recurse(name,
)
return ret

+if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \
+        and salt.utils.is_windows():
+    return _error(ret, 'mode management is not supported on Windows')
+
# Make sure that leading zeros stripped by YAML loader are added back
-dir_mode = __salt__['config.manage_mode'](dir_mode)
-file_mode = __salt__['config.manage_mode'](file_mode)
+dir_mode = salt.utils.normalize_mode(dir_mode)
+
+try:
+    keep_mode = file_mode.lower() == 'keep'
+    if keep_mode:
+        # We're not hard-coding the mode, so set it to None
+        file_mode = None
+except AttributeError:
+    keep_mode = False
+
+file_mode = salt.utils.normalize_mode(file_mode)

u_check = _check_user(user, group)
if u_check:
@@ -2509,7 +2568,7 @@ def recurse(name,
source=source,
user=user,
group=group,
-mode=file_mode,
+mode='keep' if keep_mode else file_mode,
template=template,
makedirs=True,
context=context,
@@ -4777,7 +4836,11 @@ def serialize(name,
salt is running as on the minion

mode
-    The permissions to set on this file, aka 644, 0775, 4664
+    The permissions to set on this file, e.g. ``644``, ``0775``, or
+    ``4664``.
+
+    .. note::
+        This option is **not** supported on Windows.

backup
    Overrides the default backup mode for this specific file.
@@ -4914,6 +4977,9 @@ def serialize(name,

contents += '\n'

+# Make sure that any leading zeros stripped by YAML loader are added back
+mode = salt.utils.normalize_mode(mode)
+
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name=name,
@@ -18,6 +18,7 @@ import traceback

# Import Salt libs
import salt.state
+import salt.loader
import salt.payload
from salt.exceptions import SaltRenderError

@@ -43,7 +44,17 @@ class ThorState(salt.state.HighState):
opts['file_client'] = 'local'
self.opts = opts
salt.state.HighState.__init__(self, self.opts, loader='thorium')
-self.state.inject_globals = {'__reg__': {}}
+self.returners = salt.loader.returners(self.opts, {})
+self.reg_ret = self.opts.get('register_returner', None)
+if self.reg_ret is not None:
+    try:
+        regdata = self.returners['{0}.load_reg'.format(self.reg_ret)]()
+    except Exception as exc:
+        log.error(exc)
+        regdata = {}
+
+    self.state.inject_globals = {'__reg__': regdata}
self.event = salt.utils.event.get_master_event(
self.opts,
self.opts['sock_dir'])
@@ -174,4 +185,6 @@ class ThorState(salt.state.HighState):
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
+if self.reg_ret is not None:
+    self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
@@ -1805,6 +1805,34 @@ def check_state_result(running, recurse=False):
return ret


+def st_mode_to_octal(mode):
+    '''
+    Convert the st_mode value from a stat(2) call (as returned from os.stat())
+    to an octal mode.
+    '''
+    try:
+        return oct(mode)[-4:]
+    except (TypeError, IndexError):
+        return ''
+
+
+def normalize_mode(mode):
+    '''
+    Return a mode value, normalized to a string and containing a leading zero
+    if it does not have one.
+
+    Allow "keep" as a valid mode (used by file state/module to preserve mode
+    from the Salt fileserver in file states).
+    '''
+    if mode is None:
+        return None
+    if not isinstance(mode, six.string_types):
+        mode = str(mode)
+    # Strip any quotes and initial zeroes, then zero-pad it up to 4 characters.
+    # This ensures that something like '00644' is normalized to '0644'
+    return mode.strip('"').strip('\'').lstrip('0').zfill(4)
+
+
def test_mode(**kwargs):
'''
Examines the kwargs passed and returns True if any kwarg which matching
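To make the two helpers added above concrete, here are a few illustrative calls and the values they are expected to return (not part of the commit):

.. code-block:: python

    # Expected behaviour of the helpers added above (illustrative only).
    import os

    normalize_mode('644')      # '0644'  -- leading zero added back
    normalize_mode(644)        # '0644'  -- non-strings are coerced to str first
    normalize_mode('"00644"')  # '0644'  -- quotes and extra zeroes stripped
    normalize_mode('keep')     # 'keep'  -- passed through for the file state
    normalize_mode(None)       # None

    st_mode_to_octal(os.stat('/etc/hosts').st_mode)  # e.g. '0644'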
@@ -940,7 +940,7 @@ class GitPython(GitProvider):
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found
-return None, None
+return None, None, None
blob = None
depth = 0
while True:
@@ -968,7 +968,9 @@ class GitPython(GitProvider):
except KeyError:
# File not found or repo_path points to a directory
break
-return blob, blob.hexsha if blob is not None else blob
+if isinstance(blob, git.Blob):
+    return blob, blob.hexsha, blob.mode
+return None, None, None

def get_tree(self, tgt_env):
'''
@@ -1480,29 +1482,33 @@ class Pygit2(GitProvider):
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
-return None, None
+return None, None, None
blob = None
+mode = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
break
try:
-if stat.S_ISLNK(tree[path].filemode):
+entry = tree[path]
+mode = entry.filemode
+if stat.S_ISLNK(mode):
# Path is a symlink. The blob data corresponding to this
# path's object ID will be the target of the symlink. Follow
# the symlink and set path to the location indicated
# in the blob data.
-link_tgt = self.repo[tree[path].oid].data
+link_tgt = self.repo[entry.oid].data
path = os.path.normpath(
os.path.join(os.path.dirname(path), link_tgt)
)
else:
-oid = tree[path].oid
-blob = self.repo[oid]
+blob = self.repo[entry.oid]
except KeyError:
break
-return blob, blob.hex if blob is not None else blob
+if isinstance(blob, pygit2.Blob):
+    return blob, blob.hex, mode
+return None, None, None

def get_tree(self, tgt_env):
'''
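The ``filemode`` captured here is the raw POSIX-style mode word stored in the git tree entry, so the usual ``stat`` flag helpers apply; a short illustration (not from the commit):

.. code-block:: python

    import stat

    # Git stores symlinks with the 0o120000 file type and regular blobs with
    # 0o100644 / 0o100755, so S_ISLNK() distinguishes the two cases.
    stat.S_ISLNK(0o120000)  # True  -> follow the link target
    stat.S_ISLNK(0o100644)  # False -> treat the entry as a blob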
@@ -1827,8 +1833,9 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found
-return None, None
+return None, None, None
blob = None
+mode = None
depth = 0
while True:
depth += 1
@@ -1855,7 +1862,9 @@ class Dulwich(GitProvider):  # pylint: disable=abstract-method
break
except KeyError:
break
-return blob, blob.sha().hexdigest() if blob is not None else blob
+if isinstance(blob, dulwich.objects.Blob):
+    return blob, blob.sha().hexdigest(), mode
+return None, None, None

def get_conf(self):
'''
@@ -2697,10 +2706,24 @@ class GitFS(GitBase):
if repo.root(tgt_env):
repo_path = os.path.join(repo.root(tgt_env), repo_path)

-blob, blob_hexsha = repo.find_file(repo_path, tgt_env)
+blob, blob_hexsha, blob_mode = repo.find_file(repo_path, tgt_env)
if blob is None:
continue

+def _add_file_stat(fnd, mode):
+    '''
+    Add the mode to the return dict. In other fileserver backends
+    we stat the file to get its mode, and add the stat result
+    (passed through list() for better serialization) to the 'stat'
+    key in the return dict. However, since we aren't using the
+    stat result for anything but the mode at this time, we can
+    avoid unnecessary work by just manually creating the list and
+    not running an os.stat() on all files in the repo.
+    '''
+    if mode is not None:
+        fnd['stat'] = [mode]
+    return fnd
+
salt.fileserver.wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
with salt.utils.fopen(blobshadest, 'r') as fp_:
@@ -2708,7 +2731,7 @@ class GitFS(GitBase):
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
-return fnd
+return _add_file_stat(fnd, blob_mode)
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write('')
for filename in glob.glob(hashes_glob):
@@ -2726,7 +2749,7 @@ class GitFS(GitBase):
pass
fnd['rel'] = path
fnd['path'] = dest
-return fnd
+return _add_file_stat(fnd, blob_mode)

# No matching file was found in tgt_env. Return a dict with empty paths
# so the calling function knows the file could not be found.
@@ -2783,7 +2806,7 @@ class GitFS(GitBase):
load.pop('env')

if not all(x in load for x in ('path', 'saltenv')):
-return ''
+return '', None
ret = {'hash_type': self.opts['hash_type']}
relpath = fnd['rel']
path = fnd['path']
@@ -93,7 +93,7 @@ def _generate_minion_id():
'::1.*', 'ipv6-.*', 'fe00::.*', 'fe02::.*', '1.0.0.*.ip6.arpa']

def append(self, p_object):
-if p_object not in self and not self.filter(p_object):
+if p_object and p_object not in self and not self.filter(p_object):
super(self.__class__, self).append(p_object)
return self

@@ -111,7 +111,7 @@ def _generate_minion_id():
def first(self):
return self and self[0] or None

-hosts = DistinctList().append(platform.node()).append(socket.gethostname()).append(socket.getfqdn())
+hosts = DistinctList().append(socket.getfqdn()).append(platform.node()).append(socket.gethostname())
if not hosts:
try:
for a_nfo in socket.getaddrinfo(hosts.first(), None, socket.AF_INET,
@@ -38,6 +38,7 @@ import salt.utils.jid
from salt.utils import kinds
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.validate.path import is_writeable
+from salt.utils.verify import verify_files
import salt.exceptions

# Import 3rd-party libs
@@ -595,6 +596,10 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):

# Setup extended logging right before the last step
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
+# Setup the console and log file configuration before the MP logging
+# listener because the MP logging listener may need that config.
+self._mixin_after_parsed_funcs.append(self.__setup_logfile_logger_config)
+self._mixin_after_parsed_funcs.append(self.__setup_console_logger_config)
# Setup the multiprocessing log queue listener if enabled
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_listener)
# Setup the console as the last _mixin_after_parsed_func to run
@@ -640,7 +645,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# defined default
self.options.log_level = self._default_logging_level_

-def setup_logfile_logger(self):
+def __setup_logfile_logger_config(self, *args):  # pylint: disable=unused-argument
if self._logfile_loglevel_config_setting_name_ in self.config and not \
self.config.get(self._logfile_loglevel_config_setting_name_):
# Remove it from config so it inherits from log_level
@@ -673,12 +678,23 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
cli_log_path,
self.config.get(
# From the config setting
-self._logfile_config_setting_name_,
-# From the default setting
-self._default_logging_logfile_
+self._logfile_config_setting_name_
)
)
+
+if self.config['verify_env']:
+    # Verify the logfile if it was explicitly set but do not try to
+    # verify the default
+    if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
+        # Logfile is not using Syslog, verify
+        current_umask = os.umask(0o027)
+        verify_files([logfile], self.config['user'])
+        os.umask(current_umask)
+
+if logfile is None:
+    # Use the default setting if the logfile wasn't explicitly set
+    logfile = self._default_logging_logfile_
+
cli_log_file_fmt = 'cli_{0}_log_file_fmt'.format(
self.get_prog_name().replace('-', '_')
)
@@ -782,6 +798,18 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# If we haven't changed the logfile path and it's not writeable,
# salt will fail once we try to setup the logfile logging.

+# Save the settings back to the configuration
+self.config[self._logfile_config_setting_name_] = logfile
+self.config[self._logfile_loglevel_config_setting_name_] = loglevel
+self.config['log_fmt_logfile'] = log_file_fmt
+self.config['log_datefmt_logfile'] = log_file_datefmt
+
+def setup_logfile_logger(self):
+    logfile = self.config[self._logfile_config_setting_name_]
+    loglevel = self.config[self._logfile_loglevel_config_setting_name_]
+    log_file_fmt = self.config['log_fmt_logfile']
+    log_file_datefmt = self.config['log_datefmt_logfile']
+
log.setup_logfile_logger(
logfile,
loglevel,
@@ -804,11 +832,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
self._get_mp_logging_listener_queue()
)

-def __setup_console_logger(self, *args):  # pylint: disable=unused-argument
-# If daemon is set force console logger to quiet
-if getattr(self.options, 'daemon', False) is True:
-return
+def __setup_console_logger_config(self, *args):  # pylint: disable=unused-argument

# Since we're not going to be a daemon, setup the console logger
cli_log_fmt = 'cli_{0}_log_fmt'.format(
self.get_prog_name().replace('-', '_')
@@ -849,8 +873,20 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
)
)
)
+
+# Save the settings back to the configuration
+self.config['log_fmt_console'] = logfmt
+self.config['log_datefmt_console'] = datefmt
+
+def __setup_console_logger(self, *args):  # pylint: disable=unused-argument
+    # If daemon is set force console logger to quiet
+    if getattr(self.options, 'daemon', False) is True:
+        return
+
log.setup_console_logger(
-self.config['log_level'], log_format=logfmt, date_format=datefmt
+self.config['log_level'],
+log_format=self.config['log_fmt_console'],
+date_format=self.config['log_datefmt_console']
)
for name, level in six.iteritems(self.config['log_granular_levels']):
log.set_logger_level(name, level)
@@ -4,12 +4,27 @@ from __future__ import absolute_import

# Import python libs
import logging
-import pythoncom
import threading

+try:
+    import pythoncom
+    HAS_LIBS = True
+except ImportError:
+    HAS_LIBS = False
+
log = logging.getLogger(__name__)


+def __virtual__():
+    '''
+    Only load if required libraries exist
+    '''
+    if not HAS_LIBS:
+        return False
+    else:
+        return True
+
+
class Com(object):
def __init__(self):
self.need_com_init = not self._is_main_thread()
@@ -62,7 +62,6 @@ import salt.utils.process
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
-from salt.utils.process import SignalHandlingMultiprocessingProcess
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError

@@ -76,7 +75,11 @@ except ImportError:
import yaml
import msgpack
import salt.ext.six as six
-import salt.ext.six.moves.socketserver as socketserver  # pylint: disable=no-name-in-module
+try:
+    import salt.ext.six.moves.socketserver as socketserver
+except ImportError:
+    import socketserver
+
if salt.utils.is_windows():
import win32api
@@ -165,7 +168,7 @@ def get_unused_localhost_port():
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
-if port in (54505, 54506, 64505, 64506, 64510, 64511):
+if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
@@ -401,7 +404,7 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
'''
Start the daemon subprocess
'''
-self._process = SignalHandlingMultiprocessingProcess(
+self._process = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._start, args=(self._running,))
self._process.start()
self._running.set()
@@ -436,6 +439,7 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
pass

# Let's begin the shutdown routines
+if not sys.platform.startswith('win'):
if terminal.poll() is None:
try:
log.info('Sending SIGINT to %s %s DAEMON', self.display_name, self.__class__.__name__)
@@ -505,6 +509,9 @@ class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
# Lets log and kill any child processes which salt left behind
for child in children[:]:
try:
+if sys.platform.startswith('win'):
+    child.kill()
+else:
child.send_signal(signal.SIGKILL)
log.info('Salt left behind the following child process: %s', child.as_dict())
try:
@@ -580,6 +587,10 @@ class SaltMinion(SaltDaemonScriptBase):
return script_args

def get_check_ports(self):
+if salt.utils.is_windows():
+    return set([self.config['tcp_pub_port'],
+                self.config['tcp_pull_port']])
+else:
return set([self.config['id']])

@@ -978,40 +989,51 @@ class TestDaemon(object):
running_tests_user = win32api.GetUserName()
else:
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
-master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
-master_opts['user'] = running_tests_user
tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
known_hosts.write('')

+# This master connects to syndic_master via a syndic
+master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
master_opts['known_hosts_file'] = tests_known_hosts_file
-master_opts['conf_dir'] = TMP_CONF_DIR
+master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
+master_opts['user'] = running_tests_user
+master_opts['config_dir'] = TMP_CONF_DIR
+master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
+master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')

-minion_config_path = os.path.join(CONF_DIR, 'minion')
-minion_opts = salt.config._read_conf_file(minion_config_path)
-minion_opts['user'] = running_tests_user
-minion_opts['conf_dir'] = TMP_CONF_DIR
-minion_opts['root_dir'] = master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
-
-sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
-sub_minion_opts['user'] = running_tests_user
-sub_minion_opts['conf_dir'] = TMP_SUB_MINION_CONF_DIR
-sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
-
-syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
-syndic_master_opts['user'] = running_tests_user
-syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
-syndic_master_opts['conf_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
-
-# The syndic config file has an include setting to include the master configuration
+# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(master_opts)
# Let's update with the syndic configuration
syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
-# Lets remove the include setting
-syndic_opts.pop('include')
-syndic_opts['user'] = running_tests_user
-syndic_opts['conf_dir'] = TMP_SYNDIC_MINION_CONF_DIR
+syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
+syndic_opts['config_dir'] = TMP_SYNDIC_MINION_CONF_DIR
+
+# This minion connects to master
+minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'minion'))
+minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
+minion_opts['user'] = running_tests_user
+minion_opts['config_dir'] = TMP_CONF_DIR
+minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
+minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
+
+# This sub_minion also connects to master
+sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
+sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
+sub_minion_opts['user'] = running_tests_user
+sub_minion_opts['config_dir'] = TMP_SUB_MINION_CONF_DIR
+sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
+sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
+
+# This is the master of masters
+syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
+syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
+syndic_master_opts['user'] = running_tests_user
+syndic_master_opts['config_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
+syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
+syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')

if transport == 'raet':
master_opts['transport'] = 'raet'
@@ -1776,12 +1798,12 @@ class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixi
except OSError:
os.chdir(INTEGRATION_TEST_DIR)

-def run_salt(self, arg_str, with_retcode=False, catch_stderr=False):
+def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=15):  # pylint: disable=W0221
'''
Execute salt
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
-return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr)
+return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)

def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False):
'''
@@ -5,14 +5,15 @@
# Import Python libs
from __future__ import absolute_import

-# Import Salt Libs
-import integration
-
# Import Salt Testing Libs
from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

+# Import Salt Libs
+import integration
+import salt.ext.six as six
+
+
class BatchTest(integration.ShellCase):
'''
@@ -37,9 +38,11 @@ class BatchTest(integration.ShellCase):
'retcode:',
' 0',
' batch testing']
-ret = sorted(ret)
-cmd = sorted(self.run_salt('\'*\' test.echo \'batch testing\' -b 50%'))
-self.assertListEqual(cmd, ret)
+cmd = self.run_salt('\'*\' test.echo \'batch testing\' -b 50%')
+if six.PY3:
+    self.assertCountEqual(cmd, ret)
+else:
+    self.assertListEqual(sorted(cmd), sorted(ret))

def test_batch_run_number(self):
'''
@@ -57,8 +60,11 @@ class BatchTest(integration.ShellCase):
' True',
'retcode:',
' 0']
-cmd = sorted(self.run_salt('\'*\' test.ping --batch-size 2'))
-self.assertListEqual(cmd, sorted(ret))
+cmd = self.run_salt('\'*\' test.ping --batch-size 2')
+if six.PY3:
+    self.assertCountEqual(cmd, ret)
+else:
+    self.assertListEqual(sorted(cmd), sorted(ret))

def test_batch_run_grains_targeting(self):
'''
@@ -86,8 +92,11 @@ class BatchTest(integration.ShellCase):
os_grain = item

os_grain = os_grain.strip()
-cmd = sorted(self.run_salt('-G \'os:{0}\' -b 25% test.ping'.format(os_grain)))
-self.assertListEqual(cmd, sorted(ret))
+cmd = self.run_salt('-G \'os:{0}\' -b 25% test.ping'.format(os_grain))
+if six.PY3:
+    self.assertCountEqual(cmd, ret)
+else:
+    self.assertListEqual(sorted(cmd), sorted(ret))


if __name__ == '__main__':
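The switch to ``assertCountEqual`` on Python 3 works because that assertion compares the two sequences as multisets (same elements, same counts, any order), so the explicit ``sorted()`` calls are only needed on the Python 2 branch. A small illustration (not part of the commit):

.. code-block:: python

    import unittest

    class OrderInsensitiveDemo(unittest.TestCase):
        def test_order_does_not_matter(self):
            # Same elements in a different order: assertCountEqual passes,
            # while assertListEqual would need both lists sorted first.
            self.assertCountEqual(['retcode:', ' 0', 'minion:'],
                                  ['minion:', 'retcode:', ' 0'])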
@@ -1,28 +1,32 @@
+# Master Settings
+# Connects to syndic_master via syndic
id: master
-user: ubuntu
interface: 127.0.0.1
publish_port: 64505
ret_port: 64506
worker_threads: 3
-root_dir: /tmp/salttest
-pidfile: masterpid
-pki_dir: pki
-cachedir: cache
+pidfile: master.pid
+sock_dir: master_sock
timeout: 3
-sock_dir: .salt-unix
open_mode: True
-syndic_master: localhost
fileserver_list_cache_time: 0
+file_buffer_size: 8192
pillar_opts: True
+log_file: master.log
+log_level_logfile: debug
+key_logfile: key.log
+token_file: /tmp/ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk
+
+# These settings needed for tests on Windows which defaults
+# to ipc_mode: tcp
+tcp_master_pub_port: 64512
+tcp_master_pull_port: 64513
+tcp_master_publish_pull: 64514
+tcp_master_workers: 64515
+
peer:
'.*':
- 'test.*'
-log_file: master
-log_level_logfile: debug
-key_logfile: key
-token_file: /tmp/ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk
-
-file_buffer_size: 8192
-
ext_pillar:
- git: master https://github.com/saltstack/pillar1.git
@@ -1,17 +1,16 @@
# basic config
+# Connects to master
master: localhost
master_port: 64506
-root_dir: /tmp/salttest
-pki_dir: pki
-id: minion
-cachedir: cachedir
+interface: 127.0.0.1
+tcp_pub_port: 64510
+tcp_pull_port: 64511
sock_dir: minion_sock
-#acceptance_wait_time: = 1
+id: minion
open_mode: True
-log_file: minion
+log_file: minion.log
log_level_logfile: debug
-#loop_interval: 0.05
-config_dir: /tmp/salt-tests-tmpdir
+pidfile: minion.pid

# module extension
test.foo: baz
@@ -1,15 +1,16 @@
# basic config
+# Connects to master
master: localhost
+interface: 127.0.0.1
master_port: 64506
-root_dir: /tmp/subsalttest
-pki_dir: pki
-id: sub_minion
-cachedir: cachedir
+tcp_pub_port: 64520
+tcp_pull_port: 64521
sock_dir: sub_minion_sock
-#acceptance_wait_time: 1
+id: sub_minion
open_mode: True
-log_file: sub_minion
+log_file: sub_minion.log
log_level_logfile: debug
+pidfile: sub_minion.pid

# module extension
test.foo: baz
@@ -33,6 +34,3 @@ grains:
- jamie
- zoe

-ipc_mode: tcp
-tcp_pub_port: 64510
-tcp_pull_port: 64511
|
@ -1,13 +1,9 @@
|
|||||||
include: master
|
# Syndic Settings
|
||||||
# syndic basic config
|
id: syndic
|
||||||
# same config as master ./except the syndic bits
|
interface: 127.0.0.1
|
||||||
# in the TestCase we add at the top of the configfile the content of ./master
|
|
||||||
# to avoid duplication
|
|
||||||
order_masters: True
|
|
||||||
acceptance_wait_time: 1
|
|
||||||
syndic_log_file: osyndic.log
|
|
||||||
log_level_logfile: debug
|
|
||||||
syndic_pidfile: osyndic.pid
|
|
||||||
syndic_master: localhost
|
syndic_master: localhost
|
||||||
syndic_master_port: 54506
|
syndic_master_port: 54506
|
||||||
id: syndic
|
syndic_log_file: syndic.log
|
||||||
|
syndic_pidfile: syndic.pid
|
||||||
|
tcp_pub_port: 64510
|
||||||
|
tcp_pull_port: 64511
|
||||||
|
@ -1,19 +1,25 @@
|
|||||||
|
# Master Settings
|
||||||
|
# This is the Master of Masters
|
||||||
id: syndic_master
|
id: syndic_master
|
||||||
interface: 127.0.0.1
|
interface: 127.0.0.1
|
||||||
publish_port: 54505
|
publish_port: 54505
|
||||||
ret_port: 54506
|
ret_port: 54506
|
||||||
worker_threads: 3
|
worker_threads: 3
|
||||||
root_dir: /tmp/saltsyndictest
|
pidfile: syndic_master.pid
|
||||||
pidfile: syndicmasterpid
|
sock_dir: syndic_master_sock
|
||||||
pki_dir: pki
|
|
||||||
cachedir: cache
|
|
||||||
timeout: 1
|
timeout: 1
|
||||||
sock_dir: .salt-unix-syndic
|
|
||||||
open_mode: True
|
open_mode: True
|
||||||
order_masters: True
|
|
||||||
fileserver_list_cache_time: 0
|
fileserver_list_cache_time: 0
|
||||||
pillar_opts: True
|
pillar_opts: True
|
||||||
tcp_master_publish_pull: 33305
|
log_file: syndic_master.log
|
||||||
tcp_master_workers: 33306
|
|
||||||
log_file: syndic_master
|
|
||||||
log_level_logfile: debug
|
log_level_logfile: debug
|
||||||
|
|
||||||
|
# These settings needed for tests on Windows which defaults
|
||||||
|
# to ipc_mode: tcp
|
||||||
|
tcp_master_pub_port: 54512
|
||||||
|
tcp_master_pull_port: 54513
|
||||||
|
tcp_master_publish_pull: 54514
|
||||||
|
tcp_master_workers: 54515
|
||||||
|
|
||||||
|
# Syndic Settings
|
||||||
|
order_masters: True
|
||||||
|
@@ -24,7 +24,7 @@ class MinionTimeoutTestCase(integration.ShellCase):
'''
# Launch the command
sleep_length = 30
-ret = self.run_salt('minion test.sleep {0}'.format(sleep_length))
+ret = self.run_salt('minion test.sleep {0}'.format(sleep_length), timeout=45)
self.assertTrue(isinstance(ret, list), 'Return is not a list. Minion'
' may have returned error: {0}'.format(ret))
self.assertTrue('True' in ret[1], 'Minion did not return True after '
|
@ -50,17 +50,17 @@ class ConfigTest(integration.ModuleCase):
|
|||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['"775"']), '0775')
|
self.run_function('config.manage_mode', ['"775"']), '0775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['"1775"']), '01775')
|
self.run_function('config.manage_mode', ['"1775"']), '1775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['"0775"']), '0775')
|
self.run_function('config.manage_mode', ['"0775"']), '0775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['"01775"']), '01775')
|
self.run_function('config.manage_mode', ['"01775"']), '1775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['"0"']), '0000')
|
self.run_function('config.manage_mode', ['"0"']), '0000')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['775']), '0775')
|
self.run_function('config.manage_mode', ['775']), '0775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['1775']), '01775')
|
self.run_function('config.manage_mode', ['1775']), '1775')
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function('config.manage_mode', ['0']), '0000')
|
self.run_function('config.manage_mode', ['0']), '0000')
|
||||||
|
|
||||||
@ -74,12 +74,6 @@ class ConfigTest(integration.ModuleCase):
|
|||||||
'config.option',
|
'config.option',
|
||||||
['master_port']),
|
['master_port']),
|
||||||
64506)
|
64506)
|
||||||
# Master conf opt
|
|
||||||
self.assertEqual(
|
|
||||||
self.run_function(
|
|
||||||
'config.option',
|
|
||||||
['syndic_master']),
|
|
||||||
'localhost')
|
|
||||||
# pillar conf opt
|
# pillar conf opt
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
self.run_function(
|
self.run_function(
|
||||||
|
@@ -56,6 +56,66 @@ FILEPILLARDEF = '/tmp/filepillar-defaultvalue'
FILEPILLARGIT = '/tmp/filepillar-bar'


+def _test_managed_file_mode_keep_helper(testcase, local=False):
+    '''
+    DRY helper function to run the same test with a local or remote path
+    '''
+    rel_path = 'grail/scene33'
+    name = os.path.join(integration.TMP, os.path.basename(rel_path))
+    grail_fs_path = os.path.join(integration.FILES, 'file', 'base', rel_path)
+    grail = 'salt://' + rel_path if not local else grail_fs_path
+
+    # Get the current mode so that we can put the file back the way we
+    # found it when we're done.
+    grail_fs_mode = os.stat(grail_fs_path).st_mode
+    initial_mode = 504    # 0770 octal
+    new_mode_1 = 384      # 0600 octal
+    new_mode_2 = 420      # 0644 octal
+
+    # Set the initial mode, so we can be assured that when we set the mode
+    # to "keep", we're actually changing the permissions of the file to the
+    # new mode.
+    ret = testcase.run_state(
+        'file.managed',
+        name=name,
+        mode=oct(initial_mode),
+        source=grail,
+    )
+    testcase.assertSaltTrueReturn(ret)
+    try:
+        # Update the mode on the fileserver (pass 1)
+        os.chmod(grail_fs_path, new_mode_1)
+        ret = testcase.run_state(
+            'file.managed',
+            name=name,
+            mode='keep',
+            source=grail,
+        )
+        testcase.assertSaltTrueReturn(ret)
+        managed_mode = stat.S_IMODE(os.stat(name).st_mode)
+        testcase.assertEqual(oct(managed_mode), oct(new_mode_1))
+        # Update the mode on the fileserver (pass 2)
+        # This assures us that if the file in file_roots was originally set
+        # to the same mode as new_mode_1, we definitely get an updated mode
+        # this time.
+        os.chmod(grail_fs_path, new_mode_2)
+        ret = testcase.run_state(
+            'file.managed',
+            name=name,
+            mode='keep',
+            source=grail,
+        )
+        testcase.assertSaltTrueReturn(ret)
+        managed_mode = stat.S_IMODE(os.stat(name).st_mode)
+        testcase.assertEqual(oct(managed_mode), oct(new_mode_2))
+    except Exception:
+        raise
+    finally:
+        # Set the mode of the file in the file_roots back to what it
+        # originally was.
+        os.chmod(grail_fs_path, grail_fs_mode)
+
+
class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
'''
Validate the file state
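The decimal constants in the helper above are octal permission sets written as plain integers; the correspondence the assertions rely on can be checked directly (illustration only, not part of the commit):

.. code-block:: python

    import stat

    # Decimal constants from the helper and their octal equivalents.
    assert 504 == 0o770   # initial_mode
    assert 384 == 0o600   # new_mode_1
    assert 420 == 0o644   # new_mode_2

    # stat.S_IMODE() strips the file-type bits before the comparison:
    assert stat.S_IMODE(0o100644) == 0o644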
@@ -165,6 +225,19 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
self.assertEqual(oct(desired_mode), oct(resulting_mode))
self.assertSaltTrueReturn(ret)

+def test_managed_file_mode_keep(self):
+    '''
+    Test using "mode: keep" in a file.managed state
+    '''
+    _test_managed_file_mode_keep_helper(self, local=False)
+
+def test_managed_file_mode_keep_local_source(self):
+    '''
+    Test using "mode: keep" in a file.managed state, with a local file path
+    as the source.
+    '''
+    _test_managed_file_mode_keep_helper(self, local=True)
+
def test_managed_file_mode_file_exists_replace(self):
'''
file.managed, existing file with replace=True, change permissions
@@ -37,6 +37,7 @@ _PKG_TARGETS = {
'FreeBSD': ['aalib', 'pth'],
'SUSE': ['aalib', 'python-pssh'],
'MacOS': ['libpng', 'jpeg'],
+'Windows': ['firefox', '7zip'],
}

_PKG_TARGETS_32 = {
@@ -389,15 +389,22 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
             print_header(' * Salt daemons started')
             master_conf = TestDaemon.config('master')
             minion_conf = TestDaemon.config('minion')
+            sub_minion_conf = TestDaemon.config('sub_minion')
             syndic_conf = TestDaemon.config('syndic')
             syndic_master_conf = TestDaemon.config('syndic_master')

-            print_header(' * Syndic master configuration values', top=False)
+            print_header(' * Syndic master configuration values (MoM)', top=False)
             print('interface: {0}'.format(syndic_master_conf['interface']))
             print('publish port: {0}'.format(syndic_master_conf['publish_port']))
             print('return port: {0}'.format(syndic_master_conf['ret_port']))
             print('\n')

+            print_header(' * Syndic configuration values', top=True)
+            print('interface: {0}'.format(syndic_conf['interface']))
+            print('syndic master: {0}'.format(syndic_conf['syndic_master']))
+            print('syndic master port: {0}'.format(syndic_conf['syndic_master_port']))
+            print('\n')
+
             print_header(' * Master configuration values', top=True)
             print('interface: {0}'.format(master_conf['interface']))
             print('publish port: {0}'.format(master_conf['publish_port']))
@@ -406,15 +413,24 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):

             print_header(' * Minion configuration values', top=True)
             print('interface: {0}'.format(minion_conf['interface']))
+            print('master: {0}'.format(minion_conf['master']))
+            print('master port: {0}'.format(minion_conf['master_port']))
+            if minion_conf['ipc_mode'] == 'tcp':
+                print('tcp pub port: {0}'.format(minion_conf['tcp_pub_port']))
+                print('tcp pull port: {0}'.format(minion_conf['tcp_pull_port']))
             print('\n')

-            print_header(' * Syndic configuration values', top=True)
-            print('interface: {0}'.format(syndic_conf['interface']))
-            print('syndic master port: {0}'.format(syndic_conf['syndic_master']))
+            print_header(' * Sub Minion configuration values', top=True)
+            print('interface: {0}'.format(sub_minion_conf['interface']))
+            print('master: {0}'.format(sub_minion_conf['master']))
+            print('master port: {0}'.format(sub_minion_conf['master_port']))
+            if sub_minion_conf['ipc_mode'] == 'tcp':
+                print('tcp pub port: {0}'.format(sub_minion_conf['tcp_pub_port']))
+                print('tcp pull port: {0}'.format(sub_minion_conf['tcp_pull_port']))
             print('\n')

             print_header(' Your client configuration is at {0}'.format(TestDaemon.config_location()))
-            print('To access the minion: `salt -c {0} minion test.ping'.format(TestDaemon.config_location()))
+            print('To access the minion: salt -c {0} minion test.ping'.format(TestDaemon.config_location()))

             while True:
                 time.sleep(1)
@@ -149,22 +149,6 @@ class DimensionDataTestCase(ExtendedTestCase):
             'default'
         )

-    @patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_nodes', MagicMock(return_value=[]))
-    def test_list_nodes(self):
-        nodes = dimensiondata.list_nodes()
-        self.assertEqual(
-            nodes,
-            {}
-        )
-
-    @patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_locations', MagicMock(return_value=[]))
-    def test_list_locations(self):
-        locations = dimensiondata.avail_locations()
-        self.assertEqual(
-            locations,
-            {}
-        )
-

 if __name__ == '__main__':
     from integration import run_tests
@@ -324,8 +324,8 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
         self.assertEqual(syndic_opts['master'], 'localhost')
         self.assertEqual(syndic_opts['sock_dir'], os.path.join(root_dir, 'minion_sock'))
         self.assertEqual(syndic_opts['cachedir'], os.path.join(root_dir, 'cache'))
-        self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'osyndic.log'))
-        self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'osyndic.pid'))
+        self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'syndic.log'))
+        self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'syndic.pid'))
         # Show that the options of localclient that repub to local master
         # are not merged with syndic ones
         self.assertEqual(syndic_opts['_master_conf_file'], minion_conf_path)
@@ -161,7 +161,7 @@ class CronTestCase(TestCase):
                 'salt.modules.cron.raw_cron',
                 new=MagicMock(side_effect=get_crontab)
         ):
-            set_crontab(L + '* * * * * ls\n')
+            set_crontab(L + '* * * * * ls\n\n')
             cron.set_job(
                 user='root',
                 minute='*',
@@ -179,6 +179,7 @@ class CronTestCase(TestCase):
             c1,
             '# Lines below here are managed by Salt, do not edit\n'
             '* * * * * ls\n'
+            '\n'
         )
         cron.set_job(
             user='root',
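The added '\n' literal in the second hunk relies on Python's implicit concatenation of adjacent string literals, so the expected crontab text now ends with a blank line after the job entry. A small sketch of the resulting string:

    # The assertion above concatenates adjacent string literals; the
    # expected crontab text is therefore three lines, the last one empty.
    expected = (
        '# Lines below here are managed by Salt, do not edit\n'
        '* * * * * ls\n'
        '\n'
    )
    print(repr(expected))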
|
@ -12,9 +12,11 @@ ensure_in_syspath('../../')
|
|||||||
|
|
||||||
# Import Salt Libs
|
# Import Salt Libs
|
||||||
from salt.pillar import mysql
|
from salt.pillar import mysql
|
||||||
|
from salt.ext.six import PY3
|
||||||
|
|
||||||
|
|
||||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||||
|
@skipIf(PY3, 'MySQL-python is not compatible with python3')
|
||||||
class MysqlPillarTestCase(TestCase):
|
class MysqlPillarTestCase(TestCase):
|
||||||
maxDiff = None
|
maxDiff = None
|
||||||
|
|
||||||
|
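The added decorator skips the entire test case on Python 3, where the MySQL-python bindings are not available. A minimal sketch of the same skip pattern (the class and test names below are placeholders):

    from salttesting import TestCase, skipIf
    from salt.ext.six import PY3

    @skipIf(PY3, 'MySQL-python is not compatible with python3')
    class ExamplePy2OnlyTestCase(TestCase):
        def test_noop(self):
            # Never reached on Python 3; the decorator skips the whole class.
            self.assertTrue(True)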
@@ -4,8 +4,7 @@
 from __future__ import absolute_import

 # Import Salt Testing libs
-from salttesting import TestCase, skipIf
-from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, call, patch
+from salttesting import TestCase
 from salttesting.helpers import ensure_in_syspath

 ensure_in_syspath('../../')
@@ -16,31 +15,24 @@ from salt.pillar import nodegroups
 fake_minion_id = 'fake_id'
 fake_pillar = {}
 fake_nodegroups = {
-    'a': 'nodegroup_a',
+    'a': fake_minion_id,
     'b': 'nodegroup_b',
 }
-fake_opts = {'nodegroups': fake_nodegroups, }
+fake_opts = {'nodegroups': fake_nodegroups, 'id': fake_minion_id}
 fake_pillar_name = 'fake_pillar_name'

 nodegroups.__opts__ = fake_opts


-@skipIf(NO_MOCK, NO_MOCK_REASON)
 class NodegroupsPillarTestCase(TestCase):
     '''
     Tests for salt.pillar.nodegroups
     '''

-    def _runner(self, expected_ret, pillar_name=None, nodegroup_matches=None):
+    def _runner(self, expected_ret, pillar_name=None):
         pillar_name = pillar_name or fake_pillar_name
-        nodegroup_matches = nodegroup_matches or [True, False, ]
-        mock_nodegroup_match = MagicMock(side_effect=nodegroup_matches)
-        with patch.object(nodegroups.Matcher, 'nodegroup_match', mock_nodegroup_match):
-            actual_ret = nodegroups.ext_pillar(fake_minion_id, fake_pillar, pillar_name=pillar_name)
-            self.assertDictEqual(actual_ret, expected_ret)
-            fake_nodegroup_count = len(fake_nodegroups)
-            self.assertEqual(mock_nodegroup_match.call_count, fake_nodegroup_count)
-            mock_nodegroup_match.assert_has_calls([call(x, fake_nodegroups) for x in fake_nodegroups.keys()])
+        actual_ret = nodegroups.ext_pillar(fake_minion_id, fake_pillar, pillar_name=pillar_name)
+        self.assertDictEqual(actual_ret, expected_ret)

     def test_succeeds(self):
         ret = {fake_pillar_name: ['a', ]}
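With nodegroup 'a' now defined as the minion's own id and 'id' present in __opts__, ext_pillar can do a real nodegroup match instead of mocking Matcher. A condensed sketch of what the simplified _runner exercises, using the fixture values shown above (requires a Salt checkout on the path):

    from salt.pillar import nodegroups

    # Same shape as the test fixtures: nodegroup 'a' is the minion's own id.
    nodegroups.__opts__ = {
        'nodegroups': {'a': 'fake_id', 'b': 'nodegroup_b'},
        'id': 'fake_id',
    }
    result = nodegroups.ext_pillar('fake_id', {}, pillar_name='fake_pillar_name')
    # Only nodegroup 'a' matches the minion id, so per test_succeeds the
    # expected return is {'fake_pillar_name': ['a']}.
    print(result)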
@@ -131,7 +131,7 @@ class TestFileState(TestCase):
         # If the test is failing, check the position of the "contents" param
         # in the manage_file() function in salt/modules/file.py, the fix is
         # likely as simple as updating the 2nd index below.
-        self.assertEqual(expected, returner.call_args[0][-4])
+        self.assertEqual(expected, returner.call_args[0][-5])


 @skipIf(NO_MOCK, NO_MOCK_REASON)
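The comment above is about positional-argument bookkeeping: call_args[0] on a mock is the tuple of positional arguments, so the negative index has to track where the "contents" parameter sits in manage_file()'s signature. A tiny illustration with a plain MagicMock (the argument values are arbitrary):

    from salttesting.mock import MagicMock

    returner = MagicMock()
    returner('a', 'b', 'c', 'd', 'e', 'f', 'g')
    # call_args[0] holds the positional args; negative indices count from
    # the end, so [-5] is the fifth argument from the right ('c' here).
    assert returner.call_args[0][-5] == 'c'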
@@ -251,7 +251,7 @@ class NetworkTestCase(TestCase):
        :return:
        '''
        self.assertEqual(network._generate_minion_id(),
-                        ['nodename', 'hostname', 'hostname.domainname.blank', '1.2.3.4', '5.6.7.8'])
+                        ['hostname.domainname.blank', 'nodename', 'hostname', '1.2.3.4', '5.6.7.8'])

     @patch('platform.node', MagicMock(return_value='hostname'))
     @patch('socket.gethostname', MagicMock(return_value='hostname'))
@@ -270,7 +270,7 @@ class NetworkTestCase(TestCase):

     @patch('platform.node', MagicMock(return_value='very.long.and.complex.domain.name'))
     @patch('socket.gethostname', MagicMock(return_value='hostname'))
-    @patch('socket.getfqdn', MagicMock(return_value='hostname'))
+    @patch('socket.getfqdn', MagicMock(return_value=''))
     @patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'hostname', ('127.0.1.1', 0))]))
     @patch('salt.utils.fopen', MagicMock(return_value=False))
     @patch('os.path.exists', MagicMock(return_value=False))
@@ -286,7 +286,7 @@ class NetworkTestCase(TestCase):

     @patch('platform.node', MagicMock(return_value='localhost'))
     @patch('socket.gethostname', MagicMock(return_value='pick.me'))
-    @patch('socket.getfqdn', MagicMock(return_value='hostname'))
+    @patch('socket.getfqdn', MagicMock(return_value='hostname.domainname.blank'))
     @patch('socket.getaddrinfo', MagicMock(return_value=[(2, 3, 0, 'hostname', ('127.0.1.1', 0))]))
     @patch('salt.utils.fopen', MagicMock(return_value=False))
     @patch('os.path.exists', MagicMock(return_value=False))
@@ -297,7 +297,7 @@ class NetworkTestCase(TestCase):

         :return:
         '''
-        self.assertEqual(network.generate_minion_id(), 'pick.me')
+        self.assertEqual(network.generate_minion_id(), 'hostname.domainname.blank')

     @patch('platform.node', MagicMock(return_value='localhost'))
     @patch('socket.gethostname', MagicMock(return_value='ip6-loopback'))
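These tests stack @patch decorators so that every source of host information is faked; the changed getfqdn() return values are what move the expected minion id towards the fully qualified name. A small sketch of the same patching pattern (the helper function below is purely illustrative):

    import socket
    from salttesting.mock import MagicMock, patch

    @patch('socket.getfqdn', MagicMock(return_value='hostname.domainname.blank'))
    @patch('socket.gethostname', MagicMock(return_value='pick.me'))
    def _show_patched_host_info():
        # Inside the decorated call both lookups return the mocked values.
        return socket.getfqdn(), socket.gethostname()

    print(_show_patched_host_info())   # ('hostname.domainname.blank', 'pick.me')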
|
Loading…
Reference in New Issue
Block a user