Merge branch 'carbon' into 'develop'

No conflicts.

Commit e385f55588

doc/faq.rst: 17 lines changed
@@ -147,14 +147,21 @@ should be opened on our tracker_, with the following information:
Why aren't my custom modules/states/etc. available on my Minions?
-----------------------------------------------------------------

Custom modules are only synced to Minions when :mod:`state.apply
<salt.modules.state.apply_>`, :mod:`saltutil.sync_modules
<salt.modules.saltutil.sync_modules>`, or :mod:`saltutil.sync_all
<salt.modules.saltutil.sync_all>` is run. Similarly, custom states are only
synced to Minions when :mod:`state.apply <salt.modules.state.apply_>`,
Custom modules are synced to Minions when
:mod:`saltutil.sync_modules <salt.modules.saltutil.sync_modules>`,
or :mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` is run.
Custom modules are also synced by :mod:`state.apply` when run without
any arguments.


Similarly, custom states are synced to Minions
when :mod:`state.apply <salt.modules.state.apply_>`,
:mod:`saltutil.sync_states <salt.modules.saltutil.sync_states>`, or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` is run.

Custom states are also synced by :mod:`state.apply<salt.modules.state.apply_>`
when run without any arguments.

Other custom types (renderers, outputters, etc.) have similar behavior, see the
documentation for the :mod:`saltutil <salt.modules.saltutil>` module for more
information.
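To illustrate the behaviour described in the updated FAQ entry, a minimal sketch using Salt's Python API (assuming a running master, the default configuration paths, and a custom module under ``salt://_modules``; the module name in the comment is illustrative only):

.. code-block:: python

    import salt.client

    client = salt.client.LocalClient()

    # Explicit sync, equivalent to: salt '*' saltutil.sync_modules
    synced = client.cmd('*', 'saltutil.sync_modules')
    print(synced)  # e.g. {'minion1': ['modules.mycustommod']}

    # state.apply with no arguments (a highstate) also triggers the sync
    client.cmd('*', 'state.apply')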
@@ -1363,6 +1363,36 @@ is impacted.

    fileserver_limit_traversal: False

.. conf_master:: fileserver_list_cache_time

``fileserver_list_cache_time``
------------------------------

.. versionadded:: 2014.1.0
.. versionchanged:: Carbon
    The default was changed from ``30`` seconds to ``20``.

Default: ``20``

Salt caches the list of files/symlinks/directories for each fileserver backend
and environment as they are requested, to guard against a performance
bottleneck at scale when many minions all ask the fileserver which files are
available simultaneously. This configuration parameter allows for the max age
of that cache to be altered.

Set this value to ``0`` to disable use of this cache altogether, but keep in
mind that this may increase the CPU load on the master when running a highstate
on a large number of minions.

.. note::
    Rather than altering this configuration parameter, it may be advisable to
    use the :mod:`fileserver.clear_file_list_cache
    <salt.runners.fileserver.clear_file_list_cache>` runner to clear these caches.

.. code-block:: yaml

    fileserver_list_cache_time: 5

.. conf_master:: hash_type

``hash_type``
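The decision the master makes with ``fileserver_list_cache_time`` roughly reduces to an mtime-based age check on the cached list file; a simplified sketch (hypothetical standalone helper, mirroring the ``check_file_list_cache`` change further down in this diff):

.. code-block:: python

    import os
    import time

    def list_cache_is_fresh(opts, list_cache):
        # A missing cache file is treated the same as an expired one
        try:
            age = time.time() - os.stat(list_cache).st_mtime
        except OSError:
            return False
        # Default max age is 20 seconds as of Carbon
        return age < opts.get('fileserver_list_cache_time', 20)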
@@ -154,7 +154,10 @@ def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
for ref in file_refs[saltenv]:
    for name in ref:
        short = salt.utils.url.parse(name)[0]
        path = file_client.cache_file(name, saltenv, cachedir=cachedir)
        try:
            path = file_client.cache_file(name, saltenv, cachedir=cachedir)
        except IOError:
            path = ''
        if path:
            tgt = os.path.join(env_root, short)
            tgt_dir = os.path.dirname(tgt)
@@ -162,7 +165,10 @@ def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
                os.makedirs(tgt_dir)
            shutil.copy(path, tgt)
            continue
        files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
        try:
            files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
        except IOError:
            files = ''
        if files:
            for filename in files:
                fn = filename[filename.find(short) + len(short):]
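The change above wraps both ``cache_file`` and ``cache_dir`` so a file the fileserver cannot provide is skipped rather than aborting the whole transfer-tarball preparation. The pattern, as a standalone sketch (hypothetical helper name):

.. code-block:: python

    def cache_or_skip(file_client, name, saltenv, cachedir):
        # Return the cached path, or '' so the caller simply skips this ref
        try:
            return file_client.cache_file(name, saltenv, cachedir=cachedir)
        except IOError:
            return ''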
@@ -5,6 +5,7 @@ File server pluggable modules and generic backend functions

# Import python libs
from __future__ import absolute_import
import collections
import errno
import fnmatch
import logging
@@ -121,8 +122,8 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
    age = time.time() - cache_stat.st_mtime
else:
    # if filelist does not exists yet, mark it as expired
    age = opts.get('fileserver_list_cache_time', 30) + 1
if age < opts.get('fileserver_list_cache_time', 30):
    age = opts.get('fileserver_list_cache_time', 20) + 1
if age < opts.get('fileserver_list_cache_time', 20):
    # Young enough! Load this sucker up!
    with salt.utils.fopen(list_cache, 'rb') as fp_:
        log.trace('Returning file_lists cache data from '
@@ -325,10 +326,18 @@ class Fileserver(object):
if not back:
    back = self.opts['fileserver_backend']
else:
    try:
        back = back.split(',')
    except AttributeError:
        back = six.text_type(back).split(',')
    if not isinstance(back, list):
        try:
            back = back.split(',')
        except AttributeError:
            back = six.text_type(back).split(',')

    if isinstance(back, collections.Sequence):
        # The test suite uses an ImmutableList type (based on
        # collections.Sequence) for lists, which breaks this function in
        # the test suite. This normalizes the value from the opts into a
        # list if it is based on collections.Sequence.
        back = list(back)

ret = []
if not isinstance(back, list):
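The normalization above accepts ``None``, a comma-separated string, or a sequence, and always ends up with a plain list of backend names. A simplified standalone sketch of the same idea (hypothetical function, not part of Salt's API):

.. code-block:: python

    def normalize_backends(back, default):
        if not back:
            return list(default)
        if isinstance(back, str):
            return [x.strip() for x in back.split(',')]
        return list(back)

    # normalize_backends('roots, git', []) -> ['roots', 'git']
    # normalize_backends(None, ['roots']) -> ['roots']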
@@ -635,6 +644,88 @@ class Fileserver(object):
        except (IndexError, TypeError):
            return '', None

def clear_file_list_cache(self, load):
    '''
    Deletes the file_lists cache files
    '''
    if 'env' in load:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
        )
        load.pop('env')

    saltenv = load.get('saltenv', [])
    if saltenv is not None:
        if not isinstance(saltenv, list):
            try:
                saltenv = [x.strip() for x in saltenv.split(',')]
            except AttributeError:
                saltenv = [x.strip() for x in str(saltenv).split(',')]

        for idx, val in enumerate(saltenv):
            if not isinstance(val, six.string_types):
                saltenv[idx] = six.text_type(val)

    ret = {}
    fsb = self._gen_back(load.pop('fsbackend', None))
    list_cachedir = os.path.join(self.opts['cachedir'], 'file_lists')
    try:
        file_list_backends = os.listdir(list_cachedir)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            log.debug('No file list caches found')
            return {}
        else:
            log.error(
                'Failed to get list of saltenvs for which the master has '
                'cached file lists: %s', exc
            )

    for back in file_list_backends:
        # Account for the fact that the file_list cache directory for gitfs
        # is 'git', hgfs is 'hg', etc.
        back_virtualname = re.sub('fs$', '', back)
        try:
            cache_files = os.listdir(os.path.join(list_cachedir, back))
        except OSError as exc:
            log.error(
                'Failed to find file list caches for saltenv \'%s\': %s',
                back, exc
            )
            continue
        for cache_file in cache_files:
            try:
                cache_saltenv, extension = cache_file.rsplit('.', 1)
            except ValueError:
                # Filename has no dot in it. Not a cache file, ignore.
                continue
            if extension != 'p':
                # Filename does not end in ".p". Not a cache file, ignore.
                continue
            elif back_virtualname not in fsb or \
                    (saltenv is not None and cache_saltenv not in saltenv):
                log.debug(
                    'Skipping %s file list cache for saltenv \'%s\'',
                    back, cache_saltenv
                )
                continue
            try:
                os.remove(os.path.join(list_cachedir, back, cache_file))
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    log.error('Failed to remove %s: %s',
                              exc.filename, exc.strerror)
            else:
                ret.setdefault(back, []).append(cache_saltenv)
                log.debug(
                    'Removed %s file list cache for saltenv \'%s\'',
                    cache_saltenv, back
                )
    return ret

def file_list(self, load):
    '''
    Return a list of files from the dominant environment
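For reference, the new method can be exercised directly through a ``Fileserver`` instance; a sketch assuming a master configuration at the default path (normally it is reached through the ``fileserver.clear_file_list_cache`` runner shown later in this diff):

.. code-block:: python

    import salt.config
    import salt.fileserver

    opts = salt.config.master_config('/etc/salt/master')
    fs = salt.fileserver.Fileserver(opts)

    # Clear only the 'base' saltenv caches of the 'roots' backend
    cleared = fs.clear_file_list_cache(load={'saltenv': 'base', 'fsbackend': 'roots'})
    print(cleared)  # e.g. {'roots': ['base']}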
@@ -335,7 +335,8 @@ def _file_lists(load, form):
    dir_rel_fn = dir_rel_fn.replace('\\', '/')
ret['dirs'].append(dir_rel_fn)
if len(dirs) == 0 and len(files) == 0:
    if not salt.fileserver.is_file_ignored(__opts__, dir_rel_fn):
    if dir_rel_fn not in ('.', '..') \
            and not salt.fileserver.is_file_ignored(__opts__, dir_rel_fn):
        ret['empty_dirs'].append(dir_rel_fn)
for fname in files:
    is_link = os.path.islink(os.path.join(root, fname))
@@ -11,7 +11,6 @@ from __future__ import absolute_import

# Import python libs
import sys
import atexit
import logging
import threading
import logging.handlers
@@ -931,6 +931,8 @@ def patch_python_logging_handlers():

def __process_multiprocessing_logging_queue(opts, queue):
    import salt.utils
    salt.utils.appendproctitle('MultiprocessingLoggingQueue')
    from salt.utils.verify import check_user
    check_user(opts['user'])
    if salt.utils.is_windows():
        # On Windows, creating a new process doesn't fork (copy the parent
        # process image). Due to this, we need to setup all of our logging
@@ -44,22 +44,21 @@ def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):
    '''
    Install a pkg file

    Args:
        pkg (str): The package to install
        target (str): The target in which to install the package to
        store (bool): Should the package be installed as if it was from the
            store?
        allow_untrusted (bool): Allow the installation of untrusted packages?

    Returns:
        dict: A dictionary containing the results of the installation

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.install test.pkg

    target
        The target in which to install the package to

    store
        Should the package be installed as if it was from the store?

    allow_untrusted
        Allow the installation of untrusted packages?


    '''
    pkg = _quote(pkg)
    target = _quote(target)
@@ -81,21 +80,21 @@ def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):

def install_app(app, target='/Applications/'):
    '''
    Install an app file
    Install an app file by moving it into the specified Applications directory

    Args:
        app (str): The location of the .app file
        target (str): The target in which to install the package to
            Default is ''/Applications/''

    Returns:
        str: The results of the rsync command

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.install_app /tmp/tmp.app /Applications/

    app
        The location of the .app file

    target
        The target in which to install the package to


    '''
    app = _quote(app)
    target = _quote(target)
@@ -117,18 +116,19 @@ def install_app(app, target='/Applications/'):

def uninstall_app(app):
    '''
    Uninstall an app file
    Uninstall an app file by removing it from the Applications directory

    Args:
        app (str): The location of the .app file

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.uninstall_app /Applications/app.app

    app
        The location of the .app file


    '''

    return __salt__['file.remove'](app)
@@ -139,8 +139,18 @@ def mount(dmg):
    Attempt to mount a dmg file to a temporary location and return the
    location of the pkg file inside

    dmg
        The location of the dmg file to mount
    Args:
        dmg (str): The location of the dmg file to mount

    Returns:
        tuple: Tuple containing the results of the command along with the mount
            point

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.mount /tmp/software.dmg
    '''

    temp_dir = __salt__['temp.dir'](prefix='dmg-')
@@ -154,8 +164,17 @@ def unmount(mountpoint):
    '''
    Attempt to unmount a dmg file from a temporary location

    mountpoint
        The location of the mount point
    Args:
        mountpoint (str): The location of the mount point

    Returns:
        str: The results of the hdutil detach command

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.unmount /dev/disk2
    '''

    cmd = 'hdiutil detach "{0}"'.format(mountpoint)
@@ -167,6 +186,14 @@ def installed_pkgs():
    '''
    Return the list of installed packages on the machine

    Returns:
        list: List of installed packages

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.installed_pkgs
    '''

    cmd = 'pkgutil --pkgs'
@@ -176,12 +203,19 @@ def installed_pkgs():

def get_pkg_id(pkg):
    '''
    Attempt to get the package id from a .pkg file
    Attempt to get the package ID from a .pkg file

    Returns all of the package ids if the pkg file contains multiple
    Args:
        pkg (str): The location of the pkg file

    pkg
        The location of the pkg file
    Returns:
        list: List of all of the package IDs

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.get_pkg_id /tmp/test.pkg
    '''
    pkg = _quote(pkg)
    package_ids = []
@@ -217,12 +251,19 @@ def get_pkg_id(pkg):

def get_mpkg_ids(mpkg):
    '''
    Attempt to get the package ids from a mounted .mpkg file
    Attempt to get the package IDs from a mounted .mpkg file

    Returns all of the package ids if the pkg file contains multiple
    Args:
        mpkg (str): The location of the mounted mpkg file

    pkg
        The location of the mounted mpkg file
    Returns:
        list: List of package IDs

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.get_mpkg_ids /dev/disk2
    '''
    mpkg = _quote(mpkg)
    package_infos = []
@@ -185,6 +185,11 @@ def _extract_json(npm_output):
        lines = lines[1:]
    while lines and not lines[-1].startswith('}') and not lines[-1].startswith(']'):
        lines = lines[:-1]
    # Mac OSX with fsevents includes the following line in the return
    # when a new module is installed which is invalid JSON:
    # [fsevents] Success: "..."
    while lines and lines[0].startswith('[fsevents]'):
        lines = lines[1:]
    try:
        return json.loads(''.join(lines))
    except ValueError:
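The lines added above strip npm's ``[fsevents] Success: ...`` noise before decoding. A simplified standalone sketch of the same cleanup (hypothetical helper; it drops every ``[fsevents]`` line rather than only the leading ones):

.. code-block:: python

    import json

    def parse_npm_json(npm_output):
        lines = [line for line in npm_output.splitlines()
                 if not line.startswith('[fsevents]')]
        return json.loads('\n'.join(lines))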
@@ -12,7 +12,6 @@ A module to manage software on Windows
# Import python future libs
from __future__ import absolute_import
from __future__ import unicode_literals
import errno
import os
import time
#import locale
@@ -1404,6 +1403,9 @@ def get_repo_data(saltenv='base'):
    # return __context__['winrepo.data']
    (repo_remote, repocache_dir) = _get_repo_src_dest(saltenv)
    winrepo = 'winrepo.p'
    if not os.path.exists(os.path.join(repocache_dir, winrepo)):
        log.debug('No winrepo.p cache file. Refresh pkg db now.')
        refresh_db(saltenv=saltenv)
    try:
        with salt.utils.fopen(
                os.path.join(repocache_dir, winrepo), 'rb') as repofile:
@@ -1416,13 +1418,6 @@ def get_repo_data(saltenv='base'):
    except IOError as exc:
        log.error('Not able to read repo file')
        log.exception(exc)
        if exc.errno == errno.ENOENT:
            # File doesn't exist
            raise CommandExecutionError(
                'Windows repo cache doesn\'t exist, pkg.refresh_db likely '
                'needed'
            )

        return {}
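Taken together, the two hunks above change ``get_repo_data`` so that a missing ``winrepo.p`` cache is regenerated instead of raising an error. The new behaviour, as a simplified sketch (hypothetical helper; ``refresh_db`` is passed in rather than resolved from the module):

.. code-block:: python

    import os

    def ensure_repo_cache(repocache_dir, refresh_db, saltenv='base'):
        cache_file = os.path.join(repocache_dir, 'winrepo.p')
        if not os.path.exists(cache_file):
            # No cached package metadata yet; build it before reading
            refresh_db(saltenv=saltenv)
        return cache_file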
@@ -39,6 +39,114 @@ def envs(backend=None, sources=False):
    return fileserver.envs(back=backend, sources=sources)


def clear_file_list_cache(saltenv=None, backend=None):
    '''
    .. versionadded:: Carbon

    The Salt fileserver caches the files/directories/symlinks for each
    fileserver backend and environment as they are requested. This is done to
    help the fileserver scale better. Without this caching, when
    hundreds/thousands of minions simultaneously ask the master what files are
    available, this would cause the master's CPU load to spike as it obtains
    the same information separately for each minion.

    saltenv
        By default, this runner will clear the file list caches for all
        environments. This argument allows for a list of environments to be
        passed, to clear more selectively. This list can be passed either as a
        comma-separated string, or a Python list.

    backend
        Similar to the ``saltenv`` parameter, this argument will restrict the
        cache clearing to specific fileserver backends (the default behavior is
        to clear from all enabled fileserver backends). This list can be passed
        either as a comma-separated string, or a Python list.

    .. note::
        The maximum age for the cached file lists (i.e. the age at which the
        cache will be disregarded and rebuilt) is defined by the
        :conf_master:`fileserver_list_cache_time` configuration parameter.

    Since the ability to clear these caches is often required by users writing
    custom runners which add/remove files, this runner can easily be called
    from within a custom runner using any of the following examples:

    .. code-block:: python

        # Clear all file list caches
        __salt__['fileserver.clear_file_list_cache']()
        # Clear just the 'base' saltenv file list caches
        __salt__['fileserver.clear_file_list_cache'](saltenv='base')
        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        __salt__['fileserver.clear_file_list_cache'](saltenv='base', backend='roots')
        # Clear all file list caches from the 'roots' fileserver backend
        __salt__['fileserver.clear_file_list_cache'](backend='roots')

    .. note::
        In runners, the ``__salt__`` dictionary will likely be renamed to
        ``__runner__`` in a future Salt release to distinguish runner functions
        from remote execution functions. See `this GitHub issue`_ for
        discussion/updates on this.

        .. _`this GitHub issue`: https://github.com/saltstack/salt/issues/34958

    If using Salt's Python API (not a runner), the following examples are
    equivalent to the ones above:

    .. code-block:: python

        import salt.config
        import salt.runner

        opts = salt.config.master_config('/etc/salt/master')
        opts['fun'] = 'fileserver.clear_file_list_cache'

        # Clear all file list caches
        opts['arg'] = []  # No arguments
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear just the 'base' saltenv file list caches
        opts['arg'] = ['base', None]
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        opts['arg'] = ['base', 'roots']
        runner = salt.runner.Runner(opts)
        cleared = runner.run()

        # Clear all file list caches from the 'roots' fileserver backend
        opts['arg'] = [None, 'roots']
        runner = salt.runner.Runner(opts)
        cleared = runner.run()


    This function will return a dictionary showing a list of environments which
    were cleared for each backend. An empty return dictionary means that no
    changes were made.

    CLI Examples:

    .. code-block:: bash

        # Clear all file list caches
        salt-run fileserver.clear_file_list_cache
        # Clear just the 'base' saltenv file list caches
        salt-run fileserver.clear_file_list_cache saltenv=base
        # Clear just the 'base' saltenv file list caches from just the 'roots'
        # fileserver backend
        salt-run fileserver.clear_file_list_cache saltenv=base backend=roots
        # Clear all file list caches from the 'roots' fileserver backend
        salt-run fileserver.clear_file_list_cache backend=roots
    '''
    fileserver = salt.fileserver.Fileserver(__opts__)
    load = {'saltenv': saltenv, 'fsbackend': backend}
    return fileserver.clear_file_list_cache(load=load)


def file_list(saltenv='base', backend=None):
    '''
    Return a list of files from the salt fileserver
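Besides the low-level ``salt.runner.Runner`` examples in the docstring, the runner can also be invoked through ``RunnerClient``; a sketch assuming the master configuration lives at the default path:

.. code-block:: python

    import salt.config
    import salt.runner

    opts = salt.config.master_config('/etc/salt/master')
    client = salt.runner.RunnerClient(opts)
    cleared = client.cmd('fileserver.clear_file_list_cache',
                         kwarg={'saltenv': 'base', 'backend': 'roots'})
    print(cleared)  # e.g. {'roots': ['base']}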
@@ -495,7 +495,8 @@ def image_present(name,
                  force=False,
                  insecure_registry=False,
                  client_timeout=CLIENT_TIMEOUT,
                  dockerfile=None):
                  dockerfile=None,
                  **kwargs):
    '''
    Ensure that an image is present. The image can either be pulled from a
    Docker registry, built from a Dockerfile, or loaded from a saved image.
@@ -429,6 +429,10 @@ class ProcessManager(object):
        '''
        Kill all of the children
        '''
        # first lets reset signal handlers to default one to prevent running this twice
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # check that this is the correct process, children inherit this
        # handler, if we are in a child lets just run the original handler
        if os.getpid() != self._pid:
@@ -460,7 +464,10 @@ class ProcessManager(object):
            log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
            if args:
                # escalate the signal to the process
                os.kill(pid, args[0])
                try:
                    os.kill(pid, args[0])
                except OSError:
                    pass
            try:
                p_map['Process'].terminate()
            except OSError as exc:
@@ -676,6 +683,8 @@ class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
        signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        msg = '{0} received a '.format(self.__class__.__name__)
        if signum == signal.SIGINT:
            msg += 'SIGINT'
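The ``try``/``except OSError`` added around ``os.kill`` above covers the race where a child exits between being listed and being signalled. The pattern, as a standalone sketch (hypothetical helper name):

.. code-block:: python

    import os
    import signal

    def kill_if_running(pid, signum=signal.SIGTERM):
        try:
            os.kill(pid, signum)
        except OSError:
            pass  # process already gone; nothing to do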
@@ -7,6 +7,7 @@ integration tests for mac_service
from __future__ import absolute_import, print_function

# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, destructiveTest
ensure_in_syspath('../../')
@@ -15,6 +16,11 @@ import integration
import salt.utils


@skipIf(not salt.utils.is_darwin(), 'Test only available on Mac OS X')
@skipIf(not salt.utils.which('launchctl'), 'Test requires launchctl binary')
@skipIf(not salt.utils.which('plutil'), 'Test requires plutil binary')
@skipIf(salt.utils.get_uid(salt.utils.get_user()) != 0,
        'Test requires root')
class MacServiceModuleTest(integration.ModuleCase):
    '''
    Validate the mac_service module
@@ -24,26 +30,14 @@ class MacServiceModuleTest(integration.ModuleCase):

    def setUp(self):
        '''
        Get current settings
        Get current state of the test service
        '''
        if not salt.utils.is_darwin():
            self.skipTest('Test only available on Mac OS X')

        if not salt.utils.which('launchctl'):
            self.skipTest('Test requires launchctl binary')

        if not salt.utils.which('plutil'):
            self.skipTest('Test requires plutil binary')

        if salt.utils.get_uid(salt.utils.get_user()) != 0:
            self.skipTest('Test requires root')

        self.SERVICE_ENABLED = self.run_function('service.enabled',
                                                 [self.SERVICE_NAME])

    def tearDown(self):
        '''
        Reset to original settings
        Reset the test service to the original state
        '''
        if self.SERVICE_ENABLED:
            self.run_function('service.start', [self.SERVICE_NAME])
@@ -70,17 +64,18 @@ class MacServiceModuleTest(integration.ModuleCase):
        '''
        # Expected Functionality
        self.assertTrue(
            self.run_function('service.launchctl', ['error', 'bootstrap', 64]))
            self.run_function('service.launchctl',
                              ['error', 'bootstrap', '64']))
        self.assertEqual(
            self.run_function('service.launchctl',
                              ['error', 'bootstrap', 64],
                              ['error', 'bootstrap', '64'],
                              return_stdout=True),
            '64: unknown error code')

        # Raise an error
        self.assertIn(
            ' Failed to error service',
            self.run_function('service.launchctl', ['error']))
            'Failed to error service',
            self.run_function('service.launchctl', ['error', 'bootstrap']))

    def test_list(self):
        '''
@@ -4,6 +4,7 @@ Tests for the fileserver runner
'''
# Import Python libs
from __future__ import absolute_import
import contextlib

# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
@@ -13,7 +14,7 @@ ensure_in_syspath('../../')
import integration


class ManageTest(integration.ShellCase):
class FileserverTest(integration.ShellCase):
    '''
    Test the fileserver runner
    '''
@@ -23,18 +24,17 @@ class ManageTest(integration.ShellCase):
        '''
        ret = self.run_run_plus(fun='fileserver.dir_list')
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('_modules' in ret['return'])

        # Backend submitted as a string
        ret = self.run_run_plus(
            fun='fileserver.dir_list',
            backend='roots')
        ret = self.run_run_plus(fun='fileserver.dir_list', backend='roots')
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('_modules' in ret['return'])

        # Backend submitted as a list
        ret = self.run_run_plus(
            fun='fileserver.dir_list',
            backend=['roots'])
        ret = self.run_run_plus(fun='fileserver.dir_list', backend=['roots'])
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('_modules' in ret['return'])

    def test_empty_dir_list(self):
        '''
@@ -42,18 +42,21 @@ class ManageTest(integration.ShellCase):
        '''
        ret = self.run_run_plus(fun='fileserver.empty_dir_list')
        self.assertIsInstance(ret['return'], list)
        self.assertEqual(ret['return'], [])

        # Backend submitted as a string
        ret = self.run_run_plus(
            fun='fileserver.empty_dir_list',
            backend='roots')
        self.assertIsInstance(ret['return'], list)
        self.assertEqual(ret['return'], [])

        # Backend submitted as a list
        ret = self.run_run_plus(
            fun='fileserver.empty_dir_list',
            backend=['roots'])
        self.assertIsInstance(ret['return'], list)
        self.assertEqual(ret['return'], [])

    def test_envs(self):
        '''
@@ -70,20 +73,87 @@ class ManageTest(integration.ShellCase):
        ret = self.run_run_plus(fun='fileserver.envs', backend=['roots'])
        self.assertIsInstance(ret['return'], list)

    def test_clear_file_list_cache(self):
        '''
        fileserver.clear_file_list_cache

        If this test fails, then something may have changed in the test suite
        and we may have more than just "roots" configured in the fileserver
        backends. This assert will need to be updated accordingly.
        '''
        @contextlib.contextmanager
        def gen_cache():
            '''
            Create file_list cache so we have something to clear
            '''
            self.run_run_plus(fun='fileserver.file_list')
            yield

        # Test with no arguments
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache')
            self.assertEqual(ret['return'], {'roots': ['base']})

        # Test with backend passed as string
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    backend='roots')
            self.assertEqual(ret['return'], {'roots': ['base']})

        # Test with backend passed as list
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    backend=['roots'])
            self.assertEqual(ret['return'], {'roots': ['base']})

        # Test with backend passed as string, but with a nonsense backend
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    backend='notarealbackend')
            self.assertEqual(ret['return'], {})

        # Test with saltenv passed as string
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    saltenv='base')
            self.assertEqual(ret['return'], {'roots': ['base']})

        # Test with saltenv passed as list
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    saltenv=['base'])
            self.assertEqual(ret['return'], {'roots': ['base']})

        # Test with saltenv passed as string, but with a nonsense saltenv
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    saltenv='notarealsaltenv')
            self.assertEqual(ret['return'], {})

        # Test with both backend and saltenv passed
        with gen_cache():
            ret = self.run_run_plus(fun='fileserver.clear_file_list_cache',
                                    backend='roots',
                                    saltenv='base')
            self.assertEqual(ret['return'], {'roots': ['base']})

    def test_file_list(self):
        '''
        fileserver.file_list
        '''
        ret = self.run_run_plus(fun='fileserver.file_list')
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('grail/scene33' in ret['return'])

        # Backend submitted as a string
        ret = self.run_run_plus(fun='fileserver.file_list', backend='roots')
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('grail/scene33' in ret['return'])

        # Backend submitted as a list
        ret = self.run_run_plus(fun='fileserver.file_list', backend=['roots'])
        self.assertIsInstance(ret['return'], list)
        self.assertTrue('grail/scene33' in ret['return'])

    def test_symlink_list(self):
        '''
@@ -91,14 +161,17 @@ class ManageTest(integration.ShellCase):
        '''
        ret = self.run_run_plus(fun='fileserver.symlink_list')
        self.assertIsInstance(ret['return'], dict)
        self.assertTrue('dest_sym' in ret['return'])

        # Backend submitted as a string
        ret = self.run_run_plus(fun='fileserver.symlink_list', backend='roots')
        self.assertIsInstance(ret['return'], dict)
        self.assertTrue('dest_sym' in ret['return'])

        # Backend submitted as a list
        ret = self.run_run_plus(fun='fileserver.symlink_list', backend=['roots'])
        self.assertIsInstance(ret['return'], dict)
        self.assertTrue('dest_sym' in ret['return'])

    def test_update(self):
        '''
@@ -117,4 +190,4 @@ class ManageTest(integration.ShellCase):

if __name__ == '__main__':
    from integration import run_tests
    run_tests(ManageTest)
    run_tests(FileserverTest)
@@ -5,13 +5,15 @@ Tests for the archive state
# Import python libs
from __future__ import absolute_import
import os
import platform
import socket
import shutil
import threading
import tornado.ioloop
import tornado.web

# Import Salt Testing libs
from salttesting import TestCase
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@@ -30,13 +32,17 @@ ARCHIVE_TAR_SOURCE = 'http://localhost:{0}/custom.tar.gz'.format(PORT)
UNTAR_FILE = ARCHIVE_DIR + 'custom/README'
ARCHIVE_TAR_HASH = 'md5=7643861ac07c30fe7d2310e9f25ca514'
STATE_DIR = os.path.join(integration.FILES, 'file', 'base')
if '7' in platform.dist()[1]:
    REDHAT7 = True
else:
    REDHAT7 = False


class SetupWebServer(TestCase):
@skipIf(not REDHAT7, 'Only run on redhat7 for now due to hanging issues on other OS')
class ArchiveTest(integration.ModuleCase,
                  integration.SaltReturnAssertsMixIn):
    '''
    Setup and Teardown of Web Server
    Only need to set this up once not
    before all tests
    Validate the archive state
    '''
    @classmethod
    def webserver(cls):
@@ -51,22 +57,26 @@ class SetupWebServer(TestCase):

    @classmethod
    def setUpClass(cls):
        '''
        start tornado app on thread
        and wait till its running
        '''
        cls.server_thread = threading.Thread(target=cls.webserver)
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # check if tornado app is up
        port_closed = True
        while port_closed:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex(('127.0.0.1', PORT))
            if result == 0:
                port_closed = False

    @classmethod
    def tearDownClass(cls):
        tornado.ioloop.IOLoop.instance().stop()
        cls.server_thread.join()


class ArchiveTest(SetupWebServer,
                  integration.ModuleCase,
                  integration.SaltReturnAssertsMixIn):
    '''
    Validate the archive state
    '''
    def _check_ext_remove(self, dir, file):
        '''
        function to check if file was extracted
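The ``setUpClass`` above starts the Tornado test server on a thread and then polls the port until it accepts connections. The same wait, as a standalone sketch with a timeout added (hypothetical helper, not part of the test suite):

.. code-block:: python

    import socket
    import time

    def wait_for_port(port, host='127.0.0.1', timeout=10.0):
        deadline = time.time() + timeout
        while time.time() < deadline:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                if sock.connect_ex((host, port)) == 0:
                    return True
            finally:
                sock.close()
            time.sleep(0.1)
        return False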