Merge branch 'develop' into improvement-spm-base_paths

commit 2ddd589f68
Author: plastikos, 2018-05-17 23:09:05 -06:00 (committed by GitHub)
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
72 changed files with 10965 additions and 3163 deletions

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-API" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-API" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-api \- salt-api Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CALL" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CALL" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CLOUD" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CLOUD" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-cloud \- Salt Cloud Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CP" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CP" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-KEY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-KEY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MASTER" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-MASTER" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MINION" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-MINION" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-PROXY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-PROXY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-proxy \- salt-proxy Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-RUN" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-RUN" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SSH" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-SSH" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-ssh \- salt-ssh Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SYNDIC" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-SYNDIC" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-UNITY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-UNITY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-unity \- salt-unity Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt \- salt
.

File diff suppressed because it is too large.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SPM" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SPM" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
spm \- Salt Package Manager Command
.

View File

@ -125,7 +125,6 @@ state modules
influxdb_database
influxdb_retention_policy
influxdb_user
infoblox
infoblox_a
infoblox_cname
infoblox_host_record

View File

@ -1,5 +0,0 @@
salt.states.infoblox module
===========================
.. automodule:: salt.states.infoblox
:members:

View File

@ -410,10 +410,11 @@ exactly like the ``require`` requisite (the watching state will execute if
service.running:
- watch_any:
- file: /etc/apache2/sites-available/site1.conf
- file: /etc/apache2/sites-available/site2.conf
- file: apache2-site2
file.managed:
- name: /etc/apache2/sites-available/site1.conf
- source: salt://apache2/files/site1.conf
apache2-site2:
file.managed:
- name: /etc/apache2/sites-available/site2.conf
- source: salt://apache2/files/site2.conf

View File

@ -351,6 +351,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
@ -359,6 +360,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
Or by just configuring the same auth block directly in the cloud provider config.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne

View File

@ -699,15 +699,24 @@ repository to be served up from the Salt fileserver path
Mountpoints can also be configured on a :ref:`per-remote basis
<gitfs-per-remote-config>`.
Using gitfs in Masterless Mode
==============================
Since 2014.7.0, gitfs can be used in masterless mode. To do so, simply add the
gitfs configuration parameters (and set :conf_master:`fileserver_backend`) in
the _minion_ config file instead of the master config file.
Using gitfs Alongside Other Backends
====================================
Sometimes it may make sense to use multiple backends; for instance, if ``sls``
files are stored in git but larger files are stored directly on the master.
The cascading lookup logic used for multiple remotes is also used with
multiple backends. If the ``fileserver_backend`` option contains
multiple backends:
The cascading lookup logic used for multiple remotes is also used with multiple
backends. If the :conf_master:`fileserver_backend` option contains multiple
backends:
.. code-block:: yaml
@ -719,7 +728,6 @@ Then the ``roots`` backend (the default backend of files in ``/srv/salt``) will
be searched first for the requested file; then, if it is not found on the
master, each configured git remote will be searched.
Branches, Environments, and Top Files
=====================================

View File

@ -75,7 +75,7 @@ set -l salt_programs_select salt salt-cp
for program in $salt_programs_select
complete -c $program -f -s G -l grain -d "Instead of using shell globs to evaluate the target use a grain value to identify targets, the syntax for the target is the grain key followed by a globexpression: \"os:Arch*\""
complete -c $program -f -l grain-pcre -d "Instead of using shell globs to evaluate the target use a grain value to identify targets, the syntax for the target is the grain key followed by a pcre regular expression: \"os:Arch.*\""
complete -c $program -f -s L -l list -d "Instead of using shell globs to evaluate the target servers, take a comma or space delimited list of servers."
complete -c $program -f -s L -l list -d "Instead of using shell globs to evaluate the target servers, take a comma or whitespace delimited list of servers."
complete -c $program -f -s N -l nodegroup -d "Instead of using shell globs to evaluate the target use one of the predefined nodegroups to identify a list of targets."
complete -c $program -f -s E -l pcre -d "Instead of using shell globs to evaluate the target servers, use pcre regular expressions"
complete -c $program -f -s R -l range -d "Instead of using shell globs to evaluate the target use a range expression to identify targets. Range expressions look like %cluster"

View File

@ -119,7 +119,7 @@ _target_opt_pat=(
_target_options=(
"$_target_opt_pat[2]"{-E,--pcre}'[use pcre regular expressions]:pcre:'
"$_target_opt_pat[2]"{-L,--list}'[take a comma or space delimited list of servers.]:list:'
"$_target_opt_pat[2]"{-L,--list}'[take a comma or whitespace delimited list of servers.]:list:'
"$_target_opt_pat[2]"{-G,--grain}'[use a grain value to identify targets]:Grains:'
"$_target_opt_pat[2]--grain-pcre[use a grain value to identify targets.]:pcre:"
"$_target_opt_pat[2]"{-N,--nodegroup}'[use one of the predefined nodegroups to identify a list of targets.]:Nodegroup:'

View File

@ -1916,7 +1916,8 @@ class Map(Cloud):
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
for profile_name, nodes in six.iteritems(self.rendered_map):
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts['profiles']:
msg = (
'The required profile, \'{0}\', defined in the map '
@ -1934,21 +1935,23 @@ class Map(Cloud):
profile_data = self.opts['profiles'].get(profile_name)
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
alias, driver = profile_data.get('provider').split(':')
provider_details = self.opts['providers'][alias][driver].copy()
del provider_details['profiles']
# Update the provider details information with profile data
# Profile data should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
provider_details.update(profile_data)
profile_data = provider_details
for nodename, overrides in six.iteritems(nodes):
# Get the VM name
nodedata = copy.deepcopy(profile_data)
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
alias, driver = overrides.get('provider').split(':')
else:
alias, driver = profile_data.get('provider').split(':')
provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
del provider_details['profiles']
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ('grains', 'master', 'minion', 'volumes',
'requires'):
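
Editor's note: for readers tracing the override order implemented above, here is a minimal, self-contained sketch of the provider -> profile -> map cascade. The ``recursive_update`` helper and the data are hypothetical stand-ins for ``salt.utils.dictupdate.update`` and real cloud config:

.. code-block:: python

    # Plain-Python sketch of the cascading merge; names and data are illustrative.
    import copy

    def recursive_update(dest, upd):
        # Values from ``upd`` win; nested dictionaries are merged key by key.
        for key, val in upd.items():
            if isinstance(val, dict) and isinstance(dest.get(key), dict):
                recursive_update(dest[key], val)
            else:
                dest[key] = val
        return dest

    provider = {'password': '123456', 'minion': {'grains': {'providergrain': True}}}
    profile = {'image': 'rhel6_64Guest', 'minion': {'grains': {'profilegrain': True}}}
    overrides = {'password': '456', 'minion': {'grains': {'mapgrain': True}}}

    nodedata = copy.deepcopy(provider)
    recursive_update(nodedata, profile)
    recursive_update(nodedata, overrides)
    # nodedata now carries all three grains, and the map-level password wins.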

View File

@ -47,6 +47,7 @@ from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
# pylint: enable=no-name-in-module,import-error
log = logging.getLogger(__name__)
MAX_FILENAME_LENGTH = 255
def get_file_client(opts, pillar=False):
@ -832,6 +833,9 @@ class Client(object):
else:
file_name = url_data.path
if len(file_name) > MAX_FILENAME_LENGTH:
file_name = salt.utils.hashutils.sha256_digest(file_name)
return salt.utils.path.join(
cachedir,
'extrn_files',
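
Editor's note: a standalone sketch of the cache-filename capping introduced here, assuming a 255-character filesystem limit and using ``hashlib`` in place of ``salt.utils.hashutils.sha256_digest``:

.. code-block:: python

    import hashlib

    MAX_FILENAME_LENGTH = 255

    def safe_cache_name(file_name):
        # Names longer than the filesystem limit are replaced by their SHA-256
        # hex digest (always 64 characters, still unique per original name).
        if len(file_name) > MAX_FILENAME_LENGTH:
            return hashlib.sha256(file_name.encode('utf-8')).hexdigest()
        return file_name

    print(len(safe_cache_name('A' * 254)))  # 254 -- unchanged
    print(len(safe_cache_name('A' * 300)))  # 64  -- digest substituted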

View File

@ -5,7 +5,12 @@
.. versionadded:: 0.17.0
This module provides a `Sentry`_ logging handler.
This module provides a `Sentry`_ logging handler. Sentry is an open source
error tracking platform that provides deep context about exceptions that
happen in production. Details about stack traces along with the context
variables available at the time of the exception are easily browsable and
filterable from the online interface. For more details please see
`Sentry`_.
.. admonition:: Note
@ -41,6 +46,11 @@
- cpuarch
- ec2.tags.environment
.. admonition:: Note
The ``public_key`` and ``secret_key`` variables are not supported with
Sentry > 3.0. The `DSN`_ key should be used instead.
All the client configuration keys are supported, please see the
`Raven client documentation`_.

View File

@ -551,6 +551,10 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
r = conn.associate_vpc_with_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
log.debug('VPC Association already exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
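
Editor's note: the same idempotent error-handling pattern in isolation, assuming ``botocore`` is available; the function name and retry counter are simplified stand-ins for the surrounding module code:

.. code-block:: python

    import time

    from botocore.exceptions import ClientError

    def associate_vpc(conn, tries=3, **args):
        while tries:
            try:
                return conn.associate_vpc_with_hosted_zone(**args)['ChangeInfo']['Id']
            except ClientError as e:
                code = e.response.get('Error', {}).get('Code')
                if code == 'ConflictingDomainExists':
                    # The association already exists: that is the desired state.
                    return True
                if code == 'Throttling':
                    time.sleep(3)
                    tries -= 1
                    continue
                raise
        return False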

View File

@ -513,10 +513,18 @@ def _run(cmd,
for k, v in six.iteritems(env_runas)
)
env_runas.update(env)
# Fix platforms like Solaris that don't set a USER env var in the
# user's default environment as obtained above.
if env_runas.get('USER') != runas:
env_runas['USER'] = runas
# Fix some corner cases where shelling out to get the user's
# environment returns the wrong home directory.
runas_home = os.path.expanduser('~{0}'.format(runas))
if env_runas.get('HOME') != runas_home:
env_runas['HOME'] = runas_home
env = env_runas
except ValueError as exc:
log.exception('Error raised retrieving environment for user %s', runas)
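
Editor's note: in isolation, the two environment fixes above boil down to the following standard-library calls ('someuser' and the stale values are placeholders):

.. code-block:: python

    import os

    runas = 'someuser'
    env_runas = {'USER': 'root', 'HOME': '/root'}  # stale values from the login shell

    # Platforms such as Solaris may not export USER at all, so force it.
    if env_runas.get('USER') != runas:
        env_runas['USER'] = runas

    # expanduser('~someuser') resolves that user's real home directory.
    runas_home = os.path.expanduser('~{0}'.format(runas))
    if env_runas.get('HOME') != runas_home:
        env_runas['HOME'] = runas_home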

View File

@ -2082,6 +2082,16 @@ def port(name, private_port=None):
name
Container name or ID
.. versionchanged:: Fluorine
This value can now be a pattern expression (using the
pattern-matching characters defined in fnmatch_). If a pattern
expression is used, this function will return a dictionary mapping
container names which match the pattern to the mappings for those
containers. When no pattern expression is used, a dictionary of the
mappings for the specified container name will be returned.
.. _fnmatch: https://docs.python.org/2/library/fnmatch.html
private_port : None
If specified, get information for that specific port. Can be specified
either as a port number (i.e. ``5000``), or as a port number plus the
@ -2104,12 +2114,10 @@ def port(name, private_port=None):
salt myminion docker.port mycontainer 5000
salt myminion docker.port mycontainer 5000/udp
'''
# docker.client.Client.port() doesn't do what we need, so just inspect the
# container and get the information from there. It's what they're already
# doing (poorly) anyway.
mappings = inspect_container(name).get('NetworkSettings', {}).get('Ports', {})
if not mappings:
return {}
pattern_used = bool(re.search(r'[*?\[]', name))
names = fnmatch.filter(list_containers(all=True), name) \
if pattern_used \
else [name]
if private_port is None:
pattern = '*'
@ -2132,7 +2140,17 @@ def port(name, private_port=None):
except AttributeError:
raise SaltInvocationError(err)
return dict((x, mappings[x]) for x in fnmatch.filter(mappings, pattern))
ret = {}
for c_name in names:
# docker.client.Client.port() doesn't do what we need, so just inspect
# the container and get the information from there. It's what they're
# already doing (poorly) anyway.
mappings = inspect_container(c_name).get(
'NetworkSettings', {}).get('Ports', {})
ret[c_name] = dict((x, mappings[x])
for x in fnmatch.filter(mappings, pattern))
return ret.get(name, {}) if not pattern_used else ret
def ps_(filters=None, **kwargs):
@ -3210,6 +3228,7 @@ def run_container(image,
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
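
Editor's note: to make the new pattern behaviour concrete, here is a self-contained approximation of the lookup; the container data is invented for illustration and the real function inspects Docker rather than a dictionary:

.. code-block:: python

    import fnmatch
    import re

    ALL_MAPPINGS = {
        'web1': {'80/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]},
        'web2': {'80/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32769'}]},
        'db1': {'5432/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32770'}]},
    }

    def port(name, private_port=None):
        pattern_used = bool(re.search(r'[*?\[]', name))
        names = fnmatch.filter(ALL_MAPPINGS, name) if pattern_used else [name]
        pattern = '*' if private_port is None else str(private_port)
        ret = {}
        for c_name in names:
            mappings = ALL_MAPPINGS.get(c_name, {})
            ret[c_name] = {x: mappings[x] for x in fnmatch.filter(mappings, pattern)}
        # A plain name collapses to that container's mappings; a pattern keeps the dict.
        return ret.get(name, {}) if not pattern_used else ret

    print(port('web*'))             # {'web1': {...}, 'web2': {...}}
    print(port('db1', '5432/tcp'))  # {'5432/tcp': [{...}]}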

View File

@ -9,10 +9,12 @@ Glance module for interacting with OpenStack Glance
Example configuration
.. code-block:: yaml
glance:
cloud: default
.. code-block:: yaml
glance:
auth:
username: admin

View File

@ -9,10 +9,12 @@ Keystone module for interacting with OpenStack Keystone
Example configuration
.. code-block:: yaml
keystone:
cloud: default
.. code-block:: yaml
keystone:
auth:
username: admin

View File

@ -9,10 +9,12 @@ Neutron module for interacting with OpenStack Neutron
Example configuration
.. code-block:: yaml
neutron:
cloud: default
.. code-block:: yaml
neutron:
auth:
username: admin

View File

@ -460,13 +460,10 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
Path to requirements
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
.. note::
If installing into a virtualenv, just use the path to the
virtualenv (e.g. ``/home/code/path/to/virtualenv/``)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
specified, it is assumed to be a virtualenv.
use_wheel
Prefer wheel archives (requires pip>=1.4)
@ -569,7 +566,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
pre_releases
Include pre-releases in the available versions
@ -941,36 +938,38 @@ def uninstall(pkgs=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages with pip
Uninstall packages individually or from a pip requirements file. Uninstall
packages globally or from a virtualenv.
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
path to requirements.
Path to requirements file
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
@ -982,7 +981,6 @@ def uninstall(pkgs=None,
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
cmd = _get_pip_bin(bin_env)
cmd.extend(['uninstall', '-y'])
@ -1065,32 +1063,27 @@ def freeze(bin_env=None,
virtualenv
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
specified, it is assumed to be a virtualenv.
user
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
.. note::
If the version of pip available is older than 8.0.3, the list will not
include the packages pip, wheel, setuptools, or distribute even if they
are installed.
include the packages ``pip``, ``wheel``, ``setuptools``, or
``distribute`` even if they are installed.
CLI Example:
.. code-block:: bash
salt '*' pip.freeze /home/code/path/to/virtualenv/
.. versionchanged:: 2016.11.2
The packages pip, wheel, setuptools, and distribute are included if the
installed pip is new enough.
salt '*' pip.freeze bin_env=/home/code/path/to/virtualenv
'''
cmd = _get_pip_bin(bin_env)
cmd.append('freeze')
@ -1135,21 +1128,16 @@ def list_(prefix=None,
.. note::
If the version of pip available is older than 8.0.3, the packages
wheel, setuptools, and distribute will not be reported by this function
even if they are installed. Unlike
:py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
reports the version of pip which is installed.
``wheel``, ``setuptools``, and ``distribute`` will not be reported by
this function even if they are installed. Unlike :py:func:`pip.freeze
<salt.modules.pip.freeze>`, this function always reports the version of
pip which is installed.
CLI Example:
.. code-block:: bash
salt '*' pip.list salt
.. versionchanged:: 2016.11.2
The packages wheel, setuptools, and distribute are included if the
installed pip is new enough.
'''
packages = {}
@ -1458,9 +1446,10 @@ def list_all_versions(pkg,
The package to check
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
specified, it is assumed to be a virtualenv.
include_alpha
Include alpha versions in the list
@ -1475,7 +1464,7 @@ def list_all_versions(pkg,
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
index_url
Base URL of Python Package Index
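
Editor's note: a hedged sketch of the ``bin_env`` convention these docstrings describe — a directory is treated as a virtualenv, anything else as an explicit pip executable. This is an illustration only, not the module's actual helper, and it ignores the Windows ``Scripts`` layout:

.. code-block:: python

    import os

    def resolve_pip(bin_env=None):
        if not bin_env:
            return 'pip'                                 # system pip
        if os.path.isdir(bin_env):
            return os.path.join(bin_env, 'bin', 'pip')   # assume a virtualenv
        return bin_env                                   # e.g. /usr/bin/pip-2.7

    print(resolve_pip('/usr/bin/pip-2.7'))
    print(resolve_pip('/home/code/path/to/virtualenv'))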

View File

@ -989,7 +989,7 @@ def diskusage(*args):
elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):
ifile = __salt__['cmd.run']('mount -p').splitlines()
else:
ifile = []
raise CommandExecutionError('status.diskusage not yet supported on this platform')
for line in ifile:
comps = line.split()

View File

@ -331,6 +331,7 @@ def version(*names, **kwargs):
dict: The package name(s) with the installed versions.
.. code-block:: cfg
{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]}

View File

@ -236,16 +236,7 @@ def get_grains():
'''
Retrieve facts from the network device.
'''
refresh_needed = False
refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}))
refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}).get('result', False))
refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}).get('out', {}))
if refresh_needed:
facts = call('get_facts', **{})
DETAILS['grains_cache'] = facts
return DETAILS.get('grains_cache', {})
return call('get_facts', **{})
def grains_refresh():

View File

@ -11,6 +11,7 @@
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import yaml
from yaml.constructor import ConstructorError
@ -22,6 +23,8 @@ from salt.utils.odict import OrderedDict
__all__ = ['deserialize', 'serialize', 'available']
log = logging.getLogger(__name__)
available = True
# prefer C bindings over python when available
@ -46,14 +49,17 @@ def deserialize(stream_or_string, **options):
try:
return yaml.load(stream_or_string, **options)
except ScannerError as error:
log.exception('Error encountered while deserializing')
err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
line_num = error.problem_mark.line + 1
raise DeserializationError(err_type,
line_num,
error.problem_mark.buffer)
except ConstructorError as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
except Exception as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
@ -74,6 +80,7 @@ def serialize(obj, **options):
return response[:-1]
return response
except Exception as error:
log.exception('Error encountered while serializing')
raise SerializationError(error)
@ -108,7 +115,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
Loader.add_multi_constructor('tag:yaml.org,2002:str', Loader.construct_yaml_str)
Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_multi_constructor(None, Loader.construct_undefined)
class Dumper(BaseDumper): # pylint: disable=W0232

View File

@ -150,14 +150,17 @@ def deserialize(stream_or_string, **options):
try:
return yaml.load(stream_or_string, **options)
except ScannerError as error:
log.exception('Error encountered while deserializing')
err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
line_num = error.problem_mark.line + 1
raise DeserializationError(err_type,
line_num,
error.problem_mark.buffer)
except ConstructorError as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
except Exception as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
@ -178,6 +181,7 @@ def serialize(obj, **options):
return response[:-1]
return response
except Exception as error:
log.exception('Error encountered while serializing')
raise SerializationError(error)
@ -322,7 +326,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:pairs', Loader.construct_yaml_pa
Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_multi_constructor(None, Loader.construct_undefined)
class SLSMap(OrderedDict):

View File

@ -3443,43 +3443,45 @@ class BaseHighState(object):
'Specified SLS {0} on local filesystem cannot '
'be found.'.format(sls)
)
state = None
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master or through a configured '
'fileserver'.format(sls, saltenv)
)
state = None
try:
state = compile_template(fn_,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv,
sls,
rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
else:
try:
state = compile_template(fn_,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv,
sls,
rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append(
@ -3902,7 +3904,8 @@ class BaseHighState(object):
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file or master_tops data matches found.'
msg = ('No Top file or master_tops data matches found. Please see '
'master log for details.')
ret[tag_name]['comment'] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)

View File

@ -349,9 +349,10 @@ def present(name,
# Only add to the changes dict if layers were pulled
ret['changes'] = image_update
error = False
try:
__salt__['docker.inspect_image'](full_image)
error = False
except CommandExecutionError as exc:
msg = exc.__str__()
if '404' not in msg:

View File

@ -1269,6 +1269,9 @@ def symlink(
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
An absolute path OR a basename file/directory name must be provided.
The latter will be placed relative to the symlink destination's parent
directory.
makedirs
If the location of the symlink does not already have a parent directory
@ -1400,15 +1403,32 @@ def symlink(
elif os.path.isfile(name) or os.path.isdir(name):
# It is not a link, but a file or dir
if backupname is not None:
if not os.path.isabs(backupname):
if backupname == os.path.basename(backupname):
backupname = os.path.join(
os.path.dirname(os.path.normpath(name)),
backupname)
else:
return _error(ret, (('Backupname must be an absolute path '
'or a file name: {0}').format(backupname)))
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
return _error(ret, (('Symlink & backup dest exists and Force not set.'
' {0} -> {1} - backup: {2}').format(
name, target, backupname)))
else:
__salt__['file.remove'](backupname)
os.rename(name, backupname)
try:
__salt__['file.move'](name, backupname)
except Exception as exc:
ret['changes'] = {}
log.debug(
'Encountered error renaming %s to %s',
name, backupname, exc_info=True
)
return _error(ret, ('Unable to rename {0} to backup {1} -> '
': {2}'.format(name, backupname, exc)))
elif force:
# Remove whatever is in the way
if __salt__['file.is_link'](name):
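
Editor's note: the backupname resolution rule added above, extracted into a standalone sketch:

.. code-block:: python

    import os

    def resolve_backupname(name, backupname):
        if os.path.isabs(backupname):
            return backupname
        if backupname == os.path.basename(backupname):
            # A bare file name lands next to the symlink's parent directory,
            # e.g. name='/etc/motd', backupname='SALT' -> '/etc/SALT'.
            return os.path.join(os.path.dirname(os.path.normpath(name)), backupname)
        raise ValueError(
            'Backupname must be an absolute path or a file name: {0}'.format(backupname))

    print(resolve_backupname('/etc/motd', 'SALT'))       # /etc/SALT
    print(resolve_backupname('/etc/motd', '/tmp/SALT'))  # /tmp/SALT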

View File

@ -235,7 +235,7 @@ def present(name,
salt.utils.versions.warn_until(
'Neon',
'The \'prune_services\' argument default is currently True, '
'but will be changed to True in future releases.')
'but will be changed to False in the Neon release.')
ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,

View File

@ -48,6 +48,7 @@ def present(name,
**Example:**
.. code-block:: yaml
example user present in influxdb:
influxdb_user.present:
- name: example

View File

@ -1088,7 +1088,7 @@ class TargetOptionsMixIn(six.with_metaclass(MixInMeta, object)):
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, take a comma or space delimited list of '
'servers, take a comma or whitespace delimited list of '
'servers.')
)
group.add_option(

View File

@ -14,7 +14,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs

View File

@ -15,7 +15,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs

View File

@ -4,7 +4,7 @@
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -4,7 +4,7 @@
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -49,7 +49,9 @@ class StatusModuleTest(ModuleCase):
status.diskusage
'''
ret = self.run_function('status.diskusage')
if salt.utils.platform.is_windows():
if salt.utils.platform.is_darwin():
self.assertIn('not yet supported on this platform', ret)
elif salt.utils.platform.is_windows():
self.assertTrue(isinstance(ret['percent'], float))
else:
self.assertIn('total', str(ret))

View File

@ -3,7 +3,7 @@
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.states.pip
tests.integration.states.pip_state
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
@ -300,7 +300,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
# pip install passing the package name in `name`
ret = self.run_state(
'pip.installed', name='pep8', user=username, bin_env=venv_dir,
no_cache_dir=True, password='PassWord1!')
password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:
@ -350,7 +350,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state(
'pip.installed', name='', user=username, bin_env=venv_dir,
requirements='salt://issue-6912-requirements.txt',
no_cache_dir=True, password='PassWord1!')
password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:

View File

@ -20,6 +20,7 @@ import tempfile
import time
import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.psutil_compat as psutils
import salt.utils.yaml
@ -28,6 +29,7 @@ from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from tests.support.unit import TestCase
from tests.support.helpers import win32_kill_process_tree
from tests.support.paths import CODE_DIR
from tests.support.processes import terminate_process, terminate_process_list
@ -413,9 +415,6 @@ class TestProgram(six.with_metaclass(TestProgramMeta, object)):
popen_kwargs['preexec_fn'] = detach_from_parent_group
elif sys.platform.lower().startswith('win') and timeout is not None:
raise RuntimeError('Timeout is not supported under windows')
self.argv = [self.program]
self.argv.extend(args)
log.debug('TestProgram.run: %s Environment %s', self.argv, env_delta)
@ -430,16 +429,26 @@ class TestProgram(six.with_metaclass(TestProgramMeta, object)):
if datetime.now() > stop_at:
if term_sent is False:
# Kill the process group since sending the term signal
# would only terminate the shell, not the command
# executed in the shell
os.killpg(os.getpgid(process.pid), signal.SIGINT)
term_sent = True
continue
if salt.utils.platform.is_windows():
_, alive = win32_kill_process_tree(process.pid)
if alive:
log.error("Child processes still alive: %s", alive)
else:
# Kill the process group since sending the term signal
# would only terminate the shell, not the command
# executed in the shell
os.killpg(os.getpgid(process.pid), signal.SIGINT)
term_sent = True
continue
try:
# As a last resort, kill the process group
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
if salt.utils.platform.is_windows():
_, alive = win32_kill_process_tree(process.pid)
if alive:
log.error("Child processes still alive: %s", alive)
else:
# As a last resort, kill the process group
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
process.wait()
except OSError as exc:
if exc.errno != errno.ESRCH:

View File

@ -743,6 +743,9 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
with TestDaemon(self):
if self.options.name:
for name in self.options.name:
name = name.strip()
if not name:
continue
if os.path.isfile(name):
if not name.endswith('.py'):
continue

View File

@ -29,13 +29,14 @@ from datetime import datetime, timedelta
# Import salt testing libs
from tests.support.unit import TestCase
from tests.support.helpers import RedirectStdStreams, requires_sshd_server
from tests.support.helpers import (
RedirectStdStreams, requires_sshd_server, win32_kill_process_tree
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.paths import ScriptPathMixin, INTEGRATION_TEST_DIR, CODE_DIR, PYEXEC, SCRIPT_DIR
# Import 3rd-party libs
import salt.utils.json
from salt.ext import six
from salt.ext.six.moves import cStringIO # pylint: disable=import-error
@ -287,11 +288,11 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
popen_kwargs['preexec_fn'] = detach_from_parent_group
elif sys.platform.lower().startswith('win') and timeout is not None:
raise RuntimeError('Timeout is not supported under windows')
process = subprocess.Popen(cmd, **popen_kwargs)
# Late import
import salt.utils.platform
if timeout is not None:
stop_at = datetime.now() + timedelta(seconds=timeout)
term_sent = False
@ -303,13 +304,23 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
# Kill the process group since sending the term signal
# would only terminate the shell, not the command
# executed in the shell
os.killpg(os.getpgid(process.pid), signal.SIGINT)
if salt.utils.platform.is_windows():
_, alive = win32_kill_process_tree(process.pid)
if alive:
log.error("Child processes still alive: %s", alive)
else:
os.killpg(os.getpgid(process.pid), signal.SIGINT)
term_sent = True
continue
try:
# As a last resort, kill the process group
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
if salt.utils.platform.is_windows():
_, alive = win32_kill_process_tree(process.pid)
if alive:
log.error("Child processes still alive: %s", alive)
else:
os.killpg(os.getpgid(process.pid), signal.SIGINT)
except OSError as exc:
if exc.errno != errno.ESRCH:
# If errno is not "no such process", raise
@ -855,6 +866,10 @@ class SSHCase(ShellCase):
wipe=wipe, raw=raw)
log.debug('SSHCase run_function executed %s with arg %s', function, arg)
log.debug('SSHCase JSON return: %s', ret)
# Late import
import salt.utils.json
try:
return salt.utils.json.loads(ret)['localhost']
except Exception:

View File

@ -1572,3 +1572,23 @@ class Webserver(object):
'''
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
def win32_kill_process_tree(pid, sig=signal.SIGTERM, include_parent=True,
timeout=None, on_terminate=None):
'''
Kill a process tree (including grandchildren) with signal "sig" and return
a (gone, still_alive) tuple. "on_terminate", if specified, is a callback
function which is called as soon as a child terminates.
'''
if pid == os.getpid():
raise RuntimeError("I refuse to kill myself")
parent = psutil.Process(pid)
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for p in children:
p.send_signal(sig)
gone, alive = psutil.wait_procs(children, timeout=timeout,
callback=on_terminate)
return (gone, alive)
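
Editor's note: a possible usage example for the new helper, assuming the function defined above is in scope; the child process is a throwaway placeholder and ``psutil`` must be installed:

.. code-block:: python

    import signal
    import subprocess
    import sys

    # Spawn a throwaway child process, then terminate its whole tree.
    proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
    gone, alive = win32_kill_process_tree(proc.pid, sig=signal.SIGTERM, timeout=5)
    if alive:
        print('Processes still alive: {0}'.format(alive))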

View File

@ -1,8 +1,6 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: `Anthony Shaw <anthonyshaw@apache.org>`
tests.unit.cloud.clouds.dimensiondata_test
tests.unit.cloud.test_libcloudfuncs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''

View File

@ -0,0 +1,203 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Radman <ericshane@eradman.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
# Import Salt libs
import salt.cloud
EXAMPLE_PROVIDERS = {
'nyc_vcenter': {'vmware': {'driver': 'vmware',
'password': '123456',
'url': 'vca1.saltstack.com',
'minion': {
'master': 'providermaster',
'grains': {
'providergrain': True
}
},
'profiles': {},
'user': 'root'}},
'nj_vcenter': {'vmware': {'driver': 'vmware',
'password': '333',
'profiles': {},
'minion': {
'master': 'providermaster',
'grains': {
'providergrain': True
}
},
'image': 'rhel6_64prod',
'url': 'vca2.saltstack.com',
'user': 'root'}}
}
EXAMPLE_PROFILES = {
'nyc-vm': {'cluster': 'nycvirt',
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 20}},
'network': {'Network Adapter 1': {'mac': '88:88:88:88:88:42',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'minion': {
'master': 'profilemaster',
'grains': {
'profilegrain': True
}
},
'memory': '8GB',
'num_cpus': 2,
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nyc_vcenter:vmware',
'resourcepool': 'Resources'},
'nj-vm': {'cluster': 'njvirt',
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '8GB',
'num_cpus': 2,
'power_on': True,
'profile': 'nj-vm',
'provider': 'nj_vcenter:vmware',
'resourcepool': 'Resources'},
}
EXAMPLE_MAP = {
'nyc-vm': {'db1': {'cpus': 4,
'devices': {'disk': {'Hard disk 1': {'size': 40}},
'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb'}}},
'memory': '16GB',
'minion': {
'master': 'mapmaster',
'grains': {
'mapgrain': True
}
},
'name': 'db1'},
'db2': {'name': 'db2',
'password': '456',
'provider': 'nj_vcenter:vmware'}},
'nj-vm': {'db3': {'name': 'db3',
'password': '789',
}}
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MapConfTest(TestCase):
'''
Validate evaluation of salt-cloud map configuration
'''
def test_cloud_map_merge_conf(self):
'''
Ensure that nested values can be selectively overridden in a map file
'''
with patch('salt.config.check_driver_dependencies', MagicMock(return_value=True)), \
patch('salt.cloud.Map.read', MagicMock(return_value=EXAMPLE_MAP)):
self.maxDiff = None
opts = {'extension_modules': '/var/cache/salt/master/extmods',
'providers': EXAMPLE_PROVIDERS, 'profiles': EXAMPLE_PROFILES}
cloud_map = salt.cloud.Map(opts)
merged_profile = {
'create': {'db1': {'cluster': 'nycvirt',
'cpus': 4,
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 40}},
'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'driver': 'vmware',
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '16GB',
'minion': {'grains': {'mapgrain': True,
'profilegrain': True,
'providergrain': True},
'master': 'mapmaster'},
'name': 'db1',
'num_cpus': 2,
'password': '123456',
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nyc_vcenter:vmware',
'resourcepool': 'Resources',
'url': 'vca1.saltstack.com',
'user': 'root'},
'db2': {'cluster': 'nycvirt',
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 20}},
'network': {'Network Adapter 1': {'mac': '88:88:88:88:88:42',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'driver': 'vmware',
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '8GB',
'minion': {'grains': {'profilegrain': True,
'providergrain': True},
'master': 'profilemaster'},
'name': 'db2',
'num_cpus': 2,
'password': '456',
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nj_vcenter:vmware',
'resourcepool': 'Resources',
'url': 'vca2.saltstack.com',
'user': 'root'},
'db3': {'cluster': 'njvirt',
'driver': 'vmware',
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '8GB',
'minion': {'grains': {'providergrain': True},
'master': 'providermaster'},
'name': 'db3',
'num_cpus': 2,
'password': '789',
'power_on': True,
'profile': 'nj-vm',
'provider': 'nj_vcenter:vmware',
'resourcepool': 'Resources',
'url': 'vca2.saltstack.com',
'user': 'root'}}
}
# what we assert above w.r.t db2 using nj_vcenter:vmware provider:
# - url is from the overridden nj_vcenter provider, not nyc_vcenter
# - image from provider is still overridden by the nyc-vm profile
# - password from map override is still overriding both the provider and profile password
#
# what we assert above about grain handling ( and provider/profile/map data in general )
# - provider grains are able to be overridden by profile data
# - provider grains are overridden by map data
# - profile data is overridden by map data
# ie, the provider->profile->map inheritance works as expected
map_data = cloud_map.map_data()
self.assertEqual(map_data, merged_profile)

View File

@ -889,9 +889,9 @@ SwapTotal: 4789244 kB'''
test virtual grain with cmd virt-what
'''
virt = 'kvm'
with patch.object(salt.utils, 'is_windows',
with patch.object(salt.utils.platform, 'is_windows',
MagicMock(return_value=False)):
with patch.object(salt.utils, 'which',
with patch.object(salt.utils.path, 'which',
MagicMock(return_value=True)):
with patch.dict(core.__salt__, {'cmd.run_all':
MagicMock(return_value={'pid': 78,

View File

@ -1053,3 +1053,104 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
call('prune_volumes', filters={'label': ['foo', 'bar=baz']}),
]
)
def test_port(self):
'''
Test docker.port function. Note that this test case does not test what
happens when a specific container name is passed and that container
does not exist. When that happens, the Docker API will just raise a 404
error. Since we're using a side_effect to mock docker.inspect_container,
it would be meaningless to code it to raise an exception and then test
that we raised that exception.
'''
ports = {
'foo': {
'5555/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32768'}
],
'6666/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32769'}
],
},
'bar': {
'4444/udp': [
{'HostIp': '0.0.0.0', 'HostPort': '32767'}
],
'5555/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32768'}
],
'6666/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32769'}
],
},
'baz': {
'5555/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32768'}
],
'6666/udp': [
{'HostIp': '0.0.0.0', 'HostPort': '32769'}
],
},
}
list_mock = MagicMock(return_value=['bar', 'baz', 'foo'])
inspect_mock = MagicMock(
side_effect=lambda x: {'NetworkSettings': {'Ports': ports.get(x)}}
)
with patch.object(docker_mod, 'list_containers', list_mock), \
patch.object(docker_mod, 'inspect_container', inspect_mock):
# Test with specific container name
ret = docker_mod.port('foo')
self.assertEqual(ret, ports['foo'])
# Test with specific container name and filtering on port
ret = docker_mod.port('foo', private_port='5555/tcp')
self.assertEqual(ret, {'5555/tcp': ports['foo']['5555/tcp']})
# Test using pattern expression
ret = docker_mod.port('ba*')
self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
ret = docker_mod.port('ba?')
self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
ret = docker_mod.port('ba[rz]')
self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
# Test using pattern expression and port filtering
ret = docker_mod.port('ba*', private_port='6666/tcp')
self.assertEqual(
ret,
{'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
)
ret = docker_mod.port('ba?', private_port='6666/tcp')
self.assertEqual(
ret,
{'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
)
ret = docker_mod.port('ba[rz]', private_port='6666/tcp')
self.assertEqual(
ret,
{'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
)
ret = docker_mod.port('*')
self.assertEqual(ret, ports)
ret = docker_mod.port('*', private_port='5555/tcp')
self.assertEqual(
ret,
{'foo': {'5555/tcp': ports['foo']['5555/tcp']},
'bar': {'5555/tcp': ports['bar']['5555/tcp']},
'baz': {'5555/tcp': ports['baz']['5555/tcp']}}
)
ret = docker_mod.port('*', private_port=6666)
self.assertEqual(
ret,
{'foo': {'6666/tcp': ports['foo']['6666/tcp']},
'bar': {'6666/tcp': ports['bar']['6666/tcp']},
'baz': {'6666/udp': ports['baz']['6666/udp']}}
)
ret = docker_mod.port('*', private_port='6666/tcp')
self.assertEqual(
ret,
{'foo': {'6666/tcp': ports['foo']['6666/tcp']},
'bar': {'6666/tcp': ports['bar']['6666/tcp']},
'baz': {}}
)

View File

@ -262,6 +262,26 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
group=group, backupname='SALT'),
ret)
with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t,
'file.user_to_uid': mock_uid,
'file.group_to_gid': mock_gid,
'file.is_link': mock_f,
'file.readlink': mock_target,
'user.info': mock_empty,
'user.current': mock_user}):
with patch.dict(filestate.__opts__, {'test': False}):
with patch.object(os.path, 'isabs', mock_t):
with patch.object(os.path, 'isabs', mock_f):
comt = ('Backupname must be an absolute path '
'or a file name: {0}').format('tmp/SALT')
ret.update({'comment': comt,
'result': False,
'pchanges': {'new': name}})
self.assertDictEqual(filestate.symlink
(name, target, user=user,
group=group, backupname='tmp/SALT'),
ret)
with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t,
'file.user_to_uid': mock_uid,
'file.group_to_gid': mock_gid,

View File

@ -50,3 +50,14 @@ class FileclientTestCase(TestCase):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_extrn_path_with_long_filename(self):
safe_file_name = os.path.split(Client(self.opts)._extrn_path('https://test.com/' + ('A' * 254), 'base'))[-1]
assert safe_file_name == 'A' * 254
oversized_file_name = os.path.split(Client(self.opts)._extrn_path('https://test.com/' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_name) < 256
assert oversized_file_name != 'A' * 255
oversized_file_with_query_params = os.path.split(Client(self.opts)._extrn_path('https://test.com/file?' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_with_query_params) < 256

View File

@ -1,79 +0,0 @@
# -*- coding: utf-8 -*-
'''
tests.unit.file_test
~~~~~~~~~~~~~~~~~~~~
'''
# Import pytohn libs
from __future__ import absolute_import
import os
import copy
import shutil
import tempfile
# Import Salt Testing libs
from tests.support.unit import TestCase
# Import Salt libs
from salt.ext import six
import salt.utils.files
class FilesTestCase(TestCase):
STRUCTURE = {
'foo': {
'foofile.txt': 'fooSTRUCTURE'
},
'bar': {
'barfile.txt': 'barSTRUCTURE'
}
}
def _create_temp_structure(self, temp_directory, structure):
for folder, files in six.iteritems(structure):
current_directory = os.path.join(temp_directory, folder)
os.makedirs(current_directory)
for name, content in six.iteritems(files):
path = os.path.join(temp_directory, folder, name)
with salt.utils.files.fopen(path, 'w+') as fh:
fh.write(content)
def _validate_folder_structure_and_contents(self, target_directory,
desired_structure):
for folder, files in six.iteritems(desired_structure):
for name, content in six.iteritems(files):
path = os.path.join(target_directory, folder, name)
with salt.utils.files.fopen(path) as fh:
assert fh.read().strip() == content
def setUp(self):
super(FilesTestCase, self).setUp()
self.temp_dir = tempfile.mkdtemp()
self._create_temp_structure(self.temp_dir,
self.STRUCTURE)
def tearDown(self):
super(FilesTestCase, self).tearDown()
shutil.rmtree(self.temp_dir)
def test_recursive_copy(self):
test_target_directory = tempfile.mkdtemp()
TARGET_STRUCTURE = {
'foo': {
'foo.txt': 'fooTARGET_STRUCTURE'
},
'baz': {
'baz.txt': 'bazTARGET_STRUCTURE'
}
}
self._create_temp_structure(test_target_directory, TARGET_STRUCTURE)
try:
salt.utils.files.recursive_copy(self.temp_dir, test_target_directory)
DESIRED_STRUCTURE = copy.copy(TARGET_STRUCTURE)
DESIRED_STRUCTURE.update(self.STRUCTURE)
self._validate_folder_structure_and_contents(
test_target_directory,
DESIRED_STRUCTURE
)
finally:
shutil.rmtree(test_target_directory)

View File

@ -1,115 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Radman <ericshane@eradman.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
# Import Salt libs
import salt.cloud
EXAMPLE_PROVIDERS = {
'nyc_vcenter': {'vmware': {'driver': 'vmware',
'password': '123456',
'profiles': {'nyc-vm': {'cluster': 'nycvirt',
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 20}},
'network': {'Network Adapter 1': {'mac': '44:44:44:44:44:42',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '8GB',
'num_cpus': 2,
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nyc_vcenter:vmware',
'resourcepool': 'Resources'}},
'url': 'vca1.saltstack.com',
'user': 'root'}}
}
EXAMPLE_PROFILES = {
'nyc-vm': {'cluster': 'nycvirt',
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 20}},
'network': {'Network Adapter 1': {'mac': '44:44:44:44:44:42',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '8GB',
'num_cpus': 2,
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nyc_vcenter:vmware',
'resourcepool': 'Resources'}
}
EXAMPLE_MAP = {
'nyc-vm': {'db1': {'cpus': 4,
'devices': {'disk': {'Hard disk 1': {'size': 40}},
'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb'}}},
'memory': '16GB',
'name': 'db1'}}
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MapConfTest(TestCase):
'''
Validate evaluation of salt-cloud map configuration
'''
def test_cloud_map_merge_conf(self):
'''
Ensure that nested values can be selectivly overridden in a map file
'''
with patch('salt.config.check_driver_dependencies', MagicMock(return_value=True)), \
patch('salt.cloud.Map.read', MagicMock(return_value=EXAMPLE_MAP)):
self.maxDiff = None
opts = {'extension_modules': '/var/cache/salt/master/extmods',
'providers': EXAMPLE_PROVIDERS, 'profiles': EXAMPLE_PROFILES}
cloud_map = salt.cloud.Map(opts)
merged_profile = {
'create': {'db1': {'cluster': 'nycvirt',
'cpus': 4,
'datastore': 'datastore1',
'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
'size': 40}},
'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb',
'name': 'vlan50',
'switch_type': 'standard'}},
'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
'driver': 'vmware',
'extra_config': {'mem.hotadd': 'yes'},
'folder': 'coreinfra',
'image': 'rhel6_64Guest',
'memory': '16GB',
'name': 'db1',
'num_cpus': 2,
'password': '123456',
'power_on': True,
'profile': 'nyc-vm',
'provider': 'nyc_vcenter:vmware',
'resourcepool': 'Resources',
'url': 'vca1.saltstack.com',
'user': 'root'}}
}
self.assertEqual(cloud_map.map_data(), merged_profile)

View File

@ -1,175 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Mike Place <mp@saltstack.com>`
tests.unit.target_test
~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Python libs
from __future__ import absolute_import
import sys
# Import Salt libs
import salt.utils.minions
import salt.config
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
import logging
log = logging.getLogger(__name__)
class CkMinionTestCase(TestCase):
def setUp(self):
self.ck_ = salt.utils.minions.CkMinions(salt.config.DEFAULT_MASTER_OPTS)
def tearDown(self):
self.ck_ = None
#TODO This is just a stub for upcoming tests
@skipIf(sys.version_info < (2, 7), 'Python 2.7 needed for dictionary equality assertions')
class TargetParseTestCase(TestCase):
def test_parse_grains_target(self):
'''
Ensure proper parsing for grains
'''
g_tgt = 'G@a:b'
ret = salt.utils.minions.parse_target(g_tgt)
self.assertDictEqual(ret, {'engine': 'G', 'pattern': 'a:b', 'delimiter': None})
def test_parse_grains_pcre_target(self):
'''
Ensure proper parsing for grains PCRE matching
'''
p_tgt = 'P@a:b'
ret = salt.utils.minions.parse_target(p_tgt)
self.assertDictEqual(ret, {'engine': 'P', 'pattern': 'a:b', 'delimiter': None})
def test_parse_pillar_pcre_target(self):
'''
Ensure proper parsing for pillar PCRE matching
'''
j_tgt = 'J@a:b'
ret = salt.utils.minions.parse_target(j_tgt)
self.assertDictEqual(ret, {'engine': 'J', 'pattern': 'a:b', 'delimiter': None})
def test_parse_list_target(self):
'''
Ensure proper parsing for list matching
'''
l_tgt = 'L@a:b'
ret = salt.utils.minions.parse_target(l_tgt)
self.assertDictEqual(ret, {'engine': 'L', 'pattern': 'a:b', 'delimiter': None})
def test_parse_nodegroup_target(self):
'''
Ensure proper parsing for pillar matching
'''
n_tgt = 'N@a:b'
ret = salt.utils.minions.parse_target(n_tgt)
self.assertDictEqual(ret, {'engine': 'N', 'pattern': 'a:b', 'delimiter': None})
def test_parse_subnet_target(self):
'''
Ensure proper parsing for subnet matching
'''
s_tgt = 'S@a:b'
ret = salt.utils.minions.parse_target(s_tgt)
self.assertDictEqual(ret, {'engine': 'S', 'pattern': 'a:b', 'delimiter': None})
def test_parse_minion_pcre_target(self):
'''
Ensure proper parsing for minion PCRE matching
'''
e_tgt = 'E@a:b'
ret = salt.utils.minions.parse_target(e_tgt)
self.assertDictEqual(ret, {'engine': 'E', 'pattern': 'a:b', 'delimiter': None})
def test_parse_range_target(self):
'''
Ensure proper parsing for range matching
'''
r_tgt = 'R@a:b'
ret = salt.utils.minions.parse_target(r_tgt)
self.assertDictEqual(ret, {'engine': 'R', 'pattern': 'a:b', 'delimiter': None})
def test_parse_multiword_target(self):
'''
Ensure proper parsing for multi-word targets
Refs https://github.com/saltstack/salt/issues/37231
'''
mw_tgt = 'G@a:b c'
ret = salt.utils.minions.parse_target(mw_tgt)
self.assertEqual(ret['pattern'], 'a:b c')
class NodegroupCompTest(TestCase):
'''
Test nodegroup comparisons found in
salt.utils.minions.nodgroup_comp()
'''
def test_simple_nodegroup(self):
'''
Smoke test a very simple nodegroup. No recursion.
'''
simple_nodegroup = {'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com'}
ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
expected_ret = ['L@foo.domain.com,bar.domain.com,baz.domain.com', 'or', 'bl*.domain.com']
self.assertListEqual(ret, expected_ret)
def test_simple_expression_nodegroup(self):
'''
Smoke test a nodegroup with a simple expression. No recursion.
'''
simple_nodegroup = {'group1': '[foo,bar,baz].domain.com'}
ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
expected_ret = ['E@[foo,bar,baz].domain.com']
self.assertListEqual(ret, expected_ret)
def test_simple_recurse(self):
'''
Test a case where one nodegroup contains a second nodegroup
'''
referenced_nodegroups = {
'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com',
'group2': 'G@os:Debian and N@group1'
}
ret = salt.utils.minions.nodegroup_comp('group2', referenced_nodegroups)
expected_ret = [
'(',
'G@os:Debian',
'and',
'(',
'L@foo.domain.com,bar.domain.com,baz.domain.com',
'or',
'bl*.domain.com',
')',
')'
]
self.assertListEqual(ret, expected_ret)
def test_circular_nodegroup_reference(self):
'''
Test to see what happens if A refers to B
and B in turn refers back to A
'''
referenced_nodegroups = {
'group1': 'N@group2',
'group2': 'N@group1'
}
# If this works, it should also print an error to the console
ret = salt.utils.minions.nodegroup_comp('group1', referenced_nodegroups)
self.assertEqual(ret, [])
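The helpers exercised above can also be tried directly from a Python shell. The snippet below is a minimal sketch, assuming the salt package from this branch is importable; the nodegroup names and minion IDs are illustrative, and the expected outputs are inferred from the unit tests above rather than guaranteed.

# Sketch: exercising salt.utils.minions interactively (illustrative values only).
from __future__ import print_function
import salt.utils.minions

# parse_target() splits a compound token into engine, pattern and delimiter.
print(salt.utils.minions.parse_target('G@os:Debian'))
# expected, per the tests above:
# {'engine': 'G', 'pattern': 'os:Debian', 'delimiter': None}

# nodegroup_comp() expands a nodegroup into compound-matcher tokens and
# recurses into referenced nodegroups (N@...), parenthesising the expansion.
nodegroups = {
    'web': 'L@web1.example.com,web2.example.com',
    'debian-web': 'G@os:Debian and N@web',
}
print(salt.utils.minions.nodegroup_comp('debian-web', nodegroups))
# expected, per the tests above:
# ['(', 'G@os:Debian', 'and', '(', 'L@web1.example.com,web2.example.com', ')', ')']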
View File
@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
'''
Unit Tests for functions located in salt.utils.files.py.
Unit Tests for functions located in salt/utils/files.py
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import copy
import os
# Import Salt libs
@ -21,7 +22,7 @@ from tests.support.mock import (
)
class FilesUtilTestCase(TestCase):
class FilesTestCase(TestCase):
'''
Test case for files util.
'''
@ -94,3 +95,54 @@ class FilesUtilTestCase(TestCase):
'fopen() should have been prevented from opening a file '
'using {0} as the filename'.format(invalid_fn)
)
def _create_temp_structure(self, temp_directory, structure):
for folder, files in six.iteritems(structure):
current_directory = os.path.join(temp_directory, folder)
os.makedirs(current_directory)
for name, content in six.iteritems(files):
path = os.path.join(temp_directory, folder, name)
with salt.utils.files.fopen(path, 'w+') as fh:
fh.write(content)
def _validate_folder_structure_and_contents(self, target_directory,
desired_structure):
for folder, files in six.iteritems(desired_structure):
for name, content in six.iteritems(files):
path = os.path.join(target_directory, folder, name)
with salt.utils.files.fopen(path) as fh:
assert fh.read().strip() == content
@with_tempdir()
@with_tempdir()
def test_recursive_copy(self, src, dest):
src_structure = {
'foo': {
'foofile.txt': 'fooSTRUCTURE'
},
'bar': {
'barfile.txt': 'barSTRUCTURE'
}
}
dest_structure = {
'foo': {
'foo.txt': 'fooTARGET_STRUCTURE'
},
'baz': {
'baz.txt': 'bazTARGET_STRUCTURE'
}
}
# Create the file structures in both src and dest dirs
self._create_temp_structure(src, src_structure)
self._create_temp_structure(dest, dest_structure)
# Perform the recursive copy
salt.utils.files.recursive_copy(src, dest)
# Confirm results match expected results
desired_structure = copy.copy(dest_structure)
desired_structure.update(src_structure)
self._validate_folder_structure_and_contents(
dest,
desired_structure)
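For reference, the recursive copy helper under test can be exercised outside the test harness as well. This is a minimal sketch, assuming salt.utils.files from this branch is importable; the directory layout and file contents are placeholders.

# Sketch: standalone use of salt.utils.files.recursive_copy() (placeholder paths).
import os
import shutil
import tempfile

import salt.utils.files

src = tempfile.mkdtemp()
dest = tempfile.mkdtemp()
try:
    os.makedirs(os.path.join(src, 'foo'))
    with salt.utils.files.fopen(os.path.join(src, 'foo', 'foofile.txt'), 'w') as fh:
        fh.write('fooSTRUCTURE')

    # Copy the tree under src into dest, merging with anything already there.
    salt.utils.files.recursive_copy(src, dest)
    print(os.path.isfile(os.path.join(dest, 'foo', 'foofile.txt')))  # expected: True
finally:
    shutil.rmtree(src)
    shutil.rmtree(dest)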
View File
@ -2,12 +2,13 @@
# Import python libs
from __future__ import absolute_import, unicode_literals
import sys
# Import Salt Libs
import salt.utils.minions as minions
import salt.utils.minions
# Import Salt Testing Libs
from tests.support.unit import TestCase
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
@ -38,7 +39,7 @@ class MinionsTestCase(TestCase):
'''
for nodegroup in NODEGROUPS:
expected = EXPECTED[nodegroup]
ret = minions.nodegroup_comp(nodegroup, NODEGROUPS)
ret = salt.utils.minions.nodegroup_comp(nodegroup, NODEGROUPS)
self.assertEqual(ret, expected)
@ -47,7 +48,7 @@ class CkMinionsTestCase(TestCase):
TestCase for salt.utils.minions.CkMinions class
'''
def setUp(self):
self.ckminions = minions.CkMinions({})
self.ckminions = salt.utils.minions.CkMinions({})
def test_spec_check(self):
# Test spec-only rule
@ -366,3 +367,145 @@ class CkMinionsTestCase(TestCase):
args = ['1', '2']
ret = self.ckminions.auth_check(auth_list, 'test.arg', args, 'runner')
self.assertTrue(ret)
@skipIf(sys.version_info < (2, 7), 'Python 2.7 needed for dictionary equality assertions')
class TargetParseTestCase(TestCase):
def test_parse_grains_target(self):
'''
Ensure proper parsing for grains
'''
g_tgt = 'G@a:b'
ret = salt.utils.minions.parse_target(g_tgt)
self.assertDictEqual(ret, {'engine': 'G', 'pattern': 'a:b', 'delimiter': None})
def test_parse_grains_pcre_target(self):
'''
Ensure proper parsing for grains PCRE matching
'''
p_tgt = 'P@a:b'
ret = salt.utils.minions.parse_target(p_tgt)
self.assertDictEqual(ret, {'engine': 'P', 'pattern': 'a:b', 'delimiter': None})
def test_parse_pillar_pcre_target(self):
'''
Ensure proper parsing for pillar PCRE matching
'''
j_tgt = 'J@a:b'
ret = salt.utils.minions.parse_target(j_tgt)
self.assertDictEqual(ret, {'engine': 'J', 'pattern': 'a:b', 'delimiter': None})
def test_parse_list_target(self):
'''
Ensure proper parsing for list matching
'''
l_tgt = 'L@a:b'
ret = salt.utils.minions.parse_target(l_tgt)
self.assertDictEqual(ret, {'engine': 'L', 'pattern': 'a:b', 'delimiter': None})
def test_parse_nodegroup_target(self):
'''
Ensure proper parsing for nodegroup matching
'''
n_tgt = 'N@a:b'
ret = salt.utils.minions.parse_target(n_tgt)
self.assertDictEqual(ret, {'engine': 'N', 'pattern': 'a:b', 'delimiter': None})
def test_parse_subnet_target(self):
'''
Ensure proper parsing for subnet matching
'''
s_tgt = 'S@a:b'
ret = salt.utils.minions.parse_target(s_tgt)
self.assertDictEqual(ret, {'engine': 'S', 'pattern': 'a:b', 'delimiter': None})
def test_parse_minion_pcre_target(self):
'''
Ensure proper parsing for minion PCRE matching
'''
e_tgt = 'E@a:b'
ret = salt.utils.minions.parse_target(e_tgt)
self.assertDictEqual(ret, {'engine': 'E', 'pattern': 'a:b', 'delimiter': None})
def test_parse_range_target(self):
'''
Ensure proper parsing for range matching
'''
r_tgt = 'R@a:b'
ret = salt.utils.minions.parse_target(r_tgt)
self.assertDictEqual(ret, {'engine': 'R', 'pattern': 'a:b', 'delimiter': None})
def test_parse_multiword_target(self):
'''
Ensure proper parsing for multi-word targets
Refs https://github.com/saltstack/salt/issues/37231
'''
mw_tgt = 'G@a:b c'
ret = salt.utils.minions.parse_target(mw_tgt)
self.assertEqual(ret['pattern'], 'a:b c')
class NodegroupCompTest(TestCase):
'''
Test nodegroup comparisons found in
salt.utils.minions.nodegroup_comp()
'''
def test_simple_nodegroup(self):
'''
Smoke test a very simple nodegroup. No recursion.
'''
simple_nodegroup = {'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com'}
ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
expected_ret = ['L@foo.domain.com,bar.domain.com,baz.domain.com', 'or', 'bl*.domain.com']
self.assertListEqual(ret, expected_ret)
def test_simple_expression_nodegroup(self):
'''
Smoke test a nodegroup with a simple expression. No recursion.
'''
simple_nodegroup = {'group1': '[foo,bar,baz].domain.com'}
ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
expected_ret = ['E@[foo,bar,baz].domain.com']
self.assertListEqual(ret, expected_ret)
def test_simple_recurse(self):
'''
Test a case where one nodegroup contains a second nodegroup
'''
referenced_nodegroups = {
'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com',
'group2': 'G@os:Debian and N@group1'
}
ret = salt.utils.minions.nodegroup_comp('group2', referenced_nodegroups)
expected_ret = [
'(',
'G@os:Debian',
'and',
'(',
'L@foo.domain.com,bar.domain.com,baz.domain.com',
'or',
'bl*.domain.com',
')',
')'
]
self.assertListEqual(ret, expected_ret)
def test_circular_nodegroup_reference(self):
'''
Test to see what happens if A refers to B
and B in turn refers back to A
'''
referenced_nodegroups = {
'group1': 'N@group2',
'group2': 'N@group1'
}
# If this works, it should also print an error to the console
ret = salt.utils.minions.nodegroup_comp('group1', referenced_nodegroups)
self.assertEqual(ret, [])
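For readers unfamiliar with the single-letter prefixes parsed in TargetParseTestCase, the summary below lists the compound-matcher engines these tests cover, per Salt's targeting documentation; the dict is an illustrative aid, not part of the test suite.

# Summary of the compound-matcher prefixes covered by TargetParseTestCase
# (illustrative aid only; see Salt's targeting documentation).
MATCHER_PREFIXES = {
    'G': 'grains glob match',
    'P': 'grains PCRE match',
    'J': 'pillar PCRE match',
    'L': 'list of minion IDs',
    'N': 'nodegroup',
    'S': 'subnet or IP address',
    'E': 'minion ID PCRE match',
    'R': 'range cluster (SECO range)',
}

for prefix, meaning in sorted(MATCHER_PREFIXES.items()):
    print('{0}@<pattern>  ->  {1}'.format(prefix, meaning))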
View File
@ -37,7 +37,7 @@ integration.runners.test_jobs
integration.runners.test_salt
integration.sdb.test_env
integration.states.test_host
integration.states.test_pip
integration.states.test_pip_state
integration.states.test_reg
integration.states.test_renderers
integration.utils.testprogram