mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 17:09:03 +00:00)

Merge branch 'develop' into deprecate-sdecode

This commit is contained in: commit daf3c7975d
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-API" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-API" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-api \- salt-api Command
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-CALL" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-CALL" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-call \- salt-call Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-CLOUD" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-CLOUD" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-cloud \- Salt Cloud Command
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-CP" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-CP" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-cp \- salt-cp Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-KEY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-KEY" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-key \- salt-key Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-MASTER" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-MASTER" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-master \- salt-master Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-MINION" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-MINION" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-minion \- salt-minion Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-PROXY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-PROXY" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-proxy \- salt-proxy Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-RUN" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-RUN" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-run \- salt-run Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-SSH" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-SSH" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-ssh \- salt-ssh Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-SYNDIC" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-SYNDIC" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-syndic \- salt-syndic Documentation
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT-UNITY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT-UNITY" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt-unity \- salt-unity Command
 .

@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SALT" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SALT" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 salt \- salt
 .
doc/man/salt.7 (12722 lines changed; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "SPM" "1" "Feb 23, 2018" "2018.3.0" "Salt"
+.TH "SPM" "1" "May 09, 2018" "2018.3.1" "Salt"
 .SH NAME
 spm \- Salt Package Manager Command
 .
@@ -125,7 +125,6 @@ state modules
     influxdb_database
     influxdb_retention_policy
     influxdb_user
-    infoblox
     infoblox_a
     infoblox_cname
     infoblox_host_record
@@ -1,5 +0,0 @@
-salt.states.infoblox module
-===========================
-
-.. automodule:: salt.states.infoblox
-    :members:
@@ -410,10 +410,11 @@ exactly like the ``require`` requisite (the watching state will execute if
   service.running:
     - watch_any:
       - file: /etc/apache2/sites-available/site1.conf
-      - file: /etc/apache2/sites-available/site2.conf
+      - file: apache2-site2
   file.managed:
     - name: /etc/apache2/sites-available/site1.conf
     - source: salt://apache2/files/site1.conf
+apache2-site2:
   file.managed:
     - name: /etc/apache2/sites-available/site2.conf
     - source: salt://apache2/files/site2.conf
@@ -351,6 +351,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
 `os-client-config <https://docs.openstack.org/os-client-config/latest/>`
+
 .. code-block:: yaml

     myopenstack:
       driver: openstack
       region_name: RegionOne

@@ -359,6 +360,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
 Or by just configuring the same auth block directly in the cloud provider config.
+
 .. code-block:: yaml

     myopenstack:
       driver: openstack
       region_name: RegionOne
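A minimal sketch of such a ``/etc/openstack/clouds.yml``, assuming the os-client-config layout (all names and values below are placeholders, not taken from this diff):

.. code-block:: yaml

    clouds:
      mycloud:
        region_name: RegionOne
        auth:
          username: demo
          password: secret
          project_name: demo
          auth_url: http://openstack.example.com:5000/v3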
@@ -699,15 +699,24 @@ repository to be served up from the Salt fileserver path
 Mountpoints can also be configured on a :ref:`per-remote basis
 <gitfs-per-remote-config>`.

+
+Using gitfs in Masterless Mode
+==============================
+
+Since 2014.7.0, gitfs can be used in masterless mode. To do so, simply add the
+gitfs configuration parameters (and set :conf_master:`fileserver_backend`) in
+the _minion_ config file instead of the master config file.
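A minimal sketch of such a masterless setup, placed in the minion config (the remote URL is a placeholder):

.. code-block:: yaml

    fileserver_backend:
      - gitfs

    gitfs_remotes:
      - https://github.com/example/example-formula.git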
+
+
 Using gitfs Alongside Other Backends
 ====================================

 Sometimes it may make sense to use multiple backends; for instance, if ``sls``
 files are stored in git but larger files are stored directly on the master.

-The cascading lookup logic used for multiple remotes is also used with
-multiple backends. If the ``fileserver_backend`` option contains
-multiple backends:
+The cascading lookup logic used for multiple remotes is also used with multiple
+backends. If the :conf_master:`fileserver_backend` option contains multiple
+backends:

 .. code-block:: yaml
@@ -719,7 +728,6 @@ Then the ``roots`` backend (the default backend of files in ``/srv/salt``) will
 be searched first for the requested file; then, if it is not found on the
 master, each configured git remote will be searched.
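A minimal sketch of such a multi-backend configuration (the remote URL is a placeholder):

.. code-block:: yaml

    fileserver_backend:
      - roots
      - gitfs

    gitfs_remotes:
      - https://github.com/example/example-formula.git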

-
 Branches, Environments, and Top Files
 =====================================

@@ -22,6 +22,7 @@ BASE_THORIUM_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'thorium')
 BASE_MASTER_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'salt-master')
 LOGS_DIR = os.path.join(ROOT_DIR, 'var', 'log', 'salt')
 PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run')
-SPM_FORMULA_PATH = os.path.join(ROOT_DIR, 'spm', 'salt')
-SPM_PILLAR_PATH = os.path.join(ROOT_DIR, 'spm', 'pillar')
-SPM_REACTOR_PATH = os.path.join(ROOT_DIR, 'spm', 'reactor')
+SPM_PARENT_PATH = os.path.join(ROOT_DIR, 'spm')
+SPM_FORMULA_PATH = os.path.join(SPM_PARENT_PATH, 'salt')
+SPM_PILLAR_PATH = os.path.join(SPM_PARENT_PATH, 'pillar')
+SPM_REACTOR_PATH = os.path.join(SPM_PARENT_PATH, 'reactor')
@@ -1916,7 +1916,8 @@ class Map(Cloud):
         pmap = self.map_providers_parallel(cached=cached)
         exist = set()
         defined = set()
-        for profile_name, nodes in six.iteritems(self.rendered_map):
+        rendered_map = copy.deepcopy(self.rendered_map)
+        for profile_name, nodes in six.iteritems(rendered_map):
             if profile_name not in self.opts['profiles']:
                 msg = (
                     'The required profile, \'{0}\', defined in the map '

@@ -1934,21 +1935,23 @@ class Map(Cloud):

             profile_data = self.opts['profiles'].get(profile_name)

-            # Get associated provider data, in case something like size
-            # or image is specified in the provider file. See issue #32510.
-            alias, driver = profile_data.get('provider').split(':')
-            provider_details = self.opts['providers'][alias][driver].copy()
-            del provider_details['profiles']
-
-            # Update the provider details information with profile data
-            # Profile data should override provider data, if defined.
-            # This keeps map file data definitions consistent with -p usage.
-            provider_details.update(profile_data)
-            profile_data = provider_details
-
             for nodename, overrides in six.iteritems(nodes):
                 # Get the VM name
-                nodedata = copy.deepcopy(profile_data)
+                # Get associated provider data, in case something like size
+                # or image is specified in the provider file. See issue #32510.
+                if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
+                    alias, driver = overrides.get('provider').split(':')
+                else:
+                    alias, driver = profile_data.get('provider').split(':')
+
+                provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
+                del provider_details['profiles']
+
+                # Update the provider details information with profile data
+                # Profile data and node overrides should override provider data, if defined.
+                # This keeps map file data definitions consistent with -p usage.
+                salt.utils.dictupdate.update(provider_details, profile_data)
+                nodedata = copy.deepcopy(provider_details)

                 # Update profile data with the map overrides
                 for setting in ('grains', 'master', 'minion', 'volumes',
                                 'requires'):
@@ -1914,15 +1914,15 @@ DEFAULT_API_OPTS = {
 DEFAULT_SPM_OPTS = {
     # ----- Salt master settings overridden by SPM --------------------->
     'spm_conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'spm'),
-    'formula_path': '/srv/spm/salt',
-    'pillar_path': '/srv/spm/pillar',
-    'reactor_path': '/srv/spm/reactor',
+    'formula_path': salt.syspaths.SPM_FORMULA_PATH,
+    'pillar_path': salt.syspaths.SPM_PILLAR_PATH,
+    'reactor_path': salt.syspaths.SPM_REACTOR_PATH,
     'spm_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'spm'),
     'spm_default_include': 'spm.d/*.conf',
     # spm_repos_config also includes a .d/ directory
     'spm_repos_config': '/etc/salt/spm.repos',
     'spm_cache_dir': os.path.join(salt.syspaths.CACHE_DIR, 'spm'),
-    'spm_build_dir': '/srv/spm_build',
+    'spm_build_dir': os.path.join(salt.syspaths.SRV_ROOT_DIR, 'spm_build'),
     'spm_build_exclude': ['CVS', '.hg', '.git', '.svn'],
     'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
     'cache': 'localfs',
@@ -5,7 +5,12 @@

 .. versionadded:: 0.17.0

-This module provides a `Sentry`_ logging handler.
+This module provides a `Sentry`_ logging handler. Sentry is an open source
+error tracking platform that provides deep context about exceptions that
+happen in production. Details about stack traces along with the context
+variables available at the time of the exception are easily browsable and
+filterable from the online interface. For more details please see
+`Sentry`_.

 .. admonition:: Note

@@ -41,6 +46,11 @@
       - cpuarch
       - ec2.tags.environment

+.. admonition:: Note
+
+    The ``public_key`` and ``secret_key`` variables are not supported with
+    Sentry > 3.0. The `DSN`_ key should be used instead.
+
 All the client configuration keys are supported, please see the
 `Raven client documentation`_.
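A minimal sketch of a DSN-based handler configuration (host and key values are placeholders):

.. code-block:: yaml

    sentry_handler:
      dsn: https://<public-key>:<secret-key>@sentry.example.com/<project-id>
      log_level: warning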
@@ -551,6 +551,10 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
             r = conn.associate_vpc_with_hosted_zone(**args)
             return _wait_for_sync(r['ChangeInfo']['Id'], conn)
         except ClientError as e:
+            if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
+                log.debug('VPC Association already exists.')
+                # return True since the current state is the desired one
+                return True
             if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
                 log.debug('Throttled by AWS API.')
                 time.sleep(3)
@@ -513,10 +513,18 @@ def _run(cmd,
                 for k, v in six.iteritems(env_runas)
             )
             env_runas.update(env)
+
+            # Fix platforms like Solaris that don't set a USER env var in the
+            # user's default environment as obtained above.
+            if env_runas.get('USER') != runas:
+                env_runas['USER'] = runas
+
+            # Fix some corner cases where shelling out to get the user's
+            # environment returns the wrong home directory.
+            runas_home = os.path.expanduser('~{0}'.format(runas))
+            if env_runas.get('HOME') != runas_home:
+                env_runas['HOME'] = runas_home
+
             env = env_runas
         except ValueError as exc:
             log.exception('Error raised retrieving environment for user %s', runas)
@@ -2082,6 +2082,16 @@ def port(name, private_port=None):
     name
         Container name or ID

+        .. versionchanged:: Fluorine
+            This value can now be a pattern expression (using the
+            pattern-matching characters defined in fnmatch_). If a pattern
+            expression is used, this function will return a dictionary mapping
+            container names which match the pattern to the mappings for those
+            containers. When no pattern expression is used, a dictionary of the
+            mappings for the specified container name will be returned.
+
+            .. _fnmatch: https://docs.python.org/2/library/fnmatch.html
+
     private_port : None
         If specified, get information for that specific port. Can be specified
         either as a port number (i.e. ``5000``), or as a port number plus the

@@ -2104,12 +2114,10 @@ def port(name, private_port=None):
         salt myminion docker.port mycontainer 5000
         salt myminion docker.port mycontainer 5000/udp
     '''
-    # docker.client.Client.port() doesn't do what we need, so just inspect the
-    # container and get the information from there. It's what they're already
-    # doing (poorly) anyway.
-    mappings = inspect_container(name).get('NetworkSettings', {}).get('Ports', {})
-    if not mappings:
-        return {}
+    pattern_used = bool(re.search(r'[*?\[]', name))
+    names = fnmatch.filter(list_containers(all=True), name) \
+        if pattern_used \
+        else [name]

     if private_port is None:
         pattern = '*'

@@ -2132,7 +2140,17 @@ def port(name, private_port=None):
         except AttributeError:
             raise SaltInvocationError(err)

-    return dict((x, mappings[x]) for x in fnmatch.filter(mappings, pattern))
+    ret = {}
+    for c_name in names:
+        # docker.client.Client.port() doesn't do what we need, so just inspect
+        # the container and get the information from there. It's what they're
+        # already doing (poorly) anyway.
+        mappings = inspect_container(c_name).get(
+            'NetworkSettings', {}).get('Ports', {})
+        ret[c_name] = dict((x, mappings[x])
+                           for x in fnmatch.filter(mappings, pattern))
+
+    return ret.get(name, {}) if not pattern_used else ret


 def ps_(filters=None, **kwargs):
@@ -3210,6 +3228,7 @@ def run_container(image,
     CLI Examples:

     .. code-block:: bash

         salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
+        # Run container in the background
         salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
@@ -5133,7 +5133,7 @@ def manage_file(name,

     .. code-block:: bash

-        salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''
+        salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''

     .. versionchanged:: 2014.7.0
         ``follow_symlinks`` option added
@@ -9,10 +9,12 @@ Glance module for interacting with OpenStack Glance
 Example configuration

 .. code-block:: yaml

     glance:
       cloud: default

+.. code-block:: yaml
+
     glance:
       auth:
         username: admin

@@ -9,10 +9,12 @@ Keystone module for interacting with OpenStack Keystone
 Example configuration

 .. code-block:: yaml

     keystone:
       cloud: default

+.. code-block:: yaml
+
     keystone:
       auth:
         username: admin

@@ -9,10 +9,12 @@ Neutron module for interacting with OpenStack Neutron
 Example configuration

 .. code-block:: yaml

     neutron:
       cloud: default

+.. code-block:: yaml
+
     neutron:
       auth:
         username: admin
@@ -460,13 +460,10 @@ def install(pkgs=None,  # pylint: disable=R0912,R0913,R0914
         Path to requirements

     bin_env
-        Path to pip bin or path to virtualenv. If doing a system install,
-        and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
-        specify the pip bin you want.
-
-        .. note::
-            If installing into a virtualenv, just use the path to the
-            virtualenv (e.g. ``/home/code/path/to/virtualenv/``)
+        Path to pip (or to a virtualenv). This can be used to specify the path
+        to the pip to use when more than one Python release is installed (e.g.
+        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
+        specified, it is assumed to be a virtualenv.

     use_wheel
         Prefer wheel archives (requires pip>=1.4)

@@ -569,7 +566,7 @@ def install(pkgs=None,  # pylint: disable=R0912,R0913,R0914
         The user under which to run pip

     cwd
-        Current working directory to run pip from
+        Directory from which to run pip

     pre_releases
         Include pre-releases in the available versions

@@ -941,36 +938,38 @@ def uninstall(pkgs=None,
               saltenv='base',
               use_vt=False):
     '''
-    Uninstall packages with pip
-
-    Uninstall packages individually or from a pip requirements file. Uninstall
-    packages globally or from a virtualenv.
+    Uninstall packages individually or from a pip requirements file

     pkgs
         comma separated list of packages to install

     requirements
-        path to requirements.
+        Path to requirements file

     bin_env
-        path to pip bin or path to virtualenv. If doing an uninstall from
-        the system python and want to use a specific pip bin (pip-2.7,
-        pip-2.6, etc..) just specify the pip bin you want.
-        If uninstalling from a virtualenv, just use the path to the virtualenv
-        (/home/code/path/to/virtualenv/)
+        Path to pip (or to a virtualenv). This can be used to specify the path
+        to the pip to use when more than one Python release is installed (e.g.
+        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
+        specified, it is assumed to be a virtualenv.

     log
         Log file where a complete (maximum verbosity) record will be kept

     proxy
-        Specify a proxy in the form
-        user:passwd@proxy.server:port. Note that the
-        user:password@ is optional and required only if you
-        are behind an authenticated proxy. If you provide
-        user@proxy.server:port then you will be prompted for a
-        password.
+        Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
+        that the ``user:password@`` is optional and required only if you are
+        behind an authenticated proxy. If you provide
+        ``user@proxy.server:port`` then you will be prompted for a password.

     timeout
         Set the socket timeout (default 15 seconds)

     user
         The user under which to run pip

     cwd
-        Current working directory to run pip from
+        Directory from which to run pip

     use_vt
         Use VT terminal emulation (see output while installing)

@@ -982,7 +981,6 @@ def uninstall(pkgs=None,
         salt '*' pip.uninstall requirements=/path/to/requirements.txt
         salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
         salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
-
     '''
     cmd = _get_pip_bin(bin_env)
     cmd.extend(['uninstall', '-y'])

@@ -1065,32 +1063,27 @@ def freeze(bin_env=None,
     virtualenv

     bin_env
-        path to pip bin or path to virtualenv. If doing an uninstall from
-        the system python and want to use a specific pip bin (pip-2.7,
-        pip-2.6, etc..) just specify the pip bin you want.
-        If uninstalling from a virtualenv, just use the path to the virtualenv
-        (/home/code/path/to/virtualenv/)
+        Path to pip (or to a virtualenv). This can be used to specify the path
+        to the pip to use when more than one Python release is installed (e.g.
+        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
+        specified, it is assumed to be a virtualenv.

     user
         The user under which to run pip

     cwd
-        Current working directory to run pip from
+        Directory from which to run pip

     .. note::

         If the version of pip available is older than 8.0.3, the list will not
-        include the packages pip, wheel, setuptools, or distribute even if they
-        are installed.
+        include the packages ``pip``, ``wheel``, ``setuptools``, or
+        ``distribute`` even if they are installed.

     CLI Example:

     .. code-block:: bash

-        salt '*' pip.freeze /home/code/path/to/virtualenv/
-
-    .. versionchanged:: 2016.11.2
-
-        The packages pip, wheel, setuptools, and distribute are included if the
-        installed pip is new enough.
+        salt '*' pip.freeze bin_env=/home/code/path/to/virtualenv
     '''
     cmd = _get_pip_bin(bin_env)
     cmd.append('freeze')

@@ -1135,21 +1128,16 @@ def list_(prefix=None,
     .. note::

         If the version of pip available is older than 8.0.3, the packages
-        wheel, setuptools, and distribute will not be reported by this function
-        even if they are installed. Unlike
-        :py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
-        reports the version of pip which is installed.
+        ``wheel``, ``setuptools``, and ``distribute`` will not be reported by
+        this function even if they are installed. Unlike :py:func:`pip.freeze
+        <salt.modules.pip.freeze>`, this function always reports the version of
+        pip which is installed.

     CLI Example:

     .. code-block:: bash

         salt '*' pip.list salt
-
-    .. versionchanged:: 2016.11.2
-
-        The packages wheel, setuptools, and distribute are included if the
-        installed pip is new enough.
     '''
     packages = {}

@@ -1458,9 +1446,10 @@ def list_all_versions(pkg,
         The package to check

     bin_env
-        Path to pip bin or path to virtualenv. If doing a system install,
-        and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
-        specify the pip bin you want.
+        Path to pip (or to a virtualenv). This can be used to specify the path
+        to the pip to use when more than one Python release is installed (e.g.
+        ``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``). If a directory path is
+        specified, it is assumed to be a virtualenv.

     include_alpha
         Include alpha versions in the list

@@ -1475,7 +1464,7 @@ def list_all_versions(pkg,
         The user under which to run pip

     cwd
-        Current working directory to run pip from
+        Directory from which to run pip

     index_url
         Base URL of Python Package Index
@@ -995,7 +995,6 @@ def _role_cmd_args(name,
                    connlimit=None,
                    inherit=None,
                    createdb=None,
-                   createuser=None,
                    createroles=None,
                    superuser=None,
                    groups=None,

@@ -1003,8 +1002,6 @@ def _role_cmd_args(name,
                    rolepassword=None,
                    valid_until=None,
                    db_role=None):
-    if createuser is not None and superuser is None:
-        superuser = createuser
     if inherit is None:
         if typ_ in ['user', 'group']:
             inherit = True

@@ -1088,7 +1085,6 @@ def _role_create(name,
                  password=None,
                  createdb=None,
                  createroles=None,
-                 createuser=None,
                  encrypted=None,
                  superuser=None,
                  login=None,

@@ -1121,7 +1117,6 @@ def _role_create(name,
         inherit=inherit,
         createdb=createdb,
         createroles=createroles,
-        createuser=createuser,
         superuser=superuser,
         groups=groups,
         replication=replication,

@@ -1143,7 +1138,6 @@ def user_create(username,
                 maintenance_db=None,
                 password=None,
                 createdb=None,
-                createuser=None,
                 createroles=None,
                 inherit=None,
                 login=None,

@@ -1174,7 +1168,6 @@ def user_create(username,
                         maintenance_db=maintenance_db,
                         password=password,
                         createdb=createdb,
-                        createuser=createuser,
                         createroles=createroles,
                         inherit=inherit,
                         login=login,

@@ -1195,7 +1188,6 @@ def _role_update(name,
                  maintenance_db=None,
                  password=None,
                  createdb=None,
-                 createuser=None,
                  typ_='role',
                  createroles=None,
                  inherit=None,

@@ -1235,7 +1227,6 @@ def _role_update(name,
         connlimit=connlimit,
         inherit=inherit,
         createdb=createdb,
-        createuser=createuser,
         createroles=createroles,
         superuser=superuser,
         groups=groups,

@@ -1259,7 +1250,6 @@ def user_update(username,
                 maintenance_db=None,
                 password=None,
                 createdb=None,
-                createuser=None,
                 createroles=None,
                 encrypted=None,
                 superuser=None,

@@ -1293,7 +1283,6 @@ def user_update(username,
                         login=login,
                         connlimit=connlimit,
                         createdb=createdb,
-                        createuser=createuser,
                         createroles=createroles,
                         encrypted=encrypted,
                         superuser=superuser,

@@ -1740,7 +1729,6 @@ def group_create(groupname,
                  maintenance_db=None,
                  password=None,
                  createdb=None,
-                 createuser=None,
                  createroles=None,
                  encrypted=None,
                  login=None,

@@ -1771,7 +1759,6 @@ def group_create(groupname,
                         password=password,
                         createdb=createdb,
                         createroles=createroles,
-                        createuser=createuser,
                         encrypted=encrypted,
                         login=login,
                         inherit=inherit,

@@ -1790,7 +1777,6 @@ def group_update(groupname,
                  password=None,
                  createdb=None,
                  createroles=None,
-                 createuser=None,
                  encrypted=None,
                  inherit=None,
                  login=None,

@@ -1819,7 +1805,6 @@ def group_update(groupname,
                         createdb=createdb,
                         typ_='group',
                         createroles=createroles,
-                        createuser=createuser,
                         encrypted=encrypted,
                         login=login,
                         inherit=inherit,
@@ -453,7 +453,7 @@ def diff(package, path):
     return res


-def info(*packages, **attr):
+def info(*packages, **kwargs):
     '''
     Return a detailed package(s) summary information.
     If no packages specified, all packages will be returned.

@@ -467,6 +467,9 @@ def info(*packages, **attr):
             version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
             build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.

+    :param all_versions:
+        Return information for all installed versions of the packages
+
     :return:

     CLI example:

@@ -476,7 +479,9 @@ def info(*packages, **attr):
         salt '*' lowpkg.info apache2 bash
         salt '*' lowpkg.info apache2 bash attr=version
         salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size
+        salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True
     '''
+    all_versions = kwargs.get('all_versions', False)
     # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't
     # available, then we can just use SIZE for older versions. See Issue #31366.
     rpm_tags = __salt__['cmd.run_stdout'](

@@ -516,7 +521,7 @@ def info(*packages, **attr):
         "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n",
     }

-    attr = attr.get('attr', None) and attr['attr'].split(",") or None
+    attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None
     query = list()
     if attr:
         for attr_k in attr:

@@ -610,8 +615,13 @@ def info(*packages, **attr):
         if pkg_name.startswith('gpg-pubkey'):
             continue
         if pkg_name not in ret:
-            ret[pkg_name] = pkg_data.copy()
-            del ret[pkg_name]['edition']
+            if all_versions:
+                ret[pkg_name] = [pkg_data.copy()]
+            else:
+                ret[pkg_name] = pkg_data.copy()
+                del ret[pkg_name]['edition']
+        elif all_versions:
+            ret[pkg_name].append(pkg_data.copy())

     return ret
@@ -989,7 +989,7 @@ def diskusage(*args):
     elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):
         ifile = __salt__['cmd.run']('mount -p').splitlines()
     else:
-        ifile = []
+        raise CommandExecutionError('status.diskusage not yet supported on this platform')

     for line in ifile:
         comps = line.split()
@@ -331,6 +331,7 @@ def version(*names, **kwargs):
         dict: The package name(s) with the installed versions.

+    .. code-block:: cfg

         {['<version>', '<version>', ]} OR
         {'<package name>': ['<version>', '<version>', ]}
@@ -1009,31 +1009,39 @@ def list_downloaded():
     return ret


-def info_installed(*names):
+def info_installed(*names, **kwargs):
     '''
     .. versionadded:: 2015.8.1

     Return the information of the named package(s), installed on the system.

+    :param all_versions:
+        Include information for all versions of the packages installed on the minion.
+
     CLI example:

     .. code-block:: bash

         salt '*' pkg.info_installed <package1>
         salt '*' pkg.info_installed <package1> <package2> <package3> ...
+        salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
     '''
+    all_versions = kwargs.get('all_versions', False)
     ret = dict()
-    for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names).items():
-        t_nfo = dict()
-        # Translate dpkg-specific keys to a common structure
-        for key, value in pkg_nfo.items():
-            if key == 'source_rpm':
-                t_nfo['source'] = value
-            else:
-                t_nfo[key] = value
-
-        ret[pkg_name] = t_nfo
-
+    for pkg_name, pkgs_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
+        pkg_nfo = pkgs_nfo if all_versions else [pkgs_nfo]
+        for _nfo in pkg_nfo:
+            t_nfo = dict()
+            # Translate dpkg-specific keys to a common structure
+            for key, value in _nfo.items():
+                if key == 'source_rpm':
+                    t_nfo['source'] = value
+                else:
+                    t_nfo[key] = value
+            if not all_versions:
+                ret[pkg_name] = t_nfo
+            else:
+                ret.setdefault(pkg_name, []).append(t_nfo)
     return ret
@@ -1957,7 +1965,24 @@ def remove(name=None, pkgs=None, **kwargs):  # pylint: disable=W0613
         raise CommandExecutionError(exc)

     old = list_pkgs()
-    targets = [x for x in pkg_params if x in old]
+    targets = []
+    for target in pkg_params:
+        # Check if package version set to be removed is actually installed:
+        # old[target] contains a comma-separated list of installed versions
+        if target in old and not pkg_params[target]:
+            targets.append(target)
+        elif target in old and pkg_params[target] in old[target].split(','):
+            arch = ''
+            pkgname = target
+            try:
+                namepart, archpart = target.rsplit('.', 1)
+            except ValueError:
+                pass
+            else:
+                if archpart in salt.utils.pkg.rpm.ARCHES:
+                    arch = '.' + archpart
+                    pkgname = namepart
+            targets.append('{0}-{1}{2}'.format(pkgname, pkg_params[target], arch))
     if not targets:
         return {}
@@ -470,28 +470,37 @@ def info_installed(*names, **kwargs):
     Valid attributes are:
         ignore, report

+    :param all_versions:
+        Include information for all versions of the packages installed on the minion.
+
     CLI example:

     .. code-block:: bash

         salt '*' pkg.info_installed <package1>
         salt '*' pkg.info_installed <package1> <package2> <package3> ...
         salt '*' pkg.info_installed <package1> attr=version,vendor
+        salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
+        salt '*' pkg.info_installed <package1> attr=version,vendor all_versions=True
         salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor
         salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=ignore
         salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=report
     '''
+    all_versions = kwargs.get('all_versions', False)
     ret = dict()
-    for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
-        t_nfo = dict()
-        # Translate dpkg-specific keys to a common structure
-        for key, value in six.iteritems(pkg_nfo):
-            if key == 'source_rpm':
-                t_nfo['source'] = value
-            else:
-                t_nfo[key] = value
-        ret[pkg_name] = t_nfo
-
+    for pkg_name, pkgs_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
+        pkg_nfo = pkgs_nfo if all_versions else [pkgs_nfo]
+        for _nfo in pkg_nfo:
+            t_nfo = dict()
+            # Translate dpkg-specific keys to a common structure
+            for key, value in six.iteritems(_nfo):
+                if key == 'source_rpm':
+                    t_nfo['source'] = value
+                else:
+                    t_nfo[key] = value
+            if not all_versions:
+                ret[pkg_name] = t_nfo
+            else:
+                ret.setdefault(pkg_name, []).append(t_nfo)
     return ret
@@ -1494,7 +1503,14 @@ def _uninstall(name=None, pkgs=None):
         raise CommandExecutionError(exc)

     old = list_pkgs()
-    targets = [target for target in pkg_params if target in old]
+    targets = []
+    for target in pkg_params:
+        # Check if package version set to be removed is actually installed:
+        # old[target] contains a comma-separated list of installed versions
+        if target in old and pkg_params[target] in old[target].split(','):
+            targets.append(target + "-" + pkg_params[target])
+        elif target in old and not pkg_params[target]:
+            targets.append(target)
     if not targets:
         return {}
@@ -1517,6 +1533,32 @@ def _uninstall(name=None, pkgs=None):
     return ret


+def normalize_name(name):
+    '''
+    Strips the architecture from the specified package name, if necessary.
+    Circumstances where this would be done include:
+
+    * If the arch is 32 bit and the package name ends in a 32-bit arch.
+    * If the arch matches the OS arch, or is ``noarch``.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' pkg.normalize_name zsh.x86_64
+    '''
+    try:
+        arch = name.rsplit('.', 1)[-1]
+        if arch not in salt.utils.pkg.rpm.ARCHES + ('noarch',):
+            return name
+    except ValueError:
+        return name
+    if arch in (__grains__['osarch'], 'noarch') \
+            or salt.utils.pkg.rpm.check_32(arch, osarch=__grains__['osarch']):
+        return name[:-(len(arch) + 1)]
+    return name
+
+
 def remove(name=None, pkgs=None, **kwargs):  # pylint: disable=unused-argument
     '''
     .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
@@ -236,16 +236,7 @@ def get_grains():
     '''
     Retrieve facts from the network device.
     '''
-    refresh_needed = False
-    refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}))
-    refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}).get('result', False))
-    refresh_needed = refresh_needed or (not DETAILS.get('grains_cache', {}).get('out', {}))
-
-    if refresh_needed:
-        facts = call('get_facts', **{})
-        DETAILS['grains_cache'] = facts
-
-    return DETAILS.get('grains_cache', {})
+    return call('get_facts', **{})


 def grains_refresh():
@@ -444,7 +444,10 @@ def clean_old_jobs():
             hours_difference = (time.time() - jid_ctime) / 3600.0
             if hours_difference > __opts__['keep_jobs'] and os.path.exists(t_path):
                 # Remove the entire f_path from the original JID dir
-                shutil.rmtree(f_path)
+                try:
+                    shutil.rmtree(f_path)
+                except OSError as err:
+                    log.error('Unable to remove %s: %s', t_path, err)

     # Remove empty JID dirs from job cache, if they're old enough.
     # JID dirs may be empty either from a previous cache-clean with the bug
@@ -11,6 +11,7 @@

 from __future__ import absolute_import, print_function, unicode_literals
 import datetime
+import logging

 import yaml
 from yaml.constructor import ConstructorError

@@ -22,6 +23,8 @@ from salt.utils.odict import OrderedDict

 __all__ = ['deserialize', 'serialize', 'available']

+log = logging.getLogger(__name__)
+
 available = True

 # prefer C bindings over python when available

@@ -46,14 +49,17 @@ def deserialize(stream_or_string, **options):
     try:
         return yaml.load(stream_or_string, **options)
     except ScannerError as error:
+        log.exception('Error encountered while deserializing')
         err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
         line_num = error.problem_mark.line + 1
         raise DeserializationError(err_type,
                                    line_num,
                                    error.problem_mark.buffer)
     except ConstructorError as error:
+        log.exception('Error encountered while deserializing')
         raise DeserializationError(error)
     except Exception as error:
+        log.exception('Error encountered while deserializing')
         raise DeserializationError(error)

@@ -74,6 +80,7 @@ def serialize(obj, **options):
             return response[:-1]
         return response
     except Exception as error:
+        log.exception('Error encountered while serializing')
         raise SerializationError(error)

@@ -108,7 +115,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
 Loader.add_multi_constructor('tag:yaml.org,2002:str', Loader.construct_yaml_str)
 Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
 Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
-Loader.add_multi_constructor(None, Loader.construct_undefined)


 class Dumper(BaseDumper):  # pylint: disable=W0232
@@ -150,14 +150,17 @@ def deserialize(stream_or_string, **options):
     try:
         return yaml.load(stream_or_string, **options)
     except ScannerError as error:
+        log.exception('Error encountered while deserializing')
        err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
         line_num = error.problem_mark.line + 1
         raise DeserializationError(err_type,
                                    line_num,
                                    error.problem_mark.buffer)
     except ConstructorError as error:
+        log.exception('Error encountered while deserializing')
         raise DeserializationError(error)
     except Exception as error:
+        log.exception('Error encountered while deserializing')
         raise DeserializationError(error)

@@ -178,6 +181,7 @@ def serialize(obj, **options):
             return response[:-1]
         return response
     except Exception as error:
+        log.exception('Error encountered while serializing')
         raise SerializationError(error)

@@ -322,7 +326,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:pairs', Loader.construct_yaml_pairs)
 Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
 Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
 Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
-Loader.add_multi_constructor(None, Loader.construct_undefined)


 class SLSMap(OrderedDict):
@@ -3444,43 +3444,45 @@ class BaseHighState(object):
                     'Specified SLS {0} on local filesystem cannot '
                     'be found.'.format(sls)
                 )
+        state = None
         if not fn_:
             errors.append(
                 'Specified SLS {0} in saltenv {1} is not '
                 'available on the salt master or through a configured '
                 'fileserver'.format(sls, saltenv)
             )
-        state = None
-        try:
-            state = compile_template(fn_,
-                                     self.state.rend,
-                                     self.state.opts['renderer'],
-                                     self.state.opts['renderer_blacklist'],
-                                     self.state.opts['renderer_whitelist'],
-                                     saltenv,
-                                     sls,
-                                     rendered_sls=mods
-                                     )
-        except SaltRenderError as exc:
-            msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
-                saltenv, sls, exc
-            )
-            log.critical(msg)
-            errors.append(msg)
-        except Exception as exc:
-            msg = 'Rendering SLS {0} failed, render error: {1}'.format(
-                sls, exc
-            )
-            log.critical(
-                msg,
-                # Show the traceback if the debug logging level is enabled
-                exc_info_on_loglevel=logging.DEBUG
-            )
-            errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
-        try:
-            mods.add('{0}:{1}'.format(saltenv, sls))
-        except AttributeError:
-            pass
+        else:
+            try:
+                state = compile_template(fn_,
+                                         self.state.rend,
+                                         self.state.opts['renderer'],
+                                         self.state.opts['renderer_blacklist'],
+                                         self.state.opts['renderer_whitelist'],
+                                         saltenv,
+                                         sls,
+                                         rendered_sls=mods
+                                         )
+            except SaltRenderError as exc:
+                msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
+                    saltenv, sls, exc
+                )
+                log.critical(msg)
+                errors.append(msg)
+            except Exception as exc:
+                msg = 'Rendering SLS {0} failed, render error: {1}'.format(
+                    sls, exc
+                )
+                log.critical(
+                    msg,
+                    # Show the traceback if the debug logging level is enabled
+                    exc_info_on_loglevel=logging.DEBUG
+                )
+                errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
+            try:
+                mods.add('{0}:{1}'.format(saltenv, sls))
+            except AttributeError:
+                pass

         if state:
             if not isinstance(state, dict):
                 errors.append(
@@ -3903,7 +3905,8 @@ class BaseHighState(object):
         err += self.verify_tops(top)
         matches = self.top_matches(top)
         if not matches:
-            msg = 'No Top file or master_tops data matches found.'
+            msg = ('No Top file or master_tops data matches found. Please see '
+                   'master log for details.')
             ret[tag_name]['comment'] = msg
             return ret
         matches = self.matches_whitelist(matches, whitelist)
@@ -275,9 +275,10 @@ def index_template_present(name, definition, check_definition=False):
             ret['comment'] = 'Cannot create index template {0}, {1}'.format(name, output)
     else:
         if check_definition:
-            definition_parsed = salt.utils.json.loads(definition)
+            definition_to_diff = {'aliases': {}, 'mappings': {}, 'settings': {}}
+            definition_to_diff.update(definition)
             current_template = __salt__['elasticsearch.index_template_get'](name=name)[name]
-            diff = __utils__['dictdiffer.deep_diff'](current_template, definition_parsed)
+            diff = __utils__['dictdiffer.deep_diff'](current_template, definition_to_diff)
             if len(diff) != 0:
                 if __opts__['test']:
                     ret['comment'] = 'Index template {0} exist but need to be updated'.format(name)
@@ -235,7 +235,7 @@ def present(name,
         salt.utils.versions.warn_until(
             'Neon',
             'The \'prune_services\' argument default is currently True, '
-            'but will be changed to True in future releases.')
+            'but will be changed to False in the Neon release.')

     ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
                    port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,
@@ -48,6 +48,7 @@ def present(name,
     **Example:**

+    .. code-block:: yaml

         example user present in influxdb:
           influxdb_user.present:
             - name: example
@@ -415,6 +415,16 @@ def _find_remove_targets(name=None,

         if __grains__['os'] == 'FreeBSD' and origin:
             cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]
+        elif __grains__['os_family'] == 'Suse':
+            # On SUSE systems. Zypper returns packages without "arch" in name
+            try:
+                namepart, archpart = pkgname.rsplit('.', 1)
+            except ValueError:
+                cver = cur_pkgs.get(pkgname, [])
+            else:
+                if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
+                    pkgname = namepart
+                cver = cur_pkgs.get(pkgname, [])
         else:
             cver = cur_pkgs.get(pkgname, [])

@@ -844,6 +854,17 @@ def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
             cver = new_pkgs.get(pkgname.split('%')[0])
         elif __grains__['os_family'] == 'Debian':
             cver = new_pkgs.get(pkgname.split('=')[0])
+        elif __grains__['os_family'] == 'Suse':
+            # On SUSE systems. Zypper returns packages without "arch" in name
+            try:
+                namepart, archpart = pkgname.rsplit('.', 1)
+            except ValueError:
+                cver = new_pkgs.get(pkgname)
+            else:
+                if archpart in salt.utils.pkg.rpm.ARCHES + ("noarch",):
+                    cver = new_pkgs.get(namepart)
+                else:
+                    cver = new_pkgs.get(pkgname)
         else:
             cver = new_pkgs.get(pkgname)
         if not cver and pkgname in new_caps:

@@ -2666,7 +2687,17 @@ def _uninstall(

     changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs)
     new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
-    failed = [x for x in pkg_params if x in new]
+    failed = []
+    for x in pkg_params:
+        if __grains__['os_family'] in ['Suse', 'RedHat']:
+            # Check if the package version set to be removed is actually removed:
+            if x in new and not pkg_params[x]:
+                failed.append(x)
+            elif x in new and pkg_params[x] in new[x]:
+                failed.append(x + "-" + pkg_params[x])
+        elif x in new:
+            failed.append(x)

     if action == 'purge':
         new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                 removed=True,
@@ -36,7 +36,6 @@ def __virtual__():
 def present(name,
             createdb=None,
             createroles=None,
-            createuser=None,
             encrypted=None,
             superuser=None,
             inherit=None,

@@ -66,10 +65,6 @@ def present(name,
     createroles
         Is the group allowed to create other roles/users

-    createuser
-        Alias to create roles, and history problem, in pgsql normally
-        createuser == superuser
-
     encrypted
         Should the password be encrypted in the system catalog?

@@ -131,8 +126,6 @@ def present(name,
             'result': True,
             'comment': 'Group {0} is already present'.format(name)}

-    if createuser:
-        createroles = True
     # default to encrypted passwords
     if encrypted is not False:
         encrypted = postgres._DEFAULT_PASSWORDS_ENCRYPTION
@@ -37,7 +37,6 @@ def __virtual__():
 def present(name,
             createdb=None,
             createroles=None,
-            createuser=None,
             encrypted=None,
             superuser=None,
             replication=None,

@@ -69,9 +68,6 @@ def present(name,
     createroles
         Is the user allowed to create other users?

-    createuser
-        Alias to create roles
-
     encrypted
         Should the password be encrypted in the system catalog?

@@ -142,8 +138,6 @@ def present(name,
             'result': True,
             'comment': 'User {0} is already present'.format(name)}

-    if createuser:
-        createroles = True
     # default to encrypted passwords
     if encrypted is not False:
         encrypted = postgres._DEFAULT_PASSWORDS_ENCRYPTION
@@ -36,8 +36,8 @@ except ImportError:
              'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'HOME_DIR',
              'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR',
              'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR',
-             'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH',
-             'SHARE_DIR'):
+             'SPM_PARENT_PATH', 'SPM_FORMULA_PATH',
+             'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH', 'SHARE_DIR'):
     setattr(__generated_syspaths, key, None)

@@ -124,17 +124,21 @@ PIDFILE_DIR = __generated_syspaths.PIDFILE_DIR
 if PIDFILE_DIR is None:
     PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run')

+SPM_PARENT_PATH = __generated_syspaths.SPM_PARENT_PATH
+if SPM_PARENT_PATH is None:
+    SPM_PARENT_PATH = os.path.join(SRV_ROOT_DIR, 'spm')
+
 SPM_FORMULA_PATH = __generated_syspaths.SPM_FORMULA_PATH
 if SPM_FORMULA_PATH is None:
-    SPM_FORMULA_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'salt')
+    SPM_FORMULA_PATH = os.path.join(SPM_PARENT_PATH, 'salt')

 SPM_PILLAR_PATH = __generated_syspaths.SPM_PILLAR_PATH
 if SPM_PILLAR_PATH is None:
-    SPM_PILLAR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'pillar')
+    SPM_PILLAR_PATH = os.path.join(SPM_PARENT_PATH, 'pillar')

 SPM_REACTOR_PATH = __generated_syspaths.SPM_REACTOR_PATH
 if SPM_REACTOR_PATH is None:
-    SPM_REACTOR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'reactor')
+    SPM_REACTOR_PATH = os.path.join(SPM_PARENT_PATH, 'reactor')

 HOME_DIR = __generated_syspaths.HOME_DIR
 if HOME_DIR is None:

@@ -157,6 +161,7 @@ __all__ = [
     'INSTALL_DIR',
     'CLOUD_DIR',
     'BOOTSTRAP',
+    'SPM_PARENT_PATH',
     'SPM_FORMULA_PATH',
     'SPM_PILLAR_PATH',
     'SPM_REACTOR_PATH'
setup.py (3 changed lines)
@@ -229,6 +229,7 @@ class GenerateSaltSyspaths(Command):
             base_thorium_roots_dir=self.distribution.salt_base_thorium_roots_dir,
             logs_dir=self.distribution.salt_logs_dir,
             pidfile_dir=self.distribution.salt_pidfile_dir,
+            spm_parent_path=self.distribution.salt_spm_parent_dir,
             spm_formula_path=self.distribution.salt_spm_formula_dir,
             spm_pillar_path=self.distribution.salt_spm_pillar_dir,
             spm_reactor_path=self.distribution.salt_spm_reactor_dir,

@@ -650,6 +651,7 @@ BASE_MASTER_ROOTS_DIR = {base_master_roots_dir!r}
 BASE_THORIUM_ROOTS_DIR = {base_thorium_roots_dir!r}
 LOGS_DIR = {logs_dir!r}
 PIDFILE_DIR = {pidfile_dir!r}
+SPM_PARENT_PATH = {spm_parent_path!r}
 SPM_FORMULA_PATH = {spm_formula_path!r}
 SPM_PILLAR_PATH = {spm_pillar_path!r}
 SPM_REACTOR_PATH = {spm_reactor_path!r}

@@ -818,6 +820,7 @@ class SaltDistribution(distutils.dist.Distribution):
         self.salt_base_master_roots_dir = None
         self.salt_logs_dir = None
         self.salt_pidfile_dir = None
+        self.salt_spm_parent_dir = None
         self.salt_spm_formula_dir = None
         self.salt_spm_pillar_dir = None
         self.salt_spm_reactor_dir = None
@@ -14,7 +14,7 @@
 '''
 # Import Python libs
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals, print_function
 import logging

 # Import salt libs

@@ -15,7 +15,7 @@
 '''

 # Import Python libs
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals, print_function
 import logging

 # Import salt libs

@@ -4,7 +4,7 @@
 '''

 # Import Python Libs
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals, print_function

 # Import Salt Testing Libs
 from tests.support.mixins import LoaderModuleMockMixin

@@ -4,7 +4,7 @@
 '''

 # Import Python Libs
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals, print_function

 # Import Salt Testing Libs
 from tests.support.mixins import LoaderModuleMockMixin
@@ -49,7 +49,9 @@ class StatusModuleTest(ModuleCase):
         status.diskusage
         '''
         ret = self.run_function('status.diskusage')
-        if salt.utils.platform.is_windows():
+        if salt.utils.platform.is_darwin():
+            self.assertIn('not yet supported on this platform', ret)
+        elif salt.utils.platform.is_windows():
             self.assertTrue(isinstance(ret['percent'], float))
         else:
             self.assertIn('total', str(ret))
@@ -3,7 +3,7 @@
     :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`


-    tests.integration.states.pip
+    tests.integration.states.pip_state
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 '''

@@ -300,7 +300,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
         # pip install passing the package name in `name`
         ret = self.run_state(
             'pip.installed', name='pep8', user=username, bin_env=venv_dir,
-            no_cache_dir=True, password='PassWord1!')
+            password='PassWord1!')
         self.assertSaltTrueReturn(ret)

         if HAS_PWD:

@@ -350,7 +350,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
         ret = self.run_state(
             'pip.installed', name='', user=username, bin_env=venv_dir,
             requirements='salt://issue-6912-requirements.txt',
-            no_cache_dir=True, password='PassWord1!')
+            password='PassWord1!')
         self.assertSaltTrueReturn(ret)

         if HAS_PWD:
@ -20,6 +20,7 @@ import tempfile
|
||||
import time
|
||||
|
||||
import salt.utils.files
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.psutil_compat as psutils
|
||||
import salt.utils.yaml
|
||||
@ -28,6 +29,7 @@ from salt.ext import six
|
||||
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
|
||||
|
||||
from tests.support.unit import TestCase
|
||||
from tests.support.helpers import win32_kill_process_tree
|
||||
from tests.support.paths import CODE_DIR
|
||||
from tests.support.processes import terminate_process, terminate_process_list
|
||||
|
||||
@ -413,9 +415,6 @@ class TestProgram(six.with_metaclass(TestProgramMeta, object)):
|
||||
|
||||
popen_kwargs['preexec_fn'] = detach_from_parent_group
|
||||
|
||||
elif sys.platform.lower().startswith('win') and timeout is not None:
|
||||
raise RuntimeError('Timeout is not supported under windows')
|
||||
|
||||
self.argv = [self.program]
|
||||
self.argv.extend(args)
|
||||
log.debug('TestProgram.run: %s Environment %s', self.argv, env_delta)
|
||||
@@ -430,16 +429,26 @@ class TestProgram(six.with_metaclass(TestProgramMeta, object)):

                 if datetime.now() > stop_at:
                     if term_sent is False:
-                        # Kill the process group since sending the term signal
-                        # would only terminate the shell, not the command
-                        # executed in the shell
-                        os.killpg(os.getpgid(process.pid), signal.SIGINT)
-                        term_sent = True
-                        continue
+                        if salt.utils.platform.is_windows():
+                            _, alive = win32_kill_process_tree(process.pid)
+                            if alive:
+                                log.error("Child processes still alive: %s", alive)
+                        else:
+                            # Kill the process group since sending the term signal
+                            # would only terminate the shell, not the command
+                            # executed in the shell
+                            os.killpg(os.getpgid(process.pid), signal.SIGINT)
+                        term_sent = True
+                        continue

                     try:
-                        # As a last resort, kill the process group
-                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+                        if salt.utils.platform.is_windows():
+                            _, alive = win32_kill_process_tree(process.pid)
+                            if alive:
+                                log.error("Child processes still alive: %s", alive)
+                        else:
+                            # As a last resort, kill the process group
+                            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
                         process.wait()
                     except OSError as exc:
                         if exc.errno != errno.ESRCH:
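The two hunks above install the same dispatch in both kill paths: on Windows, walk and signal the whole process tree with the new helper; elsewhere, signal the POSIX process group. A minimal standalone sketch of that branch, assuming win32_kill_process_tree is importable from tests.support.helpers and that a module-level logger exists (both true in this file after the import hunk above):

    import logging
    import os
    import signal

    import salt.utils.platform
    from tests.support.helpers import win32_kill_process_tree

    log = logging.getLogger(__name__)

    def interrupt_tree(pid):
        # Windows has no POSIX process groups, so kill the whole tree instead.
        if salt.utils.platform.is_windows():
            _, alive = win32_kill_process_tree(pid)
            if alive:
                log.error('Child processes still alive: %s', alive)
        else:
            # Signal the process group so the command, not just the shell, dies.
            os.killpg(os.getpgid(pid), signal.SIGINT)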
@@ -743,6 +743,9 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
         with TestDaemon(self):
             if self.options.name:
                 for name in self.options.name:
+                    name = name.strip()
+                    if not name:
+                        continue
                     if os.path.isfile(name):
                         if not name.endswith('.py'):
                             continue
@@ -29,13 +29,14 @@ from datetime import datetime, timedelta

 # Import salt testing libs
 from tests.support.unit import TestCase
-from tests.support.helpers import RedirectStdStreams, requires_sshd_server
+from tests.support.helpers import (
+    RedirectStdStreams, requires_sshd_server, win32_kill_process_tree
+)
 from tests.support.runtests import RUNTIME_VARS
 from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
 from tests.support.paths import ScriptPathMixin, INTEGRATION_TEST_DIR, CODE_DIR, PYEXEC, SCRIPT_DIR

 # Import 3rd-party libs
-import salt.utils.json
 from salt.ext import six
 from salt.ext.six.moves import cStringIO  # pylint: disable=import-error
@@ -287,11 +288,11 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):

             popen_kwargs['preexec_fn'] = detach_from_parent_group

-        elif sys.platform.lower().startswith('win') and timeout is not None:
-            raise RuntimeError('Timeout is not supported under windows')
-
         process = subprocess.Popen(cmd, **popen_kwargs)

+        # Late import
+        import salt.utils.platform
+
         if timeout is not None:
             stop_at = datetime.now() + timedelta(seconds=timeout)
             term_sent = False
@@ -303,13 +304,23 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
                     # Kill the process group since sending the term signal
                     # would only terminate the shell, not the command
                     # executed in the shell
-                    os.killpg(os.getpgid(process.pid), signal.SIGINT)
+                    if salt.utils.platform.is_windows():
+                        _, alive = win32_kill_process_tree(process.pid)
+                        if alive:
+                            log.error("Child processes still alive: %s", alive)
+                    else:
+                        os.killpg(os.getpgid(process.pid), signal.SIGINT)
                     term_sent = True
                     continue

                 try:
                     # As a last resort, kill the process group
-                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
+                    if salt.utils.platform.is_windows():
+                        _, alive = win32_kill_process_tree(process.pid)
+                        if alive:
+                            log.error("Child processes still alive: %s", alive)
+                    else:
+                        os.killpg(os.getpgid(process.pid), signal.SIGINT)
                 except OSError as exc:
                     if exc.errno != errno.ESRCH:
                         # If errno is not "no such process", raise
@@ -855,6 +866,10 @@ class SSHCase(ShellCase):
                                 wipe=wipe, raw=raw)
         log.debug('SSHCase run_function executed %s with arg %s', function, arg)
+        log.debug('SSHCase JSON return: %s', ret)

+        # Late import
+        import salt.utils.json
+
         try:
             return salt.utils.json.loads(ret)['localhost']
         except Exception:
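The hunk above swaps a module-level import of salt.utils.json for a call-time one. A minimal sketch of the "late import" pattern; the rationale (keeping the support module importable before the full salt package is on the path) is an assumption here, not something the diff itself records:

    def localhost_result(raw):
        # Late import: resolve salt.utils.json only when it is actually needed.
        import salt.utils.json
        return salt.utils.json.loads(raw)['localhost']

    assert localhost_result('{"localhost": true}') is True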
@@ -1572,3 +1572,23 @@ class Webserver(object):
         '''
         self.ioloop.add_callback(self.ioloop.stop)
         self.server_thread.join()
+
+
+def win32_kill_process_tree(pid, sig=signal.SIGTERM, include_parent=True,
+                            timeout=None, on_terminate=None):
+    '''
+    Kill a process tree (including grandchildren) with signal "sig" and return
+    a (gone, still_alive) tuple. "on_terminate", if specified, is a callback
+    function which is called as soon as a child terminates.
+    '''
+    if pid == os.getpid():
+        raise RuntimeError("I refuse to kill myself")
+    parent = psutil.Process(pid)
+    children = parent.children(recursive=True)
+    if include_parent:
+        children.append(parent)
+    for p in children:
+        p.send_signal(sig)
+    gone, alive = psutil.wait_procs(children, timeout=timeout,
+                                    callback=on_terminate)
+    return (gone, alive)
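A hypothetical usage sketch for the helper above, assuming psutil is installed and a disposable child process; the spawned command is illustrative only and not taken from the diff:

    import signal
    import subprocess

    child = subprocess.Popen(['python', '-c', 'import time; time.sleep(60)'])
    gone, alive = win32_kill_process_tree(child.pid, sig=signal.SIGTERM,
                                          include_parent=True, timeout=5)
    for proc in alive:
        proc.kill()  # escalate for anything that ignored SIGTERM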
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 '''
 :codeauthor: `Anthony Shaw <anthonyshaw@apache.org>`

-    tests.unit.cloud.clouds.dimensiondata_test
+    tests.unit.cloud.test_libcloudfuncs
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 '''
tests/unit/cloud/test_map_conf.py (new file, 203 lines)
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Eric Radman <ericshane@eradman.com>`
+'''
+
+# Import Python libs
+from __future__ import absolute_import
+
+# Import Salt Testing libs
+from tests.support.unit import skipIf, TestCase
+from tests.support.mock import (
+    MagicMock,
+    patch,
+    NO_MOCK,
+    NO_MOCK_REASON,
+)
+
+# Import Salt libs
+import salt.cloud
+
+EXAMPLE_PROVIDERS = {
+    'nyc_vcenter': {'vmware': {'driver': 'vmware',
+                               'password': '123456',
+                               'url': 'vca1.saltstack.com',
+                               'minion': {
+                                   'master': 'providermaster',
+                                   'grains': {
+                                       'providergrain': True
+                                   }
+                               },
+                               'profiles': {},
+                               'user': 'root'}},
+    'nj_vcenter': {'vmware': {'driver': 'vmware',
+                              'password': '333',
+                              'profiles': {},
+                              'minion': {
+                                  'master': 'providermaster',
+                                  'grains': {
+                                      'providergrain': True
+                                  }
+                              },
+                              'image': 'rhel6_64prod',
+                              'url': 'vca2.saltstack.com',
+                              'user': 'root'}}
+}
+
+EXAMPLE_PROFILES = {
+    'nyc-vm': {'cluster': 'nycvirt',
+               'datastore': 'datastore1',
+               'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
+                                                    'size': 20}},
+                           'network': {'Network Adapter 1': {'mac': '88:88:88:88:88:42',
+                                                             'name': 'vlan50',
+                                                             'switch_type': 'standard'}},
+                           'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
+               'extra_config': {'mem.hotadd': 'yes'},
+               'folder': 'coreinfra',
+               'image': 'rhel6_64Guest',
+               'minion': {
+                   'master': 'profilemaster',
+                   'grains': {
+                       'profilegrain': True
+                   }
+               },
+               'memory': '8GB',
+               'num_cpus': 2,
+               'power_on': True,
+               'profile': 'nyc-vm',
+               'provider': 'nyc_vcenter:vmware',
+               'resourcepool': 'Resources'},
+    'nj-vm': {'cluster': 'njvirt',
+              'folder': 'coreinfra',
+              'image': 'rhel6_64Guest',
+              'memory': '8GB',
+              'num_cpus': 2,
+              'power_on': True,
+              'profile': 'nj-vm',
+              'provider': 'nj_vcenter:vmware',
+              'resourcepool': 'Resources'},
+
+}
+
+EXAMPLE_MAP = {
+    'nyc-vm': {'db1': {'cpus': 4,
+                       'devices': {'disk': {'Hard disk 1': {'size': 40}},
+                                   'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb'}}},
+                       'memory': '16GB',
+                       'minion': {
+                           'master': 'mapmaster',
+                           'grains': {
+                               'mapgrain': True
+                           }
+                       },
+                       'name': 'db1'},
+               'db2': {'name': 'db2',
+                       'password': '456',
+                       'provider': 'nj_vcenter:vmware'}},
+    'nj-vm': {'db3': {'name': 'db3',
+                      'password': '789',
+                      }}
+}
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class MapConfTest(TestCase):
+    '''
+    Validate evaluation of salt-cloud map configuration
+    '''
+
+    def test_cloud_map_merge_conf(self):
+        '''
+        Ensure that nested values can be selectively overridden in a map file
+        '''
+        with patch('salt.config.check_driver_dependencies', MagicMock(return_value=True)), \
+                patch('salt.cloud.Map.read', MagicMock(return_value=EXAMPLE_MAP)):
+            self.maxDiff = None
+            opts = {'extension_modules': '/var/cache/salt/master/extmods',
+                    'providers': EXAMPLE_PROVIDERS, 'profiles': EXAMPLE_PROFILES}
+            cloud_map = salt.cloud.Map(opts)
+
+            merged_profile = {
+                'create': {'db1': {'cluster': 'nycvirt',
+                                   'cpus': 4,
+                                   'datastore': 'datastore1',
+                                   'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
+                                                                        'size': 40}},
+                                               'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb',
+                                                                                 'name': 'vlan50',
+                                                                                 'switch_type': 'standard'}},
+                                               'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
+                                   'driver': 'vmware',
+                                   'extra_config': {'mem.hotadd': 'yes'},
+                                   'folder': 'coreinfra',
+                                   'image': 'rhel6_64Guest',
+                                   'memory': '16GB',
+                                   'minion': {'grains': {'mapgrain': True,
+                                                         'profilegrain': True,
+                                                         'providergrain': True},
+                                              'master': 'mapmaster'},
+                                   'name': 'db1',
+                                   'num_cpus': 2,
+                                   'password': '123456',
+                                   'power_on': True,
+                                   'profile': 'nyc-vm',
+                                   'provider': 'nyc_vcenter:vmware',
+                                   'resourcepool': 'Resources',
+                                   'url': 'vca1.saltstack.com',
+                                   'user': 'root'},
+                           'db2': {'cluster': 'nycvirt',
+                                   'datastore': 'datastore1',
+                                   'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
+                                                                        'size': 20}},
+                                               'network': {'Network Adapter 1': {'mac': '88:88:88:88:88:42',
+                                                                                 'name': 'vlan50',
+                                                                                 'switch_type': 'standard'}},
+                                               'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
+                                   'driver': 'vmware',
+                                   'extra_config': {'mem.hotadd': 'yes'},
+                                   'folder': 'coreinfra',
+                                   'image': 'rhel6_64Guest',
+                                   'memory': '8GB',
+                                   'minion': {'grains': {'profilegrain': True,
+                                                         'providergrain': True},
+                                              'master': 'profilemaster'},
+                                   'name': 'db2',
+                                   'num_cpus': 2,
+                                   'password': '456',
+                                   'power_on': True,
+                                   'profile': 'nyc-vm',
+                                   'provider': 'nj_vcenter:vmware',
+                                   'resourcepool': 'Resources',
+                                   'url': 'vca2.saltstack.com',
+                                   'user': 'root'},
+                           'db3': {'cluster': 'njvirt',
+                                   'driver': 'vmware',
+                                   'folder': 'coreinfra',
+                                   'image': 'rhel6_64Guest',
+                                   'memory': '8GB',
+                                   'minion': {'grains': {'providergrain': True},
+                                              'master': 'providermaster'},
+                                   'name': 'db3',
+                                   'num_cpus': 2,
+                                   'password': '789',
+                                   'power_on': True,
+                                   'profile': 'nj-vm',
+                                   'provider': 'nj_vcenter:vmware',
+                                   'resourcepool': 'Resources',
+                                   'url': 'vca2.saltstack.com',
+                                   'user': 'root'}}
+            }
+
+            # what we assert above w.r.t. db2 using the nj_vcenter:vmware provider:
+            # - url is from the overridden nj_vcenter provider, not nyc_vcenter
+            # - image from the provider is still overridden by the nyc-vm profile
+            # - password from the map override is still overriding both the provider and profile password
+            #
+            # what we assert above about grain handling (and provider/profile/map data in general):
+            # - provider grains are able to be overridden by profile data
+            # - provider grains are overridden by map data
+            # - profile data is overridden by map data
+            # ie, the provider->profile->map inheritance works as expected
+            map_data = cloud_map.map_data()
+            self.assertEqual(map_data, merged_profile)
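The comments above describe provider -> profile -> map precedence. A simplified illustration of that merge order follows; this is a sketch of the semantics the test asserts, not salt.cloud's actual implementation:

    def deep_merge(base, override):
        # Recursively overlay 'override' onto 'base'; override wins on conflicts.
        merged = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    provider = {'minion': {'master': 'providermaster',
                           'grains': {'providergrain': True}}}
    profile = {'minion': {'master': 'profilemaster',
                          'grains': {'profilegrain': True}}}
    map_entry = {'minion': {'master': 'mapmaster',
                            'grains': {'mapgrain': True}}}

    merged = deep_merge(deep_merge(provider, profile), map_entry)
    assert merged['minion']['master'] == 'mapmaster'
    assert sorted(merged['minion']['grains']) == [
        'mapgrain', 'profilegrain', 'providergrain']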
@@ -889,9 +889,9 @@ SwapTotal: 4789244 kB'''
         test virtual grain with cmd virt-what
         '''
         virt = 'kvm'
-        with patch.object(salt.utils, 'is_windows',
+        with patch.object(salt.utils.platform, 'is_windows',
                           MagicMock(return_value=False)):
-            with patch.object(salt.utils, 'which',
+            with patch.object(salt.utils.path, 'which',
                               MagicMock(return_value=True)):
                 with patch.dict(core.__salt__, {'cmd.run_all':
                                                 MagicMock(return_value={'pid': 78,
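The fix above tracks the utils namespacing split (salt.utils.is_windows moved to salt.utils.platform.is_windows, salt.utils.which to salt.utils.path.which), so the patches must target the new modules. A standalone illustration using unittest.mock, which the suite's tests.support.mock wraps:

    from unittest import mock

    import salt.utils.platform

    with mock.patch.object(salt.utils.platform, 'is_windows',
                           mock.MagicMock(return_value=False)):
        assert salt.utils.platform.is_windows() is False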
@@ -1053,3 +1053,104 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
                 call('prune_volumes', filters={'label': ['foo', 'bar=baz']}),
             ]
         )
+
+    def test_port(self):
+        '''
+        Test docker.port function. Note that this test case does not test what
+        happens when a specific container name is passed and that container
+        does not exist. When that happens, the Docker API will just raise a 404
+        error. Since we're using a side_effect to mock
+        docker.inspect_container, it would be meaningless to code raising an
+        exception into it and then test that we raised that exception.
+        '''
+        ports = {
+            'foo': {
+                '5555/tcp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32768'}
+                ],
+                '6666/tcp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32769'}
+                ],
+            },
+            'bar': {
+                '4444/udp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32767'}
+                ],
+                '5555/tcp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32768'}
+                ],
+                '6666/tcp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32769'}
+                ],
+            },
+            'baz': {
+                '5555/tcp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32768'}
+                ],
+                '6666/udp': [
+                    {'HostIp': '0.0.0.0', 'HostPort': '32769'}
+                ],
+            },
+        }
+        list_mock = MagicMock(return_value=['bar', 'baz', 'foo'])
+        inspect_mock = MagicMock(
+            side_effect=lambda x: {'NetworkSettings': {'Ports': ports.get(x)}}
+        )
+        with patch.object(docker_mod, 'list_containers', list_mock), \
+                patch.object(docker_mod, 'inspect_container', inspect_mock):
+
+            # Test with specific container name
+            ret = docker_mod.port('foo')
+            self.assertEqual(ret, ports['foo'])
+
+            # Test with specific container name and filtering on port
+            ret = docker_mod.port('foo', private_port='5555/tcp')
+            self.assertEqual(ret, {'5555/tcp': ports['foo']['5555/tcp']})
+
+            # Test using pattern expression
+            ret = docker_mod.port('ba*')
+            self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
+            ret = docker_mod.port('ba?')
+            self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
+            ret = docker_mod.port('ba[rz]')
+            self.assertEqual(ret, {'bar': ports['bar'], 'baz': ports['baz']})
+
+            # Test using pattern expression and port filtering
+            ret = docker_mod.port('ba*', private_port='6666/tcp')
+            self.assertEqual(
+                ret,
+                {'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
+            )
+            ret = docker_mod.port('ba?', private_port='6666/tcp')
+            self.assertEqual(
+                ret,
+                {'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
+            )
+            ret = docker_mod.port('ba[rz]', private_port='6666/tcp')
+            self.assertEqual(
+                ret,
+                {'bar': {'6666/tcp': ports['bar']['6666/tcp']}, 'baz': {}}
+            )
+            ret = docker_mod.port('*')
+            self.assertEqual(ret, ports)
+            ret = docker_mod.port('*', private_port='5555/tcp')
+            self.assertEqual(
+                ret,
+                {'foo': {'5555/tcp': ports['foo']['5555/tcp']},
+                 'bar': {'5555/tcp': ports['bar']['5555/tcp']},
+                 'baz': {'5555/tcp': ports['baz']['5555/tcp']}}
+            )
+            ret = docker_mod.port('*', private_port=6666)
+            self.assertEqual(
+                ret,
+                {'foo': {'6666/tcp': ports['foo']['6666/tcp']},
+                 'bar': {'6666/tcp': ports['bar']['6666/tcp']},
+                 'baz': {'6666/udp': ports['baz']['6666/udp']}}
+            )
+            ret = docker_mod.port('*', private_port='6666/tcp')
+            self.assertEqual(
+                ret,
+                {'foo': {'6666/tcp': ports['foo']['6666/tcp']},
+                 'bar': {'6666/tcp': ports['bar']['6666/tcp']},
+                 'baz': {}}
+            )
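The glob cases above ('ba*', 'ba?', 'ba[rz]') follow fnmatch-style semantics. A hypothetical standalone check of those same patterns; docker_mod.port() is the real entry point, and whether it uses fnmatch internally is an assumption made here for illustration:

    import fnmatch

    containers = ['bar', 'baz', 'foo']
    for pattern in ('ba*', 'ba?', 'ba[rz]'):
        assert fnmatch.filter(containers, pattern) == ['bar', 'baz']
    assert fnmatch.filter(containers, '*') == containers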
@@ -249,7 +249,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             maintenance_db='maint_db',
             password='foo',
             createdb=False,
-            createuser=False,
             encrypted=False,
             superuser=False,
             replication=False,
@@ -299,7 +298,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             maintenance_db='maint_db',
             password='foo',
             createdb=False,
-            createuser=False,
             encrypted=False,
             replication=False,
             rolepassword='test_role_pass',
@@ -331,7 +329,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             login=True,
             createdb=False,
             createroles=False,
-            createuser=False,
             encrypted=False,
             superuser=False,
             replication=False,
@@ -466,7 +463,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             password='test_pass',
             createdb=False,
             createroles=False,
-            createuser=False,
             encrypted=False,
             inherit=True,
             login=True,
@@ -505,7 +501,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             password='test_pass',
             createdb=False,
             createroles=True,
-            createuser=False,
             encrypted=False,
             inherit=True,
             login=True,
@@ -540,7 +535,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             password='test_pass',
             createdb=False,
             createroles=True,
-            createuser=False,
             encrypted=False,
             inherit=True,
             login=True,
@@ -576,7 +570,6 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
             password='test_pass',
             createdb=False,
             createroles=True,
-            createuser=False,
             encrypted=True,
             inherit=True,
             login=True,
@@ -611,3 +611,53 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
                  '--exclude=kernel*', 'upgrade'],
                 output_loglevel='trace',
                 python_shell=False)
+
+    def test_info_installed_with_all_versions(self):
+        '''
+        Test returning the information of all versions for the named package(s) installed on the system.
+
+        :return:
+        '''
+        run_out = {
+            'virgo-dummy': [
+                {'build_date': '2015-07-09T10:55:19Z',
+                 'vendor': 'openSUSE Build Service',
+                 'description': 'This is the Virgo dummy package used for testing SUSE Manager',
+                 'license': 'GPL-2.0', 'build_host': 'sheep05', 'url': 'http://www.suse.com',
+                 'build_date_time_t': 1436432119, 'relocations': '(not relocatable)',
+                 'source_rpm': 'virgo-dummy-1.0-1.1.src.rpm', 'install_date': '2016-02-23T16:31:57Z',
+                 'install_date_time_t': 1456241517, 'summary': 'Virgo dummy package', 'version': '1.0',
+                 'signature': 'DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9',
+                 'release': '1.1', 'group': 'Applications/System', 'arch': 'i686', 'size': '17992'},
+                {'build_date': '2015-07-09T10:15:19Z',
+                 'vendor': 'openSUSE Build Service',
+                 'description': 'This is the Virgo dummy package used for testing SUSE Manager',
+                 'license': 'GPL-2.0', 'build_host': 'sheep05', 'url': 'http://www.suse.com',
+                 'build_date_time_t': 1436432119, 'relocations': '(not relocatable)',
+                 'source_rpm': 'virgo-dummy-1.0-1.1.src.rpm', 'install_date': '2016-02-23T16:31:57Z',
+                 'install_date_time_t': 14562415127, 'summary': 'Virgo dummy package', 'version': '1.0',
+                 'signature': 'DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9',
+                 'release': '1.1', 'group': 'Applications/System', 'arch': 'x86_64', 'size': '13124'}
+            ],
+            'libopenssl1_0_0': [
+                {'build_date': '2015-11-04T23:20:34Z', 'vendor': 'SUSE LLC <https://www.suse.com/>',
+                 'description': 'The OpenSSL Project is a collaborative effort.',
+                 'license': 'OpenSSL', 'build_host': 'sheep11', 'url': 'https://www.openssl.org/',
+                 'build_date_time_t': 1446675634, 'relocations': '(not relocatable)',
+                 'source_rpm': 'openssl-1.0.1i-34.1.src.rpm', 'install_date': '2016-02-23T16:31:35Z',
+                 'install_date_time_t': 1456241495, 'summary': 'Secure Sockets and Transport Layer Security',
+                 'version': '1.0.1i', 'signature': 'RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82',
+                 'release': '34.1', 'group': 'Productivity/Networking/Security', 'packager': 'https://www.suse.com/',
+                 'arch': 'x86_64', 'size': '2576912'}
+            ]
+        }
+        with patch.dict(yumpkg.__salt__, {'lowpkg.info': MagicMock(return_value=run_out)}):
+            installed = yumpkg.info_installed(all_versions=True)
+            # Test overall products length
+            self.assertEqual(len(installed), 2)
+
+            # Test multiple versions for the same package
+            for pkg_name, pkg_info_list in installed.items():
+                self.assertEqual(len(pkg_info_list), 2 if pkg_name == "virgo-dummy" else 1)
+                for info in pkg_info_list:
+                    self.assertTrue(info['arch'] in ('x86_64', 'i686'))
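The assertions above pin down the shape change that all_versions=True produces: each package name maps to a list of per-version info dicts rather than a single dict. Illustrative data only, trimmed from the fixture above:

    installed = {
        'virgo-dummy': [{'arch': 'i686', 'version': '1.0'},
                        {'arch': 'x86_64', 'version': '1.0'}],
        'libopenssl1_0_0': [{'arch': 'x86_64', 'version': '1.0.1i'}],
    }
    # One list per package; multi-arch installs yield multiple entries.
    assert all(isinstance(versions, list) for versions in installed.values())
    assert len(installed['virgo-dummy']) == 2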
@@ -327,6 +327,56 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
             installed = zypper.info_installed()
             self.assertEqual(installed['vīrgô']['description'], 'vīrgô d€šçripţiǫñ')

+    def test_info_installed_with_all_versions(self):
+        '''
+        Test returning the information of all versions for the named package(s) installed on the system.
+
+        :return:
+        '''
+        run_out = {
+            'virgo-dummy': [
+                {'build_date': '2015-07-09T10:55:19Z',
+                 'vendor': 'openSUSE Build Service',
+                 'description': 'This is the Virgo dummy package used for testing SUSE Manager',
+                 'license': 'GPL-2.0', 'build_host': 'sheep05', 'url': 'http://www.suse.com',
+                 'build_date_time_t': 1436432119, 'relocations': '(not relocatable)',
+                 'source_rpm': 'virgo-dummy-1.0-1.1.src.rpm', 'install_date': '2016-02-23T16:31:57Z',
+                 'install_date_time_t': 1456241517, 'summary': 'Virgo dummy package', 'version': '1.0',
+                 'signature': 'DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9',
+                 'release': '1.1', 'group': 'Applications/System', 'arch': 'i686', 'size': '17992'},
+                {'build_date': '2015-07-09T10:15:19Z',
+                 'vendor': 'openSUSE Build Service',
+                 'description': 'This is the Virgo dummy package used for testing SUSE Manager',
+                 'license': 'GPL-2.0', 'build_host': 'sheep05', 'url': 'http://www.suse.com',
+                 'build_date_time_t': 1436432119, 'relocations': '(not relocatable)',
+                 'source_rpm': 'virgo-dummy-1.0-1.1.src.rpm', 'install_date': '2016-02-23T16:31:57Z',
+                 'install_date_time_t': 14562415127, 'summary': 'Virgo dummy package', 'version': '1.0',
+                 'signature': 'DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9',
+                 'release': '1.1', 'group': 'Applications/System', 'arch': 'x86_64', 'size': '13124'}
+            ],
+            'libopenssl1_0_0': [
+                {'build_date': '2015-11-04T23:20:34Z', 'vendor': 'SUSE LLC <https://www.suse.com/>',
+                 'description': 'The OpenSSL Project is a collaborative effort.',
+                 'license': 'OpenSSL', 'build_host': 'sheep11', 'url': 'https://www.openssl.org/',
+                 'build_date_time_t': 1446675634, 'relocations': '(not relocatable)',
+                 'source_rpm': 'openssl-1.0.1i-34.1.src.rpm', 'install_date': '2016-02-23T16:31:35Z',
+                 'install_date_time_t': 1456241495, 'summary': 'Secure Sockets and Transport Layer Security',
+                 'version': '1.0.1i', 'signature': 'RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82',
+                 'release': '34.1', 'group': 'Productivity/Networking/Security', 'packager': 'https://www.suse.com/',
+                 'arch': 'x86_64', 'size': '2576912'}
+            ]
+        }
+        with patch.dict(zypper.__salt__, {'lowpkg.info': MagicMock(return_value=run_out)}):
+            installed = zypper.info_installed(all_versions=True)
+            # Test overall products length
+            self.assertEqual(len(installed), 2)
+
+            # Test multiple versions for the same package
+            for pkg_name, pkg_info_list in installed.items():
+                self.assertEqual(len(pkg_info_list), 2 if pkg_name == "virgo-dummy" else 1)
+                for info in pkg_info_list:
+                    self.assertTrue(info['arch'] in ('x86_64', 'i686'))
+
     def test_info_available(self):
         '''
         Test return the information of the named package available for the system.
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-    tests.unit.file_test
-    ~~~~~~~~~~~~~~~~~~~~
-'''
-# Import pytohn libs
-from __future__ import absolute_import
-import os
-import copy
-import shutil
-import tempfile
-
-# Import Salt Testing libs
-from tests.support.unit import TestCase
-
-# Import Salt libs
-from salt.ext import six
-import salt.utils.files
-
-
-class FilesTestCase(TestCase):
-
-    STRUCTURE = {
-        'foo': {
-            'foofile.txt': 'fooSTRUCTURE'
-        },
-        'bar': {
-            'barfile.txt': 'barSTRUCTURE'
-        }
-    }
-
-    def _create_temp_structure(self, temp_directory, structure):
-        for folder, files in six.iteritems(structure):
-            current_directory = os.path.join(temp_directory, folder)
-            os.makedirs(current_directory)
-            for name, content in six.iteritems(files):
-                path = os.path.join(temp_directory, folder, name)
-                with salt.utils.files.fopen(path, 'w+') as fh:
-                    fh.write(content)
-
-    def _validate_folder_structure_and_contents(self, target_directory,
-                                                desired_structure):
-        for folder, files in six.iteritems(desired_structure):
-            for name, content in six.iteritems(files):
-                path = os.path.join(target_directory, folder, name)
-                with salt.utils.files.fopen(path) as fh:
-                    assert fh.read().strip() == content
-
-    def setUp(self):
-        super(FilesTestCase, self).setUp()
-        self.temp_dir = tempfile.mkdtemp()
-        self._create_temp_structure(self.temp_dir,
-                                    self.STRUCTURE)
-
-    def tearDown(self):
-        super(FilesTestCase, self).tearDown()
-        shutil.rmtree(self.temp_dir)
-
-    def test_recursive_copy(self):
-        test_target_directory = tempfile.mkdtemp()
-        TARGET_STRUCTURE = {
-            'foo': {
-                'foo.txt': 'fooTARGET_STRUCTURE'
-            },
-            'baz': {
-                'baz.txt': 'bazTARGET_STRUCTURE'
-            }
-        }
-        self._create_temp_structure(test_target_directory, TARGET_STRUCTURE)
-        try:
-            salt.utils.files.recursive_copy(self.temp_dir, test_target_directory)
-            DESIRED_STRUCTURE = copy.copy(TARGET_STRUCTURE)
-            DESIRED_STRUCTURE.update(self.STRUCTURE)
-            self._validate_folder_structure_and_contents(
-                test_target_directory,
-                DESIRED_STRUCTURE
-            )
-        finally:
-            shutil.rmtree(test_target_directory)
@@ -1,115 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-    :codeauthor: :email:`Eric Radman <ericshane@eradman.com>`
-'''
-
-# Import Python libs
-from __future__ import absolute_import
-
-# Import Salt Testing libs
-from tests.support.unit import skipIf, TestCase
-from tests.support.mock import (
-    MagicMock,
-    patch,
-    NO_MOCK,
-    NO_MOCK_REASON,
-)
-
-# Import Salt libs
-import salt.cloud
-
-EXAMPLE_PROVIDERS = {
-    'nyc_vcenter': {'vmware': {'driver': 'vmware',
-                               'password': '123456',
-                               'profiles': {'nyc-vm': {'cluster': 'nycvirt',
-                                                       'datastore': 'datastore1',
-                                                       'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
-                                                                                            'size': 20}},
-                                                                   'network': {'Network Adapter 1': {'mac': '44:44:44:44:44:42',
-                                                                                                     'name': 'vlan50',
-                                                                                                     'switch_type': 'standard'}},
-                                                                   'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
-                                                       'extra_config': {'mem.hotadd': 'yes'},
-                                                       'folder': 'coreinfra',
-                                                       'image': 'rhel6_64Guest',
-                                                       'memory': '8GB',
-                                                       'num_cpus': 2,
-                                                       'power_on': True,
-                                                       'profile': 'nyc-vm',
-                                                       'provider': 'nyc_vcenter:vmware',
-                                                       'resourcepool': 'Resources'}},
-                               'url': 'vca1.saltstack.com',
-                               'user': 'root'}}
-}
-
-EXAMPLE_PROFILES = {
-    'nyc-vm': {'cluster': 'nycvirt',
-               'datastore': 'datastore1',
-               'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
-                                                    'size': 20}},
-                           'network': {'Network Adapter 1': {'mac': '44:44:44:44:44:42',
-                                                             'name': 'vlan50',
-                                                             'switch_type': 'standard'}},
-                           'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
-               'extra_config': {'mem.hotadd': 'yes'},
-               'folder': 'coreinfra',
-               'image': 'rhel6_64Guest',
-               'memory': '8GB',
-               'num_cpus': 2,
-               'power_on': True,
-               'profile': 'nyc-vm',
-               'provider': 'nyc_vcenter:vmware',
-               'resourcepool': 'Resources'}
-}
-
-EXAMPLE_MAP = {
-    'nyc-vm': {'db1': {'cpus': 4,
-                       'devices': {'disk': {'Hard disk 1': {'size': 40}},
-                                   'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb'}}},
-                       'memory': '16GB',
-                       'name': 'db1'}}
-}
-
-
-@skipIf(NO_MOCK, NO_MOCK_REASON)
-class MapConfTest(TestCase):
-    '''
-    Validate evaluation of salt-cloud map configuration
-    '''
-
-    def test_cloud_map_merge_conf(self):
-        '''
-        Ensure that nested values can be selectivly overridden in a map file
-        '''
-        with patch('salt.config.check_driver_dependencies', MagicMock(return_value=True)), \
-                patch('salt.cloud.Map.read', MagicMock(return_value=EXAMPLE_MAP)):
-            self.maxDiff = None
-            opts = {'extension_modules': '/var/cache/salt/master/extmods',
-                    'providers': EXAMPLE_PROVIDERS, 'profiles': EXAMPLE_PROFILES}
-            cloud_map = salt.cloud.Map(opts)
-            merged_profile = {
-                'create': {'db1': {'cluster': 'nycvirt',
-                                   'cpus': 4,
-                                   'datastore': 'datastore1',
-                                   'devices': {'disk': {'Hard disk 1': {'controller': 'SCSI controller 1',
-                                                                        'size': 40}},
-                                               'network': {'Network Adapter 1': {'mac': '22:4a:b2:92:b3:eb',
-                                                                                 'name': 'vlan50',
-                                                                                 'switch_type': 'standard'}},
-                                               'scsi': {'SCSI controller 1': {'type': 'paravirtual'}}},
-                                   'driver': 'vmware',
-                                   'extra_config': {'mem.hotadd': 'yes'},
-                                   'folder': 'coreinfra',
-                                   'image': 'rhel6_64Guest',
-                                   'memory': '16GB',
-                                   'name': 'db1',
-                                   'num_cpus': 2,
-                                   'password': '123456',
-                                   'power_on': True,
-                                   'profile': 'nyc-vm',
-                                   'provider': 'nyc_vcenter:vmware',
-                                   'resourcepool': 'Resources',
-                                   'url': 'vca1.saltstack.com',
-                                   'user': 'root'}}
-            }
-            self.assertEqual(cloud_map.map_data(), merged_profile)
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-    :codeauthor: :email: `Mike Place <mp@saltstack.com>`
-
-    tests.unit.target_test
-    ~~~~~~~~~~~~~~~~~~~~~~
-'''
-
-# Import Python libs
-from __future__ import absolute_import
-import sys
-
-# Import Salt libs
-import salt.utils.minions
-import salt.config
-
-# Import Salt Testing libs
-from tests.support.unit import TestCase, skipIf
-
-import logging
-
-log = logging.getLogger(__name__)
-
-
-class CkMinionTestCase(TestCase):
-
-    def setUp(self):
-        self.ck_ = salt.utils.minions.CkMinions(salt.config.DEFAULT_MASTER_OPTS)
-
-    def tearDown(self):
-        self.ck_ = None
-
-    #TODO This is just a stub for upcoming tests
-
-
-@skipIf(sys.version_info < (2, 7), 'Python 2.7 needed for dictionary equality assertions')
-class TargetParseTestCase(TestCase):
-
-    def test_parse_grains_target(self):
-        '''
-        Ensure proper parsing for grains
-        '''
-        g_tgt = 'G@a:b'
-        ret = salt.utils.minions.parse_target(g_tgt)
-        self.assertDictEqual(ret, {'engine': 'G', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_grains_pcre_target(self):
-        '''
-        Ensure proper parsing for grains PCRE matching
-        '''
-        p_tgt = 'P@a:b'
-        ret = salt.utils.minions.parse_target(p_tgt)
-        self.assertDictEqual(ret, {'engine': 'P', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_pillar_pcre_target(self):
-        '''
-        Ensure proper parsing for pillar PCRE matching
-        '''
-        j_tgt = 'J@a:b'
-        ret = salt.utils.minions.parse_target(j_tgt)
-        self.assertDictEqual(ret, {'engine': 'J', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_list_target(self):
-        '''
-        Ensure proper parsing for list matching
-        '''
-        l_tgt = 'L@a:b'
-        ret = salt.utils.minions.parse_target(l_tgt)
-        self.assertDictEqual(ret, {'engine': 'L', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_nodegroup_target(self):
-        '''
-        Ensure proper parsing for pillar matching
-        '''
-        n_tgt = 'N@a:b'
-        ret = salt.utils.minions.parse_target(n_tgt)
-        self.assertDictEqual(ret, {'engine': 'N', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_subnet_target(self):
-        '''
-        Ensure proper parsing for subnet matching
-        '''
-        s_tgt = 'S@a:b'
-        ret = salt.utils.minions.parse_target(s_tgt)
-        self.assertDictEqual(ret, {'engine': 'S', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_minion_pcre_target(self):
-        '''
-        Ensure proper parsing for minion PCRE matching
-        '''
-        e_tgt = 'E@a:b'
-        ret = salt.utils.minions.parse_target(e_tgt)
-        self.assertDictEqual(ret, {'engine': 'E', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_range_target(self):
-        '''
-        Ensure proper parsing for range matching
-        '''
-        r_tgt = 'R@a:b'
-        ret = salt.utils.minions.parse_target(r_tgt)
-        self.assertDictEqual(ret, {'engine': 'R', 'pattern': 'a:b', 'delimiter': None})
-
-    def test_parse_multiword_target(self):
-        '''
-        Ensure proper parsing for multi-word targets
-
-        Refs https://github.com/saltstack/salt/issues/37231
-        '''
-        mw_tgt = 'G@a:b c'
-        ret = salt.utils.minions.parse_target(mw_tgt)
-        self.assertEqual(ret['pattern'], 'a:b c')
-
-
-class NodegroupCompTest(TestCase):
-    '''
-    Test nodegroup comparisons found in
-    salt.utils.minions.nodgroup_comp()
-    '''
-
-    def test_simple_nodegroup(self):
-        '''
-        Smoke test a very simple nodegroup. No recursion.
-        '''
-        simple_nodegroup = {'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com'}
-
-        ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
-        expected_ret = ['L@foo.domain.com,bar.domain.com,baz.domain.com', 'or', 'bl*.domain.com']
-        self.assertListEqual(ret, expected_ret)
-
-    def test_simple_expression_nodegroup(self):
-        '''
-        Smoke test a nodegroup with a simple expression. No recursion.
-        '''
-        simple_nodegroup = {'group1': '[foo,bar,baz].domain.com'}
-
-        ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
-        expected_ret = ['E@[foo,bar,baz].domain.com']
-        self.assertListEqual(ret, expected_ret)
-
-    def test_simple_recurse(self):
-        '''
-        Test a case where one nodegroup contains a second nodegroup
-        '''
-        referenced_nodegroups = {
-            'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com',
-            'group2': 'G@os:Debian and N@group1'
-        }
-
-        ret = salt.utils.minions.nodegroup_comp('group2', referenced_nodegroups)
-        expected_ret = [
-            '(',
-            'G@os:Debian',
-            'and',
-            '(',
-            'L@foo.domain.com,bar.domain.com,baz.domain.com',
-            'or',
-            'bl*.domain.com',
-            ')',
-            ')'
-        ]
-        self.assertListEqual(ret, expected_ret)
-
-    def test_circular_nodegroup_reference(self):
-        '''
-        Test to see what happens if A refers to B
-        and B in turn refers back to A
-        '''
-        referenced_nodegroups = {
-            'group1': 'N@group2',
-            'group2': 'N@group1'
-        }
-
-        # If this works, it should also print an error to the console
-        ret = salt.utils.minions.nodegroup_comp('group1', referenced_nodegroups)
-        self.assertEqual(ret, [])
@@ -1,10 +1,11 @@
 # -*- coding: utf-8 -*-
 '''
-Unit Tests for functions located in salt.utils.files.py.
+Unit Tests for functions located in salt/utils/files.py
 '''

 # Import python libs
 from __future__ import absolute_import, unicode_literals, print_function
+import copy
 import os

 # Import Salt libs
@@ -21,7 +22,7 @@ from tests.support.mock import (
 )


-class FilesUtilTestCase(TestCase):
+class FilesTestCase(TestCase):
     '''
     Test case for files util.
     '''
@@ -94,3 +95,54 @@ class FilesUtilTestCase(TestCase):
                 'fopen() should have been prevented from opening a file '
                 'using {0} as the filename'.format(invalid_fn)
             )
+
+    def _create_temp_structure(self, temp_directory, structure):
+        for folder, files in six.iteritems(structure):
+            current_directory = os.path.join(temp_directory, folder)
+            os.makedirs(current_directory)
+            for name, content in six.iteritems(files):
+                path = os.path.join(temp_directory, folder, name)
+                with salt.utils.files.fopen(path, 'w+') as fh:
+                    fh.write(content)
+
+    def _validate_folder_structure_and_contents(self, target_directory,
+                                                desired_structure):
+        for folder, files in six.iteritems(desired_structure):
+            for name, content in six.iteritems(files):
+                path = os.path.join(target_directory, folder, name)
+                with salt.utils.files.fopen(path) as fh:
+                    assert fh.read().strip() == content
+
+    @with_tempdir()
+    @with_tempdir()
+    def test_recursive_copy(self, src, dest):
+        src_structure = {
+            'foo': {
+                'foofile.txt': 'fooSTRUCTURE'
+            },
+            'bar': {
+                'barfile.txt': 'barSTRUCTURE'
+            }
+        }
+        dest_structure = {
+            'foo': {
+                'foo.txt': 'fooTARGET_STRUCTURE'
+            },
+            'baz': {
+                'baz.txt': 'bazTARGET_STRUCTURE'
+            }
+        }
+
+        # Create the file structures in both src and dest dirs
+        self._create_temp_structure(src, src_structure)
+        self._create_temp_structure(dest, dest_structure)
+
+        # Perform the recursive copy
+        salt.utils.files.recursive_copy(src, dest)
+
+        # Confirm results match expected results
+        desired_structure = copy.copy(dest_structure)
+        desired_structure.update(src_structure)
+        self._validate_folder_structure_and_contents(
+            dest,
+            desired_structure)
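A behavioural sketch of what the rewritten test checks: recursive_copy overlays src onto dest, leaving dest-only entries in place. This illustrates the expected semantics rather than salt.utils.files.recursive_copy itself, and assumes Python 3 for exist_ok:

    import os
    import shutil

    def recursive_copy_sketch(src, dest):
        # Walk src and mirror each file into the corresponding dest subtree;
        # files already in dest but absent from src are left untouched.
        for root, _, files in os.walk(src):
            rel = os.path.relpath(root, src)
            target = dest if rel == '.' else os.path.join(dest, rel)
            os.makedirs(target, exist_ok=True)
            for name in files:
                shutil.copy2(os.path.join(root, name),
                             os.path.join(target, name))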
@@ -2,12 +2,13 @@

 # Import python libs
 from __future__ import absolute_import, unicode_literals
+import sys

 # Import Salt Libs
-import salt.utils.minions as minions
+import salt.utils.minions

 # Import Salt Testing Libs
-from tests.support.unit import TestCase
+from tests.support.unit import TestCase, skipIf
 from tests.support.mock import (
     patch,
     MagicMock,
@@ -38,7 +39,7 @@ class MinionsTestCase(TestCase):
         '''
         for nodegroup in NODEGROUPS:
             expected = EXPECTED[nodegroup]
-            ret = minions.nodegroup_comp(nodegroup, NODEGROUPS)
+            ret = salt.utils.minions.nodegroup_comp(nodegroup, NODEGROUPS)
             self.assertEqual(ret, expected)


@@ -47,7 +48,7 @@ class CkMinionsTestCase(TestCase):
     TestCase for salt.utils.minions.CkMinions class
     '''
     def setUp(self):
-        self.ckminions = minions.CkMinions({})
+        self.ckminions = salt.utils.minions.CkMinions({})

     def test_spec_check(self):
         # Test spec-only rule
@@ -366,3 +367,145 @@ class CkMinionsTestCase(TestCase):
         args = ['1', '2']
         ret = self.ckminions.auth_check(auth_list, 'test.arg', args, 'runner')
         self.assertTrue(ret)
+
+
+@skipIf(sys.version_info < (2, 7), 'Python 2.7 needed for dictionary equality assertions')
+class TargetParseTestCase(TestCase):
+
+    def test_parse_grains_target(self):
+        '''
+        Ensure proper parsing for grains
+        '''
+        g_tgt = 'G@a:b'
+        ret = salt.utils.minions.parse_target(g_tgt)
+        self.assertDictEqual(ret, {'engine': 'G', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_grains_pcre_target(self):
+        '''
+        Ensure proper parsing for grains PCRE matching
+        '''
+        p_tgt = 'P@a:b'
+        ret = salt.utils.minions.parse_target(p_tgt)
+        self.assertDictEqual(ret, {'engine': 'P', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_pillar_pcre_target(self):
+        '''
+        Ensure proper parsing for pillar PCRE matching
+        '''
+        j_tgt = 'J@a:b'
+        ret = salt.utils.minions.parse_target(j_tgt)
+        self.assertDictEqual(ret, {'engine': 'J', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_list_target(self):
+        '''
+        Ensure proper parsing for list matching
+        '''
+        l_tgt = 'L@a:b'
+        ret = salt.utils.minions.parse_target(l_tgt)
+        self.assertDictEqual(ret, {'engine': 'L', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_nodegroup_target(self):
+        '''
+        Ensure proper parsing for nodegroup matching
+        '''
+        n_tgt = 'N@a:b'
+        ret = salt.utils.minions.parse_target(n_tgt)
+        self.assertDictEqual(ret, {'engine': 'N', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_subnet_target(self):
+        '''
+        Ensure proper parsing for subnet matching
+        '''
+        s_tgt = 'S@a:b'
+        ret = salt.utils.minions.parse_target(s_tgt)
+        self.assertDictEqual(ret, {'engine': 'S', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_minion_pcre_target(self):
+        '''
+        Ensure proper parsing for minion PCRE matching
+        '''
+        e_tgt = 'E@a:b'
+        ret = salt.utils.minions.parse_target(e_tgt)
+        self.assertDictEqual(ret, {'engine': 'E', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_range_target(self):
+        '''
+        Ensure proper parsing for range matching
+        '''
+        r_tgt = 'R@a:b'
+        ret = salt.utils.minions.parse_target(r_tgt)
+        self.assertDictEqual(ret, {'engine': 'R', 'pattern': 'a:b', 'delimiter': None})
+
+    def test_parse_multiword_target(self):
+        '''
+        Ensure proper parsing for multi-word targets
+
+        Refs https://github.com/saltstack/salt/issues/37231
+        '''
+        mw_tgt = 'G@a:b c'
+        ret = salt.utils.minions.parse_target(mw_tgt)
+        self.assertEqual(ret['pattern'], 'a:b c')
+
+
+class NodegroupCompTest(TestCase):
+    '''
+    Test nodegroup comparisons found in
+    salt.utils.minions.nodegroup_comp()
+    '''
+
+    def test_simple_nodegroup(self):
+        '''
+        Smoke test a very simple nodegroup. No recursion.
+        '''
+        simple_nodegroup = {'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com'}
+
+        ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
+        expected_ret = ['L@foo.domain.com,bar.domain.com,baz.domain.com', 'or', 'bl*.domain.com']
+        self.assertListEqual(ret, expected_ret)
+
+    def test_simple_expression_nodegroup(self):
+        '''
+        Smoke test a nodegroup with a simple expression. No recursion.
+        '''
+        simple_nodegroup = {'group1': '[foo,bar,baz].domain.com'}
+
+        ret = salt.utils.minions.nodegroup_comp('group1', simple_nodegroup)
+        expected_ret = ['E@[foo,bar,baz].domain.com']
+        self.assertListEqual(ret, expected_ret)
+
+    def test_simple_recurse(self):
+        '''
+        Test a case where one nodegroup contains a second nodegroup
+        '''
+        referenced_nodegroups = {
+            'group1': 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com',
+            'group2': 'G@os:Debian and N@group1'
+        }
+
+        ret = salt.utils.minions.nodegroup_comp('group2', referenced_nodegroups)
+        expected_ret = [
+            '(',
+            'G@os:Debian',
+            'and',
+            '(',
+            'L@foo.domain.com,bar.domain.com,baz.domain.com',
+            'or',
+            'bl*.domain.com',
+            ')',
+            ')'
+        ]
+        self.assertListEqual(ret, expected_ret)
+
+    def test_circular_nodegroup_reference(self):
+        '''
+        Test to see what happens if A refers to B
+        and B in turn refers back to A
+        '''
+        referenced_nodegroups = {
+            'group1': 'N@group2',
+            'group2': 'N@group1'
+        }
+
+        # If this works, it should also print an error to the console
+        ret = salt.utils.minions.nodegroup_comp('group1', referenced_nodegroups)
+        self.assertEqual(ret, [])
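All of the parse_target cases above share one surface grammar: a single engine letter, an '@', and the remaining pattern (G grains, P grains PCRE, J pillar PCRE, L list, N nodegroup, S subnet, E minion PCRE, R range). A hypothetical one-liner capturing just that prefix split; salt.utils.minions.parse_target is the real parser and its internals are not assumed here:

    import re

    # Engine letters taken from the tests above; the regex itself is illustrative.
    TARGET_RE = re.compile(r'^(?P<engine>[GPJLNSER])@(?P<pattern>.*)$')

    match = TARGET_RE.match('G@a:b c')
    assert match.group('engine') == 'G'
    assert match.group('pattern') == 'a:b c'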
@@ -37,7 +37,7 @@ integration.runners.test_jobs
 integration.runners.test_salt
 integration.sdb.test_env
 integration.states.test_host
-integration.states.test_pip
+integration.states.test_pip_state
 integration.states.test_reg
 integration.states.test_renderers
 integration.utils.testprogram