Merge branch '2017.7' into 'oxygen'

Conflicts:
  - salt/cloud/clouds/dimensiondata.py
  - salt/config/__init__.py
  - salt/modules/yumpkg.py
  - salt/pillar/ec2_pillar.py
  - salt/utils/docker/__init__.py
  - salt/utils/vault.py
  - tests/integration/states/test_docker_container.py
  - tests/unit/modules/test_yumpkg.py
  - tests/unit/templates/test_jinja.py
  - tests/unit/utils/test_docker.py
This commit is contained in:
rallytime 2018-02-14 16:31:31 -05:00
commit e060a74fd8
No known key found for this signature in database
GPG Key ID: E8F1A4B90D0DEA19
18 changed files with 1045 additions and 167 deletions

View File

@ -3,3 +3,78 @@ Salt 2017.7.4 Release Notes
===========================
Version 2017.7.4 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Changes for v2017.7.3..v2017.7.4
---------------------------------------------------------------
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2018-02-13T16:29:07Z*
Statistics:
- Total Merges: **4**
- Total Issue references: **3**
- Total PR references: **7**
Changes:
- **PR** `#45981`_: (*gtmanfred*) use local config for vault when masterless
@ *2018-02-13T15:22:01Z*
- **ISSUE** `#45976`_: (*grobinson-blockchain*) 6a5e0f9 introduces regression that breaks Vault module for salt masterless
| refs: `#45981`_
* ca76a0b328 Merge pull request `#45981`_ from gtmanfred/2017.7.3
* 0d448457dc apparently local is not set by default
* 2a92f4bc16 use local config for vault when masterless
- **PR** `#45953`_: (*rallytime*) Back-port `#45928`_ to 2017.7.3
@ *2018-02-09T22:29:10Z*
- **ISSUE** `#45915`_: (*MatthiasKuehneEllerhold*) 2017.7.3: Salt-SSH & Vault Pillar: Permission denied "minion.pem"
| refs: `#45928`_
- **PR** `#45928`_: (*garethgreenaway*) [2017.7] Fixing vault when used with pillar over salt-ssh
| refs: `#45953`_
* 6530649dbc Merge pull request `#45953`_ from rallytime/`bp-45928`_-2017.7.3
* 85363189d1 Fixing vault when used with pillar over salt-ssh
- **PR** `#45934`_: (*rallytime*) Back-port `#45902`_ to 2017.7.3
@ *2018-02-09T16:31:08Z*
- **ISSUE** `#45893`_: (*CrackerJackMack*) archive.extracted ValueError "No path specified" in 2017.7.3
| refs: `#45902`_
- **PR** `#45902`_: (*terminalmage*) Check the effective saltenv for cached archive
| refs: `#45934`_
* fb378cebb0 Merge pull request `#45934`_ from rallytime/`bp-45902`_
* bb83e8b345 Add regression test for issue 45893
* cdda66d759 Remove duplicated section in docstring and fix example
* 4b6351cda6 Check the effective saltenv for cached archive
- **PR** `#45935`_: (*rallytime*) Back-port `#45742`_ to 2017.7.3
@ *2018-02-09T14:02:26Z*
- **PR** `#45742`_: (*marccardinal*) list.copy() is not compatible with python 2.7
| refs: `#45935`_
* 0d74151c71 Merge pull request `#45935`_ from rallytime/`bp-45742`_
* 6a0b5f7af3 Removed the chained copy
* ad1150fad4 list.copy() is not compatible with python 2.7
.. _`#45742`: https://github.com/saltstack/salt/pull/45742
.. _`#45893`: https://github.com/saltstack/salt/issues/45893
.. _`#45902`: https://github.com/saltstack/salt/pull/45902
.. _`#45915`: https://github.com/saltstack/salt/issues/45915
.. _`#45928`: https://github.com/saltstack/salt/pull/45928
.. _`#45934`: https://github.com/saltstack/salt/pull/45934
.. _`#45935`: https://github.com/saltstack/salt/pull/45935
.. _`#45953`: https://github.com/saltstack/salt/pull/45953
.. _`#45976`: https://github.com/saltstack/salt/issues/45976
.. _`#45981`: https://github.com/saltstack/salt/pull/45981
.. _`bp-45742`: https://github.com/saltstack/salt/pull/45742
.. _`bp-45902`: https://github.com/saltstack/salt/pull/45902
.. _`bp-45928`: https://github.com/saltstack/salt/pull/45928

View File

@ -32,7 +32,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeState
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@ -52,9 +52,6 @@ try:
except ImportError:
HAS_LIBCLOUD = False
# Import generic libcloud functions
# from salt.cloud.libcloudfuncs import *
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
@ -217,7 +214,6 @@ def create(vm_):
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
rootPw = NodeAuthPassword(vm_['auth'])
location = conn.ex_get_location_by_id(vm_['location'])
images = conn.list_images(location=location)
@ -248,15 +244,13 @@ def create(vm_):
kwargs = {
'name': vm_['name'],
'image': image,
'auth': rootPw,
'ex_description': vm_['description'],
'ex_network_domain': network_domain,
'ex_vlan': vlan,
'ex_is_started': vm_['is_started']
}
event_data = kwargs.copy()
del event_data['auth']
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
@ -267,6 +261,10 @@ def create(vm_):
transport=__opts__['transport']
)
# Initial password (excluded from event payload)
initial_password = NodeAuthPassword(vm_['auth'])
kwargs['auth'] = initial_password
try:
data = conn.create_node(**kwargs)
except Exception as exc:
@ -280,7 +278,7 @@ def create(vm_):
return False
try:
data = salt.utils.cloud.wait_for_ip(
data = __utils__['cloud.wait_for_ip'](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
@ -306,7 +304,7 @@ def create(vm_):
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address %s', ip_address)
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: %s', salt_ip_address)
else:
@ -322,7 +320,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = vm_['auth']
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@ -414,11 +412,13 @@ def create_lb(kwargs=None, call=None):
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@ -427,11 +427,13 @@ def create_lb(kwargs=None, call=None):
name, port, protocol, algorithm, members
)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@ -573,3 +575,46 @@ def get_lb_conn(dd_driver=None):
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data

View File

@ -281,6 +281,7 @@ VALID_OPTS = {
# Location of the files a minion should look for. Set to 'local' to never ask the master.
'file_client': six.string_types,
'local': bool,
# When using a local file_client, this parameter is used to allow the client to connect to
# a master for remote execution.
@ -1243,6 +1244,7 @@ DEFAULT_MINION_OPTS = {
'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR],
},
'file_client': 'remote',
'local': False,
'use_master_when_local': False,
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,

View File

@ -34,7 +34,7 @@ def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
return __salt__['config.option']('aliases.file')
return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():

View File

@ -422,7 +422,7 @@ def _run(cmd,
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
env_cmd = ('su', runas, '-c', sys.executable)
env_cmd = ('su', '-', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
env_bytes = salt.utils.stringutils.to_bytes(subprocess.Popen(

View File

@ -308,13 +308,13 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
if pkgs is None:
version_num = kwargs.get('version')
variant_spec = kwargs.get('variant')
spec = None
spec = {}
if version_num:
spec = (spec or '') + '@' + version_num
spec['version'] = version_num
if variant_spec:
spec = (spec or '') + variant_spec
spec['variant'] = variant_spec
pkg_params = {name: spec}
@ -323,7 +323,14 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
formulas_array = []
for pname, pparams in six.iteritems(pkg_params):
formulas_array.append(pname + (pparams or ''))
formulas_array.append(pname)
if pparams:
if 'version' in pparams:
formulas_array.append('@' + pparams['version'])
if 'variant' in pparams:
formulas_array.append(pparams['variant'])
old = list_pkgs()
cmd = ['port', 'install']

View File

@ -456,7 +456,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
'''
if filetype:
_validate_filetype(filetype)
re_spacer = '[ ]{2,}'
re_spacer = '[ ]+'
cmd_kwargs = {'spacer': re_spacer,
'filespec': re.escape(name),
'sel_user': sel_user or '[^:]+',
@ -469,11 +469,14 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True)
if current_entry_text == '':
return None
ret = {}
current_entry_list = re.split(re_spacer, current_entry_text)
ret['filespec'] = current_entry_list[0]
ret['filetype'] = current_entry_list[1]
ret.update(_context_string_to_dict(current_entry_list[2]))
parts = re.match(r'^({filespec}) +([a-z ]+) (.*)$'.format(**{'filespec': re.escape(name)}), current_entry_text)
ret = {
'filespec': parts.group(1).strip(),
'filetype': parts.group(2).strip(),
}
ret.update(_context_string_to_dict(parts.group(3).strip()))
return ret
@ -517,7 +520,9 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
if action not in ['add', 'delete']:
raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action))
cmd = 'semanage fcontext --{0}'.format(action)
if filetype is not None:
# "semanage --ftype a" isn't valid on Centos 6,
# don't pass --ftype since "a" is the default filetype.
if filetype is not None and filetype != 'a':
_validate_filetype(filetype)
cmd += ' --ftype {0}'.format(filetype)
if sel_type is not None:

View File

@ -926,8 +926,8 @@ def highstate(test=None, queue=False, **kwargs):
.. code-block:: bash
salt '*' state.higstate exclude=bar,baz
salt '*' state.higstate exclude=foo*
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv

View File

@ -211,25 +211,29 @@ def _check_versionlock():
)
def _get_repo_options(**kwargs):
def _get_options(**kwargs):
'''
Returns a list of '--enablerepo' and '--disablerepo' options to be used
in the yum command, based on the kwargs.
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
'''
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disableexcludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and not fromrepo:
fromrepo = repo
ret = []
if fromrepo:
log.info('Restricting to repo \'%s\'', fromrepo)
ret.extend(['--disablerepo=*', '--enablerepo=' + fromrepo])
ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)])
else:
if disablerepo:
targets = [disablerepo] \
@ -245,58 +249,30 @@ def _get_repo_options(**kwargs):
else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets])
return ret
if disableexcludes:
log.info('Disabling excludes for \'%s\'', disableexcludes)
ret.append('--disableexcludes={0}'.format(disableexcludes))
def _get_excludes_option(**kwargs):
'''
Returns a list of '--disableexcludes' option to be used in the yum command,
based on the kwargs.
'''
disable_excludes = kwargs.pop('disableexcludes', '')
ret = []
if disable_excludes:
log.info('Disabling excludes for \'%s\'', disable_excludes)
ret.append('--disableexcludes={0}'.format(disable_excludes))
return ret
def _get_branch_option(**kwargs):
'''
Returns a list of '--branch' option to be used in the yum command,
based on the kwargs. This feature requires 'branch' plugin for YUM.
'''
branch = kwargs.pop('branch', '')
ret = []
if branch:
log.info('Adding branch \'%s\'', branch)
ret.append('--branch=\'{0}\''.format(branch))
return ret
ret.append('--branch={0}'.format(branch))
if get_extra_options:
# sorting here to make order uniform, makes unit testing more reliable
for key in sorted(kwargs):
if key.startswith('__'):
continue
value = kwargs[key]
if isinstance(value, six.string_types):
log.info('Found extra option --%s=%s', key, value)
ret.append('--{0}={1}'.format(key, value))
elif value is True:
log.info('Found extra option --%s', key)
ret.append('--{0}'.format(key))
if ret:
log.info('Adding extra options: %s', ret)
def _get_extra_options(**kwargs):
'''
Returns list of extra options for yum
'''
ret = []
kwargs = salt.utils.args.clean_kwargs(**kwargs)
# Remove already handled options from kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disable_excludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
for key, value in six.iteritems(kwargs):
if isinstance(value, six.string_types):
log.info('Adding extra option --%s=\'%s\'', key, value)
ret.append('--{0}=\'{1}\''.format(key, value))
elif value is True:
log.info('Adding extra option --%s', key)
ret.append('--{0}'.format(key))
log.info('Adding extra options %s', ret)
return ret
@ -460,8 +436,7 @@ def latest_version(*names, **kwargs):
if len(names) == 0:
return ''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
options = _get_options(**kwargs)
# Refresh before looking for the latest version available
if refresh:
@ -471,8 +446,7 @@ def latest_version(*names, **kwargs):
# Get available versions for specified package(s)
cmd = [_yum(), '--quiet']
cmd.extend(repo_arg)
cmd.extend(exclude_arg)
cmd.extend(options)
cmd.extend(['list', 'available'])
cmd.extend(names)
out = __salt__['cmd.run_all'](cmd,
@ -818,7 +792,7 @@ def list_repo_pkgs(*args, **kwargs):
disablerepo = kwargs.pop('disablerepo', '') or ''
enablerepo = kwargs.pop('enablerepo', '') or ''
repo_arg = _get_repo_options(fromrepo=fromrepo, **kwargs)
repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
if fromrepo and not isinstance(fromrepo, list):
try:
@ -970,15 +944,13 @@ def list_upgrades(refresh=True, **kwargs):
salt '*' pkg.list_upgrades
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(check_update=False, **kwargs)
cmd = [_yum(), '--quiet']
cmd.extend(repo_arg)
cmd.extend(exclude_arg)
cmd.extend(options)
cmd.extend(['list', 'upgrades' if _yum() == 'dnf' else 'updates'])
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
@ -1096,21 +1068,19 @@ def refresh_db(**kwargs):
check_update_ = kwargs.pop('check_update', True)
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
options = _get_options(**kwargs)
clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache']
update_cmd = [_yum(), '--quiet', 'check-update']
if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7':
# This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL
if __grains__.get('os_family') == 'RedHat' \
and __grains__.get('osmajorrelease') == 7:
# This feature is disabled because it is not used by Salt and adds a
# lot of extra time to the command with large repos like EPEL
update_cmd.append('--setopt=autocheck_running_kernel=false')
for args in (repo_arg, exclude_arg, branch_arg):
if args:
clean_cmd.extend(args)
update_cmd.extend(args)
clean_cmd.extend(options)
update_cmd.extend(options)
__salt__['cmd.run'](clean_cmd, python_shell=False)
if check_update_:
@ -1162,6 +1132,7 @@ def install(name=None,
reinstall=False,
normalize=True,
update_holds=False,
saltenv='base',
ignore_epoch=False,
**kwargs):
'''
@ -1343,9 +1314,7 @@ def install(name=None,
'version': '<new-version>',
'arch': '<new-arch>'}}}
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
@ -1353,7 +1322,7 @@ def install(name=None,
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, normalize=normalize, **kwargs
name, pkgs, sources, saltenv=saltenv, normalize=normalize
)
except MinionError as exc:
raise CommandExecutionError(exc)
@ -1580,9 +1549,7 @@ def install(name=None,
'''
DRY function to add args common to all yum/dnf commands
'''
for arg in (repo_arg, exclude_arg, branch_arg):
if arg:
cmd.extend(arg)
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
if downloadonly:
@ -1847,17 +1814,14 @@ def upgrade(name=None,
.. note::
To add extra arguments to the ``yum upgrade`` command, pass them as key
word arguments. For arguments without assignments, pass ``True``
word arguments. For arguments without assignments, pass ``True``
.. code-block:: bash
salt '*' pkg.upgrade security=True exclude='kernel*'
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
extra_args = _get_extra_options(**kwargs)
options = _get_options(get_extra_options=True, **kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
@ -1886,9 +1850,7 @@ def upgrade(name=None,
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '--quiet', '-y'])
for args in (repo_arg, exclude_arg, branch_arg, extra_args):
if args:
cmd.extend(args)
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
cmd.append('upgrade')

View File

@ -1,19 +1,37 @@
# -*- coding: utf-8 -*-
#-*- coding: utf-8 -*-
'''
Retrieve EC2 instance data for minions.
Retrieve EC2 instance data for minions for ec2_tags and ec2_tags_list
The minion id must be the instance-id retrieved from AWS. As an
option, use_grain can be set to True. This allows the use of an
The minion id must be the AWS instance-id or value in 'tag_key'.
For example set 'tag_key' to 'Name', to have the minion-id matched against the
tag 'Name'. The tag contents must be unique. The value of tag_value can
be 'uqdn' or 'asis'. If 'uqdn', any domain is stripped before the comparison.
The option use_grain can be set to True. This allows the use of an
instance-id grain instead of the minion-id. Since this is a potential
security risk, the configuration can be further expanded to include
a list of minions that are trusted to only allow the alternate id
of the instances to specific hosts. There is no glob matching at
this time.
The optional 'tag_list_key' indicates which keys should be added to
'ec2_tags_list' and be split by tag_list_sep (default `;`). If a tag key is
included in 'tag_list_key' it is removed from ec2_tags. If a tag does not
exist it is still included as an empty list.
Note: restart the salt-master for changes to take effect.
.. code-block:: yaml
ext_pillar:
- ec2_pillar:
tag_key: 'Name'
tag_value: 'asis'
tag_list_key:
- Role
tag_list_sep: ';'
use_grain: True
minion_ids:
- trusted-minion-1
@ -31,6 +49,8 @@ the instance.
from __future__ import absolute_import, print_function, unicode_literals
import re
import logging
import salt.ext.six as six
from salt.ext.six.moves import range
# Import salt libs
from salt.utils.versions import StrictVersion as _StrictVersion
@ -47,6 +67,9 @@ except ImportError:
# Set up logging
log = logging.getLogger(__name__)
# DEBUG boto is far too verbose
logging.getLogger('boto').setLevel(logging.WARNING)
def __virtual__():
'''
@ -59,7 +82,7 @@ def __virtual__():
required_boto_version = _StrictVersion('2.8.0')
if boto_version < required_boto_version:
log.error("%s: installed boto version %s < %s, can't retrieve instance data",
__name__, boto_version, required_boto_version)
__name__, boto_version, required_boto_version)
return False
return True
@ -76,64 +99,145 @@ def _get_instance_info():
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
use_grain=False,
minion_ids=None):
minion_ids=None,
tag_key=None,
tag_value='asis',
tag_list_key=None,
tag_list_sep=';'):
'''
Execute a command and read the output as YAML
'''
valid_tag_value = ['uqdn', 'asis']
log.debug("Querying EC2 tags for minion id %s", minion_id)
# meta-data:instance-id
grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
if not grain_instance_id:
# dynamic:instance-identity:document:instanceId
grain_instance_id = \
__grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
log.error('External pillar %s, instance-id \'%s\' is not valid for '
'\'%s\'', __name__, grain_instance_id, minion_id)
grain_instance_id = None # invalid instance id found, remove it from use.
# If minion_id is not in the format of an AWS EC2 instance, check to see
# if there is a grain named 'instance-id' use that. Because this is a
# security risk, the master config must contain a use_grain: True option
# for this external pillar, which defaults to no
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is None:
if 'instance-id' not in __grains__:
log.debug("Minion-id is not in AWS instance-id formation, and there "
"is no instance-id grain for minion %s", minion_id)
return {}
if not use_grain:
log.debug("Minion-id is not in AWS instance-id formation, and option "
"not set to use instance-id grain, for minion %s, use_grain "
"is %s", minion_id, use_grain)
return {}
log.debug("use_grain set to %s", use_grain)
if minion_ids is not None and minion_id not in minion_ids:
log.debug("Minion-id is not in AWS instance ID format, and minion_ids "
"is set in the ec2_pillar configuration, but minion %s is "
"not in the list of allowed minions %s", minion_id, minion_ids)
return {}
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', __grains__['instance-id']) is not None:
minion_id = __grains__['instance-id']
log.debug("Minion-id is not in AWS instance ID format, but a grain"
" is, so using %s as the minion ID", minion_id)
# Check AWS Tag restrictions .i.e. letters, spaces, and numbers and + - = . _ : / @
if tag_key and re.match(r'[\w=.:/@-]+$', tag_key) is None:
log.error('External pillar %s, tag_key \'%s\' is not valid ',
__name__, tag_key if isinstance(tag_key, six.text_type) else 'non-string')
return {}
if tag_key and tag_value not in valid_tag_value:
log.error('External pillar %s, tag_value \'%s\' is not valid must be one '
'of %s', __name__, tag_value, ' '.join(valid_tag_value))
return {}
if not tag_key:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id', __name__, minion_id)
else:
base_msg = ('External pillar %s, querying EC2 tags for minion id \'%s\' '
'against instance-id or \'%s\' against \'%s\'', __name__, minion_id, tag_key, tag_value)
log.debug(base_msg)
find_filter = None
find_id = None
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
find_filter = None
find_id = minion_id
elif tag_key:
if tag_value == 'uqdn':
find_filter = {'tag:{0}'.format(tag_key): minion_id.split('.', 1)[0]}
else:
log.debug("Nether minion id nor a grain named instance-id is in "
"AWS format, can't query EC2 tags for minion %s", minion_id)
return {}
find_filter = {'tag:{0}'.format(tag_key): minion_id}
if grain_instance_id:
# we have an untrusted grain_instance_id, use it to narrow the search
# even more. Combination will be unique even if uqdn is set.
find_filter.update({'instance-id': grain_instance_id})
# Add this if running state is not dependant on EC2Config
# find_filter.update('instance-state-name': 'running')
m = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if len(m.keys()) < 1:
log.info("%s: not an EC2 instance, skipping", __name__)
return None
# no minion-id is instance-id and no suitable filter, try use_grain if enabled
if not find_filter and not find_id and use_grain:
if not grain_instance_id:
log.debug('Minion-id is not in AWS instance-id formation, and there '
'is no instance-id grain for minion %s', minion_id)
return {}
if minion_ids is not None and minion_id not in minion_ids:
log.debug('Minion-id is not in AWS instance ID format, and minion_ids '
'is set in the ec2_pillar configuration, but minion %s is '
'not in the list of allowed minions %s', minion_id, minion_ids)
return {}
find_id = grain_instance_id
if not (find_filter or find_id):
log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against '
'instance-id or \'%s\' against \'%s\' noughthing to match against',
__name__, minion_id, tag_key, tag_value)
return {}
myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if len(myself.keys()) < 1:
log.info("%s: salt master not an EC2 instance, skipping", __name__)
return {}
# Get the Master's instance info, primarily the region
(instance_id, region) = _get_instance_info()
(_, region) = _get_instance_info()
try:
conn = boto.ec2.connect_to_region(region)
except boto.exception as e: # pylint: disable=E0712
log.error("%s: invalid AWS credentials.", __name__)
return None
except boto.exception.AWSConnectionError as exc:
log.error('%s: invalid AWS credentials, %s', __name__, exc)
return {}
except:
raise
if conn is None:
log.error('%s: Could not connect to region %s', __name__, region)
return {}
tags = {}
try:
_tags = conn.get_all_tags(filters={'resource-type': 'instance',
'resource-id': minion_id})
for tag in _tags:
tags[tag.name] = tag.value
except IndexError as e:
log.error("Couldn't retrieve instance information: %s", e)
return None
if find_id:
instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
else:
# filters and max_results cannot be used together.
instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
return {'ec2_tags': tags}
except boto.exception.EC2ResponseError as exc:
log.error('%s failed with \'%s\'', base_msg, exc)
return {}
if not instance_data:
log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
# Find an active instance, i.e. ignore terminated and stopped instances
active_inst = []
for inst in range(0, len(instance_data)):
if instance_data[inst].state not in ['terminated', 'stopped']:
active_inst.append(inst)
valid_inst = len(active_inst)
if not valid_inst:
log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
if valid_inst > 1:
log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
instance = instance_data[active_inst[0]]
if instance.tags:
ec2_tags = instance.tags
ec2_tags_list = {}
log.debug('External pillar %s, for minion id \'%s\', tags: %s', __name__, minion_id, instance.tags)
if tag_list_key and isinstance(tag_list_key, list):
for item in tag_list_key:
if item in ec2_tags:
ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
del ec2_tags[item] # make sure its only in ec2_tags_list
else:
ec2_tags_list[item] = [] # always return a result
return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
return {}

View File

@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',

View File

@ -124,7 +124,7 @@ def _get_vault_connection():
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
return _use_local_config()
elif '_ssh_version' in __opts__:
elif any((__opts__['local'], __opts__['file_client'] == 'local', __opts__['master_type'] == 'disable')):
return _use_local_config()
else:
log.debug('Contacting master for Vault connection details')

View File

@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
'''
Integration tests for the Dimension Data cloud provider
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import random
import string
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.paths import FILES
from tests.support.helpers import expensiveTest
# Import Salt Libs
from salt.config import cloud_providers_config
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = _random_name('CLOUD-TEST-')
PROVIDER_NAME = 'dimensiondata'
def _random_name(size=6):
'''
Generates a random cloud instance name
'''
return 'cloud-test-' + ''.join(
random.choice(string.ascii_lowercase + string.digits)
for x in range(size)
)
class DimensionDataTest(ShellCase):
    '''
    Integration tests for the Dimension Data cloud provider in Salt-Cloud

    NOTE(review): relies on the module-level ``INSTANCE_NAME`` and
    ``PROVIDER_NAME`` constants and on real cloud credentials in the
    provider config — these tests make live API calls.
    '''

    @expensiveTest
    def setUp(self):
        '''
        Sets up the test requirements

        Skips the test run when the dimensiondata provider configuration is
        missing or when the credentials (user_id, key, region) are unset.
        '''
        super(DimensionDataTest, self).setUp()

        # check if appropriate cloud provider and profile files are present
        profile_str = 'dimensiondata-config'
        providers = self.run_cloud('--list-providers')
        if profile_str + ':' not in providers:
            self.skipTest(
                'Configuration file for {0} was not found. Check {0}.conf files '
                'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
                .format(PROVIDER_NAME)
            )

        # check if user_id, key, and region are present
        config = cloud_providers_config(
            os.path.join(
                FILES,
                'conf',
                'cloud.providers.d',
                PROVIDER_NAME + '.conf'
            )
        )

        user_id = config[profile_str][PROVIDER_NAME]['user_id']
        key = config[profile_str][PROVIDER_NAME]['key']
        region = config[profile_str][PROVIDER_NAME]['region']

        # Empty credentials mean the test rig is not configured for this
        # provider; skip rather than fail.
        if user_id == '' or key == '' or region == '':
            self.skipTest(
                'A user Id, password, and a region '
                'must be provided to run these tests. Check '
                'tests/integration/files/conf/cloud.providers.d/{0}.conf'
                .format(PROVIDER_NAME)
            )

    def test_list_images(self):
        '''
        Tests the return of running the --list-images command for the dimensiondata cloud provider
        '''
        image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
        self.assertIn(
            'Ubuntu 14.04 2 CPU',
            [i.strip() for i in image_list]
        )

    def test_list_locations(self):
        '''
        Tests the return of running the --list-locations command for the dimensiondata cloud provider
        '''
        _list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME))
        self.assertIn(
            'Australia - Melbourne MCP2',
            [i.strip() for i in _list_locations]
        )

    def test_list_sizes(self):
        '''
        Tests the return of running the --list-sizes command for the dimensiondata cloud provider
        '''
        _list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME))
        self.assertIn(
            'default',
            [i.strip() for i in _list_sizes]
        )

    def test_instance(self):
        '''
        Test creating an instance on Dimension Data's cloud

        Creates an instance from the ``dimensiondata-test`` profile, then
        deletes it; on any failure the instance is destroyed before the
        assertion error is re-raised so no orphaned VMs are left behind.
        '''
        # check if instance with salt installed returned
        try:
            self.assertIn(
                INSTANCE_NAME,
                [i.strip() for i in self.run_cloud('-p dimensiondata-test {0}'.format(INSTANCE_NAME), timeout=500)]
            )
        except AssertionError:
            # Creation failed (or salt bootstrap failed): clean up before
            # surfacing the failure.
            self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
            raise

        # delete the instance
        try:
            self.assertIn(
                'True',
                [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)]
            )
        except AssertionError:
            raise

        # Final clean-up of created instance, in case something went wrong.
        # This was originally in a tearDown function, but that didn't make
        # sense to run for each test when not all tests create instances.
        if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]:
            self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)

View File

@ -0,0 +1,11 @@
# Salt-Cloud profile used by the Dimension Data integration tests.
# Credentials come from the matching provider config (dimensiondata-config);
# empty values here are placeholders filled in by the test environment.
dimensiondata-test:
  provider: dimensiondata-config
  image: 42816eb2-9846-4483-95c3-7d7fbddebf2c
  size: default
  location: AU10
  is_started: yes
  description: 'Salt Ubuntu test'
  network_domain: ''
  vlan: ''
  ssh_interface: private_ips
  auth: ''

View File

@ -0,0 +1,5 @@
# Salt-Cloud provider configuration for the Dimension Data integration
# tests. user_id and key are intentionally blank; the test setUp skips the
# suite when they have not been filled in locally.
dimensiondata-config:
  driver: dimensiondata
  user_id: ''
  key: ''
  region: 'dd-au'

View File

@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
# Import Salt Testing Libs
from __future__ import absolute_import
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt libs
import salt.modules.selinux as selinux
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SelinuxModuleTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.selinux
    '''
    def setup_loader_modules(self):
        # No pre-populated dunders are required; __salt__ is patched
        # per-case inside the tests below.
        return {selinux: {}}

    def test_fcontext_get_policy_parsing(self):
        '''
        Test to verify that the parsing of the semanage output into fields is
        correct. Added with #45784.
        '''
        # Each case maps one line of `semanage fcontext -l` output to the
        # fields fcontext_get_policy() is expected to parse out of it.
        # NOTE(review): several entries below are byte-for-byte identical.
        # The upstream test exercised varying amounts of whitespace between
        # the semanage output columns, which appears to have been collapsed
        # here -- verify against the upstream test before relying on these
        # cases being distinct.
        cases = [
            {
                'semanage_out': '/var/www(/.*)? all files system_u:object_r:httpd_sys_content_t:s0',
                'name': '/var/www(/.*)?',
                'filetype': 'all files',
                'sel_user': 'system_u',
                'sel_role': 'object_r',
                'sel_type': 'httpd_sys_content_t',
                'sel_level': 's0'
            },
            {
                'semanage_out': '/var/www(/.*)? all files system_u:object_r:httpd_sys_content_t:s0',
                'name': '/var/www(/.*)?',
                'filetype': 'all files',
                'sel_user': 'system_u',
                'sel_role': 'object_r',
                'sel_type': 'httpd_sys_content_t',
                'sel_level': 's0'
            },
            {
                'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0',
                'name': '/var/lib/dhcp3?',
                'filetype': 'directory',
                'sel_user': 'system_u',
                'sel_role': 'object_r',
                'sel_type': 'dhcp_state_t',
                'sel_level': 's0'
            },
            {
                'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0',
                'name': '/var/lib/dhcp3?',
                'filetype': 'directory',
                'sel_user': 'system_u',
                'sel_role': 'object_r',
                'sel_type': 'dhcp_state_t',
                'sel_level': 's0'
            },
            {
                'semanage_out': '/var/lib/dhcp3? directory system_u:object_r:dhcp_state_t:s0',
                'name': '/var/lib/dhcp3?',
                'filetype': 'directory',
                'sel_user': 'system_u',
                'sel_role': 'object_r',
                'sel_type': 'dhcp_state_t',
                'sel_level': 's0'
            }
        ]

        for case in cases:
            with patch.dict(selinux.__salt__, {'cmd.shell': MagicMock(return_value=case['semanage_out'])}):
                ret = selinux.fcontext_get_policy(case['name'])
                self.assertEqual(ret['filespec'], case['name'])
                self.assertEqual(ret['filetype'], case['filetype'])
                self.assertEqual(ret['sel_user'], case['sel_user'])
                self.assertEqual(ret['sel_role'], case['sel_role'])
                self.assertEqual(ret['sel_type'], case['sel_type'])
                self.assertEqual(ret['sel_level'], case['sel_level'])

View File

@ -8,6 +8,7 @@ import os
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
@ -18,6 +19,39 @@ from tests.support.mock import (
import salt.modules.yumpkg as yumpkg
import salt.modules.pkg_resource as pkg_resource
# Canned return data mimicking yumpkg.list_repos() on a stock CentOS 7 box:
# two enabled repos (base, updates) and their disabled source counterparts.
# Used below to drive the list_repo_pkgs enablerepo/disablerepo tests.
LIST_REPOS = {
    'base': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra',
        'name': 'CentOS-$releasever - Base'
    },
    'base-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/os/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Base Sources'
    },
    'updates': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra',
        'name': 'CentOS-$releasever - Updates'
    },
    'updates-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/updates/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Updates Sources'
    }
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class YumTestCase(TestCase, LoaderModuleMockMixin):
@ -25,7 +59,18 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
Test cases for salt.modules.yumpkg
'''
def setup_loader_modules(self):
    '''
    Provide the dunders the yumpkg module reads during these tests:
    a yum binary name in __context__ and RedHat-ish grains.

    NOTE: the stale ``return {yumpkg: {'rpm': None}}`` line left over from
    the 2017.7 merge has been removed -- it made the real return below
    unreachable.
    '''
    return {
        yumpkg: {
            '__context__': {
                'yum_bin': 'yum',
            },
            '__grains__': {
                'osarch': 'x86_64',
                'os_family': 'RedHat',
                'osmajorrelease': 7,
            },
        }
    }
def test_list_pkgs(self):
'''
@ -186,3 +231,373 @@ class YumTestCase(TestCase, LoaderModuleMockMixin):
}}.items():
self.assertTrue(pkgs.get(pkg_name))
self.assertEqual(pkgs[pkg_name], [pkg_attr])
def test_latest_version_with_options(self):
    '''
    Verify that latest_version() translates the fromrepo / enablerepo /
    disablerepo / branch options into the expected yum command line.
    '''
    # Empty list_pkgs => nothing installed, so latest_version queries repos.
    with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})):

        # with fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.latest_version(
                'foo',
                refresh=False,
                fromrepo='good',
                branch='foo')
            # fromrepo expands to disabling every repo and enabling
            # only the requested one.
            cmd.assert_called_once_with(
                ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
                 '--branch=foo', 'list', 'available', 'foo'],
                ignore_retcode=True,
                output_loglevel='trace',
                python_shell=False)

        # without fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.latest_version(
                'foo',
                refresh=False,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            # Explicit enablerepo/disablerepo are passed through as-is.
            cmd.assert_called_once_with(
                ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
                 '--branch=foo', 'list', 'available', 'foo'],
                ignore_retcode=True,
                output_loglevel='trace',
                python_shell=False)
def test_list_repo_pkgs_with_options(self):
    '''
    Test list_repo_pkgs with and without fromrepo

    NOTE: mock_calls is a stack. The most recent call is indexed
    with 0, while the first call would have the highest index.
    '''
    # Mocks for `cmd.run` returning different yum version numbers;
    # list_repo_pkgs changes its command strategy based on the version.
    really_old_yum = MagicMock(return_value='3.2.0')
    older_yum = MagicMock(return_value='3.4.0')
    newer_yum = MagicMock(return_value='3.4.5')
    list_repos_mock = MagicMock(return_value=LIST_REPOS)
    # Common kwargs expected on every cmd.run_all invocation.
    kwargs = {'output_loglevel': 'trace',
              'ignore_retcode': True,
              'python_shell': False}

    with patch.object(yumpkg, 'list_repos', list_repos_mock):

        # Test with really old yum. The fromrepo argument has no effect on
        # the yum commands we'd run.
        with patch.dict(yumpkg.__salt__, {'cmd.run': really_old_yum}):
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_repo_pkgs('foo')
                # We should have called cmd.run_all twice
                self.assertEqual(len(cmd.mock_calls), 2)

                # Check args from first call
                self.assertEqual(
                    cmd.mock_calls[1][1],
                    (['yum', '--quiet', 'list', 'available'],)
                )

                # Check kwargs from first call
                self.assertEqual(cmd.mock_calls[1][2], kwargs)

                # Check args from second call
                self.assertEqual(
                    cmd.mock_calls[0][1],
                    (['yum', '--quiet', 'list', 'installed'],)
                )

                # Check kwargs from second call
                self.assertEqual(cmd.mock_calls[0][2], kwargs)

        # Test with yum 3.4.0. fromrepo still has no effect on the yum
        # commands we'd run, but --showduplicates is now supported.
        # (The previous comment here was a copy/paste of the "really old
        # yum" case above.)
        with patch.dict(yumpkg.__salt__, {'cmd.run': older_yum}):
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_repo_pkgs('foo')
                # We should have called cmd.run_all twice
                self.assertEqual(len(cmd.mock_calls), 2)

                # Check args from first call
                self.assertEqual(
                    cmd.mock_calls[1][1],
                    (['yum', '--quiet', '--showduplicates', 'list', 'available'],)
                )

                # Check kwargs from first call
                self.assertEqual(cmd.mock_calls[1][2], kwargs)

                # Check args from second call
                self.assertEqual(
                    cmd.mock_calls[0][1],
                    (['yum', '--quiet', '--showduplicates', 'list', 'installed'],)
                )

                # Check kwargs from second call
                self.assertEqual(cmd.mock_calls[0][2], kwargs)

        # Test with newer yum. We should run one yum command per repo, so
        # fromrepo would limit how many calls we make.
        with patch.dict(yumpkg.__salt__, {'cmd.run': newer_yum}):
            # When fromrepo is used, we would only run one yum command, for
            # that specific repo.
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_repo_pkgs('foo', fromrepo='base')
                # We should have called cmd.run_all once
                self.assertEqual(len(cmd.mock_calls), 1)

                # Check args
                self.assertEqual(
                    cmd.mock_calls[0][1],
                    (['yum', '--quiet', '--showduplicates',
                      'repository-packages', 'base', 'list', 'foo'],)
                )
                # Check kwargs
                self.assertEqual(cmd.mock_calls[0][2], kwargs)

            # Test enabling base-source and disabling updates. We should
            # get two calls, one for each enabled repo. Because dict
            # iteration order will vary, different Python versions will be
            # do them in different orders, which is OK, but it will just
            # mean that we will have to check both the first and second
            # mock call both times.
            cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
                yumpkg.list_repo_pkgs(
                    'foo',
                    enablerepo='base-source',
                    disablerepo='updates')
                # We should have called cmd.run_all twice
                self.assertEqual(len(cmd.mock_calls), 2)

                for repo in ('base', 'base-source'):
                    # Try both call slots; whichever slot this repo landed
                    # in depends on dict iteration order.
                    for index in (0, 1):
                        try:
                            # Check args
                            self.assertEqual(
                                cmd.mock_calls[index][1],
                                (['yum', '--quiet', '--showduplicates',
                                  'repository-packages', repo, 'list',
                                  'foo'],)
                            )
                            # Check kwargs
                            self.assertEqual(cmd.mock_calls[index][2], kwargs)
                            break
                        except AssertionError:
                            continue
                    else:
                        self.fail("repo '{0}' not checked".format(repo))
def test_list_upgrades_dnf(self):
    '''
    The subcommand should be "upgrades" with dnf
    '''
    # Force the dnf code path by overriding the yum_bin set in
    # setup_loader_modules.
    with patch.dict(yumpkg.__context__, {'yum_bin': 'dnf'}):
        # with fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.list_upgrades(
                refresh=False,
                fromrepo='good',
                branch='foo')
            # fromrepo expands to disabling all repos and enabling only
            # the requested one.
            cmd.assert_called_once_with(
                ['dnf', '--quiet', '--disablerepo=*', '--enablerepo=good',
                 '--branch=foo', 'list', 'upgrades'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)

        # without fromrepo
        cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.list_upgrades(
                refresh=False,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            cmd.assert_called_once_with(
                ['dnf', '--quiet', '--disablerepo=bad', '--enablerepo=good',
                 '--branch=foo', 'list', 'upgrades'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)
def test_list_upgrades_yum(self):
    '''
    With yum (as opposed to dnf) the list subcommand must be "updates".
    '''
    expected_kwargs = {'output_loglevel': 'trace',
                       'ignore_retcode': True,
                       'python_shell': False}

    # Scenario 1: fromrepo -- every repo disabled, requested one enabled.
    run_all_mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
    with patch.dict(yumpkg.__salt__, {'cmd.run_all': run_all_mock}):
        yumpkg.list_upgrades(
            refresh=False,
            fromrepo='good',
            branch='foo')
        run_all_mock.assert_called_once_with(
            ['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
             '--branch=foo', 'list', 'updates'],
            **expected_kwargs)

    # Scenario 2: explicit enablerepo/disablerepo instead of fromrepo.
    run_all_mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
    with patch.dict(yumpkg.__salt__, {'cmd.run_all': run_all_mock}):
        yumpkg.list_upgrades(
            refresh=False,
            enablerepo='good',
            disablerepo='bad',
            branch='foo')
        run_all_mock.assert_called_once_with(
            ['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
             '--branch=foo', 'list', 'updates'],
            **expected_kwargs)
def test_refresh_db_with_options(self):
    '''
    Verify the clean / check-update command lines that refresh_db()
    builds for the fromrepo, enablerepo, disablerepo and branch options.
    '''
    # clear_rtag touches the filesystem; stub it out.
    with patch('salt.utils.pkg.clear_rtag', Mock()):

        # With check_update=True we will do a cmd.run to run the clean_cmd, and
        # then a separate cmd.retcode to check for updates.

        # with fromrepo
        clean_cmd = Mock()
        update_cmd = MagicMock(return_value=0)
        with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
                                          'cmd.retcode': update_cmd}):
            yumpkg.refresh_db(
                check_update=True,
                fromrepo='good',
                branch='foo')
            clean_cmd.assert_called_once_with(
                ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*',
                 '--enablerepo=good', '--branch=foo'],
                python_shell=False)
            update_cmd.assert_called_once_with(
                ['yum', '--quiet', 'check-update',
                 '--setopt=autocheck_running_kernel=false', '--disablerepo=*',
                 '--enablerepo=good', '--branch=foo'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)

        # without fromrepo
        clean_cmd = Mock()
        update_cmd = MagicMock(return_value=0)
        with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
                                          'cmd.retcode': update_cmd}):
            yumpkg.refresh_db(
                check_update=True,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            clean_cmd.assert_called_once_with(
                ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad',
                 '--enablerepo=good', '--branch=foo'],
                python_shell=False)
            update_cmd.assert_called_once_with(
                ['yum', '--quiet', 'check-update',
                 '--setopt=autocheck_running_kernel=false', '--disablerepo=bad',
                 '--enablerepo=good', '--branch=foo'],
                output_loglevel='trace',
                ignore_retcode=True,
                python_shell=False)

        # With check_update=False we will just do a cmd.run for the clean_cmd

        # with fromrepo
        clean_cmd = Mock()
        with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
            yumpkg.refresh_db(
                check_update=False,
                fromrepo='good',
                branch='foo')
            clean_cmd.assert_called_once_with(
                ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=*',
                 '--enablerepo=good', '--branch=foo'],
                python_shell=False)

        # without fromrepo
        clean_cmd = Mock()
        with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
            yumpkg.refresh_db(
                check_update=False,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            clean_cmd.assert_called_once_with(
                ['yum', '--quiet', 'clean', 'expire-cache', '--disablerepo=bad',
                 '--enablerepo=good', '--branch=foo'],
                python_shell=False)
def test_install_with_options(self):
    '''
    Verify the yum install command line that install() builds for the
    fromrepo, enablerepo, disablerepo and branch options.
    '''
    # Pretend 'foo' is the only target and nothing is installed/held, and
    # disable systemd scope handling so no scope args are prepended.
    parse_targets = MagicMock(return_value=({'foo': None}, 'repository'))
    with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
            patch.object(yumpkg, 'list_holds', MagicMock(return_value=[])), \
            patch.dict(yumpkg.__salt__, {'pkg_resource.parse_targets': parse_targets}), \
            patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):

        # with fromrepo
        cmd = MagicMock(return_value={'retcode': 0})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.install(
                refresh=False,
                fromrepo='good',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '-y', '--disablerepo=*', '--enablerepo=good',
                 '--branch=foo', 'install', 'foo'],
                output_loglevel='trace',
                python_shell=False,
                redirect_stderr=True)

        # without fromrepo
        cmd = MagicMock(return_value={'retcode': 0})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.install(
                refresh=False,
                enablerepo='good',
                disablerepo='bad',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '-y', '--disablerepo=bad', '--enablerepo=good',
                 '--branch=foo', 'install', 'foo'],
                output_loglevel='trace',
                python_shell=False,
                redirect_stderr=True)
def test_upgrade_with_options(self):
    '''
    Verify the yum upgrade command line that upgrade() builds for the
    fromrepo, enablerepo, disablerepo, exclude and branch options.
    '''
    with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
            patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):

        # with fromrepo
        cmd = MagicMock(return_value={'retcode': 0})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.upgrade(
                refresh=False,
                fromrepo='good',
                exclude='kernel*',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '--quiet', '-y', '--disablerepo=*', '--enablerepo=good',
                 '--branch=foo', '--exclude=kernel*', 'upgrade'],
                output_loglevel='trace',
                python_shell=False)

        # without fromrepo
        cmd = MagicMock(return_value={'retcode': 0})
        with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
            yumpkg.upgrade(
                refresh=False,
                enablerepo='good',
                disablerepo='bad',
                exclude='kernel*',
                branch='foo')
            cmd.assert_called_once_with(
                ['yum', '--quiet', '-y', '--disablerepo=bad', '--enablerepo=good',
                 '--branch=foo', '--exclude=kernel*', 'upgrade'],
                output_loglevel='trace',
                python_shell=False)

View File

@ -773,6 +773,29 @@ class TranslateBase(TestCase):
ret[key] = val
return ret
@staticmethod
def normalize_ports(ret):
    '''
    When we translate exposed ports, we can end up with a mixture of ints
    (representing TCP ports) and tuples (representing UDP ports). Python 2
    will sort an iterable containing these mixed types, but Python 3 will
    not. This helper is used to munge the ports in the return data so that
    the resulting list is sorted in a way that can reliably be compared to
    the expected results in the test.

    This helper should only be needed for port_bindings and ports.
    '''
    if 'ports' in ret[0]:
        port_list = ret[0]['ports']
        # Partition into ints (TCP) and everything else (UDP tuples),
        # sort each group on its own, then concatenate.
        tcp = sorted(p for p in port_list
                     if isinstance(p, six.integer_types))
        udp = sorted(p for p in port_list
                     if not isinstance(p, six.integer_types))
        ret[0]['ports'] = tcp + udp
    return ret
def tearDown(self):
'''
Test skip_translate kwarg