Merge branch '2016.3' into '2016.11'

Conflicts:
  - salt/modules/dockerng.py
  - salt/states/dockerng.py
rallytime 2017-02-21 13:59:14 -07:00
commit c613d19e76
21 changed files with 604 additions and 86 deletions

View File

@ -89,12 +89,33 @@ A simpler returner, such as Slack or HipChat, requires:
Step 2: Configure the Returner
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After you understand the configuration and have the external system ready, add
the returner configuration settings to the Salt Minion configuration file for
the External Job Cache, or to the Salt Master configuration file for the Master
Job Cache.
After you understand the configuration and have the external system ready, the
configuration requirements must be declared.
For example, MySQL requires:
External Job Cache
""""""""""""""""""
The returner configuration settings can be declared in the Salt Minion
configuration file, the Minion's pillar data, or the Minion's grains.
If ``external_job_cache`` configuration settings are specified in more than
one place, the options are retrieved in the following order. The first
configuration location that is found is the one that will be used (see the
sketch following this list):
- Minion configuration file
- Minion's grains
- Minion's pillar data
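The lookup is a simple first-match search across those three locations. The
following is a minimal sketch of that precedence with purely hypothetical data
and a hypothetical helper name, not Salt's actual implementation:
.. code-block:: python

    # Hedged sketch: first-match resolution of an External Job Cache returner
    # option across the three documented locations (hypothetical data).
    def resolve_returner_option(key, minion_config, grains, pillar):
        for source in (minion_config, grains, pillar):
            if key in source:
                return source[key]
        return None

    minion_config = {'mysql.host': 'db1.example.com'}  # minion configuration file
    grains = {'mysql.host': 'db2.example.com'}         # minion's grains
    pillar = {}                                        # minion's pillar data

    # The minion configuration file wins because it is checked first.
    assert resolve_returner_option('mysql.host', minion_config, grains, pillar) == 'db1.example.com'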
Master Job Cache
""""""""""""""""
The returner configuration settings for the Master Job Cache should be
declared in the Salt Master's configuration file.
Configuration File Examples
"""""""""""""""""""""""""""
MySQL requires:
.. code-block:: yaml

View File

@ -101,11 +101,13 @@ releases pygit2_ 0.20.3 and libgit2_ 0.20.0 is the recommended combination.
RedHat Pygit2 Issues
~~~~~~~~~~~~~~~~~~~~
Around the time of the release of RedHat 7.3, RedHat effectively broke pygit2_
by upgrading python-cffi_ to a release incompatible with the version of pygit2_
available in their repositories. This prevents Python from importing the
pygit2_ module at all, leading to a master that refuses to start, and leaving
the following errors in the master log file:
The release of RedHat/CentOS 7.3 upgraded both ``python-cffi`` and
``http-parser``, which are dependencies of pygit2_ and libgit2_. Both
pygit2_ and libgit2_ (which come from the EPEL repository and are not managed
directly by RedHat) need to be rebuilt against these updated dependencies.
The below errors will show up in the master log if an incompatible
``python-pygit2`` package is installed:
.. code-block:: text
@ -114,34 +116,37 @@ the following errors in the master log file:
2017-02-10 09:07:34,907 [salt.utils.gitfs ][CRITICAL][11211] No suitable gitfs provider module is installed.
2017-02-10 09:07:34,912 [salt.master ][CRITICAL][11211] Master failed pre flight checks, exiting
This issue has been reported on the `RedHat Bugzilla`_. In the meantime, you
can work around it by downgrading python-cffi_. To do this, go to `this page`_
and download the appropriate python-cffi_ 0.8.6 RPM. Then copy that RPM to the
master and downgrade using the ``rpm`` command. For example:
The below errors will show up in the master log if an incompatible ``libgit2``
package is installed:
.. code-block:: text
2017-02-15 18:04:45,211 [salt.utils.gitfs ][ERROR ][6211] Error occurred fetching gitfs remote 'https://foo.com/bar.git': No Content-Type header in response
As of 15 February 2017, ``python-pygit2`` has been rebuilt and is in the stable
EPEL repository. However, ``libgit2`` remains broken (a `bug report`_ has been
filed to get it rebuilt).
In the meantime, you can work around this by downgrading ``http-parser``. To do
this, go to `this page`_ and download the appropriate ``http-parser`` RPM for
the OS architecture you are using (x86_64, etc.). Then downgrade using the
``rpm`` command. For example:
.. code-block:: bash
# rpm -Uvh --oldpackage python-cffi-0.8.6-1.el7.x86_64.rpm
[root@784e8a8c5028 /]# curl --silent -O https://kojipkgs.fedoraproject.org//packages/http-parser/2.0/5.20121128gitcd01361.el7/x86_64/http-parser-2.0-5.20121128gitcd01361.el7.x86_64.rpm
[root@784e8a8c5028 /]# rpm -Uvh --oldpackage http-parser-2.0-5.20121128gitcd01361.el7.x86_64.rpm
Preparing... ################################# [100%]
Updating / installing...
1:python-cffi-0.8.6-1.el7 ################################# [ 50%]
1:http-parser-2.0-5.20121128gitcd01################################# [ 50%]
Cleaning up / removing...
2:python-cffi-1.6.0-5.el7 ################################# [100%]
# rpm -q python-cffi
python-cffi-0.8.6-1.el7.x86_64
2:http-parser-2.7.1-3.el7 ################################# [100%]
To confirm that pygit2_ is now working, you can test it by trying to import it:
A restart of the salt-master daemon may be required before http(s)
repositories can be fetched again.
.. code-block:: bash
# python -c 'import pygit2'
#
If the command produces no output, then your master should work when you start
it again.
.. _`this page`: https://koji.fedoraproject.org/koji/buildinfo?buildID=569520
.. _`RedHat Bugzilla`: https://bugzilla.redhat.com/show_bug.cgi?id=1400668
.. _`this page`: https://koji.fedoraproject.org/koji/buildinfo?buildID=703753
.. _`bug report`: https://bugzilla.redhat.com/show_bug.cgi?id=1422583
GitPython

View File

@ -231,6 +231,8 @@ def create(vm_):
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)

View File

@ -886,6 +886,8 @@ def create(vm_):
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)

View File

@ -727,6 +727,8 @@ def create(vm_):
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)

View File

@ -295,6 +295,8 @@ def create(vm_):
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)

View File

@ -4,6 +4,12 @@ Connection module for Amazon CloudTrail
.. versionadded:: 2016.3.0
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
:configuration: This module accepts explicit CloudTrail credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
@ -39,8 +45,6 @@ Connection module for Amazon CloudTrail
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
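For context, Salt's boto modules typically guard these dependencies with an
import check consulted by ``__virtual__``; the sketch below is illustrative
only (the flag name and error message are assumptions, not copied from this
file):
.. code-block:: python

    # Hedged sketch of a typical dependency guard for a module that needs
    # both boto and boto3. Names and the error message are illustrative.
    try:
        import boto
        import boto3
        HAS_BOTO = True
    except ImportError:
        HAS_BOTO = False


    def __virtual__():
        if not HAS_BOTO:
            return (False, 'The boto_cloudtrail module requires boto and boto3')
        return 'boto_cloudtrail'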

View File

@ -4,6 +4,12 @@ Connection module for Amazon IoT
.. versionadded:: 2016.3.0
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
:configuration: This module accepts explicit IoT credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
@ -39,8 +45,6 @@ Connection module for Amazon IoT
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602

View File

@ -4,6 +4,12 @@ Connection module for Amazon Lambda
.. versionadded:: 2016.3.0
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
:configuration: This module accepts explicit Lambda credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
@ -69,8 +75,6 @@ Connection module for Amazon Lambda
error:
message: error message
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602

View File

@ -4,6 +4,12 @@ Connection module for Amazon S3 Buckets
.. versionadded:: 2016.3.0
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
:configuration: This module accepts explicit S3 credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
@ -39,8 +45,6 @@ Connection module for Amazon S3 Buckets
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602

View File

@ -208,6 +208,14 @@ Functions
Executing Commands Within a Running Container
---------------------------------------------
.. note::
With the release of Docker 1.13.1, the Execution Driver has been removed.
Starting in versions 2016.3.6, 2016.11.4, and Nitrogen, Salt defaults to
using ``docker exec`` to run commands in containers. For older Salt releases,
however, it is necessary to set the ``docker.exec_driver`` config option to
either ``docker-exec`` or ``nsenter`` for Docker versions 1.13.1 and newer
(a rough illustration of the drivers follows this note).
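To make the note above concrete, here is a rough, hedged illustration of what
each exec driver amounts to on the host; the container name, placeholder PID,
and command are hypothetical:
.. code-block:: python

    # Hedged illustration only: approximate host-side commands behind the
    # three exec drivers. 'mycontainer' and the PID placeholder are made up.
    def exec_cmd(driver, container, cmd):
        if driver == 'docker-exec':
            return ['docker', 'exec', container] + cmd
        if driver == 'lxc-attach':
            return ['lxc-attach', '--clear-env', '--name', container, '--'] + cmd
        if driver == 'nsenter':
            pid = 'PID_OF_' + container  # in reality, the container's init PID
            return ['nsenter', '--target', pid, '--mount', '--uts', '--ipc',
                    '--net', '--pid', '--'] + cmd
        raise ValueError('unknown exec driver: {0}'.format(driver))

    exec_cmd('docker-exec', 'mycontainer', ['ls', '-l', '/'])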
Multiple methods exist for executing commands within Docker containers:
- lxc-attach_: Default for older versions of docker
@ -269,7 +277,6 @@ import distutils.version # pylint: disable=import-error,no-name-in-module,unuse
import fnmatch
import functools
import gzip
import inspect as inspect_module
import io
import json
import logging
@ -289,6 +296,7 @@ import subprocess
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.ext.six as six
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
from salt.utils.args import get_function_argspec as _argspec
import salt.utils
import salt.utils.decorators
import salt.utils.thin
@ -302,11 +310,22 @@ import salt.client.ssh.state
# pylint: disable=import-error
try:
import docker
import docker.utils
HAS_DOCKER_PY = True
except ImportError:
HAS_DOCKER_PY = False
# These next two imports are only necessary to have access to the needed
# functions so that we can get argspecs for the container config, host config,
# and networking config (see the get_client_args() function).
try:
import docker.types
except ImportError:
pass
try:
import docker.utils
except ImportError:
pass
try:
if six.PY2:
import backports.lzma as lzma
@ -886,10 +905,12 @@ def _get_exec_driver():
__context__[contextkey] = from_config
return from_config
# For old versions of docker, lxc was the only supported driver.
# This is a sane default.
driver = info().get('ExecutionDriver', 'lxc-')
if driver.startswith('lxc-'):
# The execution driver was removed in Docker 1.13.1, docker-exec is now
# the default.
driver = info().get('ExecutionDriver', 'docker-exec')
if driver == 'docker-exec':
__context__[contextkey] = driver
elif driver.startswith('lxc-'):
__context__[contextkey] = 'lxc-attach'
elif driver.startswith('native-') and HAS_NSENTER:
__context__[contextkey] = 'nsenter'
@ -3138,7 +3159,7 @@ def create(image,
# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#create-a-container
if salt.utils.version_cmp(version()['ApiVersion'], '1.15') > 0:
client = __context__['docker.client']
host_config_args = inspect_module.getargspec(docker.utils.create_host_config).args
host_config_args = get_client_args()['host_config']
create_kwargs['host_config'] = client.create_host_config(
**dict((arg, create_kwargs.pop(arg, None)) for arg in host_config_args if arg != 'version')
)
@ -5733,7 +5754,6 @@ def call(name, function, *args, **kwargs):
.. code-block:: bash
salt myminion dockerng.call test.ping
salt myminion test.arg arg1 arg2 key1=val1
The container does not need to have Salt installed, but Python
@ -5911,3 +5931,72 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
__salt__['dockerng.stop'](id_)
return __salt__['dockerng.commit'](id_, name)
def get_client_args():
'''
.. versionadded:: 2016.3.6,2016.11.4,Nitrogen
Returns the args for docker-py's `low-level API`_, organized by container
config, host config, and networking config.
.. _`low-level API`: http://docker-py.readthedocs.io/en/stable/api.html
CLI Example:
.. code-block:: bash
salt myminion docker.get_client_args
'''
try:
config_args = _argspec(docker.types.ContainerConfig.__init__).args
except AttributeError:
try:
config_args = _argspec(docker.utils.create_container_config).args
except AttributeError:
raise CommandExecutionError(
'Failed to get create_container_config argspec'
)
try:
host_config_args = \
_argspec(docker.types.HostConfig.__init__).args
except AttributeError:
try:
host_config_args = _argspec(docker.utils.create_host_config).args
except AttributeError:
raise CommandExecutionError(
'Failed to get create_host_config argspec'
)
try:
endpoint_config_args = \
_argspec(docker.types.EndpointConfig.__init__).args
except AttributeError:
try:
endpoint_config_args = \
_argspec(docker.utils.create_endpoint_config).args
except AttributeError:
raise CommandExecutionError(
'Failed to get create_endpoint_config argspec'
)
for arglist in (config_args, host_config_args, endpoint_config_args):
try:
# The API version is passed automagically by the API code that
# imports these classes/functions and is not an arg that we will be
# passing, so remove it if present.
arglist.remove('version')
except ValueError:
pass
# Remove any args in host or networking config from the main config dict.
# This keeps us from accidentally allowing args that have been moved from
# the container config to the host config (but are still accepted by
# create_container_config so warnings can be issued).
for arglist in (host_config_args, endpoint_config_args):
for item in arglist:
try:
config_args.remove(item)
except ValueError:
# Arg is not in config_args
pass
return {'config': config_args,
'host_config': host_config_args,
'networking_config': endpoint_config_args}
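As a hedged usage sketch (the kwargs shown are hypothetical), the lists
returned by ``get_client_args()`` can be used to split a flat set of create
arguments into container config and host config portions:
.. code-block:: python

    # Hedged sketch: separate host-config kwargs from container-config kwargs
    # using the argspec lists returned above. Example kwargs are hypothetical.
    client_args = get_client_args()
    create_kwargs = {'image': 'busybox', 'command': 'sleep 300',
                     'binds': ['/srv/www:/var/www:ro'], 'privileged': True}

    host_config_kwargs = {key: create_kwargs.pop(key)
                          for key in list(create_kwargs)
                          if key in client_args['host_config']}
    # Whatever remains in create_kwargs belongs to the container config.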

View File

@ -203,7 +203,7 @@ def item(*args, **kwargs):
return ret
def setvals(grains, destructive=False):
def setvals(grains, destructive=False, refresh=True):
'''
Set new grains values in the grains config file
@ -211,6 +211,10 @@ def setvals(grains, destructive=False):
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh
Refresh minion grains using saltutil.sync_grains.
Defaults to True.
CLI Example:
.. code-block:: bash
@ -286,12 +290,12 @@ def setvals(grains, destructive=False):
log.error(msg.format(fn_))
if not __opts__.get('local', False):
# Sync the grains
__salt__['saltutil.sync_grains']()
__salt__['saltutil.sync_grains'](refresh=refresh)
# Return the grains we just set to confirm everything was OK
return new_grains
def setval(key, val, destructive=False):
def setval(key, val, destructive=False, refresh=True):
'''
Set a grains value in the grains config file
@ -305,6 +309,10 @@ def setval(key, val, destructive=False):
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh
Refresh minion grains using saltutil.sync_grains.
Defaults to True.
CLI Example:
.. code-block:: bash
@ -312,7 +320,7 @@ def setval(key, val, destructive=False):
salt '*' grains.setval key val
salt '*' grains.setval key "{'sub-key': 'val', 'sub-key2': 'val2'}"
'''
return setvals({key: val}, destructive)
return setvals({key: val}, destructive, refresh)
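A hedged sketch of how the new ``refresh`` argument might be used from another
module via ``__salt__`` (grain names and values are illustrative): skip the
refresh for intermediate calls and let the final call trigger a single one.
.. code-block:: python

    # Hedged sketch: set several grains without a refresh after each call,
    # letting the last call refresh once. Grain names/values are made up.
    __salt__['grains.setval']('role', 'web', refresh=False)
    __salt__['grains.setval']('datacenter', 'us-east-1', refresh=False)
    __salt__['grains.setvals']({'deployed': True})  # refresh defaults to True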
def append(key, val, convert=False, delimiter=DEFAULT_TARGET_DELIM):

salt/modules/openscap.py (new file, 105 lines)
View File

@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import tempfile
import shlex
import shutil
from subprocess import Popen, PIPE
from salt.client import Caller
ArgumentParser = object
try:
import argparse # pylint: disable=minimum-python-version
ArgumentParser = argparse.ArgumentParser
HAS_ARGPARSE = True
except ImportError: # python 2.6
HAS_ARGPARSE = False
_XCCDF_MAP = {
'eval': {
'parser_arguments': [
(('--profile',), {'required': True}),
],
'cmd_pattern': (
"oscap xccdf eval "
"--oval-results --results results.xml --report report.html "
"--profile {0} {1}"
)
}
}
def __virtual__():
return HAS_ARGPARSE, 'argparse module is required.'
class _ArgumentParser(ArgumentParser):
def __init__(self, action=None, *args, **kwargs):
super(_ArgumentParser, self).__init__(*args, prog='oscap', **kwargs)
self.add_argument('action', choices=['eval'])
add_arg = None
for params, kwparams in _XCCDF_MAP['eval']['parser_arguments']:
self.add_argument(*params, **kwparams)
def error(self, message, *args, **kwargs):
raise Exception(message)
_OSCAP_EXIT_CODES_MAP = {
0: True, # all rules pass
1: False, # there is an error during evaluation
2: True # there is at least one rule with either fail or unknown result
}
def xccdf(params):
'''
Run ``oscap xccdf`` commands on minions.
It uses cp.push_dir to upload the generated files to the salt master
in the master's minion files cachedir
(defaults to ``/var/cache/salt/master/minions/minion-id/files``).
It needs ``file_recv`` set to ``True`` in the master configuration file.
CLI Example:
.. code-block:: bash
salt '*' openscap.xccdf "eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml"
'''
params = shlex.split(params)
policy = params[-1]
success = True
error = None
upload_dir = None
action = None
try:
parser = _ArgumentParser()
action = parser.parse_known_args(params)[0].action
args, argv = _ArgumentParser(action=action).parse_known_args(args=params)
except Exception as err:
success = False
error = str(err)
if success:
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
tempdir = tempfile.mkdtemp()
proc = Popen(
shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
(stdoutdata, stderrdata) = proc.communicate()
success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
if success:
caller = Caller()
caller.cmd('cp.push_dir', tempdir)
shutil.rmtree(tempdir, ignore_errors=True)
upload_dir = tempdir
else:
error = stderrdata
return dict(success=success, upload_dir=upload_dir, error=error)
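For context, a hedged sketch of how the dict returned by ``xccdf()`` might be
consumed on the minion side (the policy file path is just an example):
.. code-block:: python

    # Hedged usage sketch: run an evaluation and inspect the result dict
    # produced above. The policy file path is illustrative.
    result = xccdf('eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml')
    if result['success']:
        print('results pushed to the master from {0}'.format(result['upload_dir']))
    else:
        print('evaluation failed: {0}'.format(result['error']))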

View File

@ -8,7 +8,11 @@ Manage CloudTrail Objects
Create and destroy CloudTrail objects. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
This module accepts explicit CloudTrail credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic

View File

@ -8,7 +8,11 @@ Manage IoT Objects
Create and destroy IoT objects. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
This module accepts explicit IoT credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic

View File

@ -8,7 +8,11 @@ Manage Lambda Functions
Create and destroy Lambda Functions. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
This module accepts explicit Lambda credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic

View File

@ -8,7 +8,11 @@ Manage S3 Buckets
Create and destroy S3 buckets. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
:depends:
- boto
- boto3
The dependencies listed above can be installed via package or pip.
This module accepts explicit S3 credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic

View File

@ -433,6 +433,21 @@ def _compare(actual, create_kwargs, defaults_from_image):
if data != actual_data:
ret.update({item: {'old': actual_data, 'new': data}})
continue
elif item == 'security_opt':
if actual_data is None:
actual_data = []
if data is None:
data = []
actual_data = sorted(set(actual_data))
desired_data = sorted(set(data))
log.trace('dockerng.running ({0}): munged actual value: {1}'
.format(item, actual_data))
log.trace('dockerng.running ({0}): munged desired value: {1}'
.format(item, desired_data))
if actual_data != desired_data:
ret.update({item: {'old': actual_data,
'new': desired_data}})
continue
elif item in ('cmd', 'command', 'entrypoint'):
if (actual_data is None and item not in create_kwargs and
_image_get(config['image_path'])):

View File

@ -17,6 +17,7 @@ from __future__ import absolute_import
# Import python libs
import logging
import os
# Import salt libs
import salt.utils
@ -319,6 +320,15 @@ def package_installed(name,
'comment': '',
'changes': {}}
# Fail if using a non-existent package path
if '~' not in name and not os.path.exists(name):
if __opts__['test']:
ret['result'] = None
else:
ret['result'] = False
ret['comment'] = 'Package path {0} does not exist'.format(name)
return ret
old = __salt__['dism.installed_packages']()
# Get package info so we can see if it's already installed
@ -387,6 +397,15 @@ def package_removed(name, image=None, restart=False):
'comment': '',
'changes': {}}
# Fail if using a non-existent package path
if '~' not in name and not os.path.exists(name):
if __opts__['test']:
ret['result'] = None
else:
ret['result'] = False
ret['comment'] = 'Package path {0} does not exist'.format(name)
return ret
old = __salt__['dism.installed_packages']()
# Get package info so we can see if it's already removed
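The ``'~' not in name`` guard above presumably distinguishes DISM package
names (which contain ``~`` characters) from package file paths; a hedged
illustration with made-up values:
.. code-block:: python

    # Hedged illustration of the existence check added above: only path-like
    # names (no '~') are required to exist on disk. Example values are made up.
    import os

    def _path_is_missing(name):
        return '~' not in name and not os.path.exists(name)

    _path_is_missing(r'C:\stage\Microsoft-Windows-Foo-Package.cab')  # True if the file is absent
    _path_is_missing('Package_for_KB4012212~31bf3856ad364e35~amd64~~6.1.1.1')  # False: treated as a package name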

View File

@ -0,0 +1,207 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from subprocess import PIPE
from salt.modules import openscap
from salttesting import skipIf, TestCase
from salttesting.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OpenscapTestCase(TestCase):
random_temp_dir = '/tmp/unique-name'
policy_file = '/usr/share/openscap/policy-file-xccdf.xml'
def setUp(self):
patchers = [
patch('salt.modules.openscap.Caller', MagicMock()),
patch('salt.modules.openscap.shutil.rmtree', Mock()),
patch(
'salt.modules.openscap.tempfile.mkdtemp',
Mock(return_value=self.random_temp_dir)
),
]
for patcher in patchers:
self.apply_patch(patcher)
def apply_patch(self, patcher):
patcher.start()
self.addCleanup(patcher.stop)
@patch(
'salt.modules.openscap.Popen',
MagicMock(
return_value=Mock(
**{'returncode': 0, 'communicate.return_value': ('', '')}
)
)
)
def test_openscap_xccdf_eval_success(self):
response = openscap.xccdf(
'eval --profile Default {0}'.format(self.policy_file))
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
expected_cmd = [
'oscap',
'xccdf',
'eval',
'--oval-results',
'--results', 'results.xml',
'--report', 'report.html',
'--profile', 'Default',
self.policy_file
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE)
openscap.Caller().cmd.assert_called_once_with(
'cp.push_dir', self.random_temp_dir)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
{
'upload_dir': self.random_temp_dir,
'error': None, 'success': True
}
)
@patch(
'salt.modules.openscap.Popen',
MagicMock(
return_value=Mock(
**{'returncode': 2, 'communicate.return_value': ('', '')}
)
)
)
def test_openscap_xccdf_eval_success_with_failing_rules(self):
response = openscap.xccdf(
'eval --profile Default {0}'.format(self.policy_file))
self.assertEqual(openscap.tempfile.mkdtemp.call_count, 1)
expected_cmd = [
'oscap',
'xccdf',
'eval',
'--oval-results',
'--results', 'results.xml',
'--report', 'report.html',
'--profile', 'Default',
self.policy_file
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE)
openscap.Caller().cmd.assert_called_once_with(
'cp.push_dir', self.random_temp_dir)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
{
'upload_dir': self.random_temp_dir,
'error': None,
'success': True
}
)
def test_openscap_xccdf_eval_fail_no_profile(self):
response = openscap.xccdf(
'eval --param Default /unknown/param')
self.assertEqual(
response,
{
'error': 'argument --profile is required',
'upload_dir': None,
'success': False
}
)
@patch(
'salt.modules.openscap.Popen',
MagicMock(
return_value=Mock(
**{'returncode': 2, 'communicate.return_value': ('', '')}
)
)
)
def test_openscap_xccdf_eval_success_ignore_unknown_params(self):
response = openscap.xccdf(
'eval --profile Default --param Default /policy/file')
self.assertEqual(
response,
{
'upload_dir': self.random_temp_dir,
'error': None,
'success': True
}
)
expected_cmd = [
'oscap',
'xccdf',
'eval',
'--oval-results',
'--results', 'results.xml',
'--report', 'report.html',
'--profile', 'Default',
'/policy/file'
]
openscap.Popen.assert_called_once_with(
expected_cmd,
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE)
@patch(
'salt.modules.openscap.Popen',
MagicMock(
return_value=Mock(**{
'returncode': 1,
'communicate.return_value': ('', 'evaluation error')
})
)
)
def test_openscap_xccdf_eval_evaluation_error(self):
response = openscap.xccdf(
'eval --profile Default {0}'.format(self.policy_file))
self.assertEqual(
response,
{
'upload_dir': None,
'error': 'evaluation error',
'success': False
}
)
@patch(
'salt.modules.openscap.Popen',
MagicMock(
return_value=Mock(**{
'returncode': 1,
'communicate.return_value': ('', 'evaluation error')
})
)
)
def test_openscap_xccdf_eval_fail_not_implemented_action(self):
response = openscap.xccdf('info {0}'.format(self.policy_file))
self.assertEqual(
response,
{
'upload_dir': None,
'error': "argument action: invalid choice: 'info' (choose from 'eval')",
'success': False
}
)

View File

@ -95,11 +95,12 @@ class WinDismTestCase(TestCase):
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
out = dism.capability_installed('Capa2', 'somewhere', True)
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_capability_removed(self):
'''
@ -360,13 +361,14 @@ class WinDismTestCase(TestCase):
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_installed('Pack2')
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_failure(self):
'''
@ -390,13 +392,14 @@ class WinDismTestCase(TestCase):
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_installed('Pack2')
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_installed(self):
'''
@ -418,12 +421,14 @@ class WinDismTestCase(TestCase):
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_installed('Pack2')
out = dism.package_installed('Pack2')
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_package_removed(self):
'''
@ -448,13 +453,14 @@ class WinDismTestCase(TestCase):
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_removed('Pack2')
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_failure(self):
'''
@ -478,13 +484,14 @@ class WinDismTestCase(TestCase):
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_removed('Pack2')
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_removed(self):
'''
@ -507,11 +514,13 @@ class WinDismTestCase(TestCase):
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
out = dism.package_removed('Pack2')
with patch.dict(dism.__opts__, {'test': False}):
with patch('os.path.exists'):
out = dism.package_removed('Pack2')
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
if __name__ == '__main__':