Merge remote-tracking branch 'saltstack/2018.3' into issue-50221

This commit is contained in:
Daniel A. Wozniak 2019-01-23 08:26:20 -07:00
commit a5255f0eb2
No known key found for this signature in database
GPG Key ID: 166B9D2C06C82D61
77 changed files with 2631 additions and 378 deletions

11
Pipfile
View File

@ -20,17 +20,18 @@ boto = ">=2.32.1"
boto3 = ">=1.2.1"
moto = ">=0.3.6"
SaltPyLint = ">=v2017.3.6"
pytest = ">=3.5.0"
pytest = ">=4.0.1"
pytest-cov = "*"
pytest-salt = "==2018.12.8"
pytest-timeout = ">=1.3.3"
pytest-tempdir = ">=2018.8.11"
pytest-helpers-namespace = ">=2017.11.11"
[packages.futures]
# Required by Tornado to handle threads stuff.
version = ">=2.0"
markers = "python_version < '3.0'"
[dev-packages.pytest-salt]
git = "git://github.com/saltstack/pytest-salt.git"
ref = "master"
[dev-packages.httpretty]
# httpretty Needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to

View File

@ -2,7 +2,7 @@
What is SaltStack?
==================
SaltStack makes software for complex systems management at scale.
SaltStack makes software for complex systems management at scale.
SaltStack is the company that created and maintains the Salt Open
project and develops and sells SaltStack Enterprise software, services
and support. Easy enough to get running in minutes, scalable enough to

View File

@ -71,6 +71,14 @@
{%- endmacro %}
<html>
<head>
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-MCK7VL3');</script>
<!-- End Google Tag Manager -->
<meta charset="{{ encoding }}">
{{ metatags }}
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
@ -120,6 +128,11 @@
</head>
<body class="index">
<!-- Google Tag Manager (noscript) -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-MCK7VL3"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->
<!--[if lt IE 8]>
<p>You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser.</a></p>
<![endif]-->

View File

@ -6,12 +6,9 @@ Output Options
Pass in an alternative outputter to display the return of data. This
outputter can be any of the available outputters:
``grains``, ``highstate``, ``json``, ``key``, ``overstatestage``, ``pprint``, ``raw``, ``txt``, ``yaml``
Some outputters are formatted only for data returned from specific
functions; for instance, the ``grains`` outputter will not work for non-grains
data.
``highstate``, ``json``, ``key``, ``overstatestage``, ``pprint``, ``raw``, ``txt``, ``yaml``, and :ref:`many others <all-salt.output>`.
Some outputters are formatted only for data returned from specific functions.
If an outputter is used that does not support the data passed into it, then
Salt will fall back on the ``pprint`` outputter and display the return data
using the Python ``pprint`` standard library module.

View File

@ -22,6 +22,7 @@ Follow one of the below links for further information and examples
overstatestage
pony
pprint_out
profile
progress
raw
table_out

View File

@ -0,0 +1,6 @@
===================
salt.output.profile
===================
.. automodule:: salt.output.profile
:members:

View File

@ -130,7 +130,7 @@ Cloud ``salt.cloud.clouds`` (:ref:`index <all-salt.clouds>`) ``
Engine ``salt.engines`` (:ref:`index <engines>`) ``engines`` ``engines_dirs``
Execution ``salt.modules`` (:ref:`index <all-salt.modules>`) ``modules`` ``module_dirs``
Executor ``salt.executors`` (:ref:`index <all-salt.executors>`) ``executors`` [#no-fs]_ ``executor_dirs``
File Server ``salt.fileserver`` (:ref:`index <file-server>`) ``fileserver`` [#no-fs]_ ``fileserver_dirs``
File Server ``salt.fileserver`` (:ref:`index <file-server>`) ``fileserver`` ``fileserver_dirs``
Grain ``salt.grains`` (:ref:`index <all-salt.grains>`) ``grains`` ``grains_dirs``
Log Handler ``salt.log.handlers`` (:ref:`index <external-logging-handlers>`) ``log_handlers`` ``log_handlers_dirs``
Net API ``salt.netapi`` (:ref:`index <all-netapi-modules>`) ``netapi`` [#no-fs]_ ``netapi_dirs``
@ -143,13 +143,13 @@ Returner ``salt.returners`` (:ref:`index <all-salt.returners>`) ``
Roster ``salt.roster`` (:ref:`index <all-salt.roster>`) ``roster`` ``roster_dirs``
Runner ``salt.runners`` (:ref:`index <all-salt.runners>`) ``runners`` ``runner_dirs``
SDB ``salt.sdb`` (:ref:`index <all-salt.sdb>`) ``sdb`` ``sdb_dirs``
Search ``salt.search`` ``search`` [#no-fs]_ ``search_dirs``
Serializer ``salt.serializers`` (:ref:`index <all-salt.serializers>`) ``serializers`` [#no-fs]_ ``serializers_dirs``
SPM pkgdb ``salt.spm.pkgdb`` ``pkgdb`` [#no-fs]_ ``pkgdb_dirs``
SPM pkgfiles ``salt.spm.pkgfiles`` ``pkgfiles`` [#no-fs]_ ``pkgfiles_dirs``
SSH Wrapper ``salt.client.ssh.wrapper`` ``wrapper`` [#no-fs]_ ``wrapper_dirs``
State ``salt.states`` (:ref:`index <all-salt.states>`) ``states`` ``states_dirs``
Thorium ``salt.thorium`` (:ref:`index <all-salt.thorium>`) ``thorium`` [#no-fs]_ ``thorium_dirs``
Thorium ``salt.thorium`` (:ref:`index <all-salt.thorium>`) ``thorium`` ``thorium_dirs``
Tokens ``salt.tokens`` ``tokens`` ``tokens_dirs``
Top ``salt.tops`` (:ref:`index <all-salt.tops>`) ``tops`` ``top_dirs``
Util ``salt.utils`` ``utils`` ``utils_dirs``
Wheel ``salt.wheels`` (:ref:`index <all-salt.wheel>`) ``wheel`` ``wheel_dirs``
@ -223,6 +223,12 @@ object.
Executor
--------
.. toctree::
:maxdepth: 1
:glob:
/ref/executors/index
Executors control how execution modules get called. The default is to just call
them, but this can be customized.
@ -322,11 +328,6 @@ SDB
SDB is a way to store data that's not associated with a minion. See
:ref:`Storing Data in Other Databases <sdb>`.
Search
------
A system for indexing the file server and pillars. Removed in 2018.3.
Serializer
----------
@ -375,6 +376,16 @@ Thorium
Modules for use in the :ref:`Thorium <thorium-reactor>` event reactor.
Tokens
------
Token stores for :ref:`External Authentication <acl-eauth>`. See the
:py:mod:`salt.tokens` docstring for details.
.. note::
The runner to load tokens modules is
:py:func:`saltutil.sync_eauth_tokens <salt.runners.saltutil.sync_eauth_tokens>`.
Tops
----

View File

@ -87,6 +87,13 @@ the context into the included file is required:
.. code-block:: jinja
{% from 'lib.sls' import test with context %}
Includes must use full paths, like so:
.. code-block:: jinja
:caption: spam/eggs.jinja
{% include 'spam/foobar.jinja' %}
Including Context During Include/Import
---------------------------------------

View File

@ -219,6 +219,10 @@ configuration file: ``/etc/salt/master`` and setting the ``timeout`` value to
change the default timeout for all commands, and then restarting the
salt-master service.
If a ``state.apply`` run takes too long, you can find a bottleneck by adding the
:py:mod:`--out=profile <salt.output.profile>` option.
Salt Master Auth Flooding
=========================

View File

@ -152,3 +152,6 @@ salt-minion service.
Modifying the minion timeout value is not required when running commands
from a Salt Master. It is only required when running commands locally on
the minion.
If a ``state.apply`` run takes too long, you can find a bottleneck by adding the
:py:mod:`--out=profile <salt.output.profile>` option.

View File

@ -531,7 +531,13 @@ Global Remotes
The ``all_saltenvs`` per-remote configuration parameter overrides the logic
Salt uses to map branches/tags to fileserver environments (i.e. saltenvs). This
allows a single branch/tag to appear in *all* saltenvs.
allows a single branch/tag to appear in *all* GitFS saltenvs.
.. note::
``all_saltenvs`` only works *within* GitFS. That is, files in a branch
configured using ``all_saltenvs`` will *not* show up in a fileserver
environment defined via some other fileserver backend (e.g.
:conf_master:`file_roots`).
This is very useful in particular when working with :ref:`salt formulas
<conventions-formula>`. Prior to the addition of this feature, it was necessary

View File

@ -2,8 +2,6 @@
mock>=2.0.0
SaltPyLint>=v2017.3.6
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
testinfra>=1.7.0,!=1.17.0
# httpretty Needs to be here for now even though it's a dependency of boto.

View File

@ -1,4 +1,7 @@
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir
# PyTest
pytest >= 4.0.1
pytest-cov
pytest-salt == 2018.12.8
pytest-timeout >= 1.3.3
pytest-tempdir >= 2018.8.11
pytest-helpers-namespace >= 2017.11.11

View File

@ -323,7 +323,14 @@ def groups(username, **kwargs):
'''
group_list = []
bind = auth(username, kwargs.get('password', None))
# If bind credentials are configured, use them instead of user's
if _config('binddn', mandatory=False) and _config('bindpw', mandatory=False):
bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
else:
bind = _bind(username, kwargs.get('password', ''),
anonymous=_config('auth_by_group_membership_only', mandatory=False)
and _config('anonymous', mandatory=False))
if bind:
log.debug('ldap bind to determine group membership succeeded!')

View File

@ -265,7 +265,7 @@ def avail_locations(conn=None, call=None): # pylint: disable=unused-argument
webconn = get_conn(WebSiteManagementClient)
ret = {}
regions = webconn.global_model.get_subscription_geo_regions()
regions = webconn.list_geo_regions()
if hasattr(regions, 'value'):
regions = regions.value
for location in regions: # pylint: disable=no-member
@ -533,7 +533,7 @@ def list_nodes_select(conn=None, call=None): # pylint: disable=unused-argument
)
def show_instance(name, resource_group=None, call=None): # pylint: disable=unused-argument
def show_instance(name, kwargs=None, call=None): # pylint: disable=unused-argument
'''
Show the details from the provider concerning an instance
'''
@ -547,6 +547,12 @@ def show_instance(name, resource_group=None, call=None): # pylint: disable=unus
compconn = get_conn()
data = None
resource_group = None
# check if there is a resource_group specified
if kwargs:
resource_group = kwargs.get('resource_group', None)
if resource_group is None:
for group in list_resource_groups():
try:
@ -555,8 +561,13 @@ def show_instance(name, resource_group=None, call=None): # pylint: disable=unus
resource_group = group
except CloudError:
continue
else:
try:
instance = compconn.virtual_machines.get(resource_group, name)
data = object_to_dict(instance)
except CloudError:
pass
# Find under which cloud service the name is listed, if any
if data is None:
return {}
@ -568,7 +579,7 @@ def show_instance(name, resource_group=None, call=None): # pylint: disable=unus
data['network_profile']['network_interfaces'] = []
for iface in data['network_profile']['network_interfaces']:
iface_name = iface.id.split('/')[-1]
iface_name = iface['id'].split('/')[-1]
iface_data = show_interface(kwargs={
'resource_group': resource_group,
'iface_name': iface_name,

View File

@ -1,4 +1,9 @@
#!/bin/sh -
# WARNING: Changes to this file in the salt repo will be overwritten!
# Please submit pull requests against the salt-bootstrap repo:
# https://github.com/saltstack/salt-bootstrap
#======================================================================================================================
# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120
#======================================================================================================================
@ -18,7 +23,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2018.08.15"
__ScriptVersion="2019.01.08"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -585,14 +590,14 @@ elif [ "$ITYPE" = "stable" ]; then
if [ "$#" -eq 0 ];then
STABLE_REV="latest"
else
if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$')" != "" ]; then
if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2)$')" != "" ]; then
STABLE_REV="$1"
shift
elif [ "$(echo "$1" | grep -E '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then
STABLE_REV="archive/$1"
shift
else
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, latest, \$MAJOR.\$MINOR.\$PATCH)"
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, latest, \$MAJOR.\$MINOR.\$PATCH)"
exit 1
fi
fi
@ -1269,6 +1274,7 @@ __ubuntu_derivatives_translation() {
linuxmint_13_ubuntu_base="12.04"
linuxmint_17_ubuntu_base="14.04"
linuxmint_18_ubuntu_base="16.04"
linuxmint_19_ubuntu_base="18.04"
linaro_12_ubuntu_base="12.04"
elementary_os_02_ubuntu_base="12.04"
neon_16_ubuntu_base="16.04"
@ -1632,7 +1638,8 @@ __check_end_of_life_versions() {
amazon*linux*ami)
# Amazon Linux versions lower than 2012.0X no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ]; then
# Except for Amazon Linux 2, which reset the major version counter
if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://aws.amazon.com/amazon-linux-ami/"
@ -1797,24 +1804,32 @@ __function_defined() {
# process is finished so the script doesn't exit on a locked proc.
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
echodebug "Checking if apt process is currently running."
# Timeout set at 15 minutes
WAIT_TIMEOUT=900
while ps -C apt,apt-get,aptitude,dpkg >/dev/null; do
sleep 1
WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
# Run our passed in apt command
"${@}"
APT_RETURN=$?
# If timeout reaches 0, abort.
if [ "$WAIT_TIMEOUT" -eq 0 ]; then
echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
echoerror "Bootstrap script cannot proceed. Aborting."
return 1
fi
# If our exit code from apt is 100, then we're waiting on a lock
while [ $APT_RETURN -eq 100 ]; do
echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..."
sleep 1
WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
# If timeout reaches 0, abort.
if [ "$WAIT_TIMEOUT" -eq 0 ]; then
echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
echoerror "Bootstrap script cannot proceed. Aborting."
return 1
else
# Try running apt again until our return code != 100
"${@}"
APT_RETURN=$?
fi
done
echodebug "No apt processes are currently running."
return $APT_RETURN
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
@ -1823,8 +1838,7 @@ __wait_for_apt(){
# PARAMETERS: packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
__wait_for_apt
apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
__wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
} # ---------- end of function __apt_get_install_noinput ----------
@ -1833,8 +1847,7 @@ __apt_get_install_noinput() {
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
#----------------------------------------------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
__wait_for_apt
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
__wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
} # ---------- end of function __apt_get_upgrade_noinput ----------
@ -1844,11 +1857,10 @@ __apt_get_upgrade_noinput() {
# PARAMETERS: url
#----------------------------------------------------------------------------------------------------------------------
__apt_key_fetch() {
__wait_for_apt
url=$1
# shellcheck disable=SC2086
apt-key adv ${_GPG_ARGS} --fetch-keys "$url"; return $?
__wait_for_apt apt-key adv ${_GPG_ARGS} --fetch-keys "$url"; return $?
} # ---------- end of function __apt_key_fetch ----------
@ -2633,8 +2645,7 @@ __install_saltstack_ubuntu_repository() {
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
}
install_ubuntu_deps() {
@ -2646,8 +2657,7 @@ install_ubuntu_deps() {
__enable_universe_repository || return 1
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
fi
__PACKAGES=''
@ -2703,8 +2713,7 @@ install_ubuntu_stable_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then
@ -2724,8 +2733,7 @@ install_ubuntu_stable_deps() {
}
install_ubuntu_git_deps() {
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
if ! __check_command_exists git; then
__apt_get_install_noinput git-core || return 1
@ -3032,8 +3040,7 @@ __install_saltstack_debian_repository() {
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
}
install_debian_deps() {
@ -3044,8 +3051,7 @@ install_debian_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
# Try to update GPG keys first if allowed
@ -3164,8 +3170,7 @@ install_debian_8_git_deps() {
/etc/apt/sources.list.d/backports.list
fi
__wait_for_apt
apt-get update || return 1
__wait_for_apt apt-get update || return 1
# python-tornado package should be installed from backports repo
__PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports"
@ -3415,36 +3420,33 @@ install_debian_check_services() {
#
install_fedora_deps() {
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
dnf -y update || return 1
fi
__PACKAGES="${__PACKAGES:=}"
if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
# Packages are named python3-<whatever>
PY_PKG_VER=3
__PACKAGES="python3-m2crypto python3-PyYAML"
__PACKAGES="${__PACKAGES} python3-m2crypto python3-PyYAML"
else
PY_PKG_VER=2
__PACKAGES="m2crypto"
__PACKAGES="${__PACKAGES} m2crypto"
if [ "$DISTRO_MAJOR_VERSION" -ge 28 ]; then
__PACKAGES="${__PACKAGES} python2-pyyaml"
else
__PACKAGES="${__PACKAGES} PyYAML"
fi
fi
__PACKAGES="${__PACKAGES} procps-ng dnf-utils libyaml python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
__PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq"
# shellcheck disable=SC2086
dnf install -y ${__PACKAGES} || return 1
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
dnf -y update || return 1
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
dnf install -y ${_EXTRA_PACKAGES} || return 1
fi
# shellcheck disable=SC2086
dnf install -y ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1
return 0
}
@ -3494,36 +3496,38 @@ install_fedora_git_deps() {
PY_PKG_VER=2
fi
__PACKAGES="${__PACKAGES:=}"
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
dnf install -y ca-certificates || return 1
__PACKAGES="${__PACKAGES} ca-certificates"
fi
if ! __check_command_exists git; then
__PACKAGES="${__PACKAGES} git"
fi
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr"
fi
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd"
# Fedora 28+ ships with tornado 5.0+ which is broken for salt on py3
# https://github.com/saltstack/salt-bootstrap/issues/1220
if [ "${PY_PKG_VER}" -lt 3 ] || [ "$DISTRO_MAJOR_VERSION" -lt 28 ]; then
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado"
fi
install_fedora_deps || return 1
if ! __check_command_exists git; then
dnf install -y git || return 1
fi
__git_clone_and_checkout || return 1
__PACKAGES="python${PY_PKG_VER}-systemd"
# Fedora 28+ needs tornado <5.0 from pip
# https://github.com/saltstack/salt-bootstrap/issues/1220
if [ "${PY_PKG_VER}" -eq 3 ] && [ "$DISTRO_MAJOR_VERSION" -ge 28 ]; then
__check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3"
grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS='
' read -r dep; do
"${_PY_EXE}" -m pip install "${dep}" || return 1
done
else
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado"
fi
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
__PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr"
fi
# shellcheck disable=SC2086
dnf install -y ${__PACKAGES} || return 1
# Let's trigger config_salt()
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
_TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
@ -4681,6 +4685,138 @@ install_amazon_linux_ami_git_deps() {
return 0
}
install_amazon_linux_ami_2_git_deps() {
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
yum -y install ca-certificates || return 1
fi
PIP_EXE='pip'
if __check_command_exists python2.7; then
if ! __check_command_exists pip2.7; then
__yum_install_noinput python2-pip
fi
PIP_EXE='/bin/pip'
_PY_EXE='python2.7'
fi
install_amazon_linux_ami_2_deps || return 1
if ! __check_command_exists git; then
__yum_install_noinput git || return 1
fi
__git_clone_and_checkout || return 1
__PACKAGES=""
__PIP_PACKAGES=""
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
__check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud"
__PACKAGES="${__PACKAGES} python27-pip"
__PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION"
fi
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is in the requirements file
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")"
if [ "${__REQUIRED_TORNADO}" != "" ]; then
__PACKAGES="${__PACKAGES} ${pkg_append}-tornado"
fi
fi
if [ "${__PACKAGES}" != "" ]; then
# shellcheck disable=SC2086
__yum_install_noinput ${__PACKAGES} || return 1
fi
if [ "${__PIP_PACKAGES}" != "" ]; then
# shellcheck disable=SC2086
${PIP_EXE} install ${__PIP_PACKAGES} || return 1
fi
# Let's trigger config_salt()
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
_TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
CONFIG_SALT_FUNC="config_salt"
fi
return 0
}
install_amazon_linux_ami_2_deps() {
# Shim to figure out if we're using old (rhel) or new (aws) rpms.
_USEAWS=$BS_FALSE
pkg_append="python"
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
else
repo_rev="latest"
fi
if echo $repo_rev | grep -E -q '^archive'; then
year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4)
else
year=$(echo "$repo_rev" | cut -c1-4)
fi
if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \
[ "$year" -gt 2016 ]; then
_USEAWS=$BS_TRUE
pkg_append="python"
fi
# We need to install yum-utils before doing anything else when installing on
# Amazon Linux ECS-optimized images. See issue #974.
__yum_install_noinput yum-utils
# Do upgrade early
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
yum -y update || return 1
fi
if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then
__REPO_FILENAME="saltstack-repo.repo"
base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/7/\$basearch/$repo_rev/"
base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/2/\$basearch/latest/"
gpg_key="${base_url}SALTSTACK-GPG-KEY.pub
${base_url}base/RPM-GPG-KEY-CentOS-7"
repo_name="SaltStack repo for Amazon Linux 2.0"
# This should prob be refactored to use __install_saltstack_rhel_repository()
# With args passed in to do the right thing. Reformatted to be more like the
# amazon linux yum file.
if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then
cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}"
[saltstack-repo]
name=$repo_name
failovermethod=priority
priority=10
gpgcheck=1
gpgkey=$gpg_key
baseurl=$base_url
_eof
fi
fi
# Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64
# which is already installed
__PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 PyYAML"
__PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq"
__PACKAGES="${__PACKAGES} ${pkg_append}-futures"
# shellcheck disable=SC2086
__yum_install_noinput ${__PACKAGES} || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
__yum_install_noinput ${_EXTRA_PACKAGES} || return 1
fi
}
install_amazon_linux_ami_stable() {
install_centos_stable || return 1
return 0
@ -4715,6 +4851,41 @@ install_amazon_linux_ami_testing_post() {
install_centos_testing_post || return 1
return 0
}
install_amazon_linux_ami_2_stable() {
install_centos_stable || return 1
return 0
}
install_amazon_linux_ami_2_stable_post() {
install_centos_stable_post || return 1
return 0
}
install_amazon_linux_ami_2_restart_daemons() {
install_centos_restart_daemons || return 1
return 0
}
install_amazon_linux_ami_2_git() {
install_centos_git || return 1
return 0
}
install_amazon_linux_ami_2_git_post() {
install_centos_git_post || return 1
return 0
}
install_amazon_linux_ami_2_testing() {
install_centos_testing || return 1
return 0
}
install_amazon_linux_ami_2_testing_post() {
install_centos_testing_post || return 1
return 0
}
#
# Ended Amazon Linux AMI Install Functions
#
@ -5336,7 +5507,8 @@ install_openbsd_restart_daemons() {
# SmartOS Install Functions
#
install_smartos_deps() {
pkgin -y install zeromq py27-crypto py27-m2crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto"
pkgin -y install "${smartos_deps}" || return 1
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}

View File

@ -9,6 +9,7 @@ authenticating peers
# the Array class, which has incompatibilities with it.
from __future__ import absolute_import, print_function
import os
import random
import sys
import copy
import time
@ -727,6 +728,10 @@ class AsyncAuth(object):
'minion.\nOr restart the Salt Master in open mode to '
'clean out the keys. The Salt Minion will now exit.'
)
# Add a random sleep here for systems that are using a
# a service manager to immediately restart the service
# to avoid overloading the system
time.sleep(random.randint(10, 20))
sys.exit(salt.defaults.exitcodes.EX_NOPERM)
# has the master returned that its maxed out with minions?
elif payload['load']['ret'] == 'full':

View File

@ -303,8 +303,8 @@ def _file_lists(load, form):
except os.error:
log.critical('Unable to make cachedir %s', list_cachedir)
return []
list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
list_cache = os.path.join(list_cachedir, '{0}.p'.format(salt.utils.files.safe_filename_leaf(load['saltenv'])))
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(salt.utils.files.safe_filename_leaf(load['saltenv'])))
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock

View File

@ -1003,10 +1003,11 @@ def _virtual(osdata):
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'OpenBSD':
if osdata['manufacturer'] in ['QEMU', 'Red Hat']:
grains['virtual'] = 'kvm'
if osdata['manufacturer'] == 'OpenBSD':
grains['virtual'] = 'vmm'
if 'manufacturer' in osdata:
if osdata['manufacturer'] in ['QEMU', 'Red Hat', 'Joyent']:
grains['virtual'] = 'kvm'
if osdata['manufacturer'] == 'OpenBSD':
grains['virtual'] = 'vmm'
elif osdata['kernel'] == 'SunOS':
if grains['virtual'] == 'LDOM':
roles = []

View File

@ -1704,7 +1704,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
if not isinstance(key, six.string_types):
raise KeyError('The key must be a string.')
if '.' not in key:
raise KeyError('The key \'%s\' should contain a \'.\'', key)
raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
mod_name, _ = key.split('.', 1)
with self._lock:
# It is possible that the key is in the dictionary after

View File

@ -135,9 +135,7 @@ def setup_handlers():
transport_registry = TransportRegistry(default_transports)
url = urlparse(dsn)
if not transport_registry.supported_scheme(url.scheme):
raise ValueError(
'Unsupported Sentry DSN scheme: %s', url.scheme
)
raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
except ValueError as exc:
log.info(
'Raven failed to parse the configuration provided DSN: %s', exc

View File

@ -27,6 +27,7 @@ from binascii import crc32
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt._compat import ipaddress
from salt.utils.network import parse_host_port
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
@ -243,27 +244,29 @@ def resolve_dns(opts, fallback=True):
def prep_ip_port(opts):
'''
parse host:port values from opts['master'] and return valid:
master: ip address or hostname as a string
master_port: (optional) master returner port as integer
e.g.:
- master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
- master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
- master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
- master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
'''
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
if opts['master_uri_format'] == 'ip_only':
ret['master'] = ipaddress.ip_address(opts['master'])
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
host, port = parse_host_port(opts['master'])
ret = {'master': host}
if port:
ret.update({'master_port': port})
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret

View File

@ -388,6 +388,9 @@ def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifa
extension = snapshot_version.find('extension').text
value = snapshot_version.find('value').text
extension_version_dict[extension] = value
if snapshot_version.find('classifier') is not None:
classifier = snapshot_version.find('classifier').text
extension_version_dict[classifier] = value
return {
'snapshot_versions': extension_version_dict

View File

@ -19,8 +19,7 @@ The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
Capirca is not yet available on PyPI therefore it has to be installed
directly from Git: ``pip install -e git+git@github.com:google/capirca.git#egg=aclgen``.
To install Capirca, execute: ``pip install capirca``.
'''
from __future__ import absolute_import, print_function, unicode_literals
@ -34,7 +33,10 @@ log = logging.getLogger(__file__)
# Import third party libs
from salt.ext import six
try:
import aclgen
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
except ImportError:
HAS_CAPIRCA = False
@ -69,10 +71,12 @@ def __virtual__():
# module globals
# ------------------------------------------------------------------------------
# define the default values for all possible term fields
# we could also extract them from the `policy` module, inspecting the `Policy`
# class, but that might be overkill & it would make the code less obvious.
# we can revisit this later if necessary.
_TERM_FIELDS = {
'action': [],
'address': [],
@ -161,7 +165,19 @@ _SERVICES = {}
if HAS_CAPIRCA:
class _Policy(aclgen.policy.Policy):
_TempTerm = capirca.lib.policy.Term
def _add_object(self, obj):
return
setattr(_TempTerm, 'AddObject', _add_object)
dumy_term = _TempTerm(None)
for item in dir(dumy_term):
if hasattr(item, '__func__') or item.startswith('_') or item != item.lower():
continue
_TERM_FIELDS[item] = getattr(dumy_term, item)
class _Policy(capirca.lib.policy.Policy):
'''
Extending the Capirca Policy class to allow inserting custom filters.
'''
@ -169,7 +185,7 @@ if HAS_CAPIRCA:
self.filters = []
self.filename = ''
class _Term(aclgen.policy.Term):
class _Term(capirca.lib.policy.Term):
'''
Extending the Capirca Term class to allow setting field valued on the fly.
'''
@ -186,10 +202,10 @@ def _import_platform_generator(platform):
for a class inheriting the `ACLGenerator` class.
'''
log.debug('Using platform: {plat}'.format(plat=platform))
for mod_name, mod_obj in inspect.getmembers(aclgen):
for mod_name, mod_obj in inspect.getmembers(capirca.aclgen):
if mod_name == platform and inspect.ismodule(mod_obj):
for plat_obj_name, plat_obj in inspect.getmembers(mod_obj): # pylint: disable=unused-variable
if inspect.isclass(plat_obj) and issubclass(plat_obj, aclgen.aclgenerator.ACLGenerator):
if inspect.isclass(plat_obj) and issubclass(plat_obj, capirca.lib.aclgenerator.ACLGenerator):
log.debug('Identified Capirca class {cls} for {plat}'.format(
cls=plat_obj,
plat=platform))
@ -366,7 +382,11 @@ def _clean_term_opts(term_opts):
# IP-type fields need to be transformed
ip_values = []
for addr in value:
ip_values.append(aclgen.policy.nacaddr.IP(addr))
if six.PY2:
addr = six.text_type(addr)
# Adding this, as ipaddress would complain about valid
# addresses not being valid. #pythonIsFun
ip_values.append(capirca.lib.policy.nacaddr.IP(addr))
value = ip_values[:]
clean_opts[field] = value
return clean_opts
@ -427,7 +447,7 @@ def _merge_list_of_dict(first, second, prepend=True):
if first and not second:
return first
# Determine overlaps
# So we don't change the position of the existing terms/filters
# So we dont change the position of the existing terms/filters
overlaps = []
merged = []
appended = []
@ -514,7 +534,7 @@ def _get_policy_object(platform,
continue # go to the next filter
filter_name = filter_.keys()[0]
filter_config = filter_.values()[0]
header = aclgen.policy.Header() # same header everywhere
header = capirca.lib.policy.Header() # same header everywhere
target_opts = [
platform,
filter_name
@ -524,7 +544,7 @@ def _get_policy_object(platform,
filter_options = _make_it_list({}, filter_name, filter_options)
# make sure the filter options are sent as list
target_opts.extend(filter_options)
target = aclgen.policy.Target(target_opts)
target = capirca.lib.policy.Target(target_opts)
header.AddObject(target)
filter_terms = []
for term_ in filter_config.get('terms', []):

View File

@ -1913,9 +1913,11 @@ def get_network_settings():
hostname = _parse_hostname()
domainname = _parse_domainname()
searchdomain = _parse_searchdomain()
settings['hostname'] = hostname
settings['domainname'] = domainname
settings['searchdomain'] = searchdomain
else:
settings = _parse_current_network_settings()

View File

@ -22,6 +22,7 @@ import re
# Import salt libs
import salt.utils.args
import salt.utils.compat
import salt.utils.data
import salt.utils.functools
import salt.utils.path
@ -31,9 +32,6 @@ import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError
from salt.ext import six
# Workaround for 'reload' builtin of py2.7
if six.PY3:
from importlib import reload # pylint: disable=no-name-in-module
# Import third party libs
HAS_PORTAGE = False
@ -69,13 +67,13 @@ def __virtual__():
def _vartree():
import portage # pylint: disable=3rd-party-module-not-gated
portage = reload(portage)
portage = salt.utils.compat.reload(portage)
return portage.db[portage.root]['vartree']
def _porttree():
import portage # pylint: disable=3rd-party-module-not-gated
portage = reload(portage)
portage = salt.utils.compat.reload(portage)
return portage.db[portage.root]['porttree']

View File

@ -226,6 +226,7 @@ def _resolve_user_group_names(opts):
if _info and _param in _info:
_id = _info[_param]
opts[ind] = _param + '=' + six.text_type(_id)
opts[ind] = opts[ind].replace('\\040', '\\ ')
return opts
@ -727,7 +728,7 @@ def set_fstab(
'name': name,
'device': device.replace('\\ ', '\\040'),
'fstype': fstype,
'opts': opts,
'opts': opts.replace('\\ ', '\\040'),
'dump': dump,
'pass_num': pass_num,
}

View File

@ -81,7 +81,11 @@ __grants__ = [
'ALL PRIVILEGES',
'ALTER',
'ALTER ROUTINE',
'BACKUP_ADMIN',
'BINLOG_ADMIN',
'CONNECTION_ADMIN',
'CREATE',
'CREATE ROLE',
'CREATE ROUTINE',
'CREATE TABLESPACE',
'CREATE TEMPORARY TABLES',
@ -89,26 +93,37 @@ __grants__ = [
'CREATE VIEW',
'DELETE',
'DROP',
'DROP ROLE',
'ENCRYPTION_KEY_ADMIN',
'EVENT',
'EXECUTE',
'FILE',
'GRANT OPTION',
'GROUP_REPLICATION_ADMIN',
'INDEX',
'INSERT',
'LOCK TABLES',
'PERSIST_RO_VARIABLES_ADMIN',
'PROCESS',
'REFERENCES',
'RELOAD',
'REPLICATION CLIENT',
'REPLICATION SLAVE',
'REPLICATION_SLAVE_ADMIN',
'RESOURCE_GROUP_ADMIN',
'RESOURCE_GROUP_USER',
'ROLE_ADMIN',
'SELECT',
'SET_USER_ID',
'SHOW DATABASES',
'SHOW VIEW',
'SHUTDOWN',
'SUPER',
'SYSTEM_VARIABLES_ADMIN',
'TRIGGER',
'UPDATE',
'USAGE'
'USAGE',
'XA_RECOVER_ADMIN'
]
__ssl_options_parameterized__ = [
@ -121,6 +136,52 @@ __ssl_options__ = __ssl_options_parameterized__ + [
'X509'
]
__all_privileges__ = [
'ALTER',
'ALTER ROUTINE',
'BACKUP_ADMIN',
'BINLOG_ADMIN',
'CONNECTION_ADMIN',
'CREATE',
'CREATE ROLE',
'CREATE ROUTINE',
'CREATE TABLESPACE',
'CREATE TEMPORARY TABLES',
'CREATE USER',
'CREATE VIEW',
'DELETE',
'DROP',
'DROP ROLE',
'ENCRYPTION_KEY_ADMIN',
'EVENT',
'EXECUTE',
'FILE',
'GROUP_REPLICATION_ADMIN',
'INDEX',
'INSERT',
'LOCK TABLES',
'PERSIST_RO_VARIABLES_ADMIN',
'PROCESS',
'REFERENCES',
'RELOAD',
'REPLICATION CLIENT',
'REPLICATION SLAVE',
'REPLICATION_SLAVE_ADMIN',
'RESOURCE_GROUP_ADMIN',
'RESOURCE_GROUP_USER',
'ROLE_ADMIN',
'SELECT',
'SET_USER_ID',
'SHOW DATABASES',
'SHOW VIEW',
'SHUTDOWN',
'SUPER',
'SYSTEM_VARIABLES_ADMIN',
'TRIGGER',
'UPDATE',
'XA_RECOVER_ADMIN'
]
r'''
DEVELOPER NOTE: ABOUT arguments management, escapes, formats, arguments and
security of SQL.
@ -810,7 +871,7 @@ def version(**connection_args):
return ''
try:
return cur.fetchone()[0]
return salt.utils.data.decode(cur.fetchone()[0])
except IndexError:
return ''
@ -1789,12 +1850,12 @@ def user_grants(user,
def grant_exists(grant,
database,
user,
host='localhost',
grant_option=False,
escape=True,
**connection_args):
database,
user,
host='localhost',
grant_option=False,
escape=True,
**connection_args):
'''
Checks to see if a grant exists in the database
@ -1805,6 +1866,14 @@ def grant_exists(grant,
salt '*' mysql.grant_exists \
'SELECT,INSERT,UPDATE,...' 'database.*' 'frank' 'localhost'
'''
server_version = version(**connection_args)
if 'ALL' in grant:
if salt.utils.versions.version_cmp(server_version, '8.0') >= 0:
grant = ','.join([i for i in __all_privileges__])
else:
grant = 'ALL PRIVILEGES'
try:
target = __grant_generate(
grant, database, user, host, grant_option, escape
@ -1820,15 +1889,27 @@ def grant_exists(grant,
'this could also indicate a connection error. Check your configuration.')
return False
target_tokens = None
# Combine grants that match the same database
_grants = {}
for grant in grants:
try:
if not target_tokens: # Avoid the overhead of re-calc in loop
target_tokens = _grant_to_tokens(target)
grant_tokens = _grant_to_tokens(grant)
grant_token = _grant_to_tokens(grant)
if grant_token['database'] not in _grants:
_grants[grant_token['database']] = {'user': grant_token['user'],
'database': grant_token['database'],
'host': grant_token['host'],
'grant': grant_token['grant']}
else:
_grants[grant_token['database']]['grant'].extend(grant_token['grant'])
target_tokens = _grant_to_tokens(target)
for database, grant_tokens in _grants.items():
try:
_grant_tokens = {}
_target_tokens = {}
_grant_matches = [True if i in grant_tokens['grant']
else False for i in target_tokens['grant']]
for item in ['user', 'database', 'host']:
_grant_tokens[item] = grant_tokens[item].replace('"', '').replace('\\', '').replace('`', '')
_target_tokens[item] = target_tokens[item].replace('"', '').replace('\\', '').replace('`', '')
@ -1836,7 +1917,7 @@ def grant_exists(grant,
if _grant_tokens['user'] == _target_tokens['user'] and \
_grant_tokens['database'] == _target_tokens['database'] and \
_grant_tokens['host'] == _target_tokens['host'] and \
set(grant_tokens['grant']) >= set(target_tokens['grant']):
all(_grant_matches):
return True
else:
log.debug('grants mismatch \'%s\'<>\'%s\'', grant_tokens, target_tokens)

View File

@ -19,6 +19,8 @@ The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
@ -34,7 +36,10 @@ log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import aclgen
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:

View File

@ -59,7 +59,7 @@ def cmd(command, *args, **kwargs):
proxy_cmd = '.'.join([proxy_prefix, command])
if proxy_cmd not in __proxy__:
return False
for k in kwargs:
for k in list(kwargs):
if k.startswith('__pub_'):
kwargs.pop(k)
return __proxy__[proxy_cmd](*args, **kwargs)

View File

@ -29,7 +29,7 @@ import requests
import salt.exceptions
import salt.utils.json
API_ENDPOINT = "https://api.opsgenie.com/v1/json/saltstack?apiKey="
API_ENDPOINT = "https://api.opsgenie.com/v2/alerts"
log = logging.getLogger(__name__)
@ -68,14 +68,14 @@ def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
functionality you must provide name field for both states like in
this case.
'''
if api_key is None or reason is None or action_type is None:
if api_key is None or reason is None:
raise salt.exceptions.SaltInvocationError(
'API Key or Reason or Action Type cannot be None.')
'API Key or Reason cannot be None.')
data = dict()
data['name'] = name
data['reason'] = reason
data['actionType'] = action_type
data['alias'] = name
data['message'] = reason
# data['actions'] = action_type
data['cpuModel'] = __grains__['cpu_model']
data['cpuArch'] = __grains__['cpuarch']
data['fqdn'] = __grains__['fqdn']
@ -93,8 +93,17 @@ def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
log.debug('Below data will be posted:\n%s', data)
log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT)
response = requests.post(
url=API_ENDPOINT + api_key,
data=salt.utils.json.dumps(data),
headers={'Content-Type': 'application/json'})
if action_type == "Create":
response = requests.post(
url=API_ENDPOINT,
data=salt.utils.json.dumps(data),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey ' + api_key})
else:
response = requests.post(
url=API_ENDPOINT + "/" + name + "/close?identifierType=alias",
data=salt.utils.json.dumps(data),
headers={'Content-Type': 'application/json',
'Authorization': 'GenieKey ' + api_key})
return response.status_code, response.text

View File

@ -10,6 +10,7 @@ import os
import shutil
# Import salt libs
import salt.utils.compat
import salt.utils.data
import salt.utils.files
import salt.utils.path
@ -57,7 +58,7 @@ def _get_portage():
portage module must be reloaded or it can't catch the changes
in portage.* which had been added after when the module was loaded
'''
return reload(portage)
return salt.utils.compat.reload(portage)
def _porttree():

View File

@ -349,8 +349,8 @@ def set_date(name, date):
salt '*' shadow.set_date username 0
'''
cmd = 'chage -d {0} {1}'.format(date, name)
return not __salt__['cmd.run'](cmd, python_shell=False)
cmd = ['chage', '-d', date, name]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def set_expire(name, expire):
@ -367,8 +367,8 @@ def set_expire(name, expire):
salt '*' shadow.set_expire username -1
'''
cmd = 'chage -E {0} {1}'.format(expire, name)
return not __salt__['cmd.run'](cmd, python_shell=False)
cmd = ['chage', '-E', expire, name]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def list_users():

View File

@ -412,7 +412,7 @@ def create_snapshot(config='root', snapshot_type='single', pre_number=None,
cleanup_algorithm, userdata)
else:
raise CommandExecutionError(
"Invalid snapshot type '{0}'", format(snapshot_type))
"Invalid snapshot type '{0}'".format(snapshot_type))
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing changed files: {0}'

View File

@ -357,6 +357,9 @@ def gets_service_instance_via_proxy(fn):
local_service_instance = \
salt.utils.vmware.get_service_instance(
*connection_details)
# Tuples are immutable, so if we want to change what
# was passed in, we need to first convert to a list.
args = list(args)
args[idx] = local_service_instance
else:
# case 2: Not enough positional parameters so

View File

@ -0,0 +1,179 @@
# -*- coding: utf-8 -*-
'''
A salt module for modifying the audit policies on the machine
Though this module does not set group policy for auditing, it displays how all
auditing configuration is applied on the machine, either set directly or via
local or domain group policy.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.1
This module allows you to view and modify the audit settings as they are applied
on the machine. The audit settings are broken down into nine categories:
- Account Logon
- Account Management
- Detailed Tracking
- DS Access
- Logon/Logoff
- Object Access
- Policy Change
- Privilege Use
- System
The ``get_settings`` function will return the subcategories for all nine of
the above categories in one dictionary along with their auditing status.
To modify a setting you only need to specify the subcategory name and the value
you wish to set. Valid settings are:
- No Auditing
- Success
- Failure
- Success and Failure
CLI Example:
.. code-block:: bash
# Get current state of all audit settings
salt * auditpol.get_settings
# Get the current state of all audit settings in the "Account Logon"
# category
salt * auditpol.get_settings category="Account Logon"
# Get current state of the "Credential Validation" setting
salt * auditpol.get_setting name="Credential Validation"
# Set the state of the "Credential Validation" setting to Success and
# Failure
salt * auditpol.set_setting name="Credential Validation" value="Success and Failure"
# Set the state of the "Credential Validation" setting to No Auditing
salt * auditpol.set_setting name="Credential Validation" value="No Auditing"
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.platform
# Define the module's virtual name
__virtualname__ = 'auditpol'
def __virtual__():
    '''
    Load this module only on Windows, since it drives the Windows
    ``auditpol`` machinery via the shared auditpol utility.
    '''
    # salt.utils.platform.is_windows() is the canonical platform check
    # used throughout the Salt code base.
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "Module win_auditpol: module only available on Windows"
def get_settings(category='All'):
    '''
    Get the current configuration for all audit settings specified in the
    category

    Args:
        category (str):
            One of the nine categories to return. Can also be ``All`` to return
            the settings for all categories. Valid options are:

            - Account Logon
            - Account Management
            - Detailed Tracking
            - DS Access
            - Logon/Logoff
            - Object Access
            - Policy Change
            - Privilege Use
            - System
            - All

            Default value is ``All``

    Returns:
        dict: A dictionary containing all subcategories for the specified
            category along with their current configuration

    Raises:
        KeyError: On invalid category
        CommandExecutionError: If an error is encountered retrieving the settings

    CLI Example:

    .. code-block:: bash

        # Get current state of all audit settings
        salt * auditpol.get_settings

        # Get the current state of all audit settings in the "Account Logon"
        # category
        salt * auditpol.get_settings "Account Logon"
    '''
    # Delegate to the shared auditpol utility so that execution modules and
    # state modules use a single implementation.
    fetch = __utils__['auditpol.get_settings']
    return fetch(category=category)
def get_setting(name):
    '''
    Get the current configuration for the named audit setting

    Args:
        name (str): The name of the setting to retrieve

    Returns:
        str: The current configuration for the named setting

    Raises:
        KeyError: On invalid setting name
        CommandExecutionError: If an error is encountered retrieving the settings

    CLI Example:

    .. code-block:: bash

        # Get current state of the "Credential Validation" setting
        salt * auditpol.get_setting "Credential Validation"
    '''
    # Thin wrapper: the actual lookup lives in the shared auditpol utility.
    fetch = __utils__['auditpol.get_setting']
    return fetch(name=name)
def set_setting(name, value):
    '''
    Set the configuration for the named audit setting

    Args:
        name (str):
            The name of the setting to configure

        value (str):
            The configuration for the named value. Valid options are:

            - No Auditing
            - Success
            - Failure
            - Success and Failure

    Returns:
        bool: True if successful

    Raises:
        KeyError: On invalid ``name`` or ``value``
        CommandExecutionError: If an error is encountered modifying the setting

    CLI Example:

    .. code-block:: bash

        # Set the state of the "Credential Validation" setting to Success and
        # Failure
        salt * auditpol.set_setting "Credential Validation" "Success and Failure"

        # Set the state of the "Credential Validation" setting to No Auditing
        salt * auditpol.set_setting "Credential Validation" "No Auditing"
    '''
    # Thin wrapper: validation and the auditpol invocation are handled by
    # the shared auditpol utility.
    apply_setting = __utils__['auditpol.set_setting']
    return apply_setting(name=name, value=value)

View File

@ -39,12 +39,14 @@ Current known limitations
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import csv
import io
import os
import logging
import re
import locale
import ctypes
import tempfile
import time
# Import Salt libs
@ -280,6 +282,19 @@ class _policy_info(object):
netsh advfirewall>set help
netsh advfirewall>set domain help
AdvAudit Mechanism
------------------
The Advanced Audit Policies are configured using a combination of the
auditpol command-line utility and modifying the audit.csv file in two
locations. The value of this key is a dict with the following make-up:
====== ===================================
Key Value
====== ===================================
Option The Advanced Audit Policy to modify
====== ===================================
Transforms
----------
@ -310,6 +325,13 @@ class _policy_info(object):
'Not Defined': 'Not Defined',
None: 'Not Defined',
}
self.advanced_audit_lookup = {
0: 'No Auditing',
1: 'Success',
2: 'Failure',
3: 'Success and Failure',
None: 'Not Configured',
}
self.sc_removal_lookup = {
0: 'No Action',
1: 'Lock Workstation',
@ -372,6 +394,18 @@ class _policy_info(object):
'value_lookup': True,
},
}
self.advanced_audit_transform = {
'Get': '_dict_lookup',
'Put': '_dict_lookup',
'GetArgs': {
'lookup': self.advanced_audit_lookup,
'value_lookup': False,
},
'PutArgs': {
'lookup': self.advanced_audit_lookup,
'value_lookup': True,
},
}
self.enabled_one_disabled_zero_strings = {
'0': 'Disabled',
'1': 'Enabled',
@ -418,6 +452,13 @@ class _policy_info(object):
'Local Policies',
'Audit Policy'
]
self.advanced_audit_policy_gpedit_path = [
'Computer Configuration',
'Windows Settings',
'Security Settings',
'Advanced Audit Policy Configuration',
'System Audit Policies - Local Group Policy Object'
]
self.account_lockout_policy_gpedit_path = [
'Computer Configuration',
'Windows Settings',
@ -2603,6 +2644,11 @@ class _policy_info(object):
'Put': '_minutes_to_seconds'
},
},
########## LEGACY AUDIT POLICIES ##########
# To use these set the following policy to DISABLED
# "Audit: Force audit policy subcategory settings (Windows Vista or later) to override audit policy category settings"
            # or its alias...
# SceNoApplyLegacyAuditPolicy
'AuditAccountLogon': {
'Policy': 'Audit account logon events',
'lgpo_section': self.audit_policy_gpedit_path,
@ -2693,6 +2739,557 @@ class _policy_info(object):
},
'Transform': self.audit_transform,
},
########## END OF LEGACY AUDIT POLICIES ##########
########## ADVANCED AUDIT POLICIES ##########
# Advanced Audit Policies
# To use these set the following policy to ENABLED
# "Audit: Force audit policy subcategory settings (Windows
# Vista or later) to override audit policy category
# settings"
            # or its alias...
# SceNoApplyLegacyAuditPolicy
# Account Logon Section
'AuditCredentialValidation': {
'Policy': 'Audit Credential Validation',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Credential Validation',
},
'Transform': self.advanced_audit_transform,
},
'AuditKerberosAuthenticationService': {
'Policy': 'Audit Kerberos Authentication Service',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Kerberos Authentication Service',
},
'Transform': self.advanced_audit_transform,
},
'AuditKerberosServiceTicketOperations': {
'Policy': 'Audit Kerberos Service Ticket Operations',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Kerberos Service Ticket Operations',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherAccountLogonEvents': {
'Policy': 'Audit Other Account Logon Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Account Logon Events',
},
'Transform': self.advanced_audit_transform,
},
# Account Management Section
'AuditApplicationGroupManagement': {
'Policy': 'Audit Application Group Management',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Application Group Management',
},
'Transform': self.advanced_audit_transform,
},
'AuditComputerAccountManagement': {
'Policy': 'Audit Computer Account Management',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Computer Account Management',
},
'Transform': self.advanced_audit_transform,
},
'AuditDistributionGroupManagement': {
'Policy': 'Audit Distribution Group Management',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Distribution Group Management',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherAccountManagementEvents': {
'Policy': 'Audit Other Account Management Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Account Management Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditSecurityGroupManagement': {
'Policy': 'Audit Security Group Management',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Security Group Management',
},
'Transform': self.advanced_audit_transform,
},
'AuditUserAccountManagement': {
'Policy': 'Audit User Account Management',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit User Account Management',
},
'Transform': self.advanced_audit_transform,
},
# Detailed Tracking Settings
'AuditDPAPIActivity': {
'Policy': 'Audit DPAPI Activity',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit DPAPI Activity',
},
'Transform': self.advanced_audit_transform,
},
'AuditPNPActivity': {
'Policy': 'Audit PNP Activity',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit PNP Activity',
},
'Transform': self.advanced_audit_transform,
},
'AuditProcessCreation': {
'Policy': 'Audit Process Creation',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Process Creation',
},
'Transform': self.advanced_audit_transform,
},
'AuditProcessTermination': {
'Policy': 'Audit Process Termination',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Process Termination',
},
'Transform': self.advanced_audit_transform,
},
'AuditRPCEvents': {
'Policy': 'Audit RPC Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit RPC Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditTokenRightAdjusted': {
'Policy': 'Audit Token Right Adjusted',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Token Right Adjusted',
},
'Transform': self.advanced_audit_transform,
},
# DS Access Section
'AuditDetailedDirectoryServiceReplication': {
'Policy': 'Audit Detailed Directory Service Replication',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Detailed Directory Service Replication',
},
'Transform': self.advanced_audit_transform,
},
'AuditDirectoryServiceAccess': {
'Policy': 'Audit Directory Service Access',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Directory Service Access',
},
'Transform': self.advanced_audit_transform,
},
'AuditDirectoryServiceChanges': {
'Policy': 'Audit Directory Service Changes',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Directory Service Changes',
},
'Transform': self.advanced_audit_transform,
},
'AuditDirectoryServiceReplication': {
'Policy': 'Audit Directory Service Replication',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Directory Service Replication',
},
'Transform': self.advanced_audit_transform,
},
# Logon/Logoff Section
'AuditAccountLockout': {
'Policy': 'Audit Account Lockout',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Account Lockout',
},
'Transform': self.advanced_audit_transform,
},
'AuditUserDeviceClaims': {
'Policy': 'Audit User / Device Claims',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit User / Device Claims',
},
'Transform': self.advanced_audit_transform,
},
'AuditGroupMembership': {
'Policy': 'Audit Group Membership',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Group Membership',
},
'Transform': self.advanced_audit_transform,
},
'AuditIPsecExtendedMode': {
'Policy': 'Audit IPsec Extended Mode',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit IPsec Extended Mode',
},
'Transform': self.advanced_audit_transform,
},
'AuditIPsecMainMode': {
'Policy': 'Audit IPsec Main Mode',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit IPsec Main Mode',
},
'Transform': self.advanced_audit_transform,
},
'AuditIPsecQuickMode': {
'Policy': 'Audit IPsec Quick Mode',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit IPsec Quick Mode',
},
'Transform': self.advanced_audit_transform,
},
'AuditLogoff': {
'Policy': 'Audit Logoff',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Logoff',
},
'Transform': self.advanced_audit_transform,
},
'AuditLogon': {
'Policy': 'Audit Logon',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Logon',
},
'Transform': self.advanced_audit_transform,
},
'AuditNetworkPolicyServer': {
'Policy': 'Audit Network Policy Server',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Network Policy Server',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherLogonLogoffEvents': {
'Policy': 'Audit Other Logon/Logoff Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Logon/Logoff Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditSpecialLogon': {
'Policy': 'Audit Special Logon',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Special Logon',
},
'Transform': self.advanced_audit_transform,
},
# Object Access Section
'AuditApplicationGenerated': {
'Policy': 'Audit Application Generated',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Application Generated',
},
'Transform': self.advanced_audit_transform,
},
'AuditCertificationServices': {
'Policy': 'Audit Certification Services',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Certification Services',
},
'Transform': self.advanced_audit_transform,
},
'AuditDetailedFileShare': {
'Policy': 'Audit Detailed File Share',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Detailed File Share',
},
'Transform': self.advanced_audit_transform,
},
'AuditFileShare': {
'Policy': 'Audit File Share',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit File Share',
},
'Transform': self.advanced_audit_transform,
},
'AuditFileSystem': {
'Policy': 'Audit File System',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit File System',
},
'Transform': self.advanced_audit_transform,
},
'AuditFilteringPlatformConnection': {
'Policy': 'Audit Filtering Platform Connection',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Filtering Platform Connection',
},
'Transform': self.advanced_audit_transform,
},
'AuditFilteringPlatformPacketDrop': {
'Policy': 'Audit Filtering Platform Packet Drop',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Filtering Platform Packet Drop',
},
'Transform': self.advanced_audit_transform,
},
'AuditHandleManipulation': {
'Policy': 'Audit Handle Manipulation',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Handle Manipulation',
},
'Transform': self.advanced_audit_transform,
},
'AuditKernelObject': {
'Policy': 'Audit Kernel Object',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Kernel Object',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherObjectAccessEvents': {
'Policy': 'Audit Other Object Access Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Object Access Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditRegistry': {
'Policy': 'Audit Registry',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Registry',
},
'Transform': self.advanced_audit_transform,
},
'AuditRemovableStorage': {
'Policy': 'Audit Removable Storage',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Removable Storage',
},
'Transform': self.advanced_audit_transform,
},
'AuditSAM': {
'Policy': 'Audit SAM',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit SAM',
},
'Transform': self.advanced_audit_transform,
},
'AuditCentralAccessPolicyStaging': {
'Policy': 'Audit Central Access Policy Staging',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Central Access Policy Staging',
},
'Transform': self.advanced_audit_transform,
},
# Policy Change Section
'AuditAuditPolicyChange': {
'Policy': 'Audit Audit Policy Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Audit Policy Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditAuthenticationPolicyChange': {
'Policy': 'Audit Authentication Policy Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Authentication Policy Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditAuthorizationPolicyChange': {
'Policy': 'Audit Authorization Policy Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Authorization Policy Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditFilteringPlatformPolicyChange': {
'Policy': 'Audit Filtering Platform Policy Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Filtering Platform Policy Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditMPSSVCRuleLevelPolicyChange': {
'Policy': 'Audit MPSSVC Rule-Level Policy Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit MPSSVC Rule-Level Policy Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherPolicyChangeEvents': {
'Policy': 'Audit Other Policy Change Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Policy Change Events',
},
'Transform': self.advanced_audit_transform,
},
# Privilege Use Section
'AuditNonSensitivePrivilegeUse': {
'Policy': 'Audit Non Sensitive Privilege Use',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Non Sensitive Privilege Use',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherPrivilegeUseEvents': {
'Policy': 'Audit Other Privilege Use Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other Privilege Use Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditSensitivePrivilegeUse': {
'Policy': 'Audit Sensitive Privilege Use',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Sensitive Privilege Use',
},
'Transform': self.advanced_audit_transform,
},
# System Section
'AuditIPsecDriver': {
'Policy': 'Audit IPsec Driver',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit IPsec Driver',
},
'Transform': self.advanced_audit_transform,
},
'AuditOtherSystemEvents': {
'Policy': 'Audit Other System Events',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Other System Events',
},
'Transform': self.advanced_audit_transform,
},
'AuditSecurityStateChange': {
'Policy': 'Audit Security State Change',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Security State Change',
},
'Transform': self.advanced_audit_transform,
},
'AuditSecuritySystemExtension': {
'Policy': 'Audit Security System Extension',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit Security System Extension',
},
'Transform': self.advanced_audit_transform,
},
'AuditSystemIntegrity': {
'Policy': 'Audit System Integrity',
'lgpo_section': self.advanced_audit_policy_gpedit_path,
'Settings': self.advanced_audit_lookup.keys(),
'AdvAudit': {
'Option': 'Audit System Integrity',
},
'Transform': self.advanced_audit_transform,
},
########## END OF ADVANCED AUDIT POLICIES ##########
'SeTrustedCredManAccessPrivilege': {
'Policy': 'Access Credential Manager as a trusted '
'caller',
@ -4349,6 +4946,296 @@ def _buildElementNsmap(using_elements):
return thisMap
def _get_audit_defaults(option=None):
    '''
    Load the ``auditpol`` defaults into ``__context__['lgpo.audit_defaults']``
    and return them (or a single entry).

    The dictionary contains one ``fieldnames`` entry (the CSV header columns)
    plus one entry per audit policy, keyed by the audit.csv policy name. The
    values are used to create/modify the ``audit.csv`` file.

    Sample data follows:

    .. code-block:: python

        {
            'fieldnames': ['Machine Name',
                           'Policy Target',
                           'Subcategory',
                           'Subcategory GUID',
                           'Inclusion Setting',
                           'Exclusion Setting',
                           'Setting Value'],
            'Audit Sensitive Privilege Use': {
                'Auditpol Name': 'Sensitive Privilege Use',
                'Exclusion Setting': '',
                'Inclusion Setting': 'No Auditing',
                'Machine Name': 'WIN-8FGT3E045SE',
                'Policy Target': 'System',
                'Setting Value': '0',
                'Subcategory': u'Audit Sensitive Privilege Use',
                'Subcategory GUID': '{0CCE9228-69AE-11D9-BED3-505054503030}'},
            ...
        }

    .. note::
        ``Auditpol Name`` designates the value to use when setting the value
        with the auditpol command, which can differ from the audit.csv name.

    Args:
        option (str): The item from the dictionary to return. If ``None`` the
            entire dictionary is returned. Default is ``None``

    Returns:
        dict: If ``None`` or one of the audit settings is passed
        list: If ``fieldnames`` is passed

    Raises:
        KeyError: If ``option`` is not a known setting name or ``fieldnames``
    '''
    if 'lgpo.audit_defaults' not in __context__:
        # Get available setting names and GUIDs
        # This is used to get the fieldnames and GUIDs for individual policies
        log.debug('Loading auditpol defaults into __context__')
        # The dump is a CSV document produced by `auditpol /backup`
        dump = __utils__['auditpol.get_auditpol_dump']()
        reader = csv.DictReader(dump)
        audit_defaults = {'fieldnames': reader.fieldnames}
        for row in reader:
            # Machine Name is host-specific; blank it so defaults are portable
            row['Machine Name'] = ''
            # Remember the raw auditpol name before it is rewritten below
            row['Auditpol Name'] = row['Subcategory']
            # Special handling for snowflake scenarios where the audit.csv names
            # don't match the auditpol names
            if row['Subcategory'] == 'Central Policy Staging':
                row['Subcategory'] = 'Audit Central Access Policy Staging'
            elif row['Subcategory'] == 'Plug and Play Events':
                row['Subcategory'] = 'Audit PNP Activity'
            elif row['Subcategory'] == 'Token Right Adjusted Events':
                row['Subcategory'] = 'Audit Token Right Adjusted'
            else:
                # Normal case: audit.csv names are the auditpol names with an
                # 'Audit ' prefix
                row['Subcategory'] = 'Audit {0}'.format(row['Subcategory'])
            audit_defaults[row['Subcategory']] = row

        # Cache in __context__ so the auditpol dump only runs once per worker
        __context__['lgpo.audit_defaults'] = audit_defaults

    if option:
        return __context__['lgpo.audit_defaults'][option]
    else:
        return __context__['lgpo.audit_defaults']
def _findOptionValueAdvAudit(option):
    '''
    Look up the configured value of an Advanced Auditing policy as stored in
    ``C:\\Windows\\Security\\Audit\\audit.csv``.

    The full audit.csv contents are parsed once and cached in
    ``__context__['lgpo.adv_audit_data']``; subsequent lookups hit the cache.

    Args:
        option (str): The name of the setting as it appears in audit.csv

    Returns:
        str: The configured setting value, or ``None`` if the setting is not
            present in audit.csv
    '''
    if 'lgpo.adv_audit_data' not in __context__:
        win_root = os.environ.get('SystemRoot', 'C:\\Windows')
        audit_csv = os.path.join(win_root, 'security', 'audit', 'audit.csv')
        audit_csv_gpo = os.path.join(
            win_root, 'System32', 'GroupPolicy', 'Machine', 'Microsoft',
            'Windows NT', 'Audit', 'audit.csv')

        # Make sure there is an existing audit.csv file on the machine
        if not __salt__['file.file_exists'](audit_csv):
            if __salt__['file.file_exists'](audit_csv_gpo):
                # The GPO copy exists; use it to seed the machine copy
                __salt__['file.copy'](audit_csv_gpo, audit_csv)
            else:
                # No audit.csv anywhere; create one containing only the
                # default header row
                __salt__['file.touch'](audit_csv)
                __salt__['file.append'](
                    audit_csv, ','.join(_get_audit_defaults('fieldnames')))

        # Parse the file and cache a Subcategory -> Setting Value mapping
        with salt.utils.files.fopen(audit_csv, mode='r') as handle:
            __context__['lgpo.adv_audit_data'] = {
                entry['Subcategory']: entry['Setting Value']
                for entry in csv.DictReader(handle)}

    return __context__['lgpo.adv_audit_data'].get(option, None)
def _set_audit_file_data(option, value):
    '''
    Helper function that sets the Advanced Audit settings in the two .csv files
    on Windows. Those files are located at:

    C:\\Windows\\Security\\Audit\\audit.csv
    C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv

    The change is written to a temporary csv file first; both targets are only
    overwritten when a row was actually written/changed. The temp file is
    always cleaned up.

    Args:
        option (str): The name of the option to set
        value (str): The value to set. ['None', '0', '1', '2', '3'].
            ``'None'`` removes the setting's row from the files.

    Returns:
        bool: ``True`` if successful, otherwise ``False``
    '''
    # Set up some paths here
    system_root = os.environ.get('SystemRoot', 'C:\\Windows')
    f_audit = os.path.join(system_root, 'security', 'audit', 'audit.csv')
    f_audit_gpo = os.path.join(system_root, 'System32', 'GroupPolicy',
                               'Machine', 'Microsoft', 'Windows NT',
                               'Audit', 'audit.csv')
    # delete=False: the file is re-opened by name below and removed manually
    f_temp = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.csv',
                                         prefix='audit')
    # Lookup dict for "Inclusion Setting" field
    # (maps the numeric Setting Value to auditpol's human-readable text)
    auditpol_values = {'None': 'No Auditing',
                       '0': 'No Auditing',
                       '1': 'Success',
                       '2': 'Failure',
                       '3': 'Success and Failure'}
    try:
        # Open the existing audit.csv and load the csv `reader`
        with salt.utils.files.fopen(f_audit, mode='r') as csv_file:
            reader = csv.DictReader(csv_file)
            # Open the temporary .csv and load the csv `writer`
            with salt.utils.files.fopen(f_temp.name, mode='w') as tmp_file:
                writer = csv.DictWriter(tmp_file, fieldnames=reader.fieldnames)
                # Write the header values (labels)
                writer.writeheader()
                value_written = False
                # Loop through the current audit.csv and write the changes to
                # the temp csv file for existing settings
                for row in reader:
                    # If the row matches the value we're setting, update it with
                    # the new value
                    if row['Subcategory'] == option:
                        if not value == 'None':
                            # The value is not None, make the change
                            row['Inclusion Setting'] = auditpol_values[value]
                            row['Setting Value'] = value
                            log.debug('LGPO: Setting {0} to {1}'
                                      ''.format(option, value))
                            writer.writerow(row)
                        else:
                            # value is None, remove it by not writing it to the
                            # temp file
                            log.debug('LGPO: Removing {0}'.format(option))
                        value_written = True
                    # If it's not the value we're setting, just write it
                    else:
                        writer.writerow(row)
                # If a value was not written, it is a new setting not found in
                # the existing audit.cvs file. Add the new setting with values
                # from the defaults
                if not value_written:
                    if not value == 'None':
                        # value is not None, write the new value
                        log.debug('LGPO: Setting {0} to {1}'
                                  ''.format(option, value))
                        defaults = _get_audit_defaults(option)
                        writer.writerow({
                            'Machine Name': defaults['Machine Name'],
                            'Policy Target': defaults['Policy Target'],
                            'Subcategory': defaults['Subcategory'],
                            'Subcategory GUID': defaults['Subcategory GUID'],
                            'Inclusion Setting': auditpol_values[value],
                            'Exclusion Setting': defaults['Exclusion Setting'],
                            'Setting Value': value})
                        value_written = True
        if value_written:
            # Copy the temporary csv file over the existing audit.csv in both
            # locations if a value was written
            __salt__['file.copy'](f_temp.name, f_audit, remove_existing=True)
            __salt__['file.copy'](f_temp.name, f_audit_gpo, remove_existing=True)
    finally:
        f_temp.close()
        __salt__['file.remove'](f_temp.name)
    return value_written
def _set_auditpol_data(option, value):
    '''
    Apply an Advanced Audit setting to the running system via ``auditpol`` so
    it matches what was just written to the audit.csv files. This avoids
    having to run ``gpupdate`` for the change to take effect.

    Args:
        option (str): The name of the option to set
        value (str): The value to set. ['None', '0', '1', '2', '3']

    Returns:
        bool: ``True`` if successful, otherwise ``False``
    '''
    # Map audit.csv numeric setting values to auditpol's text values
    setting_text = {
        'None': 'No Auditing',
        '0': 'No Auditing',
        '1': 'Success',
        '2': 'Failure',
        '3': 'Success and Failure',
    }[value]
    # auditpol uses its own policy name, stored as 'Auditpol Name' in the
    # cached defaults
    return __utils__['auditpol.set_setting'](
        name=_get_audit_defaults(option)['Auditpol Name'],
        value=setting_text)
def _setOptionValueAdvAudit(option, value):
    '''
    Helper function to update the Advanced Audit policy on the machine. This
    function modifies the two ``audit.csv`` files in the following locations:

    C:\\Windows\\Security\\Audit\\audit.csv
    C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv

    Then it applies those settings using ``auditpol``

    After that, it updates ``__context__`` with the new setting

    Args:
        option (str): The name of the option to set
        value (str): The value to set. ['None', '0', '1', '2', '3']

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If the audit.csv files could not be updated
    '''
    # Set the values in both audit.csv files. This is the authoritative store;
    # failure here means nothing changed, so raise.
    if not _set_audit_file_data(option=option, value=value):
        raise CommandExecutionError('Failed to set audit.csv option: {0}'
                                    ''.format(option))
    # Apply the settings locally
    if not _set_auditpol_data(option=option, value=value):
        # Only log this error, it will be in effect the next time the machine
        # updates its policy
        log.debug('Failed to apply audit setting: {0}'.format(option))
    # Update __context__ so subsequent lookups reflect the change.
    # NOTE: callers pass ``value`` through six.text_type(), so the removal
    # case arrives as the string 'None', not the None singleton. Match both,
    # consistent with _set_audit_file_data which drops the row for 'None'.
    if value in (None, 'None'):
        log.debug('LGPO: Removing Advanced Audit data: {0}'.format(option))
        # Default of None so removing a setting that was never cached does
        # not raise a KeyError
        __context__['lgpo.adv_audit_data'].pop(option, None)
    else:
        log.debug('LGPO: Updating Advanced Audit data: {0}: {1}'
                  ''.format(option, value))
        __context__['lgpo.adv_audit_data'][option] = value
    return True
def _findOptionValueNetSH(profile, option):
if 'lgpo.netsh_data' not in __context__:
__context__['lgpo.netsh_data'] = {}
@ -6770,7 +7657,10 @@ def get(policy_class=None, return_full_policy_names=True,
class_vals[policy_name] = _findOptionValueNetSH(
profile=_pol['NetSH']['Profile'],
option=_pol['NetSH']['Option'])
elif 'AdvAudit' in _pol:
# get value from auditpol
class_vals[policy_name] = _findOptionValueAdvAudit(
option=_pol['AdvAudit']['Option'])
elif 'NetUserModal' in _pol:
# get value from UserNetMod
if _pol['NetUserModal']['Modal'] not in modal_returns:
@ -6993,6 +7883,7 @@ def set_(computer_policy=None, user_policy=None,
for p_class in policies:
_secedits = {}
_netshs = {}
_advaudits = {}
_modal_sets = {}
_admTemplateData = {}
_regedits = {}
@ -7041,6 +7932,12 @@ def set_(computer_policy=None, user_policy=None,
'option': _pol['NetSH']['Option'],
'value': six.text_type(_value)
})
elif 'AdvAudit' in _pol:
# set value with advaudit
_advaudits.setdefault(policy_name, {
'option': _pol['AdvAudit']['Option'],
'value': six.text_type(_value)
})
elif 'NetUserModal' in _pol:
# set value via NetUserModal
log.debug('%s is a NetUserModal policy', policy_name)
@ -7237,6 +8134,13 @@ def set_(computer_policy=None, user_policy=None,
log.debug(_netshs[setting])
_setOptionValueNetSH(**_netshs[setting])
if _advaudits:
# We've got AdvAudit settings to make
for setting in _advaudits:
log.debug('Setting Advanced Audit policy: {0}'.format(setting))
log.debug(_advaudits[setting])
_setOptionValueAdvAudit(**_advaudits[setting])
if _modal_sets:
# we've got modalsets to make
log.debug(_modal_sets)

View File

@ -671,7 +671,7 @@ def read_crl(crl):
text = get_pem_entry(text, pem_type='X509 CRL')
crltempfile = tempfile.NamedTemporaryFile()
crltempfile.write(text)
crltempfile.write(salt.utils.stringutils.to_str(text))
crltempfile.flush()
crlparsed = _parse_openssl_crl(crltempfile.name)
crltempfile.close()
@ -776,21 +776,22 @@ def write_pem(text, path, overwrite=True, pem_type=None):
text = get_pem_entry(text, pem_type=pem_type)
_dhparams = ''
_private_key = ''
if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and \
not overwrite:
if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and not overwrite:
_filecontents = _text_or_file(path)
try:
_dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
except salt.exceptions.SaltInvocationError:
pass
except salt.exceptions.SaltInvocationError as err:
log.debug("Error when getting DH PARAMETERS: %s", err)
log.trace(err, exc_info=err)
try:
_private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
except salt.exceptions.SaltInvocationError:
pass
except salt.exceptions.SaltInvocationError as err:
log.debug("Error when getting PRIVATE KEY: %s", err)
log.trace(err, exc_info=err)
with salt.utils.files.fopen(path, 'w') as _fp:
if pem_type and pem_type == 'CERTIFICATE' and _private_key:
_fp.write(salt.utils.stringutils.to_str(_private_key))
_fp.write(text)
_fp.write(salt.utils.stringutils.to_str(text))
if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
_fp.write(salt.utils.stringutils.to_str(_dhparams))
return 'PEM written to {0}'.format(path)
@ -1375,9 +1376,9 @@ def create_certificate(
pem_type='CERTIFICATE REQUEST').replace('\n', '')
if 'public_key' in kwargs:
# Strip newlines to make passing through as cli functions easier
kwargs['public_key'] = get_public_key(
kwargs['public_key'] = salt.utils.stringutils.to_str(get_public_key(
kwargs['public_key'],
passphrase=kwargs['public_key_passphrase']).replace('\n', '')
passphrase=kwargs['public_key_passphrase'])).replace('\n', '')
# Remove system entries in kwargs
# Including listen_in and preqreuired because they are not included
@ -1778,13 +1779,13 @@ def verify_crl(crl, cert):
crltext = _text_or_file(crl)
crltext = get_pem_entry(crltext, pem_type='X509 CRL')
crltempfile = tempfile.NamedTemporaryFile()
crltempfile.write(crltext)
crltempfile.write(salt.utils.stringutils.to_str(crltext))
crltempfile.flush()
certtext = _text_or_file(cert)
certtext = get_pem_entry(certtext, pem_type='CERTIFICATE')
certtempfile = tempfile.NamedTemporaryFile()
certtempfile.write(certtext)
certtempfile.write(salt.utils.stringutils.to_str(certtext))
certtempfile.flush()
cmd = ('openssl crl -noout -in {0} -CAfile {1}'.format(

View File

@ -1,4 +1,31 @@
# -*- coding: utf-8 -*-
'''
Display profiling data in a table format
========================================
Show profile data for returners that would normally show a highstate output.
salt MINION state.apply something --out=profile
Attempt to output the returns of state.sls and state.highstate as a table of
names, modules and durations that looks somewhat like the following::
name mod.fun duration (ms)
--------------------------------------------------------
I-fail-unless-stmt other.function -1.0000
old-minion-config grains.list_present 1.1200
salt-data group.present 48.3800
/etc/salt/minion file.managed 63.1450
To get the above appearance, use settings something like these::
out.table.separate_rows: False
out.table.justify: left
out.table.delim: ' '
out.table.prefix: ''
out.table.suffix: ''
'''
from __future__ import absolute_import, print_function, unicode_literals
import salt.output.table_out as table_out
@ -39,28 +66,7 @@ def _find_durations(data, name_max=60):
def output(data, **kwargs):
'''
Show profile data for returners that would normally show a highstate output.
salt globhere state.sls something --out=profile
Attempt to output the returns of state.sls and state.highstate as a table of
names, modules and durations that looks somewhat like the following:
name mod.fun duration (ms)
--------------------------------------------------------
I-fail-unless-stmt other.function -1.0000
old-minion-config grains.list_present 1.1200
salt-data group.present 48.3800
/etc/salt/minion file.managed 63.1450
To get the above appearance, use settings something like these:
out.table.separate_rows: False
out.table.justify: left
out.table.delim: ' '
out.table.prefix: ''
out.table.suffix: ''
Display the profiling data in a table format.
'''
rows = _find_durations(data)

View File

@ -7,10 +7,8 @@ The following Type: "Zabbix trapper" with "Type of information" Text items are r
.. code-block:: cfg
Key: salt.trap.info
Key: salt.trap.average
Key: salt.trap.warning
Key: salt.trap.high
Key: salt.trap.disaster
To use the Zabbix returner, append '--return zabbix' to the salt command. ex:
@ -21,15 +19,10 @@ To use the Zabbix returner, append '--return zabbix' to the salt command. ex:
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
# Import Salt libs
from salt.ext import six
import salt.utils.files
# Get logging started
log = logging.getLogger(__name__)
# Define the module's virtual name
@ -55,37 +48,24 @@ def zbx():
return False
def zabbix_send(key, host, output):
with salt.utils.files.fopen(zbx()['zabbix_config'], 'r') as file_handle:
for line in file_handle:
if "ServerActive" in line:
flag = "true"
server = line.rsplit('=')
server = server[1].rsplit(',')
for s in server:
cmd = zbx()['sender'] + " -z " + s.replace('\n', '') + " -s " + host + " -k " + key + " -o \"" + output +"\""
__salt__['cmd.shell'](cmd)
break
else:
flag = "false"
if flag == 'false':
cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\""
def zabbix_send(key, output):
cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -k " + key + " -o \"" + output +"\""
__salt__['cmd.shell'](cmd)
def returner(ret):
changes = False
errors = False
job_minion_id = ret['id']
host = job_minion_id
if type(ret['return']) is dict:
for state, item in six.iteritems(ret['return']):
if 'comment' in item and 'name' in item and not item['result']:
if 'comment' in item and 'name' in item and item['result'] is False:
errors = True
zabbix_send("salt.trap.high", host, 'SALT:\nname: {0}\ncomment: {1}'.format(item['name'], item['comment']))
if 'comment' in item and 'name' in item and item['changes']:
zabbix_send("salt.trap.high", 'SALT:\nname: {0}\ncomment: {1}'.format(item['name'], item['comment']))
elif 'comment' in item and 'name' in item and item['changes']:
changes = True
zabbix_send("salt.trap.warning", host, 'SALT:\nname: {0}\ncomment: {1}'.format(item['name'], item['comment']))
zabbix_send("salt.trap.warning", 'SALT:\nname: {0}\ncomment: {1}'.format(item['name'], item['comment']))
if not changes and not errors:
zabbix_send("salt.trap.info", host, 'SALT {0} OK'.format(job_minion_id))
zabbix_send("salt.trap.info", 'SALT {0} OK'.format(job_minion_id))

View File

@ -234,7 +234,7 @@ def index_template_absent(name):
def index_template_present(name, definition, check_definition=False):
'''
Ensure that the named index templat eis present.
Ensure that the named index template is present.
name
Name of the index to add
@ -248,7 +248,7 @@ def index_template_present(name, definition, check_definition=False):
.. code-block:: yaml
mytestindex2_template:
elasticsearch_index_template.present:
elasticsearch.index_template_present:
- definition:
template: logstash-*
order: 1

View File

@ -2503,6 +2503,7 @@ def managed(name,
ret, 'Defaults must be formed as a dict')
if not replace and os.path.exists(name):
ret_perms = {}
# Check and set the permissions if necessary
if salt.utils.platform.is_windows():
ret = __salt__['file.check_perms'](
@ -2514,10 +2515,19 @@ def managed(name,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
ret, _ = __salt__['file.check_perms'](
ret, ret_perms = __salt__['file.check_perms'](
name, ret, user, group, mode, attrs, follow_symlinks)
if __opts__['test']:
ret['comment'] = 'File {0} not updated'.format(name)
if isinstance(ret_perms, dict) and \
'lmode' in ret_perms and \
mode != ret_perms['lmode']:
ret['comment'] = ('File {0} will be updated with permissions '
'{1} from its current '
'state of {2}'.format(name,
mode,
ret_perms['lmode']))
else:
ret['comment'] = 'File {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = ('File {0} exists with proper permissions. '
'No changes made.'.format(name))

View File

@ -749,6 +749,12 @@ def latest(name,
ret,
'Failed to check remote refs: {0}'.format(_strip_exc(exc))
)
except NameError as exc:
if 'global name' in exc.message:
raise CommandExecutionError(
'Failed to check remote refs: You may need to install '
'GitPython or PyGit2')
raise
if 'HEAD' in all_remote_refs:
head_rev = all_remote_refs['HEAD']

View File

@ -3,7 +3,7 @@
Network ACL
===========
Manage the firewall configuration on the network device namaged through NAPALM.
Manage the firewall configuration on the network device managed through NAPALM.
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
@ -18,7 +18,13 @@ The firewall configuration is generated by Capirca_.
Dependencies
------------
Capirca: ``pip install -e git+git@github.com:google/capirca.git#egg=aclgen``
Capirca
~~~~~~~
To install Capirca, execute: ``pip install capirca``.
NAPALM
~~~~~~
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
@ -35,7 +41,10 @@ log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import aclgen
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:

View File

@ -86,9 +86,7 @@ def create_alert(name=None, api_key=None, reason=None, action_type="Create"):
if __opts__['test'] is True:
ret[
'comment'] = 'Test: {0} alert request will be processed ' \
'using the API Key="{1}".'.format(
action_type,
api_key)
'using the API Key="{1}".'.format(action_type, api_key)
# Return ``None`` when running with ``test=true``.
ret['result'] = None

View File

@ -284,6 +284,8 @@ def state(name,
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
if 'roster' in kwargs:
cmd_kw['roster'] = kwargs['roster']
cmd_kw['expect_minions'] = expect_minions
if highstate:
fun = 'state.highstate'

View File

@ -33,6 +33,7 @@ import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError
from salt._compat import ipaddress
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
@ -71,33 +72,38 @@ def _get_master_uri(master_ip,
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip or source_port:
if source_ip and source_port:
return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=source_ip, source_port=source_port,
master_ip=master_ip, master_port=master_port)
elif source_ip and not source_port:
return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=source_ip,
master_ip=master_ip, master_port=master_port)
elif not source_ip and source_port:
return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
source_port=source_port,
master_ip=master_ip, master_port=master_port)
from salt.utils.zeromq import ip_bracket
master_uri = 'tcp://{master_ip}:{master_port}'.format(
master_ip=ip_bracket(master_ip), master_port=master_port)
if source_ip or source_port:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
return 'tcp://{master_ip}:{master_port}'.format(
master_ip=master_ip, master_port=master_port)
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip and source_port:
master_uri = 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip), source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_ip and not source_port:
master_uri = 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=ip_bracket(source_ip),
master_ip=ip_bracket(master_ip), master_port=master_port)
elif source_port and not source_ip:
ip_any = '0.0.0.0' if ipaddress.ip_address(master_ip).version == 4 else ip_bracket('::')
master_uri = 'tcp://{ip_any}:{source_port};{master_ip}:{master_port}'.format(
ip_any=ip_any, source_port=source_port,
master_ip=ip_bracket(master_ip), master_port=master_port)
else:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
log.warning('Specific source IP / port for connecting to master returner port: configuraion ignored')
return master_uri
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):

View File

@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import sys
import copy
import types
import importlib
# Import salt libs
import salt.loader
@ -58,3 +59,13 @@ def cmp(x, y):
Return negative if x<y, zero if x==y, positive if x>y.
'''
return (x > y) - (x < y)
def reload(mod):
    '''
    Compatibility helper function to replace the ``reload`` builtin from
    Python 2.

    Args:
        mod (module): The already-imported module object to re-execute

    Returns:
        module: The reloaded module object
    '''
    try:
        return importlib.reload(mod)
    except AttributeError:
        # Python 2: importlib has no reload(). A bare ``reload(mod)`` here
        # would resolve to this very function (the module-level name shadows
        # the builtin) and recurse forever, so call the Python 2 builtin
        # explicitly.
        import __builtin__  # pylint: disable=import-error
        return __builtin__.reload(mod)

View File

@ -149,7 +149,7 @@ def nodegroup_comp(nodegroup, nodegroups, skip=None, first_call=True):
# No compound operators found in nodegroup definition. Check for
# group type specifiers
group_type_re = re.compile('^[A-Z]@')
regex_chars = ['(', '[', '{', '\\', '?''}])']
regex_chars = ['(', '[', '{', '\\', '?', '}', ']', ')']
if not [x for x in ret if '*' in x or group_type_re.match(x)]:
# No group type specifiers and no wildcards.
# Treat this as an expression.

View File

@ -8,7 +8,6 @@ Utilities for accessing storage container blobs on Azure
# Import python libs
from __future__ import absolute_import, unicode_literals
import logging
import inspect
# Import azure libs
HAS_LIBS = False
@ -19,7 +18,6 @@ except ImportError:
pass
# Import salt libs
from salt.ext import six
from salt.exceptions import SaltSystemExit
log = logging.getLogger(__name__)
@ -178,25 +176,13 @@ def object_to_dict(obj):
if isinstance(obj, list) or isinstance(obj, tuple):
ret = []
for item in obj:
#ret.append(obj.__dict__[item])
ret.append(object_to_dict(obj))
elif isinstance(obj, six.text_type):
ret = obj.encode('ascii', 'replace'),
elif isinstance(obj, six.string_types):
ret = obj
else:
ret.append(object_to_dict(item))
elif hasattr(obj, '__dict__'):
ret = {}
for item in obj.__dict__:
if item.startswith('_'):
continue
# This is ugly, but inspect.isclass() doesn't seem to work
try:
if inspect.isclass(obj) or 'class' in six.text_type(type(obj.__dict__.get(item))):
ret[item] = object_to_dict(obj.__dict__[item])
elif isinstance(obj.__dict__[item], six.text_type):
ret[item] = obj.__dict__[item].encode('ascii', 'replace')
else:
ret[item] = obj.__dict__[item]
except AttributeError:
ret[item] = obj.get(item)
ret[item] = object_to_dict(obj.__dict__[item])
else:
ret = obj
return ret

View File

@ -58,10 +58,10 @@ except (ImportError, OSError, AttributeError, TypeError):
def sanitize_host(host):
'''
Sanitize host string.
https://tools.ietf.org/html/rfc1123#section-2.1
'''
return ''.join([
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
])
RFC952_characters = ascii_letters + digits + ".-"
return "".join([c for c in host[0:255] if c in RFC952_characters])
def isportopen(host, port):
@ -137,7 +137,11 @@ def _generate_minion_id():
def first(self):
return self and self[0] or None
hosts = DistinctList().append(socket.getfqdn()).append(platform.node()).append(socket.gethostname())
hostname = socket.gethostname()
hosts = DistinctList().append(
salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname)))
).append(platform.node()).append(hostname)
if not hosts:
try:
for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET,
@ -1870,14 +1874,14 @@ def dns_check(addr, port, safe=False, ipv6=None):
if h[0] == socket.AF_INET6 and ipv6 is False:
continue
candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0])
candidate_addr = h[4][0]
if h[0] != socket.AF_INET6 or ipv6 is not None:
candidates.append(candidate_addr)
try:
s = socket.socket(h[0], socket.SOCK_STREAM)
s.connect((candidate_addr.strip('[]'), port))
s.connect((candidate_addr, port))
s.close()
resolved = candidate_addr
@ -1906,3 +1910,55 @@ def dns_check(addr, port, safe=False, ipv6=None):
raise SaltClientError()
raise SaltSystemExit(code=42, msg=err)
return resolved
def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is
    given, the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13

    Raises:
        ValueError: If the port is not an integer, a bracketed IPv6 literal
            carries trailing garbage, or the host fails hostname
            sanitization.
    """
    host, port = None, None  # default
    _s_ = host_port[:]
    if _s_[0] == "[":
        # Bracketed IPv6 literal, optionally followed by ":port"
        if "]" in host_port:
            host, _s_ = _s_.lstrip("[").rsplit("]", 1)
            host = ipaddress.IPv6Address(host)
            # BUGFIX: for a bare "[addr]" (no port) the remainder is empty;
            # indexing _s_[0] unguarded raised IndexError here.
            if _s_ and _s_[0] == ":":
                port = int(_s_.lstrip(":"))
            elif len(_s_) > 1:
                raise ValueError(
                    'found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        # Exactly one colon means "host:port"; zero or 2+ colons means a
        # bare hostname/IPv4 or an unbracketed IPv6 literal.
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.',
                          host_port, port)
                raise _e_
        else:
            host = _s_
    try:
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host)
            host = host_ip
    except ValueError:
        # Not an IP literal; validate it as a (RFC 952/1123-ish) hostname.
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))
    return host, port

View File

@ -333,7 +333,9 @@ def build_whitespace_split_regex(text):
lexer = shlex.shlex(text)
lexer.whitespace_split = True
lexer.commenters = ''
if '\'' in text:
if r"'\"" in text:
lexer.quotes = ''
elif '\'' in text:
lexer.quotes = '"'
elif '"' in text:
lexer.quotes = '\''

View File

@ -14,13 +14,11 @@ import re
import time
# Import salt libs
import salt.utils.compat
import salt.utils.data
from salt.utils.timeout import wait_for
import salt.ext.six as six
# Workaround for 'reload' builtin of py2.7
if six.PY3:
from importlib import reload # pylint: disable=no-name-in-module
log = logging.getLogger(__name__)
@ -140,7 +138,7 @@ def vb_get_manager():
'''
global _virtualboxManager
if _virtualboxManager is None and HAS_LIBS:
reload(vboxapi)
salt.utils.compat.reload(vboxapi)
_virtualboxManager = vboxapi.VirtualBoxManager(None, None)
return _virtualboxManager

View File

@ -686,7 +686,7 @@ class Terminal(object):
stdout = None
else:
if self.stream_stdout:
self.stream_stdout.write(salt.utils.data.encode(stdout))
self.stream_stdout.write(salt.utils.stringutils.to_str(stdout))
self.stream_stdout.flush()
if self.stdout_logger:

View File

@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-
r'''
A salt util for modifying the audit policies on the machine. This util is used
by the ``win_auditpol`` and ``win_lgpo`` modules.
Though this utility does not set group policy for auditing, it displays how all
auditing configuration is applied on the machine, either set directly or via
local or domain group policy.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.1
This util allows you to view and modify the audit settings as they are applied
on the machine. The audit settings are broken down into nine categories:
- Account Logon
- Account Management
- Detailed Tracking
- DS Access
- Logon/Logoff
- Object Access
- Policy Change
- Privilege Use
- System
The ``get_settings`` function will return the subcategories for all nine of
the above categories in one dictionary along with their auditing status.
To modify a setting you only need to specify the subcategory name and the value
you wish to set. Valid settings are:
- No Auditing
- Success
- Failure
- Success and Failure
Usage:
.. code-block:: python
import salt.utils.win_lgpo_auditpol
# Get current state of all audit settings
salt.utils.win_lgpo_auditpol.get_settings()
# Get the current state of all audit settings in the "Account Logon"
# category
salt.utils.win_lgpo_auditpol.get_settings(category="Account Logon")
# Get current state of the "Credential Validation" setting
salt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')
# Set the state of the "Credential Validation" setting to Success and
# Failure
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='Success and Failure')
# Set the state of the "Credential Validation" setting to No Auditing
salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
value='No Auditing')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
import re
import tempfile
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd Party libs
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
# Virtual module name used by the salt loader
__virtualname__ = 'auditpol'

# The nine top-level audit categories recognized by auditpol.exe; used to
# validate the ``category`` argument of get_settings()
categories = ['Account Logon',
              'Account Management',
              'Detailed Tracking',
              'DS Access',
              'Logon/Logoff',
              'Object Access',
              'Policy Change',
              'Privilege Use',
              'System']

# Map of friendly setting values to the auditpol command-line switches that
# implement them; used by set_setting()
settings = {'No Auditing': '/success:disable /failure:disable',
            'Success': '/success:enable /failure:disable',
            'Failure': '/success:disable /failure:enable',
            'Success and Failure': '/success:enable /failure:enable'}
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
    '''
    Only load if on a Windows system
    '''
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, 'This utility only available on Windows'
def _auditpol_cmd(cmd):
    '''
    Helper function for running the auditpol command

    Args:
        cmd (str): the auditpol command to run

    Returns:
        list: A list containing each line of the return (splitlines)

    Raises:
        CommandExecutionError: If the command encounters an error
    '''
    ret = salt.modules.cmdmod.run_all(cmd='auditpol {0}'.format(cmd),
                                      python_shell=True)
    if ret['retcode'] == 0:
        return ret['stdout'].splitlines()

    msg = 'Error executing auditpol command: {0}\n'.format(cmd)
    # BUGFIX: ret['stdout'] is a single string; the original
    # ``'\n'.join(ret['stdout'])`` joined its *characters* with newlines,
    # producing one character per line in the error message. Join the
    # lines instead so the command output is reproduced verbatim.
    msg += '\n'.join(ret['stdout'].splitlines())
    raise CommandExecutionError(msg)
def get_settings(category='All'):
    '''
    Get the current configuration for all audit settings specified in the
    category

    Args:
        category (str):
            One of the nine categories to return. Can also be ``All`` to return
            the settings for all categories. Valid options are:

            - Account Logon
            - Account Management
            - Detailed Tracking
            - DS Access
            - Logon/Logoff
            - Object Access
            - Policy Change
            - Privilege Use
            - System
            - All

            Default value is ``All``

    Returns:
        dict: A dictionary containing all subcategories for the specified
            category along with their current configuration

    Raises:
        KeyError: On invalid category
        CommandExecutionError: If an error is encountered retrieving the settings

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Get current state of all audit settings
        salt.utils.win_lgpo_auditpol.get_settings()

        # Get the current state of all audit settings in the "Account Logon"
        # category
        salt.utils.win_lgpo_auditpol.get_settings(category="Account Logon")
    '''
    # Parameter validation (category match is case-insensitive; '*' is the
    # auditpol wildcard for "all categories")
    if category.lower() in ['all', '*']:
        category = '*'
    elif category.lower() not in [x.lower() for x in categories]:
        raise KeyError('Invalid category: "{0}"'.format(category))
    cmd = '/get /category:"{0}"'.format(category)
    results = _auditpol_cmd(cmd)
    ret = {}
    # Skip the first three lines of output (banner/blank/header row); the
    # remaining lines hold "<subcategory>  <setting>" columns separated by
    # runs of two or more spaces.
    for line in results[3:]:
        if '  ' in line.strip():
            # Split the line on 2+ spaces, then pair the fields up: zipping
            # two references to the *same* iterator yields consecutive
            # (name, value) pairs for the dict update.
            ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line.strip()))]*2))))
    return ret
def get_setting(name):
    '''
    Get the current configuration for the named audit setting

    Args:
        name (str): The name of the setting to retrieve

    Returns:
        str: The current configuration for the named setting

    Raises:
        KeyError: On invalid setting name
        CommandExecutionError: If an error is encountered retrieving the settings

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Get current state of the "Credential Validation" setting
        salt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')
    '''
    # Case-insensitive lookup against the live machine settings
    target = name.lower()
    for key, value in get_settings(category='All').items():
        if key.lower() == target:
            return value
    raise KeyError('Invalid name: {0}'.format(name))
def _get_valid_names():
    '''
    Return the lowercased subcategory names known to this machine, building
    the list once and caching it in ``__context__``.
    '''
    cache_key = 'auditpol.valid_names'
    if cache_key not in __context__:
        __context__[cache_key] = [
            name.lower() for name in get_settings(category='All')
        ]
    return __context__[cache_key]
def set_setting(name, value):
    '''
    Set the configuration for the named audit setting

    Args:
        name (str):
            The name of the setting to configure

        value (str):
            The configuration for the named value. Valid options are:

            - No Auditing
            - Success
            - Failure
            - Success and Failure

    Returns:
        bool: True if successful

    Raises:
        KeyError: On invalid ``name`` or ``value``
        CommandExecutionError: If an error is encountered modifying the setting

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        # Set the state of the "Credential Validation" setting to Success and
        # Failure
        salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
                                                 value='Success and Failure')

        # Set the state of the "Credential Validation" setting to No Auditing
        salt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',
                                                 value='No Auditing')
    '''
    # Input validation. Valid names are discovered from (and cached for)
    # the machine itself, since subcategories vary across Windows versions.
    if name.lower() not in _get_valid_names():
        raise KeyError('Invalid name: {0}'.format(name))
    # Map the friendly value (case-insensitive) to its auditpol switches
    for setting in settings:
        if value.lower() == setting.lower():
            cmd = '/set /subcategory:"{0}" {1}'.format(name, settings[setting])
            break
    else:
        # for/else: loop completed without a match, so the value is invalid
        raise KeyError('Invalid setting value: {0}'.format(value))
    _auditpol_cmd(cmd)
    return True
def get_auditpol_dump():
    '''
    Gets the contents of an auditpol /backup. Used by the LGPO module to get
    fieldnames and GUIDs for Advanced Audit policies.

    Returns:
        list: A list of lines from the backup file

    Usage:

    .. code-block:: python

        import salt.utils.win_lgpo_auditpol

        dump = salt.utils.win_lgpo_auditpol.get_auditpol_dump()
    '''
    import os  # local import: the module header does not import os

    # Just get a temporary file name
    # NamedTemporaryFile will delete the file it creates by default on Windows
    with tempfile.NamedTemporaryFile(suffix='.csv') as tmp_file:
        csv_file = tmp_file.name

    cmd = '/backup /file:{0}'.format(csv_file)
    _auditpol_cmd(cmd)

    try:
        with salt.utils.files.fopen(csv_file) as fp:
            return fp.readlines()
    finally:
        # auditpol re-created csv_file after NamedTemporaryFile deleted it,
        # so it is no longer managed by the context manager. The original
        # implementation leaked this backup file on disk; remove it here.
        try:
            os.remove(csv_file)
        except OSError:
            # Best effort cleanup only; never mask the read result/error
            pass

View File

@ -81,6 +81,19 @@ from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
__virtualname__ = 'netsh'
# Although utils are often directly imported, it is also possible to use the
# loader.
def __virtual__():
'''
Only load if on a Windows system
'''
if not salt.utils.platform.is_windows():
return False, 'This utility only available on Windows'
return __virtualname__
def _netsh_file(content):

View File

@ -8,6 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import tornado.ioloop
from salt.exceptions import SaltSystemExit
from salt._compat import ipaddress
log = logging.getLogger(__name__)
@ -82,6 +83,5 @@ def ip_bracket(addr):
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
'''
if addr and ':' in addr and not addr.startswith('['):
return '[{0}]'.format(addr)
return addr
addr = ipaddress.ip_address(addr)
return ('[{}]' if addr.version == 6 else '{}').format(addr)

View File

@ -874,7 +874,10 @@ class SaltDistribution(distutils.dist.Distribution):
self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
self.salt_version = __version__ # pylint: disable=undefined-variable
self.description = 'Portable, distributed, remote execution and configuration management system'
with open(SALT_LONG_DESCRIPTION_FILE) as f:
kwargs = {}
if IS_PY3:
kwargs['encoding'] = 'utf-8'
with open(SALT_LONG_DESCRIPTION_FILE, **kwargs) as f:
self.long_description = f.read()
self.long_description_content_type = 'text/x-rst'
self.author = 'Thomas S Hatch'

View File

@ -0,0 +1,4 @@
/tmp/vimrc:
file.append:
- sources:
- salt://test/files/vimrc.stub

View File

@ -0,0 +1,8 @@
set number
syntax on
set paste
set ruler
if has("autocmd")
au BufReadPost * if line("'\"") > 1 && line("'\"") <= line("$") | exe "normal! g'\"" | endif
endif

View File

@ -6,45 +6,51 @@ Tests for minion blackout
# Import Python libs
from __future__ import absolute_import
import os
from time import sleep
import time
import textwrap
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import PILLAR_DIR
from tests.support.helpers import destructiveTest, flaky
from tests.support.helpers import flaky
# Import Salt libs
import salt.utils.files
BLACKOUT_PILLAR = os.path.join(PILLAR_DIR, 'base', 'blackout.sls')
@destructiveTest
class MinionBlackoutTestCase(ModuleCase):
'''
Test minion blackout functionality
'''
@classmethod
def setUpClass(cls):
cls.blackout_pillar = os.path.join(PILLAR_DIR, 'base', 'blackout.sls')
def tearDown(self):
self.end_blackout(sleep=False)
# Be sure to also refresh the sub_minion pillar
self.run_function('saltutil.refresh_pillar', minion_tgt='sub_minion')
time.sleep(10) # wait for minion to exit blackout mode
def begin_blackout(self, blackout_data='minion_blackout: True'):
'''
setup minion blackout mode
'''
with salt.utils.files.fopen(BLACKOUT_PILLAR, 'w') as wfh:
with salt.utils.files.fopen(self.blackout_pillar, 'w') as wfh:
wfh.write(blackout_data)
self.run_function('saltutil.refresh_pillar')
sleep(10) # wait for minion to enter blackout mode
time.sleep(10) # wait for minion to enter blackout mode
def end_blackout(self):
def end_blackout(self, sleep=True):
'''
takedown minion blackout mode
'''
with salt.utils.files.fopen(BLACKOUT_PILLAR, 'w') as blackout_pillar:
blackout_pillar.write(textwrap.dedent('''\
minion_blackout: False
'''))
with salt.utils.files.fopen(self.blackout_pillar, 'w') as wfh:
wfh.write('minion_blackout: False\n')
self.run_function('saltutil.refresh_pillar')
sleep(10) # wait for minion to exit blackout mode
if sleep:
time.sleep(10) # wait for minion to exit blackout mode
@flaky
def test_blackout(self):
@ -66,22 +72,19 @@ class MinionBlackoutTestCase(ModuleCase):
'''
Test that minion blackout whitelist works
'''
try:
self.begin_blackout(textwrap.dedent('''\
minion_blackout: True
minion_blackout_whitelist:
- test.ping
- test.fib
'''))
self.begin_blackout(textwrap.dedent('''\
minion_blackout: True
minion_blackout_whitelist:
- test.ping
- test.fib
'''))
ping_ret = self.run_function('test.ping')
self.assertEqual(ping_ret, True)
ping_ret = self.run_function('test.ping')
self.assertEqual(ping_ret, True)
fib_ret = self.run_function('test.fib', [7])
self.assertTrue(isinstance(fib_ret, list))
self.assertEqual(fib_ret[0], 13)
finally:
self.end_blackout()
fib_ret = self.run_function('test.fib', [7])
self.assertTrue(isinstance(fib_ret, list))
self.assertEqual(fib_ret[0], 13)
@flaky
def test_blackout_nonwhitelist(self):
@ -89,18 +92,15 @@ class MinionBlackoutTestCase(ModuleCase):
Test that minion refuses to run non-whitelisted functions during
blackout whitelist
'''
try:
self.begin_blackout(textwrap.dedent('''\
minion_blackout: True
minion_blackout_whitelist:
- test.ping
- test.fib
'''))
self.begin_blackout(textwrap.dedent('''\
minion_blackout: True
minion_blackout_whitelist:
- test.ping
- test.fib
'''))
state_ret = self.run_function('state.apply')
self.assertIn('Minion in blackout mode.', state_ret)
state_ret = self.run_function('state.apply')
self.assertIn('Minion in blackout mode.', state_ret)
cloud_ret = self.run_function('cloud.query', ['list_nodes_full'])
self.assertIn('Minion in blackout mode.', cloud_ret)
finally:
self.end_blackout()
cloud_ret = self.run_function('cloud.query', ['list_nodes_full'])
self.assertIn('Minion in blackout mode.', cloud_ret)

View File

@ -2550,6 +2550,22 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
contents = fp.read()
assert contents == expected
def test_managed_file_issue_51208(self):
'''
Test to ensure we can handle a file with escaped double-quotes
'''
name = os.path.join(TMP, 'issue_51208.txt')
ret = self.run_state(
'file.managed', name=name, source='salt://issue-51208/vimrc.stub'
)
src = os.path.join(BASE_FILES, 'issue-51208', 'vimrc.stub')
with salt.utils.files.fopen(src, 'r') as fp_:
master_data = fp_.read()
with salt.utils.files.fopen(name, 'r') as fp_:
minion_data = fp_.read()
self.assertEqual(master_data, minion_data)
self.assertSaltTrueReturn(ret)
class BlockreplaceTest(ModuleCase, SaltReturnAssertsMixin):
marker_start = '# start'

View File

@ -67,7 +67,7 @@ class LDAPAuthTestCase(TestCase):
'''
self.opts['auth.ldap.freeipa'] = True
with patch.dict(salt.auth.ldap.__opts__, self.opts):
with patch('salt.auth.ldap.auth', return_value=Bind):
with patch('salt.auth.ldap._bind', return_value=Bind):
self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))
def test_groups(self):
@ -75,7 +75,7 @@ class LDAPAuthTestCase(TestCase):
test groups in ldap
'''
with patch.dict(salt.auth.ldap.__opts__, self.opts):
with patch('salt.auth.ldap.auth', return_value=Bind):
with patch('salt.auth.ldap._bind', return_value=Bind):
self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))
def test_groups_activedirectory(self):
@ -84,7 +84,7 @@ class LDAPAuthTestCase(TestCase):
'''
self.opts['auth.ldap.activedirectory'] = True
with patch.dict(salt.auth.ldap.__opts__, self.opts):
with patch('salt.auth.ldap.auth', return_value=Bind):
with patch('salt.auth.ldap._bind', return_value=Bind):
self.assertIn('saltusers', salt.auth.ldap.groups('saltuser', password='password'))
def test_auth_nopass(self):

View File

@ -153,6 +153,17 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix
ret = roots.file_list_emptydirs({'saltenv': 'base'})
self.assertIn('empty_dir', ret)
def test_file_list_with_slash(self):
opts = {'file_roots': copy.copy(self.opts['file_roots'])}
opts['file_roots']['foo/bar'] = opts['file_roots']['base']
load = {
'saltenv': 'foo/bar',
}
with patch.dict(roots.__opts__, opts):
ret = roots.file_list(load)
self.assertIn('testfile', ret)
self.assertIn(UNICODE_FILENAME, ret)
def test_dir_list(self):
ret = roots.dir_list({'saltenv': 'base'})
self.assertIn('empty_dir', ret)

View File

@ -19,12 +19,14 @@ from tests.support.mock import (
# Import Salt Libs
import salt.modules.debian_ip as debian_ip
import salt.utils.platform
# Import third party libs
import jinja2.exceptions
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.platform.is_windows(), 'Do not run these tests on Windows')
class DebianIpTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.debian_ip
@ -474,14 +476,17 @@ class DebianIpTestCase(TestCase, LoaderModuleMockMixin):
patch('salt.modules.debian_ip._parse_hostname',
MagicMock(return_value='SaltStack')), \
patch('salt.modules.debian_ip._parse_domainname',
MagicMock(return_value='saltstack.com')):
MagicMock(return_value='saltstack.com')), \
patch('salt.modules.debian_ip._parse_searchdomain',
MagicMock(return_value='test.saltstack.com')):
mock_avai = MagicMock(return_value=True)
with patch.dict(debian_ip.__salt__, {'service.available': mock_avai,
'service.status': mock_avai}):
self.assertEqual(debian_ip.get_network_settings(),
['NETWORKING=yes\n',
'HOSTNAME=SaltStack\n',
'DOMAIN=saltstack.com\n'])
[u'NETWORKING=yes\n',
u'HOSTNAME=SaltStack\n',
u'DOMAIN=saltstack.com\n',
u'SEARCH=test.saltstack.com\n'])
mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound
('error'))

View File

@ -24,6 +24,52 @@ try:
except Exception:
NO_MYSQL = True
__all_privileges__ = [
'ALTER',
'ALTER ROUTINE',
'BACKUP_ADMIN',
'BINLOG_ADMIN',
'CONNECTION_ADMIN',
'CREATE',
'CREATE ROLE',
'CREATE ROUTINE',
'CREATE TABLESPACE',
'CREATE TEMPORARY TABLES',
'CREATE USER',
'CREATE VIEW',
'DELETE',
'DROP',
'DROP ROLE',
'ENCRYPTION_KEY_ADMIN',
'EVENT',
'EXECUTE',
'FILE',
'GROUP_REPLICATION_ADMIN',
'INDEX',
'INSERT',
'LOCK TABLES',
'PERSIST_RO_VARIABLES_ADMIN',
'PROCESS',
'REFERENCES',
'RELOAD',
'REPLICATION CLIENT',
'REPLICATION SLAVE',
'REPLICATION_SLAVE_ADMIN',
'RESOURCE_GROUP_ADMIN',
'RESOURCE_GROUP_USER',
'ROLE_ADMIN',
'SELECT',
'SET_USER_ID',
'SHOW DATABASES',
'SHOW VIEW',
'SHUTDOWN',
'SUPER',
'SYSTEM_VARIABLES_ADMIN',
'TRIGGER',
'UPDATE',
'XA_RECOVER_ADMIN'
]
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(NO_MYSQL, 'Install MySQL bindings before running MySQL unit tests.')
@ -256,15 +302,16 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):
"GRANT SELECT ON `testdb`.`testtabletwo` TO 'testuer'@'%'",
"GRANT SELECT ON `testdb`.`testtablethree` TO 'testuser'@'%'",
]
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'SELECT, INSERT, UPDATE',
'testdb.testtableone',
'testuser',
'%'
)
self.assertEqual(ret, True)
with patch.object(mysql, 'version', return_value='5.6.41'):
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'SELECT, INSERT, UPDATE',
'testdb.testtableone',
'testuser',
'%'
)
self.assertEqual(ret, True)
def test_grant_exists_false(self):
'''
@ -275,15 +322,47 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):
"GRANT SELECT, INSERT, UPDATE ON `testdb`.`testtableone` TO 'testuser'@'%'",
"GRANT SELECT ON `testdb`.`testtablethree` TO 'testuser'@'%'",
]
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'SELECT',
'testdb.testtabletwo',
'testuser',
'%'
)
self.assertEqual(ret, False)
with patch.object(mysql, 'version', return_value='5.6.41'):
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'SELECT',
'testdb.testtabletwo',
'testuser',
'%'
)
self.assertEqual(ret, False)
def test_grant_exists_all(self):
'''
Test to ensure that we can find a grant that exists
'''
mock_grants = [
"GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON testdb.testtableone TO `testuser`@`%`",
"GRANT BACKUP_ADMIN,BINLOG_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,GROUP_REPLICATION_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SET_USER_ID,SYSTEM_VARIABLES_ADMIN,XA_RECOVER_ADMIN ON testdb.testtableone TO `testuser`@`%`"
]
with patch.object(mysql, 'version', return_value='8.0.10'):
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'ALL',
'testdb.testtableone',
'testuser',
'%'
)
self.assertEqual(ret, True)
mock_grants = ["GRANT ALL PRIVILEGES ON testdb.testtableone TO `testuser`@`%`"]
with patch.object(mysql, 'version', return_value='5.6.41'):
mock = MagicMock(return_value=mock_grants)
with patch.object(mysql, 'user_grants', return_value=mock_grants) as mock_user_grants:
ret = mysql.grant_exists(
'ALL PRIVILEGES',
'testdb.testtableone',
'testuser',
'%'
)
self.assertEqual(ret, True)
@skipIf(True, 'TODO: Mock up user_grants()')
def test_grant_add(self):

View File

@ -758,6 +758,28 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
(name, user=user, group=group),
ret)
if salt.utils.platform.is_windows():
mock_ret = MagicMock(return_value=ret)
comt = ('File {0} not updated'.format(name))
else:
perms = {'luser': user,
'lmode': '0644',
'lgroup': group}
mock_ret = MagicMock(return_value=(ret, perms))
comt = ('File {0} will be updated with '
'permissions 0400 from its current '
'state of 0644'.format(name))
with patch.dict(filestate.__salt__,
{'file.check_perms': mock_ret}):
with patch.object(os.path, 'exists', mock_t):
with patch.dict(filestate.__opts__, {'test': True}):
ret.update({'comment': comt})
self.assertDictEqual(filestate.managed
(name, user=user,
group=group,
mode=400), ret)
# 'directory' function tests: 1
def test_directory(self):

View File

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Erik Johnson <erik@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import
import logging
import os
# Import Salt Testing Libs
from tests.support.helpers import with_tempdir
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
DEFAULT,
NO_MOCK,
NO_MOCK_REASON,
)
# Import Salt Libs
import salt.states.git as git_state # Don't potentially shadow GitPython
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GitTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.git
'''
def setup_loader_modules(self):
return {
git_state: {
'__env__': 'base',
'__opts__': {'test': False},
'__salt__': {},
}
}
@with_tempdir()
def test_latest_no_diff_for_bare_repo(self, target):
'''
This test ensures that we don't attempt to diff when cloning a repo
using either bare=True or mirror=True.
'''
name = 'https://foo.com/bar/baz.git'
gitdir = os.path.join(target, 'refs')
isdir_mock = MagicMock(
side_effect=lambda path: DEFAULT if path != gitdir else True)
branches = ['foo', 'bar', 'baz']
tags = ['v1.1.0', 'v.1.1.1', 'v1.2.0']
local_head = 'b9ef06ab6b7524eb7c27d740dbbd5109c6d75ee4'
remote_head = 'eef672c1ec9b8e613905dbcd22a4612e31162807'
git_diff = Mock()
dunder_salt = {
'git.current_branch': MagicMock(return_value=branches[0]),
'git.diff': git_diff,
'git.fetch': MagicMock(return_value={}),
'git.is_worktree': MagicMock(return_value=False),
'git.list_branches': MagicMock(return_value=branches),
'git.list_tags': MagicMock(return_value=tags),
'git.remote_refs': MagicMock(return_value={'HEAD': remote_head}),
'git.remotes': MagicMock(return_value={
'origin': {'fetch': name, 'push': name},
}),
'git.rev_parse': MagicMock(side_effect=git_state.CommandExecutionError()),
'git.revision': MagicMock(return_value=local_head),
'git.version': MagicMock(return_value='1.8.3.1'),
}
with patch('os.path.isdir', isdir_mock), \
patch.dict(git_state.__salt__, dunder_salt):
result = git_state.latest(
name=name,
target=target,
mirror=True, # mirror=True implies bare=True
)
assert result['result'] is True, result
git_diff.assert_not_called()

View File

@ -282,6 +282,24 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(saltmod.__salt__, {'saltutil.wheel': wheel_mock}):
self.assertDictEqual(saltmod.wheel(name), ret)
def test_state_ssh(self):
'''
Test saltmod passes roster to saltutil.cmd
'''
origcmd = saltmod.__salt__['saltutil.cmd']
cmd_kwargs = {}
cmd_args = []
def cmd_mock(*args, **kwargs):
cmd_args.extend(args)
cmd_kwargs.update(kwargs)
return origcmd(*args, **kwargs)
with patch.dict(saltmod.__salt__, {'saltutil.cmd': cmd_mock}):
ret = saltmod.state('state.sls', tgt='*', ssh=True, highstate=True, roster='my_roster')
assert 'roster' in cmd_kwargs
assert cmd_kwargs['roster'] == 'my_roster'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StatemodTests(TestCase, LoaderModuleMockMixin):

View File

@ -240,6 +240,7 @@ class UserTestCase(TestCase, LoaderModuleMockMixin):
'shadow.default_hash': shadow_hash,
'file.group_to_gid': MagicMock(side_effect=['foo']),
'file.gid_to_group': MagicMock(side_effect=[5000])}
def mock_exists(*args):
return True

View File

@ -8,7 +8,7 @@ import shutil
# salt testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import(
from tests.support.mock import (
patch,
mock_open,
NO_MOCK,

View File

@ -305,17 +305,13 @@ class LazyLoaderSingleItem(TestCase):
'''
Checks that a KeyError is raised when the function key does not contain a '.'
'''
key = 'testing_no_dot'
expected = "The key '{0}' should contain a '.'".format(key)
with self.assertRaises(KeyError) as err:
inspect.isfunction(self.loader['testing_no_dot'])
if six.PY2:
self.assertEqual(err.exception[0],
'The key \'%s\' should contain a \'.\'')
else:
self.assertEqual(
six.text_type(err.exception),
six.text_type(("The key '%s' should contain a '.'", 'testing_no_dot'))
)
result = err.exception.args[0]
assert result == expected, result
module_template = '''

View File

@ -317,11 +317,15 @@ class ZMQConfigTest(TestCase):
'''
test _get_master_uri method
'''
m_ip = '127.0.0.1'
m_port = 4505
s_ip = '111.1.0.1'
s_port = 4058
m_ip6 = '1234:5678::9abc'
s_ip6 = '1234:5678::1:9abc'
with patch('salt.transport.zeromq.LIBZMQ_VERSION_INFO', (4, 1, 6)), \
patch('salt.transport.zeromq.ZMQ_VERSION_INFO', (16, 0, 1)):
# pass in both source_ip and source_port
@ -330,15 +334,27 @@ class ZMQConfigTest(TestCase):
source_ip=s_ip,
source_port=s_port) == 'tcp://{0}:{1};{2}:{3}'.format(s_ip, s_port, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
master_port=m_port,
source_ip=s_ip6,
source_port=s_port) == 'tcp://[{0}]:{1};[{2}]:{3}'.format(s_ip6, s_port, m_ip6, m_port)
# source ip and source_port empty
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
master_port=m_port) == 'tcp://{0}:{1}'.format(m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
master_port=m_port) == 'tcp://[{0}]:{1}'.format(m_ip6, m_port)
# pass in only source_ip
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
master_port=m_port,
source_ip=s_ip) == 'tcp://{0}:0;{1}:{2}'.format(s_ip, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
master_port=m_port,
source_ip=s_ip6) == 'tcp://[{0}]:0;[{1}]:{2}'.format(s_ip6, m_ip6, m_port)
# pass in only source_port
assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
master_port=m_port,

View File

@ -18,6 +18,7 @@ from tests.support.mock import (
# Import salt libs
import salt.utils.network as network
from salt._compat import ipaddress
log = logging.getLogger(__name__)
@ -202,6 +203,35 @@ class NetworkTestCase(TestCase):
self.assertFalse(network.is_ipv6('10.0.1.2'))
self.assertFalse(network.is_ipv6('2001.0db8.85a3.0000.0000.8a2e.0370.7334'))
def test_parse_host_port(self):
    '''
    network.parse_host_port() must split well-formed host/port strings
    (IPv4, IPv6, bracketed IPv6) into (ip_address, port) and raise
    ValueError on malformed input.
    '''
    addr = ipaddress.ip_address
    # input string -> expected (host, port); port is None when the
    # input carries no port component.
    valid_cases = {
        '10.10.0.3': (addr('10.10.0.3'), None),
        '10.10.0.3:1234': (addr('10.10.0.3'), 1234),
        '2001:0db8:85a3::8a2e:0370:7334': (addr('2001:0db8:85a3::8a2e:0370:7334'), None),
        '[2001:0db8:85a3::8a2e:0370:7334]:1234': (addr('2001:0db8:85a3::8a2e:0370:7334'), 1234),
        '2001:0db8:85a3::7334': (addr('2001:0db8:85a3::7334'), None),
        '[2001:0db8:85a3::7334]:1234': (addr('2001:0db8:85a3::7334'), 1234)
    }
    invalid_cases = [
        '10.10.0.3/24',
        '10.10.0.3::1234',
        '2001:0db8:0370:7334',
        '2001:0db8:0370::7334]:1234',
        '2001:0db8:0370:0:a:b:c:d:1234'
    ]
    for raw, expected in valid_cases.items():
        parsed = network.parse_host_port(raw)
        self.assertEqual(parsed, expected)
    for raw in invalid_cases:
        try:
            self.assertRaises(ValueError, network.parse_host_port, raw)
        except AssertionError as _e_:
            # Log which input slipped through before re-raising, so a
            # failure pinpoints the offending value.
            log.error('bad host_port value: "%s" failed to trigger ValueError exception', raw)
            raise _e_
def test_is_subnet(self):
for subnet_data in (IPV4_SUBNETS, IPV6_SUBNETS):
for item in subnet_data[True]:
@ -534,3 +564,15 @@ class NetworkTestCase(TestCase):
self.assertRaises(ValueError, network.mac_str_to_bytes, 'a0:b0:c0:d0:e0:fg')
self.assertEqual(b'\x10\x08\x06\x04\x02\x00', network.mac_str_to_bytes('100806040200'))
self.assertEqual(b'\xf8\xe7\xd6\xc5\xb4\xa3', network.mac_str_to_bytes('f8e7d6c5b4a3'))
def test_generate_minion_id_with_long_hostname(self):
    '''
    Regression check for https://github.com/saltstack/salt/issues/51160:
    generate_minion_id() must still yield a non-empty id when the
    reported hostname exceeds 63 characters.
    '''
    overlong_hostname = 'localhost-abcdefghijklmnopqrstuvwxyz-abcdefghijklmnopqrstuvwxyz'
    gethostname_mock = MagicMock(return_value=overlong_hostname)
    with patch('socket.gethostname', gethostname_mock):
        # socket.getfqdn used to raise when handed unicode here
        minion_id = network.generate_minion_id()
    assert minion_id != '', minion_id

View File

@ -17,6 +17,7 @@ from tests.support.unit import TestCase, skipIf
from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON
# Import Salt libs
import salt.utils.compat
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandNotFoundError
@ -125,7 +126,7 @@ class PathJoinTestCase(TestCase):
platform.system = lambda: "windows"
for module in (ntpath, os, os.path, tempfile):
reload(module)
salt.utils.compat.reload(module)
def __unpatch_path(self):
del sys.modules['nt']
@ -133,7 +134,7 @@ class PathJoinTestCase(TestCase):
platform.system = self.PLATFORM_FUNC
for module in (posixpath, os, os.path, tempfile, platform):
reload(module)
salt.utils.compat.reload(module)
@skipIf(NO_MOCK, NO_MOCK_REASON)

View File

@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
import random
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
from tests.support.unit import TestCase, skipIf
# Import Salt Libs
import salt.modules.cmdmod
import salt.utils.platform
import salt.utils.win_lgpo_auditpol as win_lgpo_auditpol
# The full set of audit states auditpol.exe can report/accept for any
# subcategory; get_setting() results are asserted to be one of these.
settings = ['No Auditing', 'Success', 'Failure', 'Success and Failure']
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not salt.utils.platform.is_windows(), 'System is not Windows')
class WinLgpoAuditpolTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Tests for salt.utils.win_lgpo_auditpol (Windows-only: most cases
    shell out to auditpol.exe on the test host).
    '''

    def setup_loader_modules(self):
        # Hand the util module a clean __context__ and the real
        # cmd.run_all so it can actually invoke auditpol.exe.
        return {
            win_lgpo_auditpol: {
                '__context__': {},
                '__salt__': {'cmd.run_all': salt.modules.cmdmod.run_all},
            },
        }

    def test_get_settings(self):
        # Every valid subcategory name must appear in the full dump.
        ret = win_lgpo_auditpol.get_settings(category='All')
        lowered_keys = [key.lower() for key in ret]
        for name in win_lgpo_auditpol._get_valid_names():
            self.assertIn(name, lowered_keys)

    def test_get_settings_invalid_category(self):
        # Unknown categories are rejected with KeyError.
        with self.assertRaises(KeyError):
            win_lgpo_auditpol.get_settings(category='Fake Category')

    def test_get_setting(self):
        # Each subcategory reports one of the known audit states.
        for name in win_lgpo_auditpol._get_valid_names():
            self.assertIn(win_lgpo_auditpol.get_setting(name), settings)

    def test_get_setting_invalid_name(self):
        with self.assertRaises(KeyError):
            win_lgpo_auditpol.get_setting(name='Fake Name')

    def test_set_setting(self):
        # set_setting() must build the exact auditpol command line for
        # each (subcategory, value) pair; cmd.run_all is mocked so no
        # real policy change happens.
        subcategories = ['Credential Validation', 'IPsec Driver', 'File System', 'SAM']
        run_all_mock = MagicMock(return_value={'retcode': 0, 'stdout': 'Success'})
        with patch.object(salt.modules.cmdmod, 'run_all', run_all_mock):
            with patch.object(win_lgpo_auditpol, '_get_valid_names',
                              return_value=[item.lower() for item in subcategories]):
                for name in subcategories:
                    value = random.choice(settings)
                    win_lgpo_auditpol.set_setting(name=name, value=value)
                    expected_cmd = 'auditpol /set /subcategory:"{0}" {1}'.format(
                        name, win_lgpo_auditpol.settings[value])
                    run_all_mock.assert_called_once_with(cmd=expected_cmd,
                                                         python_shell=True)
                    run_all_mock.reset_mock()

    def test_set_setting_invalid_setting(self):
        subcategories = ['Credential Validation', 'IPsec Driver', 'File System']
        with patch.object(win_lgpo_auditpol, '_get_valid_names',
                          return_value=[item.lower() for item in subcategories]):
            with self.assertRaises(KeyError):
                win_lgpo_auditpol.set_setting(name='Fake Name', value='No Auditing')

    def test_set_setting_invalid_value(self):
        subcategories = ['Credential Validation', 'IPsec Driver', 'File System']
        with patch.object(win_lgpo_auditpol, '_get_valid_names',
                          return_value=[item.lower() for item in subcategories]):
            with self.assertRaises(KeyError):
                win_lgpo_auditpol.set_setting(name='Credential Validation',
                                              value='Fake Value')

    def test_get_auditpol_dump(self):
        # The CSV dump must mention every valid subcategory name.
        dump = win_lgpo_auditpol.get_auditpol_dump()
        for name in win_lgpo_auditpol._get_valid_names():
            self.assertTrue(
                any(name.lower() in line.lower() for line in dump))

View File

@ -50,6 +50,7 @@ class ValidateNetTestCase(TestCase):
Test IPv6 address validation
'''
true_addrs = [
'::',
'::1',
'::1/32',
'::1/32',
@ -62,6 +63,8 @@ class ValidateNetTestCase(TestCase):
'::1/0',
'::1/32d',
'::1/129',
'2a03:4000:c:10aa:1017:f00d:aaaa:a:4506',
'2a03::1::2',
]
for addr in true_addrs: