Merge branch '2018.3' of https://github.com/saltstack/salt into requests_2018.3

twangboy 2018-12-27 12:00:49 -07:00
commit 242389b151
No known key found for this signature in database
GPG Key ID: 93FF3BDEB278C9EB
142 changed files with 7926 additions and 1735 deletions


@ -29,7 +29,7 @@ pipeline {
}
stage('build') {
steps {
sh 'eval "$(pyenv init -)"; make -C doc clean html'
sh 'eval "$(pyenv init -)"; make SPHINXOPTS="-W" -C doc clean html'
}
}
}
@ -48,8 +48,8 @@ pipeline {
description: 'The docs job has failed',
status: 'FAILURE',
context: "jenkins/pr/docs"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}

.ci/lint

@ -3,7 +3,7 @@ pipeline {
options {
timestamps()
ansiColor('xterm')
timeout(time: 1, unit: 'HOURS')
timeout(time: 3, unit: 'HOURS')
}
environment {
PYENV_ROOT = "/usr/local/pyenv"
@ -14,7 +14,7 @@ pipeline {
stage('github-pending') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: 'Testing lint...',
description: 'Python lint on changes begins...',
status: 'PENDING',
context: "jenkins/pr/lint"
}
@ -24,12 +24,15 @@ pipeline {
sh '''
# Need -M to detect renames, otherwise they are reported as Delete and Add; need -C to detect copies (-C includes -M)
# -M is on by default in git 2.9+
git diff --name-status -l99999 -C "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" > file-list-status.log
git diff --name-status -l99999 -C "origin/$CHANGE_TARGET" > file-list-status.log
# the -l increases the search limit; let's use awk so we do not need to repeat the search above.
gawk 'BEGIN {FS="\\t"} {if ($1 != "D") {print $NF}}' file-list-status.log > file-list-changed.log
gawk 'BEGIN {FS="\\t"} {if ($1 == "D") {print $NF}}' file-list-status.log > file-list-deleted.log
(git diff --name-status -l99999 -C "origin/$CHANGE_TARGET";echo "---";git diff --name-status -l99999 -C "origin/$BRANCH_NAME";printenv|grep -E '=[0-9a-z]{40,}+$|COMMIT=|BRANCH') > file-list-experiment.log
touch pylint-report-salt.log pylint-report-tests.log
(git diff --name-status -l99999 -C "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME";echo "---";git diff --name-status -l99999 -C "origin/$BRANCH_NAME";printenv|grep -E '=[0-9a-z]{40,}+$|COMMIT=|BRANCH') > file-list-experiment.log
echo 254 > pylint-salt-chg.exit # assume failure
echo 254 > pylint-salt-full.exit # assume failure
echo 254 > pylint-tests-chg.exit # assume failure
echo 254 > pylint-tests-full.exit # assume failure
eval "$(pyenv init -)"
pyenv --version
pyenv install --skip-existing 2.7.14
@ -41,63 +44,126 @@ pipeline {
archiveArtifacts artifacts: 'file-list-status.log,file-list-changed.log,file-list-deleted.log,file-list-experiment.log'
}
}
stage('linting') {
failFast false
stage('linting chg') {
parallel {
stage('salt linting') {
stage('lint salt chg') {
when {
expression { return readFile('file-list-changed.log') =~ /(?i)(^|\n)(salt\/.*\.py|setup\.py)\n/ }
}
steps {
sh '''
eval "$(pyenv init - --no-rehash)"
grep -Ei '^salt/.*\\.py$|^setup\\.py$' file-list-changed.log | xargs -r '--delimiter=\\n' tox -e pylint-salt | tee pylint-report-salt.log
# tee makes the exit/return code always 0
grep -Ei '^salt/.*\\.py$|^setup\\.py$' file-list-changed.log | (xargs -r '--delimiter=\\n' tox -e pylint-salt ; echo "$?" > pylint-salt-chg.exit) | tee pylint-report-salt-chg.log
# remove color escape coding
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-salt.log
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-salt-chg.log
read rc_exit < pylint-salt-chg.exit
exit "$rc_exit"
'''
archiveArtifacts artifacts: 'pylint-report-salt.log'
}
}
stage('test linting') {
stage('lint test chg') {
when {
expression { return readFile('file-list-changed.log') =~ /(?i)(^|\n)tests\/.*\.py\n/ }
}
steps {
sh '''
eval "$(pyenv init - --no-rehash)"
grep -Ei '^tests/.*\\.py$' file-list-changed.log | xargs -r '--delimiter=\\n' tox -e pylint-tests | tee pylint-report-tests.log
# tee makes the exit/return code always 0
grep -Ei '^tests/.*\\.py$' file-list-changed.log | (xargs -r '--delimiter=\\n' tox -e pylint-tests ; echo "$?" > pylint-tests-chg.exit) | tee pylint-report-tests-chg.log
# remove color escape coding
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-tests.log
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-tests-chg.log
read rc_exit < pylint-tests-chg.exit
exit "$rc_exit"
'''
archiveArtifacts artifacts: 'pylint-report-tests.log'
}
}
}
post {
always {
archiveArtifacts artifacts: 'pylint-report-*-chg.log', allowEmptyArchive: true
step([$class: 'WarningsPublisher',
parserConfigurations: [[
parserName: 'PyLint',
pattern: 'pylint-report-*-chg.log'
]],
failedTotalAll: '0',
useDeltaValues: false,
canRunOnFailed: true,
usePreviousBuildAsReference: true
])
}
}
}
stage('linting all') {
// perform a full lint if this is a merge forward and the change-only lint passed.
when {
expression { return params.CHANGE_BRANCH =~ /(?i)^merge[._-]/ }
}
parallel {
stage('setup full') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: 'Python lint on everything begins...',
status: 'PENDING',
context: "jenkins/pr/lint"
}
}
stage('lint salt full') {
steps {
sh '''
eval "$(pyenv init - --no-rehash)"
(tox -e pylint-salt ; echo "$?" > pylint-salt-full.exit) | tee pylint-report-salt-full.log
# remove color escape coding
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-salt-full.log
read rc_exit < pylint-salt-full.exit
exit "$rc_exit"
'''
}
}
stage('lint test full') {
steps {
sh '''
eval "$(pyenv init - --no-rehash)"
(tox -e pylint-tests ; echo "$?" > pylint-tests-full.exit) | tee pylint-report-tests-full.log
# remove color escape coding
sed -ri 's/\\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' pylint-report-tests-full.log
read rc_exit < pylint-tests-full.exit
exit "$rc_exit"
'''
}
}
}
post {
always {
archiveArtifacts artifacts: 'pylint-report-*-full.log', allowEmptyArchive: true
step([$class: 'WarningsPublisher',
parserConfigurations: [[
parserName: 'PyLint',
pattern: 'pylint-report-*-full.log'
]],
failedTotalAll: '0',
useDeltaValues: false,
canRunOnFailed: true,
usePreviousBuildAsReference: true
])
}
}
}
}
post {
always {
step([$class: 'WarningsPublisher',
parserConfigurations: [[
parserName: 'PyLint',
pattern: 'pylint-report*.log'
]],
failedTotalAll: '0',
useDeltaValues: false,
canRunOnFailed: true,
usePreviousBuildAsReference: true
])
cleanWs()
}
success {
githubNotify credentialsId: 'test-jenkins-credentials',
description: 'The lint job has passed',
description: 'Python lint test has passed',
status: 'SUCCESS',
context: "jenkins/pr/lint"
}
failure {
githubNotify credentialsId: 'test-jenkins-credentials',
description: 'The lint job has failed',
description: 'Python lint test has failed',
status: 'FAILURE',
context: "jenkins/pr/lint"
slackSend channel: "#jenkins-prod-pr",

.github/CODEOWNERS

@ -13,6 +13,7 @@ salt/*/*boto* @saltstack/team-boto
# Team Core
requirements/* @saltstack/team-core
rfcs/* @saltstack/team-core
salt/auth/* @saltstack/team-core
salt/cache/* @saltstack/team-core
salt/cli/* @saltstack/team-core
@ -73,3 +74,6 @@ salt/modules/reg.py @saltstack/team-windows
salt/states/reg.py @saltstack/team-windows
tests/*/*win* @saltstack/team-windows
tests/*/test_reg.py @saltstack/team-windows
# Jenkins Integration
.ci/* @saltstack/saltstack-sre-team @saltstack/team-core


@ -30,7 +30,7 @@ provisioner:
salt_install: bootstrap
salt_version: latest
salt_bootstrap_url: https://bootstrap.saltstack.com
salt_bootstrap_options: -X -p rsync stable <%= version %>
salt_bootstrap_options: -X -p rsync git v<%= version %>
log_level: info
sudo: true
require_chef: false


@ -2,7 +2,8 @@
source 'https://rubygems.org'
gem 'test-kitchen', '~>1.21'
# Point this back at the test-kitchen package after 1.23.3 is released
gem 'test-kitchen', :git => 'https://github.com/dwoz/test-kitchen.git', :branch => 'winrm_opts'
gem 'kitchen-salt', '~>0.2'
gem 'kitchen-sync'
gem 'git'


@ -7,7 +7,7 @@ from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.directives import ObjectDescription, Directive
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.domains.python import PyObject
from sphinx.locale import l_, _


@ -256,7 +256,7 @@
<!--
<a href="https://saltstack.com/saltstack-enterprise/" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/enterprise_ad.jpg', 1) }}"/></a>
-->
<a href="http://saltconf.com" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.jpg', 1) }}"/></a>
<a href="http://saltconf.com/saltconf18-speakers/" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.png', 1) }}"/></a>
</div>
{% endif %}
</div>

(Binary image files not shown: 497 KiB before, 767 KiB after.)


@ -3,7 +3,6 @@
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
@ -12,7 +11,6 @@ import time
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
'''
Mock out specified imports.
@ -24,7 +22,7 @@ class Mock(object):
http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
'''
def __init__(self, mapping=None, *args, **kwargs):
def __init__(self, mapping=None, *args, **kwargs): # pylint: disable=unused-argument
"""
Mapping allows autodoc to bypass the Mock object, but actually assign
a specific value, expected by a specific attribute returned.
@ -40,14 +38,12 @@ class Mock(object):
return Mock(mapping=self.__mapping)
def __getattr__(self, name):
#__mapping = {'total': 0}
data = None
if name in self.__mapping:
data = self.__mapping.get(name)
elif name in ('__file__', '__path__'):
data = '/dev/null'
elif name == '__qualname__':
raise AttributeError("'Mock' object has no attribute '__qualname__'")
elif name in ('__mro_entries__', '__qualname__'):
raise AttributeError("'Mock' object has no attribute '%s'" % (name))
else:
data = Mock(mapping=self.__mapping)
return data
@ -55,10 +51,30 @@ class Mock(object):
def __iter__(self):
return self
def next(self):
@staticmethod
def __next__():
raise StopIteration
# pylint: enable=R0903
# For Python 2
next = __next__
def mock_decorator_with_params(*oargs, **okwargs): # pylint: disable=unused-argument
'''
Optionally mock a decorator that takes parameters
E.g.:
@blah(stuff=True)
def things():
pass
'''
def inner(fn, *iargs, **ikwargs): # pylint: disable=unused-argument
if hasattr(fn, '__call__'):
return fn
return Mock()
return inner
MOCK_MODULES = [
# Python stdlib
@ -153,7 +169,6 @@ MOCK_MODULES = [
'napalm',
'dson',
'jnpr',
'json',
'lxml',
'lxml.etree',
'jnpr.junos',
@ -178,40 +193,24 @@ MOCK_MODULES = [
'msgpack',
]
MOCK_MODULES_MAPPING = {
'cherrypy': {'config': mock_decorator_with_params},
'ntsecuritycon': {
'STANDARD_RIGHTS_REQUIRED': 0,
'SYNCHRONIZE': 0,
},
'psutil': {'total': 0}, # Otherwise it will crash Sphinx
}
for mod_name in MOCK_MODULES:
if mod_name == 'psutil':
mock = Mock(mapping={'total': 0}) # Otherwise it will crash Sphinx
else:
mock = Mock()
sys.modules[mod_name] = mock
def mock_decorator_with_params(*oargs, **okwargs):
'''
Optionally mock a decorator that takes parameters
E.g.:
@blah(stuff=True)
def things():
pass
'''
def inner(fn, *iargs, **ikwargs):
if hasattr(fn, '__call__'):
return fn
else:
return Mock()
return inner
sys.modules[mod_name] = Mock(mapping=MOCK_MODULES_MAPPING.get(mod_name))
# Define a fake version attribute for the following libs.
sys.modules['libcloud'].__version__ = '0.0.0'
sys.modules['msgpack'].version = (1, 0, 0)
sys.modules['psutil'].version_info = (3, 0, 0)
sys.modules['pymongo'].version = '0.0.0'
sys.modules['ntsecuritycon'].STANDARD_RIGHTS_REQUIRED = 0
sys.modules['ntsecuritycon'].SYNCHRONIZE = 0
# Define a fake version attribute for the following libs.
sys.modules['cherrypy'].config = mock_decorator_with_params
sys.modules['tornado'].version_info = (0, 0, 0)
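For readers skimming the diff above, here is a standalone sketch (the ``TinyMock`` name is hypothetical, not part of conf.py) of how a mapping-aware mock returns concrete values for selected attributes while mocking everything else, which is the behavior ``MOCK_MODULES_MAPPING`` relies on:

# Standalone illustration only; not part of the Sphinx configuration.
class TinyMock(object):
    def __init__(self, mapping=None):
        self.__mapping = mapping or {}

    def __getattr__(self, name):
        # Attributes listed in the mapping resolve to real values so that
        # autodoc does not choke on them; everything else is mocked again.
        if name in self.__mapping:
            return self.__mapping[name]
        return TinyMock(mapping=self.__mapping)


psutil_stub = TinyMock(mapping={'total': 0})
assert psutil_stub.total == 0                       # concrete value from the mapping
assert isinstance(psutil_stub.cpu_count, TinyMock)  # any other attribute stays mocked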
# -- Add paths to PYTHONPATH ---------------------------------------------------
@ -223,12 +222,12 @@ except NameError:
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
for addtl_path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, addtl_path)))
# We're now able to import salt
@ -302,12 +301,12 @@ extensions = [
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
#'saltautodoc', # Must be AFTER autodoc
#'shorturls',
]
try:
import sphinxcontrib.spelling
import sphinxcontrib.spelling # false positive, pylint: disable=unused-import
except ImportError:
pass
else:
@ -371,7 +370,8 @@ gettext_compact = False
### HTML options
html_theme = os.environ.get('HTML_THEME', 'saltstack2') # set 'HTML_THEME=saltstack' to use previous theme
# set 'HTML_THEME=saltstack' to use previous theme
html_theme = os.environ.get('HTML_THEME', 'saltstack2')
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'
@ -439,7 +439,7 @@ html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.png'
@ -447,7 +447,7 @@ latex_logo = '_static/salt-logo.png'
latex_elements = {
'inputenc': '', # use XeTeX instead of the inputenc LaTeX package.
'utf8extra': '',
'preamble': '''
'preamble': r'''
\usepackage{fontspec}
\setsansfont{Linux Biolinum O}
\setromanfont{Linux Libertine O}
@ -459,35 +459,36 @@ latex_elements = {
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_ignore = [
r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'https://raven.readthedocs.io',
r'https://getsentry.com',
r'https://salt-cloud.readthedocs.io',
r'https://salt.readthedocs.io',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml',
]
linkcheck_anchors = False
@ -531,6 +532,7 @@ epub_tocdup = False
def skip_mod_init_member(app, what, name, obj, skip, options):
# pylint: disable=too-many-arguments,unused-argument
if name.startswith('_'):
return True
if isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init':


@ -92,13 +92,13 @@ RunnerClient
------------
.. autoclass:: salt.runner.RunnerClient
:members: cmd, async, cmd_sync, cmd_async
:members: cmd, asynchronous, cmd_sync, cmd_async
WheelClient
-----------
.. autoclass:: salt.wheel.WheelClient
:members: cmd, async, cmd_sync, cmd_async
:members: cmd, asynchronous, cmd_sync, cmd_async
CloudClient
-----------


@ -16,7 +16,7 @@ engine modules
ircbot
junos_syslog
logentries
logstash
logstash_engine
napalm_syslog
reactor
redis_sentinel


@ -1,6 +0,0 @@
=====================
salt.engines.logstash
=====================
.. automodule:: salt.engines.logstash
:members:


@ -0,0 +1,6 @@
============================
salt.engines.logstash_engine
============================
.. automodule:: salt.engines.logstash_engine
:members:


@ -1,57 +0,0 @@
.. _module-sync:
.. _dynamic-module-distribution:
===========================
Dynamic Module Distribution
===========================
.. versionadded:: 0.9.5
Custom Salt execution, state, and other modules can be distributed to Salt
minions using the Salt file server.
Under the root of any environment defined via the :conf_master:`file_roots`
option on the master server directories corresponding to the type of module can
be used.
The directories are prepended with an underscore:
- :file:`_beacons`
- :file:`_clouds`
- :file:`_engines`
- :file:`_grains`
- :file:`_modules`
- :file:`_output`
- :file:`_proxy`
- :file:`_renderers`
- :file:`_returners`
- :file:`_states`
- :file:`_tops`
- :file:`_utils`
The contents of these directories need to be synced over to the minions after
Python modules have been created in them. There are a number of ways to sync
the modules.
Sync Via States
===============
The minion configuration contains an option ``autoload_dynamic_modules``
which defaults to ``True``. This option makes the state system refresh all
dynamic modules when states are run. To disable this behavior set
:conf_minion:`autoload_dynamic_modules` to ``False`` in the minion config.
When dynamic modules are autoloaded via states, modules only pertinent to
the environments matched in the master's top file are downloaded.
This is important to remember because, while modules can be manually synced from
any specific environment, only environment-specific modules will be loaded
when a state run is executed.
Sync Via the saltutil Module
============================
The saltutil module has a number of functions that can be used to sync all
or specific dynamic modules. The saltutil module function ``saltutil.sync_all``
will sync all module types over to a minion. For more information see:
:mod:`salt.modules.saltutil`


@ -290,6 +290,8 @@ All beacons are configured using a similar process of enabling the beacon,
writing a reactor SLS (and state SLS if needed), and mapping a beacon event to
the reactor SLS.
.. _writing-beacons:
Writing Beacon Plugins
======================
@ -360,5 +362,5 @@ new execution modules and functions to back specific beacons.
Distributing Custom Beacons
---------------------------
Custom beacons can be distributed to minions using ``saltutil``, see
:ref:`Dynamic Module Distribution <dynamic-module-distribution>`.
Custom beacons can be distributed to minions via the standard methods, see
:ref:`Modular Systems <modular-systems>`.


@ -526,6 +526,19 @@ GPG key with ``git`` locally, and linking the GPG key to your GitHub account.
Once these steps are completed, the commit signing verification will look like
the example in GitHub's `GPG Signature Verification feature announcement`_.
Bootstrap Script Changes
------------------------
Salt's Bootstrap Script, known as `bootstrap-salt.sh`_ in the Salt repo, has its own
repository, contributing guidelines, and release cadence.
All changes to the Bootstrap Script should be made to `salt-bootstrap repo`_. Any
pull requests made to the `bootstrap-salt.sh`_ file in the Salt repository will be
automatically overwritten upon the next stable release of the Bootstrap Script.
For more information on the release process or how to contribute to the Bootstrap
Script, see the Bootstrap Script's `Contributing Guidelines`_.
.. _`saltstack/salt`: https://github.com/saltstack/salt
.. _`GitHub Fork a Repo Guide`: https://help.github.com/articles/fork-a-repo
.. _`GitHub issue tracker`: https://github.com/saltstack/salt/issues
@ -537,3 +550,6 @@ the example in GitHub's `GPG Signature Verification feature announcement`_.
.. _GPG Probot: https://probot.github.io/apps/gpg/
.. _help articles: https://help.github.com/articles/signing-commits-with-gpg/
.. _GPG Signature Verification feature announcement: https://github.com/blog/2144-gpg-signature-verification
.. _bootstrap-salt.sh: https://github.com/saltstack/salt/blob/develop/salt/cloud/deploy/bootstrap-salt.sh
.. _salt-bootstrap repo: https://github.com/saltstack/salt-bootstrap
.. _Contributing Guidelines: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md


@ -1,122 +0,0 @@
.. _dunder-dictionaries:
===================
Dunder Dictionaries
===================
Salt provides several special "dunder" dictionaries as a convenience for Salt
development. These include ``__opts__``, ``__context__``, ``__salt__``, and
others. This document will describe each dictionary and detail where they exist
and what information and/or functionality they provide.
__opts__
--------
Available in
~~~~~~~~~~~~
- All loader modules
The ``__opts__`` dictionary contains all of the options passed in the
configuration file for the master or minion.
.. note::
In many places in salt, instead of pulling raw data from the __opts__
dict, configuration data should be pulled from the salt `get` functions
such as config.get, aka - __salt__['config.get']('foo:bar')
The `get` functions also allow for dict traversal via the *:* delimiter.
Consider using get functions whenever using __opts__ or __pillar__ and
__grains__ (when using grains for configuration data)
The configuration file data made available in the ``__opts__`` dictionary is the
configuration data relative to the running daemon. If the modules are loaded and
executed by the master, then the master configuration data is available, if the
modules are executed by the minion, then the minion configuration is
available. Any additional information passed into the respective configuration
files is made available
__salt__
--------
Available in
~~~~~~~~~~~~
- Execution Modules
- State Modules
- Returners
- Runners
- SDB Modules
``__salt__`` contains the execution module functions. This allows for all
functions to be called as they have been set up by the salt loader.
.. code-block:: python
__salt__['cmd.run']('fdisk -l')
__salt__['network.ip_addrs']()
.. note::
When used in runners, ``__salt__`` references other runner modules, and not
execution modules.
__grains__
----------
Available in
~~~~~~~~~~~~
- Execution Modules
- State Modules
- Returners
- External Pillar
The ``__grains__`` dictionary contains the grains data generated by the minion
that is currently being worked with. In execution modules, state modules and
returners this is the grains of the minion running the calls, when generating
the external pillar the ``__grains__`` is the grains data from the minion that
the pillar is being generated for.
__pillar__
-----------
Available in
~~~~~~~~~~~~
- Execution Modules
- State Modules
- Returners
The ``__pillar__`` dictionary contains the pillar for the respective minion.
__context__
-----------
``__context__`` exists in state modules and execution modules.
During a state run the ``__context__`` dictionary persists across all states
that are run and then is destroyed when the state ends.
When running an execution module ``__context__`` persists across all module
executions until the modules are refreshed; such as when
:py:func:`saltutil.sync_all <salt.modules.saltutil.sync_all>` or
:py:func:`state.apply <salt.modules.state.apply_>` are executed.
A great place to see how to use ``__context__`` is in the cp.py module in
salt/modules/cp.py. The fileclient authenticates with the master when it is
instantiated and then is used to copy files to the minion. Rather than create a
new fileclient for each file that is to be copied down, one instance of the
fileclient is instantiated in the ``__context__`` dictionary and is reused for
each file. Here is an example from salt/modules/cp.py:
.. code-block:: python
if not 'cp.fileclient' in __context__:
__context__['cp.fileclient'] = salt.fileclient.get_file_client(__opts__)
.. note:: Because __context__ may or may not have been destroyed, always be
sure to check for the existence of the key in __context__ and
generate the key before using it.


@ -7,6 +7,7 @@ Developing Salt
:glob:
*
modules/index
extend/index
tests/*
raet/index


@ -1,170 +0,0 @@
===============
Modular Systems
===============
When first working with Salt, it is not always clear where all of the modular
components are and what they do. Salt comes loaded with more modular systems
than many users are aware of, making Salt very easy to extend in many places.
The most commonly used modular systems are execution modules and states. But
the modular systems extend well beyond the more easily exposed components
and are often added to Salt to make the complete system more flexible.
Execution Modules
=================
Execution modules make up the core of the functionality used by Salt to
interact with client systems. The execution modules create the core system
management library used by all Salt systems, including states, which
interact with minion systems.
Execution modules are completely open ended in their execution. They can
be used to do anything required on a minion, from installing packages to
detecting information about the system. The only restraint in execution
modules is that the defined functions always return a JSON serializable
object.
For a list of all built in execution modules, click :ref:`here
<all-salt.modules>`
For information on writing execution modules, see :ref:`this page
<writing-execution-modules>`.
Interactive Debugging
=====================
Sometimes debugging with ``print()`` and extra logs sprinkled everywhere is not
the best strategy.
IPython is a helpful debug tool that has an interactive python environment
which can be embedded in python programs.
First the system will require IPython to be installed.
.. code-block:: bash
# Debian
apt-get install ipython
# Arch Linux
pacman -Syu ipython2
# RHEL/CentOS (via EPEL)
yum install python-ipython
Now, in the troubling python module, add the following line at a location where
the debugger should be started:
.. code-block:: python
test = 'test123'
import IPython; IPython.embed_kernel()
After running a Salt command that hits that line, the following will show up in
the log file:
.. code-block:: text
[CRITICAL] To connect another client to this kernel, use:
[IPKernelApp] --existing kernel-31271.json
Now on the system that invoked ``embed_kernel``, run the following command from
a shell:
.. code-block:: bash
# NOTE: use ipython2 instead of ipython for Arch Linux
ipython console --existing
This provides a console that has access to all the vars and functions, and even
supports tab-completion.
.. code-block:: python
print(test)
test123
To exit IPython and continue running Salt, press ``Ctrl-d`` to logout.
State Modules
=============
State modules are used to define the state interfaces used by Salt States.
These modules are restrictive in that they must follow a number of rules to
function properly.
.. note::
State modules define the available routines in sls files. If calling
an execution module directly is desired, take a look at the `module`
state.
Auth
====
The auth module system allows for external authentication routines to be easily
added into Salt. The `auth` function needs to be implemented to satisfy the
requirements of an auth module. Use the ``pam`` module as an example.
Fileserver
==========
The fileserver module system is used to create fileserver backends used by the
Salt Master. These modules need to implement the functions used in the
fileserver subsystem. Use the ``gitfs`` module as an example.
Grains
======
Grain modules define extra routines to populate grains data. All defined
public functions will be executed and MUST return a Python dict object. The
dict keys will be added to the grains made available to the minion.
Output
======
The output modules supply the outputter system with routines to display data
in the terminal. These modules are very simple and only require the `output`
function to execute. The default system outputter is the ``nested`` module.
Pillar
======
Used to define optional external pillar systems. The pillar generated via
the filesystem pillar is passed into external pillars. This is commonly used
as a bridge to database data for pillar, but is also the backend to the libvirt
state used to generate and sign libvirt certificates on the fly.
Renderers
=========
Renderers are the system used to render sls files into salt highdata for the
state compiler. They can be as simple as the ``py`` renderer and as complex as
``stateconf`` and ``pydsl``.
Returners
=========
Returners are used to send data from minions to external sources, commonly
databases. A full returner will implement all routines to be supported as an
external job cache. Use the ``redis`` returner as an example.
Runners
=======
Runners are purely master-side execution sequences.
Tops
====
Tops modules are used to convert external data sources into top file data for
the state system.
Wheel
=====
The wheel system is used to manage master side management routines. These
routines are primarily intended for the API to enable master configuration.


@ -0,0 +1,25 @@
=====================
Configuration Options
=====================
A number of configuration options can affect the load process. This is a quick
list of them:
* ``autoload_dynamic_modules`` (:conf_minion:`Minion <autoload_dynamic_modules>`)
* ``cython_enable`` (:conf_minion:`Minion <cython_enable>`, :conf_master:`Master <cython_enable>`)
* ``disable_modules`` (:conf_minion:`Minion <disable_modules>`)
* ``disable_returners`` (:conf_minion:`Minion <disable_returners>`)
* ``enable_zip_modules`` (:conf_minion:`Minion <enable_zip_modules>`)
* ``extension_modules`` (:conf_master:`Master <extension_modules>`)
* ``extmod_whitelist`` (:conf_minion:`Minion <extmod_whitelist>`, :conf_master:`Master <extmod_whitelist>`)
* ``extmod_blacklist`` (:conf_minion:`Minion <extmod_blacklist>`, :conf_master:`Master <extmod_blacklist>`)
* ``whitelist_modules`` (:conf_minion:`Minion <enable_whitelist_modules>`)
* ``grains_dirs`` (:conf_minion:`Minion <grains_dirs>`)
* ``module_dirs`` (:conf_minion:`Minion <module_dirs>`, :conf_master:`Master <module_dirs>`)
* ``outputter_dirs`` (:conf_minion:`Minion <outputter_dirs>`, :conf_master:`Master <outputter_dirs>`)
* ``providers`` (:conf_minion:`Minion <providers>`)
* ``render_dirs`` (:conf_minion:`Minion <render_dirs>`)
* ``returner_dirs`` (:conf_minion:`Minion <returner_dirs>`)
* ``runner_dirs`` (:conf_master:`Master <runner_dirs>`)
* ``states_dirs`` (:conf_minion:`Minion <states_dirs>`)
* ``utils_dirs`` (:conf_minion:`Minion <utils_dirs>`)


@ -0,0 +1,237 @@
======================
Developing New Modules
======================
Interactive Debugging
=====================
Sometimes debugging with ``print()`` and extra logs sprinkled everywhere is not
the best strategy.
IPython is a helpful debug tool that has an interactive python environment
which can be embedded in python programs.
First the system will require IPython to be installed.
.. code-block:: bash
# Debian
apt-get install ipython
# Arch Linux
pacman -Syu ipython2
# RHEL/CentOS (via EPEL)
yum install python-ipython
Now, in the troubling python module, add the following line at a location where
the debugger should be started:
.. code-block:: python
test = 'test123'
import IPython; IPython.embed_kernel()
After running a Salt command that hits that line, the following will show up in
the log file:
.. code-block:: text
[CRITICAL] To connect another client to this kernel, use:
[IPKernelApp] --existing kernel-31271.json
Now on the system that invoked ``embed_kernel``, run the following command from
a shell:
.. code-block:: bash
# NOTE: use ipython2 instead of ipython for Arch Linux
ipython console --existing
This provides a console that has access to all the vars and functions, and even
supports tab-completion.
.. code-block:: python
print(test)
test123
To exit IPython and continue running Salt, press ``Ctrl-d`` to logout.
Special Module Contents
=======================
These are things that may be defined by the module to influence various things.
__virtual__
-----------
__virtual_aliases__
-------------------
__virtualname__
---------------
__init__
--------
Called before ``__virtual__()``
__proxyenabled__
----------------
Used in grains and proxy modules. Define ``__proxyenabled__`` as a list containing the names of the proxy types that the module supports.
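A minimal illustration (the proxy type name is an assumption for the example):

.. code-block:: python

    # Load this module only for the named proxy types; use ['*'] to
    # enable it for every proxy type.
    __proxyenabled__ = ['rest_sample']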
__load__
--------
__func_alias__
--------------
__outputter__
-------------
.. _dunder-dictionaries:
Dunder Dictionaries
===================
Salt provides several special "dunder" dictionaries as a convenience for Salt
development. These include ``__opts__``, ``__context__``, ``__salt__``, and
others. This document will describe each dictionary and detail where they exist
and what information and/or functionality they provide.
The following dunder dictionaries are always defined, but may be empty
* ``__context__``
* ``__grains__``
* ``__pillar__``
* ``__opts__``
__opts__
--------
Defined in: All modules
The ``__opts__`` dictionary contains all of the options passed in the
configuration file for the master or minion.
.. note::
In many places in salt, instead of pulling raw data from the __opts__
dict, configuration data should be pulled from the salt `get` functions
such as config.get, aka - ``__salt__['config.get']('foo:bar')``
The `get` functions also allow for dict traversal via the *:* delimiter.
Consider using get functions whenever using ``__opts__`` or ``__pillar__``
and ``__grains__`` (when using grains for configuration data)
The configuration file data made available in the ``__opts__`` dictionary is the
configuration data relative to the running daemon. If the modules are loaded and
executed by the master, then the master configuration data is available, if the
modules are executed by the minion, then the minion configuration is
available. Any additional information passed into the respective configuration
files is made available
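As a hedged sketch of the note above (the ``myapp:interval`` key and the function are invented for illustration):

.. code-block:: python

    def polling_interval():
        '''
        Prefer config.get over reading __opts__ directly; the second
        argument is the default used when 'myapp:interval' is not set.
        '''
        return __salt__['config.get']('myapp:interval', 60)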
__salt__
--------
Defined in: Auth, Beacons, Engines, Execution, Executors, Outputters, Pillars,
Proxies, Renderers, Returners, Runners, SDB, SSH Wrappers, State, Thorium
``__salt__`` contains the execution module functions. This allows for all
functions to be called as they have been set up by the salt loader.
.. code-block:: python
__salt__['cmd.run']('fdisk -l')
__salt__['network.ip_addrs']()
.. note::
When used in runners or outputters, ``__salt__`` references other
runner/outputter modules, and not execution modules.
__grains__
----------
Filled in for: Execution, Pillar, Renderer, Returner, SSH Wrapper, State.
The ``__grains__`` dictionary contains the grains data generated by the minion
that is currently being worked with. In execution modules, state modules and
returners this is the grains of the minion running the calls, when generating
the external pillar the ``__grains__`` is the grains data from the minion that
the pillar is being generated for.
While ``__grains__`` is defined for every module, it's only filled in for some.
__pillar__
-----------
Filled in for: Execution, Returner, SSH Wrapper, State
The ``__pillar__`` dictionary contains the pillar for the respective minion.
While ``__pillar__`` is defined for every module, it's only filled in for some.
__context__
-----------
During a state run the ``__context__`` dictionary persists across all states
that are run and then is destroyed when the state ends.
When running an execution module ``__context__`` persists across all module
executions until the modules are refreshed; such as when
:py:func:`saltutil.sync_all <salt.modules.saltutil.sync_all>` or
:py:func:`state.apply <salt.modules.state.apply_>` are executed.
A great place to see how to use ``__context__`` is in the cp.py module in
salt/modules/cp.py. The fileclient authenticates with the master when it is
instantiated and then is used to copy files to the minion. Rather than create a
new fileclient for each file that is to be copied down, one instance of the
fileclient is instantiated in the ``__context__`` dictionary and is reused for
each file. Here is an example from salt/modules/cp.py:
.. code-block:: python
if not 'cp.fileclient' in __context__:
__context__['cp.fileclient'] = salt.fileclient.get_file_client(__opts__)
.. note:: Because __context__ may or may not have been destroyed, always be
sure to check for the existence of the key in __context__ and
generate the key before using it.
__utils__
---------
Defined in: Cloud, Engine, Execution, File Server, Pillar, Proxy, Runner, SDB.
__proxy__
---------
Defined in: Beacon, Engine, Execution, Executor, Proxy, Renderer, Returner, State, Util
__runners__
-----------
Defined in: Engine, Roster, Thorium
__ret__
-------
Defined in: Proxy, Search
__thorium__
-----------
Defined in: Thorium
__states__
----------
Defined in: Renderers, State
__serializers__
---------------
Defined in: State
__sdb__
-------
Defined in: SDB


@ -153,7 +153,7 @@ Using our example above:
ext_pillar( id, pillar, 'some argument' ) # example_a
ext_pillar( id, pillar, 'argumentA', 'argumentB' ) # example_b
ext_pillar( id, pillar, keyA='valueA', keyB='valueB' } ) # example_c
ext_pillar( id, pillar, keyA='valueA', keyB='valueB' ) # example_c
In the ``example_a`` case, ``pillar`` will contain the items from the


@ -0,0 +1,394 @@
.. _modular-systems:
===============
Modular Systems
===============
When first working with Salt, it is not always clear where all of the modular
components are and what they do. Salt comes loaded with more modular systems
than many users are aware of, making Salt very easy to extend in many places.
The most commonly used modular systems are execution modules and states. But
the modular systems extend well beyond the more easily exposed components
and are often added to Salt to make the complete system more flexible.
.. toctree::
:maxdepth: 2
:glob:
developing
configuration
Loading Modules
===============
Modules come primarily from several sources:
* The Salt package itself
* The Salt File Server
* The extmods directory
* Secondary packages installed
Using one source to override another is not supported.
The Salt Package
----------------
Salt itself ships with a large number of modules. These are part of the Salt
package itself and don't require the user to do anything to use them. (Although
a number of them have additional dependencies and/or configuration.)
The Salt File Server
--------------------
The user may add modules by simply placing them in special directories in their
:ref:`fileserver <file-server>`.
The name of the directory inside of the file server is the directory name
prepended by underscore, such as:
- :file:`_grains`
- :file:`_modules`
- :file:`_states`
Modules must be synced before they can be used. This can happen a few ways,
discussed below.
.. note::
Using saltenvs besides ``base`` may not work in all contexts.
Sync Via States
~~~~~~~~~~~~~~~
The minion configuration contains an option ``autoload_dynamic_modules``
which defaults to ``True``. This option makes the state system refresh all
dynamic modules when states are run. To disable this behavior set
:conf_minion:`autoload_dynamic_modules` to ``False`` in the minion config.
When dynamic modules are autoloaded via states, only the modules defined in the
same saltenvs as the states currently being run are synced.
Sync Via the saltutil Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The saltutil module has a number of functions that can be used to sync all
or specific dynamic modules. The ``saltutil.sync_*``
:py:mod:`execution functions <salt.modules.saltutil>` and
:py:mod:`runner functions <salt.runners.saltutil>` can be used to sync modules
to minions and the master, respectively.
The extmods Directory
---------------------
Any files placed in the directory set by the ``extension_modules`` setting
(:conf_minion:`minion <extension_modules>`,
:conf_master:`master <extension_modules>`, default
``/var/cache/salt/*/extmods``) can also be loaded as modules. Note that these
directories are also used by the ``saltutil.sync_*`` functions (mentioned
above) and files may be overwritten.
Secondary Packages
------------------
Third-party packages may also add modules to Salt if they are installed in the
same system and Python environment as the Salt Minion or Master.
This is done via setuptools entry points:
.. code-block:: python
setup(
# ...
entry_points={
'salt.loader': [
'module_dirs=spirofs.loader:module',
],
},
# ...
)
Note that these are not synced from the Salt Master to the Minions. They must be
installed independently on each Minion.
Module Types
============
The specific names used by each loading method above are as follows. See sections below
for a short summary of each of these systems.
.. _module-name-table:
============ ================================================================ ========================= =====================
Module Type Salt Package Name FS/Directory Name Entry Point
============ ================================================================ ========================= =====================
Auth ``salt.auth`` (:ref:`index <external-logging-handlers>`) ``auth`` [#no-fs]_ ``auth_dirs``
Beacon ``salt.beacons`` (:ref:`index <beacons>`) ``beacons`` ``beacons_dirs``
Cache ``salt.cache`` (:ref:`index <all-salt.cache>`) ``cache`` ``cache_dirs``
Cloud ``salt.cloud.clouds`` (:ref:`index <all-salt.clouds>`) ``clouds`` ``cloud_dirs``
Engine ``salt.engines`` (:ref:`index <engines>`) ``engines`` ``engines_dirs``
Execution ``salt.modules`` (:ref:`index <all-salt.modules>`) ``modules`` ``module_dirs``
Executor ``salt.executors`` (:ref:`index <all-salt.executors>`) ``executors`` [#no-fs]_ ``executor_dirs``
File Server ``salt.fileserver`` (:ref:`index <file-server>`) ``fileserver`` [#no-fs]_ ``fileserver_dirs``
Grain ``salt.grains`` (:ref:`index <all-salt.grains>`) ``grains`` ``grains_dirs``
Log Handler ``salt.log.handlers`` (:ref:`index <external-logging-handlers>`) ``log_handlers`` ``log_handlers_dirs``
Net API ``salt.netapi`` (:ref:`index <all-netapi-modules>`) ``netapi`` [#no-fs]_ ``netapi_dirs``
Outputter ``salt.output`` (:ref:`index <all-salt.output>`) ``output`` ``outputter_dirs``
Pillar ``salt.pillar`` (:ref:`index <all-salt.pillars>`) ``pillar`` ``pillar_dirs``
Proxy ``salt.proxy`` (:ref:`index <all-salt.proxy>`) ``proxy`` ``proxy_dirs``
Queue ``salt.queues`` (:ref:`index <all-salt.queues>`) ``queues`` ``queue_dirs``
Renderer ``salt.renderers`` (:ref:`index <all-salt.renderers>`) ``renderers`` ``render_dirs``
Returner ``salt.returners`` (:ref:`index <all-salt.returners>`) ``returners`` ``returner_dirs``
Roster ``salt.roster`` (:ref:`index <all-salt.roster>`) ``roster`` ``roster_dirs``
Runner ``salt.runners`` (:ref:`index <all-salt.runners>`) ``runners`` ``runner_dirs``
SDB ``salt.sdb`` (:ref:`index <all-salt.sdb>`) ``sdb`` ``sdb_dirs``
Search ``salt.search`` ``search`` [#no-fs]_ ``search_dirs``
Serializer ``salt.serializers`` (:ref:`index <all-salt.serializers>`) ``serializers`` [#no-fs]_ ``serializers_dirs``
SPM pkgdb ``salt.spm.pkgdb`` ``pkgdb`` [#no-fs]_ ``pkgdb_dirs``
SPM pkgfiles ``salt.spm.pkgfiles`` ``pkgfiles`` [#no-fs]_ ``pkgfiles_dirs``
SSH Wrapper ``salt.client.ssh.wrapper`` ``wrapper`` [#no-fs]_ ``wrapper_dirs``
State ``salt.states`` (:ref:`index <all-salt.states>`) ``states`` ``states_dirs``
Thorium ``salt.thorium`` (:ref:`index <all-salt.thorium>`) ``thorium`` [#no-fs]_ ``thorium_dirs``
Top ``salt.tops`` (:ref:`index <all-salt.tops>`) ``tops`` ``top_dirs``
Util ``salt.utils`` ``utils`` ``utils_dirs``
Wheel ``salt.wheels`` (:ref:`index <all-salt.wheel>`) ``wheel`` ``wheel_dirs``
============ ================================================================ ========================= =====================
.. [#no-fs] These modules cannot be loaded from the Salt File Server.
.. note::
While it is possible to import modules directly with the import statement,
it is strongly recommended that the appropriate
:ref:`dunder dictionary <dunder-dictionaries>` is used to access them
instead. This is because a number of factors affect module names, module
selection, and module overloading.
Auth
----
The auth module system allows for external authentication routines to be easily
added into Salt. The `auth` function needs to be implemented to satisfy the
requirements of an auth module. Use the ``pam`` module as an example.
See :ref:`External Authentication System <acl-eauth>` for more about
authentication in Salt.
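A minimal sketch of that interface (the credential check is a placeholder, not a real backend):

.. code-block:: python

    def auth(username, password):
        '''
        Return True if the credentials are valid, False otherwise. A real
        module would call out to an authentication backend, as ``pam`` does.
        '''
        return username == 'saltdev' and password == 'saltdev'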
Beacon
------
* :ref:`Writing Beacons <writing-beacons>`
Beacons are polled by the Salt event loop to monitor non-salt processes. See
:ref:`Beacons <beacons>` for more information about the beacon system.
Cache
-----
The minion cache is used by the master to store various information about
minions. See :ref:`Minion Data Cache <cache>` for more information.
Cloud
-----
Cloud modules are backend implementations used by :ref:`Salt Cloud <salt-cloud>`.
Engine
------
Engines are open-ended services managed by the Salt daemon (both master and
minion). They may interact with event loop, call other modules, or a variety of
non-salt tasks. See :ref:`Salt Engines <engines>` for complete details.
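A hypothetical skeleton of such a service (the loop body and the ``interval`` option are illustrative):

.. code-block:: python

    import time

    def start(interval=30):
        '''
        Engines expose a ``start`` function; options from the engine
        configuration are passed to it, and it usually loops for the
        life of the daemon.
        '''
        while True:
            # perform some recurring, non-salt work here
            time.sleep(interval)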
Execution
---------
.. toctree::
:maxdepth: 1
:glob:
/ref/modules/index
Execution modules make up the core of the functionality used by Salt to
interact with client systems. The execution modules create the core system
management library used by all Salt systems, including states, which
interact with minion systems.
Execution modules are completely open ended in their execution. They can
be used to do anything required on a minion, from installing packages to
detecting information about the system. The only restraint in execution
modules is that the defined functions always return a JSON serializable
object.
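For example, a hypothetical custom execution module only needs public functions that return JSON-serializable data:

.. code-block:: python

    # _modules/myapp.py (hypothetical)
    def status():
        '''
        Callable as ``myapp.status``; the return value must be
        JSON serializable.
        '''
        return {'running': True, 'workers': 4}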
Executor
--------
Executors control how execution modules get called. The default is to just call
them, but this can be customized.
File Server
-----------
The file server module system is used to create file server backends used by the
Salt Master. These modules need to implement the functions used in the
fileserver subsystem. Use the ``gitfs`` module as an example.
See :ref:`File Server Backends <file-server-backends>` for more information.
Grains
------
* :ref:`writing-grains`
Grain modules define extra routines to populate grains data. All defined
public functions will be executed and MUST return a Python dict object. The
dict keys will be added to the grains made available to the minion.
See :ref:`Grains <grains>` for more.
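A minimal, hypothetical grain module as a sketch of that contract:

.. code-block:: python

    # _grains/myrole.py (hypothetical)
    def role():
        '''
        Runs at grain generation time; the returned dict's keys become
        grains on the minion.
        '''
        return {'myrole': 'webserver'}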
Log Handler
-----------
Log handlers allow the logs from salt (master or minion) to be sent to log
aggregation systems.
Net API
-------
Net API modules are the actual server implementation used by Salt API.
Output
------
The output modules supply the outputter system with routines to display data
in the terminal. These modules are very simple and only require the `output`
function to execute. The default system outputter is the ``nested`` module.
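As a sketch of how small an outputter can be (illustrative only, not a shipped Salt outputter):

.. code-block:: python

    import pprint

    def output(data, **kwargs):  # pylint: disable=unused-argument
        '''
        Return the string the outputter system should display.
        '''
        return pprint.pformat(data)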
Pillar
------
.. toctree::
:maxdepth: 1
:glob:
external_pillars
Used to define optional external pillar systems. The pillar generated via
the filesystem pillar is passed into external pillars. This is commonly used
as a bridge to database data for pillar, but is also the backend to the libvirt
state used to generate and sign libvirt certificates on the fly.
Proxy
-----
:ref:`Proxy Minions <proxy-minion>` are a way to manage devices that cannot run
a full minion directly.
Renderers
---------
Renderers are the system used to render sls files into salt highdata for the
state compiler. They can be as simple as the ``py`` renderer and as complex as
``stateconf`` and ``pydsl``.
Returners
---------
Returners are used to send data from minions to external sources, commonly
databases. A full returner will implement all routines to be supported as an
external job cache. Use the ``redis`` returner as an example.
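The minimum interface is a ``returner`` function receiving the job return data; a hedged sketch that only logs it:

.. code-block:: python

    import logging

    log = logging.getLogger(__name__)

    def returner(ret):
        '''
        ``ret`` is the job return dict (jid, id, fun, return, ...); a real
        returner would write it to an external system instead of logging.
        '''
        log.info('job %s returned %r', ret.get('jid'), ret.get('return'))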
Roster
------
The :ref:`Roster system <ssh-roster>` is used by Salt SSH to enumerate devices.
Runners
-------
.. toctree::
:maxdepth: 1
:glob:
/ref/runners/index
Runners are purely master-side execution sequences.
SDB
---
* :ref:`Writing SDB Modules <sdb-writing-modules>`
SDB is a way to store data that's not associated with a minion. See
:ref:`Storing Data in Other Databases <sdb>`.
Search
------
A system for indexing the file server and pillars. Removed in 2018.3.
Serializer
----------
Primarily used with :py:func:`file.serialize <salt.states.file.serialize>`.
State
-----
.. toctree::
:maxdepth: 1
:glob:
/ref/states/index
State modules are used to define the state interfaces used by Salt States.
These modules are restrictive in that they must follow a number of rules to
function properly.
.. note::
State modules define the available routines in sls files. If calling
an execution module directly is desired, take a look at the `module`
state.
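One of those rules is the conventional return structure; a hypothetical sketch (the ``myapp.*`` execution calls are invented):

.. code-block:: python

    def present(name):
        '''
        Hypothetical state function returning the standard
        name/changes/result/comment dictionary.
        '''
        ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
        if __salt__['myapp.exists'](name):   # invented execution function
            ret['comment'] = '{0} is already present'.format(name)
            return ret
        __salt__['myapp.add'](name)          # invented execution function
        ret['changes'][name] = 'added'
        ret['comment'] = '{0} was added'.format(name)
        return ret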
SPM pkgdb
---------
* :ref:`SPM Development Guide: Package Database <spm-development-pkgdb>`
pkgdb modules provide storage backends to the package database.
SPM pkgfiles
------------
* :ref:`SPM Development Guide: Package Files <spm-development-pkgfiles>`
pkgfiles modules handle the actual installation.
SSH Wrapper
-----------
Replacement execution modules for :ref:`Salt SSH <salt-ssh>`.
Thorium
-------
Modules for use in the :ref:`Thorium <thorium-reactor>` event reactor.
Tops
----
Tops modules are used to convert external data sources into top file data for
the state system.
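A hedged sketch of the shape such a module returns (the saltenv and SLS names are invented):

.. code-block:: python

    def top(**kwargs):  # pylint: disable=unused-argument
        '''
        Return top data mapping saltenvs to lists of SLS targets.
        '''
        return {'base': ['core', 'webserver']}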
Util
----
Just utility modules to use with other modules via ``__utils__`` (see
:ref:`Dunder Dictionaries <dunder-dictionaries>`).
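A hypothetical sketch of the calling convention:

.. code-block:: python

    # _utils/myutil.py (hypothetical)
    def normalize(value):
        return str(value).strip().lower()

    # From another module, once the util has been synced:
    #     __utils__['myutil.normalize']('  FOO  ')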
Wheel
-----
The wheel system is used to manage master side management routines. These
routines are primarily intended for the API to enable master configuration.


@ -13,3 +13,5 @@ Security Fix
CVE-2018-15751 Remote command execution and incorrect access control when using salt-api.
CVE-2018-15750 Directory traversal vulnerability when using salt-api. Allows an attacker to determine what files exist on a server when querying /run or /events.
Credit and thanks for discovery and responsible disclosure: nullbr4in, xcuter, koredge, loupos, blackcon, Naver Business Platform


@ -20,6 +20,8 @@ CVE-2018-15751 Remote command execution and incorrect access control when using
CVE-2018-15750 Directory traversal vulnerability when using salt-api. Allows an attacker to determine what files exist on a server when querying /run or /events.
Credit and thanks for discovery and responsible disclosure: nullbr4in, xcuter, koredge, loupos, blackcon, Naver Business Platform
New win_snmp behavior
=====================


@ -50,6 +50,8 @@ CVE-2018-15751 Remote command execution and incorrect access control when using
CVE-2018-15750 Directory traversal vulnerability when using salt-api. Allows an attacker to determine what files exist on a server when querying /run or /events.
Credit and thanks for discovery and responsible disclosure: nullbr4in, xcuter, koredge, loupos, blackcon, Naver Business Platform
Changes to win_timezone
=======================
@ -92,7 +94,7 @@ Example:
cmd.run 'echo '\''h=\"baz\"'\''' runas=macuser
Changelog for v2018.3.2..v2018.3.3
=================================================================
==================================
*Generated at: 2018-09-21 17:45:27 UTC*
@ -507,7 +509,7 @@ Changelog for v2018.3.2..v2018.3.3
* 3d26affa10 Fix remaining file state integration tests (py3)
* **PR** `#49171`_: (`Ch3LL`_) [2018.3.3] cherry pick `#49103`_
* **PR** `#49171`_: (`Ch3LL`_) [2018.3.3] cherry pick `#49103`_
@ *2018-08-17 20:23:32 UTC*
* **PR** `#49103`_: (`dwoz`_) Install the launcher so we can execute py files (refs: `#49171`_)
@ -1630,7 +1632,7 @@ Changelog for v2018.3.2..v2018.3.3
* **ISSUE** `#46896`_: (`Poil`_) Proxy + file.managed => Comment: Failed to cache xxx invalid arguments to setopt (refs: `#48754`_)
* **PR** `#48754`_: (`lomeroe`_) send proxy/ca_cert parameters as strings (not unicode) to tornado httpclient
* **PR** `#48754`_: (`lomeroe`_) send proxy/ca_cert parameters as strings (not unicode) to tornado httpclient
@ *2018-07-25 14:55:42 UTC*
* 030c921914 Merge pull request `#48754`_ from lomeroe/fix-tornado-proxy
@ -3075,7 +3077,7 @@ Changelog for v2018.3.2..v2018.3.3
* dae65da256 Merge branch '2018.3.1' into '2018.3'
* **PR** `#48186`_: (`rallytime`_) Add autodoc module for saltcheck.py
* **PR** `#48186`_: (`rallytime`_) Add autodoc module for saltcheck.py
@ *2018-06-19 19:03:55 UTC*
* 5b4897f050 Merge pull request `#48186`_ from rallytime/saltcheck-docs
@ -3362,11 +3364,11 @@ Changelog for v2018.3.2..v2018.3.3
* **PR** `#48109`_: (`rallytime`_) Back-port `#47851`_ to 2018.3
@ *2018-06-14 13:09:04 UTC*
* **PR** `#47851`_: (`rares-pop`_) Fixup! add master.py:FileserverUpdate **kwargs (refs: `#48109`_)
* **PR** `#47851`_: (`rares-pop`_) Fixup! add master.py:FileserverUpdate \*\*kwargs (refs: `#48109`_)
* 2902ee0b14 Merge pull request `#48109`_ from rallytime/bp-47851
* e9dc30bf8e Fixup! add master.py:FileserverUpdate **kwargs
* e9dc30bf8e Fixup! add master.py:FileserverUpdate \*\*kwargs
* **ISSUE** `#47925`_: (`JonGriggs`_) GitFS looking for files in the master branch only (refs: `#47943`_)
@ -3377,7 +3379,7 @@ Changelog for v2018.3.2..v2018.3.3
* 534e1a7100 Merge branch '2018.3' into issue47925
* **PR** `#48089`_: (`rallytime`_) Update release versions for the 2018.3 branch
* **PR** `#48089`_: (`rallytime`_) Update release versions for the 2018.3 branch
@ *2018-06-13 14:03:44 UTC*
* 9e1d0040e4 Merge pull request `#48089`_ from rallytime/update_version_doc_2018.3


@ -4,3 +4,11 @@ In Progress: Salt 2018.3.4 Release Notes
Version 2018.3.4 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
State Changes
=============
- The :py:func:`host.present <salt.states.host.present>` state can now remove
the specified hostname from IPs not specified in the state. This can be done
by setting the newly-added ``clean`` argument to ``True``.


@ -144,6 +144,7 @@ When writing Salt modules, it is not recommended to call ``sdb.get`` directly,
as it requires the user to provide values in SDB, using a specific URI. Use
``config.get`` instead.
.. _sdb-writing-modules:
Writing SDB Modules
===================


@ -20,6 +20,7 @@ marked as required are crucial to SPM's core functionality, while arguments that
are marked as optional are provided as a benefit to the module, if it needs to
use them.
.. _spm-development-pkgdb:
Package Database
----------------
@ -146,6 +147,8 @@ The only argument that is expected is ``db_``, which is the package database
file.
.. _spm-development-pkgfiles:
Package Files
-------------
By default, package files are installed using the ``local`` module. This module


@ -90,9 +90,9 @@ shorthand for having to type out complicated compound expressions.
.. code-block:: yaml
nodegroups:
 group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
 group2: 'G@os:Debian and foo.domain.com'
 group3: 'G@os:Debian and N@group1'
group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
group2: 'G@os:Debian and foo.domain.com'
group3: 'G@os:Debian and N@group1'
Advanced Targeting Methods


@ -4,386 +4,40 @@
Salt Bootstrap
==============
The Salt Bootstrap script allows for a user to install the Salt Minion or
Master on a variety of system distributions and versions. This shell script
known as ``bootstrap-salt.sh`` runs through a series of checks to determine
the operating system type and version. It then installs the Salt binaries
using the appropriate methods. The Salt Bootstrap script installs the
minimum number of packages required to run Salt. This means that in the event
you run the bootstrap to install via package, Git will not be installed.
Installing the minimum number of packages helps ensure the script stays as
lightweight as possible, assuming the user will install any other required
packages after the Salt binaries are present on the system. The script source
is available on GitHub: https://github.com/saltstack/salt-bootstrap
The Salt Bootstrap Script allows a user to install the Salt Minion or Master
on a variety of system distributions and versions.
The Salt Bootstrap Script is a shell script known as ``bootstrap-salt.sh``.
It runs through a series of checks to determine the operating system type and
version. It then installs the Salt binaries using the appropriate methods.
Supported Operating Systems
---------------------------
The Salt Bootstrap Script installs the minimum number of packages required to
run Salt. This means that in the event you run the bootstrap to install via
package, Git will not be installed. Installing the minimum number of packages
helps ensure the script stays as lightweight as possible, assuming the user
will install any other required packages after the Salt binaries are present
on the system.
The Salt Bootstrap Script is maintained in a separate repo from Salt, complete
with its own issues, pull requests, contributing guidelines, release protocol,
etc.
To learn more, please see the Salt Bootstrap repo links:
- `Salt Bootstrap repo`_
- `README`_: includes supported operating systems, example usage, and more.
- `Contributing Guidelines`_
- `Release Process`_
.. note::
In the event you do not see your distribution or version available, please
review the develop branch on GitHub as it may contain updates that are
not present in the stable release:
https://github.com/saltstack/salt-bootstrap/tree/develop
Debian and derivatives
~~~~~~~~~~~~~~~~~~~~~~
- Debian GNU/Linux 7/8
- Linux Mint Debian Edition 1 (based on Debian 8)
- Kali Linux 1.0 (based on Debian 7)
Red Hat family
~~~~~~~~~~~~~~
- Amazon Linux 2012.09/2013.03/2013.09/2014.03/2014.09
- CentOS 5/6/7
- Fedora 17/18/20/21/22
- Oracle Linux 5/6/7
- Red Hat Enterprise Linux 5/6/7
- Scientific Linux 5/6/7
SUSE family
~~~~~~~~~~~
- openSUSE 12/13
- openSUSE Leap 42
- openSUSE Tumbleweed 2015
- SUSE Linux Enterprise Server 11 SP1/11 SP2/11 SP3/12
Ubuntu and derivatives
~~~~~~~~~~~~~~~~~~~~~~
- Elementary OS 0.2 (based on Ubuntu 12.04)
- Linaro 12.04
- Linux Mint 13/14/16/17
- Trisquel GNU/Linux 6 (based on Ubuntu 12.04)
- Ubuntu 10.x/11.x/12.x/13.x/14.x/15.x/16.x
Other Linux distro
~~~~~~~~~~~~~~~~~~
- Arch Linux
- Gentoo
UNIX systems
~~~~~~~~~~~~
**BSD**:
- OpenBSD
- FreeBSD 9/10/11
**SunOS**:
- SmartOS
Example Usage
-------------
If you're looking for the *one-liner* to install Salt, please scroll to the
bottom and use the instructions for `Installing via an Insecure One-Liner`_
.. note::
In every two-step example, you would be well-served to examine the downloaded file
to ensure that it does what you expect.
The Salt Bootstrap script has a wide variety of options that can be passed as
well as several ways of obtaining the bootstrap script itself.
.. note::
The examples below show how to bootstrap Salt directly from GitHub or another Git repository.
Run the script without any parameters to get the latest stable Salt packages for your system from
the `SaltStack corporate repository`_. See the first example in the `Install using wget`_ section.
.. _`SaltStack corporate repository`: https://repo.saltstack.com/
Install using curl
~~~~~~~~~~~~~~~~~~
Using ``curl`` to install the latest development version from GitHub:
.. code-block:: bash
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh git develop
If you want to install a specific release version (based on the Git tags):
.. code-block:: bash
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh git v2015.8.8
To install a specific branch from a Git fork:
.. code-block:: bash
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh -g https://github.com/myuser/salt.git git mybranch
If all you want is to install a ``salt-master`` using the latest Git:
.. code-block:: bash
curl -o bootstrap-salt.sh -L https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh -M -N git develop
If your host has Internet access only via HTTP proxy:
.. code-block:: bash
PROXY='http://user:password@myproxy.example.com:3128'
curl -o bootstrap-salt.sh -L -x "$PROXY" https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh -G -H "$PROXY" git
Install using wget
~~~~~~~~~~~~~~~~~~
Using ``wget`` to install your distribution's stable packages:
.. code-block:: bash
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh
Downloading the script from the develop branch:
.. code-block:: bash
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com/develop
sudo sh bootstrap-salt.sh
Installing a specific version from git using ``wget``:
.. code-block:: bash
wget -O bootstrap-salt.sh https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh -P git v2015.8.8
.. note::
In the above example we added ``-P``, which allows pip packages to be installed if required, but
it is not a necessary flag for Git-based bootstraps.
Install using Python
~~~~~~~~~~~~~~~~~~~~
If you already have Python 2.6 installed, then it's as easy as:
.. code-block:: bash
python -m urllib "https://bootstrap.saltstack.com" > bootstrap-salt.sh
sudo sh bootstrap-salt.sh git develop
All Python 2 versions should support the following in-line code:
.. code-block:: bash
python -c 'import urllib; print urllib.urlopen("https://bootstrap.saltstack.com").read()' > bootstrap-salt.sh
sudo sh bootstrap-salt.sh git develop
Install using fetch
~~~~~~~~~~~~~~~~~~~
On a FreeBSD base system you usually don't have either of the above binaries available. You **do**
have ``fetch`` available though:
.. code-block:: bash
fetch -o bootstrap-salt.sh https://bootstrap.saltstack.com
sudo sh bootstrap-salt.sh
If you have any SSL issues install ``ca_root_nssp``:
.. code-block:: bash
pkg install ca_root_nssp
And either copy the certificates to the place where fetch can find them:
.. code-block:: bash
cp /usr/local/share/certs/ca-root-nss.crt /etc/ssl/cert.pem
Or link them to the right place:
.. code-block:: bash
ln -s /usr/local/share/certs/ca-root-nss.crt /etc/ssl/cert.pem
Installing via an Insecure One-Liner
------------------------------------
The following examples illustrate how to install Salt via a one-liner.
.. note::
Warning! These methods do not involve a verification step and assume that
the delivered file is trustworthy.
Any of the examples above which use two lines can be made to run in a single-line
configuration with minor modifications.
For example, using ``curl`` to install your distribution's stable packages:
.. code-block:: bash
curl -L https://bootstrap.saltstack.com | sudo sh
Using ``wget`` to install your distribution's stable packages:
.. code-block:: bash
wget -O - https://bootstrap.saltstack.com | sudo sh
Installing the latest develop branch of Salt:
.. code-block:: bash
curl -L https://bootstrap.saltstack.com | sudo sh -s -- git develop
Command Line Options
--------------------
Here's a summary of the command line options:
.. code-block:: text
$ sh bootstrap-salt.sh -h
Installation types:
- stable Install latest stable release. This is the default
install type
- stable [branch] Install latest version on a branch. Only supported
for packages available at repo.saltstack.com
- stable [version] Install a specific version. Only supported for
packages available at repo.saltstack.com
- daily Ubuntu specific: configure SaltStack Daily PPA
- testing RHEL-family specific: configure EPEL testing repo
- git Install from the head of the develop branch
- git [ref] Install from any git ref (such as a branch, tag, or
commit)
Examples:
- bootstrap-salt.sh
- bootstrap-salt.sh stable
- bootstrap-salt.sh stable 2017.7
- bootstrap-salt.sh stable 2017.7.2
- bootstrap-salt.sh daily
- bootstrap-salt.sh testing
- bootstrap-salt.sh git
- bootstrap-salt.sh git 2017.7
- bootstrap-salt.sh git v2017.7.2
- bootstrap-salt.sh git 06f249901a2e2f1ed310d58ea3921a129f214358
Options:
-h Display this message
-v Display script version
-n No colours
-D Show debug output
-c Temporary configuration directory
-g Salt Git repository URL. Default: https://github.com/saltstack/salt.git
-w Install packages from downstream package repository rather than
upstream, saltstack package repository. This is currently only
implemented for SUSE.
-k Temporary directory holding the minion keys which will pre-seed
the master.
-s Sleep time used when waiting for daemons to start, restart and when
checking for the services running. Default: 3
-L Also install salt-cloud and required python-libcloud package
-M Also install salt-master
-S Also install salt-syndic
-N Do not install salt-minion
-X Do not start daemons after installation
-d Disables checking if Salt services are enabled to start on system boot.
You can also do this by touching /tmp/disable_salt_checks on the target
host. Default: ${BS_FALSE}
-P Allow pip based installations. On some distributions the required salt
packages or its dependencies are not available as a package for that
distribution. Using this flag allows the script to use pip as a last
resort method. NOTE: This only works for functions which actually
implement pip based installations.
-U If set, fully upgrade the system prior to bootstrapping Salt
-I If set, allow insecure connections while downloading any files. For
example, pass '--no-check-certificate' to 'wget' or '--insecure' to
'curl'. On Debian and Ubuntu, using this option with -U allows one to obtain
GnuPG archive keys insecurely if distro has changed release signatures.
-F Allow copied files to overwrite existing (config, init.d, etc)
-K If set, keep the temporary files in the temporary directories specified
with -c and -k
-C Only run the configuration function. Implies -F (forced overwrite).
To overwrite Master or Syndic configs, -M or -S, respectively, must
also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-A Pass the salt-master DNS name or IP. This will be stored under
${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf
-i Pass the salt-minion id. This will be stored under
${BS_SALT_ETC_DIR}/minion_id
-p Extra-package to install while installing Salt dependencies. One package
per -p flag. You're responsible for providing the proper package name.
-H Use the specified HTTP proxy for all download URLs (including https://).
For example: http://myproxy.example.com:3128
-Z Enable additional package repository for newer ZeroMQ
(only available for RHEL/CentOS/Fedora/Ubuntu based distributions)
-b Assume that dependencies are already installed and software sources are
set up. If git is selected, git tree is still checked out as dependency
step.
-f Force shallow cloning for git installations.
This may result in an "n/a" in the version number.
-l Disable ssl checks. When passed, switches "https" calls to "http" where
possible.
-V Install Salt into virtualenv
(only available for Ubuntu based distributions)
-a Pip install all Python pkg dependencies for Salt. Requires -V to install
all pip pkgs into the virtualenv.
(Only available for Ubuntu based distributions)
-r Disable all repository configuration performed by this script. This
option assumes all necessary repository configuration is already present
on the system.
-R Specify a custom repository URL. Assumes the custom repository URL
points to a repository that mirrors Salt packages located at
repo.saltstack.com. The option passed with -R replaces the
"repo.saltstack.com". If -R is passed, -r is also set. Currently only
works on CentOS/RHEL and Debian based distributions.
-J Replace the Master config file with data passed in as a JSON string. If
a Master config file is found, a reasonable effort will be made to save
the file with a ".bak" extension. If used in conjunction with -C or -F,
no ".bak" file will be created as either of those options will force
a complete overwrite of the file.
-j Replace the Minion config file with data passed in as a JSON string. If
a Minion config file is found, a reasonable effort will be made to save
the file with a ".bak" extension. If used in conjunction with -C or -F,
no ".bak" file will be created as either of those options will force
a complete overwrite of the file.
-q Quiet salt installation from git (setup.py install -q)
-x Changes the python version used to install a git version of salt. Currently
this is considered experimental and has only been tested on Centos 6. This
only works for git installations.
-y Installs a different python version on host. Currently this has only been
tested with Centos 6 and is considered experimental. This will install the
ius repo on the box if disable repo is false. This must be used in conjunction
with -x <pythonversion>. For example:
sh bootstrap.sh -P -y -x python2.7 git v2016.11.3
The above will install python27 and install the git version of salt using the
python2.7 executable. This only works for git and pip installations.
The Salt Bootstrap script can be found in the Salt repo under the
``salt/cloud/deploy/bootstrap-salt.sh`` path. Any changes to this file
will be overwritten! Bug fixes and feature additions must be submitted
via the `Salt Bootstrap repo`_. Please see the Salt Bootstrap Script's
`Release Process`_ for more information.
.. _Salt Bootstrap repo: https://github.com/saltstack/salt-bootstrap
.. _README: https://github.com/saltstack/salt-bootstrap#bootstrapping-salt
.. _Contributing Guidelines: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md
.. _Release Process: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md#release-information

View File

@ -230,81 +230,7 @@ class IPv6InterfaceScoped(ipaddress.IPv6Interface, IPv6AddressScoped):
self.hostmask = self.network.hostmask
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return ipaddress.IPv4Address(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
log.debug('Error while parsing IPv4 address: %s', address)
log.debug(err)
try:
return IPv6AddressScoped(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
log.debug('Error while parsing IPv6 address: %s', address)
log.debug(err)
if isinstance(address, bytes):
raise ipaddress.AddressValueError('{} does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead '
'of a unicode object?'.format(repr(address)))
raise ValueError('{} does not appear to be an IPv4 or IPv6 address'.format(repr(address)))
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return ipaddress.IPv4Interface(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
log.debug('Error while getting IPv4 interface for address %s', address)
log.debug(err)
try:
return ipaddress.IPv6Interface(address)
except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as err:
log.debug('Error while getting IPv6 interface for address %s', address)
log.debug(err)
raise ValueError('{} does not appear to be an IPv4 or IPv6 interface'.format(address))
if ipaddress:
ipaddress.IPv6Address = IPv6AddressScoped
if sys.version_info.major == 2:
ipaddress.IPv6Interface = IPv6InterfaceScoped
ipaddress.ip_address = ip_address
ipaddress.ip_interface = ip_interface

View File

@ -283,12 +283,14 @@ def auth(username, password):
log.error('LDAP authentication requires python-ldap module')
return False
bind = None
# If bind credentials are configured, verify that we receive a valid bind
if _config('binddn', mandatory=False) and _config('bindpw', mandatory=False):
bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
search_bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False))
# If username & password are not None, attempt to verify they are valid
if bind and username and password:
if search_bind and username and password:
bind = _bind(username, password,
anonymous=_config('auth_by_group_membership_only', mandatory=False)
and _config('anonymous', mandatory=False))

View File

@ -41,6 +41,16 @@ def validate(config):
_config = {}
list(map(_config.update, config))
if 'emitatstartup' in _config:
if not isinstance(_config['emitatstartup'], bool):
return False, ('Configuration for load beacon option '
'emitatstartup must be a boolean.')
if 'onchangeonly' in _config:
if not isinstance(_config['onchangeonly'], bool):
return False, ('Configuration for load beacon option '
'onchangeonly must be a boolean.')
if 'averages' not in _config:
return False, ('Averages configuration is required'
' for load beacon.')
@ -61,6 +71,7 @@ def validate(config):
return False, ('Configuration for load beacon: '
'1m, 5m and 15m items must be '
'a list of two items.')
return True, 'Valid beacon configuration'
@ -118,7 +129,7 @@ def beacon(config):
if not LAST_STATUS:
for k in ['1m', '5m', '15m']:
LAST_STATUS[k] = avg_dict[k]
if not config['emitatstartup']:
if not _config['emitatstartup']:
log.debug("Don't emit because emitatstartup is False")
return ret

View File

@ -303,19 +303,23 @@ def main(argv): # pylint: disable=W0613
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
proc = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
stdout, _ = proc.communicate()
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors="replace"))
sys.stdout.flush()
retcode = proc.returncode
if OPTIONS.wipe:
shutil.rmtree(OPTIONS.saltdir)
elif OPTIONS.wipe:
subprocess.call(salt_argv)
retcode = subprocess.call(salt_argv)
shutil.rmtree(OPTIONS.saltdir)
else:
subprocess.call(salt_argv)
retcode = subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
os.umask(old_umask) # pylint: disable=blacklisted-function
return retcode
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -56,7 +56,7 @@ def execute(opts, data, func, args, kwargs):
'salt-call',
'--out', 'json',
'--metadata',
'-c', salt.syspaths.CONFIG_DIR,
'-c', opts.get('config_dir'),
'--',
data.get('fun')]
if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):

View File

@ -373,7 +373,7 @@ class Fileserver(object):
# Avoid error logging when performing lookups in the LazyDict by
# instead doing the membership check on the result of a call to its
# .keys() attribute rather than on the LaztDict itself.
# .keys() attribute rather than on the LazyDict itself.
server_funcs = self.servers.keys()
try:
subtract_only = all((x.startswith('-') for x in back))

View File

@ -909,7 +909,7 @@ def _virtual(osdata):
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if grains.get('productname', '') == 'HVM domU':
if osdata.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and \
@ -926,9 +926,8 @@ def _virtual(osdata):
elif isdir('/sys/bus/xen'):
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.listdir('/sys/bus/xen/drivers'):
# An actual DomU will have several drivers
# whereas a paravirt ops kernel will not.
elif os.path.isfile('/sys/bus/xen/drivers/xenconsole'):
# An actual DomU will have the xenconsole driver
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
@ -1174,25 +1173,34 @@ def _windows_platform_data():
os_release = platform.release()
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
server = {'Vista': '2008Server',
'7': '2008ServerR2',
'8': '2012Server',
'8.1': '2012ServerR2',
'10': '2016Server'}
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function
# started reporting the Desktop version instead of the Server version on
# Server versions of Windows, so we need to look those up
# So, if you find a Server Platform that's a key in the server
# dictionary, then lookup the actual Server Release.
# (Product Type 1 is Desktop, Everything else is Server)
if info['ProductType'] > 1 and os_release in server:
os_release = server[os_release]
service_pack = None
if info['ServicePackMajor'] > 0:
service_pack = ''.join(['SP', six.text_type(info['ServicePackMajor'])])
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
version = 'Unknown'
release = ''
if 'Server' in osinfo.Caption:
for item in osinfo.Caption.split(' '):
# If it's all digits, then it's version
if re.match(r'\d+', item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r'^R\d+$', item):
release = item
os_release = '{0}Server{1}'.format(version, release)
else:
for item in osinfo.Caption.split(' '):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item):
version = item
os_release = version
grains = {
'kernelrelease': _clean_value('kernelrelease', osinfo.Version),
'kernelversion': _clean_value('kernelversion', kernel_version),
@ -1365,6 +1373,7 @@ _OS_FAMILY_MAP = {
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'elementary': 'Debian',
'Univention': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian',

View File

@ -139,6 +139,18 @@ def static_loader(
return ret
def _format_entrypoint_target(ep):
'''
Makes a string describing the target of an EntryPoint object.
Based strongly on EntryPoint.__str__().
'''
s = ep.module_name
if ep.attrs:
s += ':' + '.'.join(ep.attrs)
return s
def _module_dirs(
opts,
ext_type,
@ -161,9 +173,13 @@ def _module_dirs(
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
for entry_point in pkg_resources.iter_entry_points('salt.loader', ext_type_dirs):
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
try:
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
except Exception as exc:
log.error("Error getting module directories from %s: %s", _format_entrypoint_target(entry_point), exc)
log.debug("Full backtrace for module directories error", exc_info=True)
cli_module_dirs = []
# The dirs can be any module dir, or a in-tree _{ext_type} dir

View File

@ -250,8 +250,12 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''

View File

@ -169,7 +169,13 @@ def cert(name,
res = __salt__['cmd.run_all'](' '.join(cmd))
if res['retcode'] != 0:
return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])}
if 'expand' in res['stderr']:
cmd.append('--expand')
res = __salt__['cmd.run_all'](' '.join(cmd))
if res['retcode'] != 0:
return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])}
else:
return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])}
if 'no action taken' in res['stdout']:
comment = 'Certificate {0} unchanged'.format(cert_file)

View File

@ -42,6 +42,8 @@ except ImportError:
# Import salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
@ -239,6 +241,7 @@ def execute(context=None, lens=None, commands=(), load_path=None):
'see debug log for details: {0}'.format(arg)
return ret
args = salt.utils.data.decode(args, to_str=True)
log.debug('%s: %s', method, args)
func = getattr(aug, method)
@ -492,7 +495,7 @@ def ls(path, load_path=None): # pylint: disable=C0103
def _match(path):
''' Internal match function '''
try:
matches = aug.match(path)
matches = aug.match(salt.utils.stringutils.to_str(path))
except RuntimeError:
return {}

View File

@ -393,7 +393,7 @@ def __within(within=None, errmsg=None, dtype=None):
def __space_delimited_list(value):
'''validate that a value contains one or more space-delimited values'''
if isinstance(value, str):
if isinstance(value, six.string_types):
value = value.strip().split()
if hasattr(value, '__iter__') and value != []:
@ -407,6 +407,7 @@ SALT_ATTR_TO_DEBIAN_ATTR_MAP = {
'search': 'dns-search',
'hwaddr': 'hwaddress', # TODO: this limits bootp functionality
'ipaddr': 'address',
'ipaddrs': 'addresses',
}
@ -423,6 +424,7 @@ IPV4_ATTR_MAP = {
'proto': __within(IPV4_VALID_PROTO, dtype=six.text_type),
# ipv4 static & manual
'address': __ipv4_quad,
'addresses': __anything,
'netmask': __ipv4_netmask,
'broadcast': __ipv4_quad,
'metric': __int,
@ -473,6 +475,7 @@ IPV6_ATTR_MAP = {
'proto': __within(IPV6_VALID_PROTO),
# ipv6 static & manual
'address': __ipv6,
'addresses': __anything,
'netmask': __ipv6_netmask,
'broadcast': __ipv6,
'gateway': __ipv6, # supports a colon-delimited list
@ -626,7 +629,12 @@ def _parse_interfaces(interface_files=None):
attrname = attr
(valid, value, errmsg) = _validate_interface_option(
attr, valuestr, addrfam)
iface_dict[attrname] = value
if attrname == 'address' and 'address' in iface_dict:
if 'addresses' not in iface_dict:
iface_dict['addresses'] = []
iface_dict['addresses'].append(value)
else:
iface_dict[attrname] = value
elif attr in _REV_ETHTOOL_CONFIG_OPTS:
if 'ethtool' not in iface_dict:

View File

@ -939,6 +939,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Ulimits':
val1 = _ulimit_sort(val1)
val2 = _ulimit_sort(val2)
if item == 'Env':
val1 = sorted(val1)
val2 = sorted(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container
@ -965,6 +968,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Ulimits':
val1 = _ulimit_sort(val1)
val2 = _ulimit_sort(val2)
if item == 'Env':
val1 = sorted(val1)
val2 = sorted(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret
@ -5617,6 +5623,7 @@ def pause(name):
.format(name))}
return _change_state(name, 'pause', 'paused')
freeze = salt.utils.functools.alias_function(pause, 'freeze')
@ -5819,6 +5826,7 @@ def unpause(name):
.format(name))}
return _change_state(name, 'unpause', 'running')
unfreeze = salt.utils.functools.alias_function(unpause, 'unfreeze')

View File

@ -793,6 +793,7 @@ def get_source_sum(file_name='',
ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
if ret is None:
_invalid_source_hash_format()
ret['hsum'] = ret['hsum'].lower()
return ret
else:
# The source_hash is a hash expression
@ -836,6 +837,7 @@ def get_source_sum(file_name='',
)
)
ret['hsum'] = ret['hsum'].lower()
return ret
@ -1694,6 +1696,8 @@ def _starts_till(src, probe, strip_comments=True):
if not src or not probe:
return no_match
src = src.rstrip('\n\r')
probe = probe.rstrip('\n\r')
if src == probe:
return equal
@ -2275,6 +2279,8 @@ def replace(path,
# Just search; bail as early as a match is found
if re.search(cpattern, r_data):
return True # `with` block handles file closure
else:
return False
else:
result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
@ -6001,6 +6007,7 @@ def list_backups(path, limit=None):
[files[x] for x in sorted(files, reverse=True)[:limit]]
)))
list_backup = salt.utils.functools.alias_function(list_backups, 'list_backup')
@ -6173,6 +6180,7 @@ def delete_backup(path, backup_id):
return ret
remove_backup = salt.utils.functools.alias_function(delete_backup, 'remove_backup')

View File

@ -5,6 +5,7 @@ Manage the information in the hosts file
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
# Import salt libs
@ -22,7 +23,12 @@ def __get_hosts_filename():
'''
Return the path to the appropriate hosts file
'''
return __salt__['config.option']('hosts.file')
try:
return __context__['hosts.__get_hosts_filename']
except KeyError:
__context__['hosts.__get_hosts_filename'] = \
__salt__['config.option']('hosts.file')
return __context__['hosts.__get_hosts_filename']
def _get_or_create_hostfile():
@ -43,26 +49,35 @@ def _list_hosts():
'''
Return the hosts found in the hosts file in as an OrderedDict
'''
count = 0
hfn = __get_hosts_filename()
ret = odict.OrderedDict()
if not os.path.isfile(hfn):
try:
return __context__['hosts._list_hosts']
except KeyError:
count = 0
hfn = __get_hosts_filename()
ret = odict.OrderedDict()
try:
with salt.utils.files.fopen(hfn) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line).strip()
if not line:
continue
if line.startswith('#'):
ret.setdefault('comment-{0}'.format(count), []).append(line)
count += 1
continue
if '#' in line:
line = line[:line.index('#')].strip()
comps = line.split()
ip = comps.pop(0)
ret.setdefault(ip, []).extend(comps)
except (IOError, OSError) as exc:
salt.utils.files.process_read_exception(exc, hfn, ignore=errno.ENOENT)
# Don't set __context__ since we weren't able to read from the
# hosts file.
return ret
__context__['hosts._list_hosts'] = ret
return ret
with salt.utils.files.fopen(hfn) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line).strip()
if not line:
continue
if line.startswith('#'):
ret.setdefault('comment-{0}'.format(count), []).append(line)
count += 1
continue
if '#' in line:
line = line[:line.index('#')].strip()
comps = line.split()
ip = comps.pop(0)
ret.setdefault(ip, []).extend(comps)
return ret
def list_hosts():
@ -133,7 +148,10 @@ def has_pair(ip, alias):
salt '*' hosts.has_pair <ip> <alias>
'''
hosts = _list_hosts()
return ip in hosts and alias in hosts[ip]
try:
return alias in hosts[ip]
except KeyError:
return False
def set_host(ip, alias):
@ -157,6 +175,9 @@ def set_host(ip, alias):
if not os.path.isfile(hfn):
return False
# Make sure future calls to _list_hosts() will re-read the file
__context__.pop('hosts._list_hosts', None)
line_to_add = salt.utils.stringutils.to_bytes(
ip + '\t\t' + alias + os.linesep
)
@ -203,6 +224,8 @@ def rm_host(ip, alias):
'''
if not has_pair(ip, alias):
return True
# Make sure future calls to _list_hosts() will re-read the file
__context__.pop('hosts._list_hosts', None)
hfn = _get_or_create_hostfile()
with salt.utils.files.fopen(hfn, 'rb') as fp_:
lines = fp_.readlines()
@ -251,6 +274,10 @@ def add_host(ip, alias):
return True
hosts = _list_hosts()
# Make sure future calls to _list_hosts() will re-read the file
__context__.pop('hosts._list_hosts', None)
inserted = False
for i, h in six.iteritems(hosts):
for j in range(len(h)):

View File

@ -10,11 +10,11 @@ Module to provide icinga2 compatibility to salt.
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import subprocess
# Import Salt libs
import salt.utils.path
import salt.utils.platform
from salt.utils.icinga2 import get_certs_path
log = logging.getLogger(__name__)
@ -32,18 +32,6 @@ def __virtual__():
return (False, 'Icinga2 not installed.')
def _execute(cmd, ret_code=False):
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if ret_code:
return process.wait()
output, error = process.communicate()
if output:
log.debug(output)
return output
log.debug(error)
return error
def generate_ticket(domain):
'''
Generate and save an icinga2 ticket.
@ -58,7 +46,7 @@ def generate_ticket(domain):
salt '*' icinga2.generate_ticket domain.tld
'''
result = _execute(["icinga2", "pki", "ticket", "--cn", domain])
result = __salt__['cmd.run_all'](["icinga2", "pki", "ticket", "--cn", domain], python_shell=False)
return result
@ -76,7 +64,7 @@ def generate_cert(domain):
salt '*' icinga2.generate_cert domain.tld
'''
result = _execute(["icinga2", "pki", "new-cert", "--cn", domain, "--key", "/etc/icinga2/pki/{0}.key".format(domain), "--cert", "/etc/icinga2/pki/{0}.crt".format(domain)], ret_code=True)
result = __salt__['cmd.run_all'](["icinga2", "pki", "new-cert", "--cn", domain, "--key", "{0}{1}.key".format(get_certs_path(), domain), "--cert", "{0}{1}.crt".format(get_certs_path(), domain)], python_shell=False)
return result
@ -94,8 +82,8 @@ def save_cert(domain, master):
salt '*' icinga2.save_cert domain.tld master.domain.tld
'''
result = _execute(["icinga2", "pki", "save-cert", "--key", "/etc/icinga2/pki/{0}.key".format(domain), "--cert", "/etc/icinga2/pki/{0}.cert".format(domain), "--trustedcert",
"/etc/icinga2/pki/trusted-master.crt", "--host", master], ret_code=True)
result = __salt__['cmd.run_all'](["icinga2", "pki", "save-cert", "--key", "{0}{1}.key".format(get_certs_path(), domain), "--cert", "{0}{1}.cert".format(get_certs_path(), domain), "--trustedcert",
"{0}trusted-master.crt".format(get_certs_path()), "--host", master], python_shell=False)
return result
@ -114,8 +102,8 @@ def request_cert(domain, master, ticket, port):
salt '*' icinga2.request_cert domain.tld master.domain.tld TICKET_ID
'''
result = _execute(["icinga2", "pki", "request", "--host", master, "--port", port, "--ticket", ticket, "--key", "/etc/icinga2/pki/{0}.key".format(domain), "--cert",
"/etc/icinga2/pki/{0}.crt".format(domain), "--trustedcert", "/etc/icinga2/pki/trusted-master.crt", "--ca", "/etc/icinga2/pki/ca.crt"], ret_code=True)
result = __salt__['cmd.run_all'](["icinga2", "pki", "request", "--host", master, "--port", port, "--ticket", ticket, "--key", "{0}{1}.key".format(get_certs_path(), domain), "--cert",
"{0}{1}.crt".format(get_certs_path(), domain), "--trustedcert", "{0}trusted-master.crt".format(get_certs_path()), "--ca", "{0}ca.crt".format(get_certs_path())], python_shell=False)
return result
@ -134,6 +122,6 @@ def node_setup(domain, master, ticket):
salt '*' icinga2.node_setup domain.tld master.domain.tld TICKET_ID
'''
result = _execute(["icinga2", "node", "setup", "--ticket", ticket, "--endpoint", master, "--zone", domain, "--master_host", master, "--trustedcert", "/etc/icinga2/pki/trusted-master.crt"],
ret_code=True)
result = __salt__['cmd.run_all'](["icinga2", "node", "setup", "--ticket", ticket, "--endpoint", master, "--zone", domain, "--master_host", master, "--trustedcert", "{0}trusted-master.crt".format(get_certs_path())],
python_shell=False)
return result

View File

@ -332,7 +332,7 @@ def _connect(**kwargs):
try:
dbc = MySQLdb.connect(**connargs)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return None
@ -648,7 +648,7 @@ def query(database, query, **connection_args):
try:
affected = _execute(cur, query)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -773,7 +773,7 @@ def status(**connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return {}
@ -804,7 +804,7 @@ def version(**connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return ''
@ -837,7 +837,7 @@ def slave_lag(**connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return -3
@ -922,7 +922,7 @@ def db_list(**connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return []
@ -1011,7 +1011,7 @@ def db_tables(name, **connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return []
@ -1046,7 +1046,7 @@ def db_exists(name, **connection_args):
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1101,7 +1101,7 @@ def db_create(name, character_set=None, collate=None, **connection_args):
log.info('DB \'%s\' created', name)
return True
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1137,7 +1137,7 @@ def db_remove(name, **connection_args):
try:
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1169,7 +1169,7 @@ def user_list(**connection_args):
qry = 'SELECT User,Host FROM mysql.user'
_execute(cur, qry)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return []
@ -1247,12 +1247,12 @@ def user_exists(user,
args['password'] = password_hash
if run_verify:
if not verify_login(user, host, password):
if not verify_login(user, password, **connection_args):
return False
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1284,7 +1284,7 @@ def user_info(user, host='localhost', **connection_args):
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1384,7 +1384,7 @@ def user_create(user,
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1499,7 +1499,7 @@ def user_chpass(user,
try:
result = _execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1554,7 +1554,7 @@ def user_remove(user,
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1772,13 +1772,13 @@ def user_grants(user,
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
ret = []
results = cur.fetchall()
results = salt.utils.data.decode(cur.fetchall())
for grant in results:
tmp = grant[0].split(' IDENTIFIED BY')[0]
if 'WITH GRANT OPTION' in grant[0] and 'WITH GRANT OPTION' not in tmp:
@ -1886,7 +1886,7 @@ def grant_add(grant,
try:
_execute(cur, qry['qry'], qry['args'])
except (MySQLdb.OperationalError, MySQLdb.ProgrammingError) as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -1960,7 +1960,7 @@ def grant_revoke(grant,
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc)
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
@ -2235,7 +2235,7 @@ def showglobal(**connection_args):
return rtnv
def verify_login(user, host='localhost', password=None, **connection_args):
def verify_login(user, password=None, **connection_args):
'''
Attempt to login using the provided credentials.
If successful, return true. Otherwise, return False.
@ -2244,11 +2244,10 @@ def verify_login(user, host='localhost', password=None, **connection_args):
.. code-block:: bash
salt '*' mysql.verify_login root localhost password
salt '*' mysql.verify_login root password
'''
# Override the connection args
# Override the connection args for username and password
connection_args['connection_user'] = user
connection_args['connection_host'] = host
connection_args['connection_pass'] = password
dbc = _connect(**connection_args)

View File

@ -14,6 +14,7 @@ import datetime
import re
# Import salt libs
import salt.utils.data
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
@ -53,9 +54,9 @@ def _get_proc_cmdline(proc):
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.cmdline() if PSUTIL2 else proc.cmdline
return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return ''
return []
def _get_proc_create_time(proc):
@ -65,7 +66,7 @@ def _get_proc_create_time(proc):
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.create_time() if PSUTIL2 else proc.create_time
return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return None
@ -77,7 +78,7 @@ def _get_proc_name(proc):
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.name() if PSUTIL2 else proc.name
return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return []
@ -89,7 +90,7 @@ def _get_proc_status(proc):
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.status() if PSUTIL2 else proc.status
return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return None
@ -101,7 +102,7 @@ def _get_proc_username(proc):
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return proc.username() if PSUTIL2 else proc.username
return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username)
except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):
return None

View File

@ -637,7 +637,7 @@ def apply_(mods=None, **kwargs):
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
@ -680,11 +680,11 @@ def apply_(mods=None, **kwargs):
.. code-block:: bash
# Run the states configured in salt://test.sls (or salt://test/init.sls)
salt '*' state.apply test
# Run the states configured in salt://test.sls (or salt://test/init.sls)
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
salt '*' state.apply stuff
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
# and salt://pkgs.sls (or salt://pkgs/init.sls).
salt '*' state.apply test,pkgs
salt '*' state.apply stuff,pkgs
The following additional arguments are also accepted when applying
individual SLS files:
@ -704,7 +704,7 @@ def apply_(mods=None, **kwargs):
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
@ -755,7 +755,7 @@ def apply_(mods=None, **kwargs):
.. code-block:: bash
salt '*' state.apply test localconfig=/path/to/minion.yml
salt '*' state.apply stuff localconfig=/path/to/minion.yml
sync_mods
If specified, the desired custom module types will be synced prior to
@ -763,8 +763,8 @@ def apply_(mods=None, **kwargs):
.. code-block:: bash
salt '*' state.apply test sync_mods=states,modules
salt '*' state.apply test sync_mods=all
salt '*' state.apply stuff sync_mods=states,modules
salt '*' state.apply stuff sync_mods=all
.. note::
This option is ignored when no SLS files are specified, as a
@ -792,8 +792,8 @@ def request(mods=None,
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs
salt '*' state.request stuff
salt '*' state.request stuff,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
@ -929,7 +929,7 @@ def highstate(test=None, queue=False, **kwargs):
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
@ -1109,7 +1109,7 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override existing Pillar values set via
@ -1195,8 +1195,8 @@ def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
.. code-block:: bash
salt '*' state.sls test sync_mods=states,modules
salt '*' state.sls test sync_mods=all
salt '*' state.sls stuff sync_mods=states,modules
salt '*' state.sls stuff sync_mods=all
.. versionadded:: 2017.7.8,2018.3.3,Fluorine
@ -1727,7 +1727,7 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
.. code-block:: bash
salt '*' state.show_low_sls test pillar='{"foo": "bar"}'
salt '*' state.show_low_sls stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via

View File

@ -25,7 +25,9 @@ from salt.ext import six
HAS_LIBS = False
try:
import twilio
if twilio.__version__ > 5:
# Grab version, ensure elements are ints
twilio_version = tuple([int(x) for x in twilio.__version_info__])
if twilio_version > (5, ):
TWILIO_5 = False
from twilio.rest import Client as TwilioRestClient
from twilio.rest import TwilioException as TwilioRestException

View File

@ -10,6 +10,7 @@ import re
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
import salt.utils.win_lgpo_netsh
# Define the module's virtual name
__virtualname__ = 'firewall'
@ -285,7 +286,7 @@ def delete_rule(name=None,
salt '*' firewall.delete_rule 'test' '8080' 'tcp' 'in'
# Delete the incoming tcp port 8000 from 192.168.0.1 in the rule named
# 'test_remote_ip`
# 'test_remote_ip'
salt '*' firewall.delete_rule 'test_remote_ip' '8000' 'tcp' 'in' '192.168.0.1'
# Delete all rules for local port 80:
@ -342,3 +343,436 @@ def rule_exists(name):
return True
except CommandExecutionError:
return False
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewalls state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt * firewall.get_settings domain firewallpolicy
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt * firewall.get_settings domain firewallpolicy lgpo
'''
return salt.utils.win_lgpo_netsh.get_settings(profile=profile,
section=section,
store=store)
def get_all_settings(domain, store='local'):
'''
Gets all the properties for the specified profile in the specified store
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
CLI Example:
.. code-block:: bash
# Get all firewall settings for connections on the domain profile
salt * firewall.get_all_settings domain
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt * firewall.get_all_settings domain lgpo
'''
return salt.utils.win_lgpo_netsh.get_all_settings(profile=domain,
store=store)
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
CLI Example:
.. code-block:: bash
# Get all firewall settings for all profiles
salt * firewall.get_all_profiles
# Get all firewall settings for all profiles as defined by local group
# policy
salt * firewall.get_all_profiles lgpo
'''
return salt.utils.win_lgpo_netsh.get_all_profiles(store=store)
def set_firewall_settings(profile, inbound=None, outbound=None, store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Set the inbound setting for the domain profile to block inbound
# connections
salt * firewall.set_firewall_settings profile='domain' inbound='blockinbound'
# Set the outbound setting for the domain profile to allow outbound
# connections
salt * firewall.set_firewall_settings profile='domain' outbound='allowoutbound'
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt * firewall.set_firewall_settings profile='domain' inbound='blockinbound' outbound='allowoutbound' store='lgpo'
'''
return salt.utils.win_lgpo_netsh.set_firewall_settings(profile=profile,
inbound=inbound,
outbound=outbound,
store=store)
def set_logging_settings(profile, setting, value, store='local'):
r'''
Configure logging settings for the Windows firewall.
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Log allowed connections and set that in local group policy
salt * firewall.set_logging_settings domain allowedconnections enable lgpo
# Don't log dropped connections
salt * firewall.set_logging_settings profile=private setting=droppedconnections value=disable
# Set the location of the log file
salt * firewall.set_logging_settings domain filename C:\windows\logs\firewall.log
# You can also use environment variables
salt * firewall.set_logging_settings domain filename %systemroot%\system32\LogFiles\Firewall\pfirewall.log
# Set the max file size of the log to 2048 Kb
salt * firewall.set_logging_settings domain maxfilesize 2048
'''
return salt.utils.win_lgpo_netsh.set_logging_settings(profile=profile,
setting=setting,
value=value,
store=store)
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Merge local rules with those distributed through group policy
salt * firewall.set_settings domain localfirewallrules enable
# Allow remote management of Windows Firewall
salt * firewall.set_settings domain remotemanagement enable
'''
return salt.utils.win_lgpo_netsh.set_settings(profile=profile,
setting=setting,
value=value,
store=store)
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Turn the firewall off when the domain profile is active
salt '*' firewall.set_state domain off
# Turn the firewall on when the public profile is active and set that in
# the local group policy
salt '*' firewall.set_state public on lgpo
'''
return salt.utils.win_lgpo_netsh.set_state(profile=profile,
state=state,
store=store)

File diff suppressed because it is too large Load Diff

93
salt/modules/win_wusa.py Normal file
View File

@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
'''
Microsoft Update files management via wusa.exe
:maintainer: Thomas Lemarchand
:platform: Windows
:depends: PowerShell
.. versionadded:: Neon
'''
# Import python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.platform
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'win_wusa'
def __virtual__():
'''
Load only on Windows
'''
if not salt.utils.platform.is_windows():
return False, 'Only available on Windows systems'
powershell_info = __salt__['cmd.shell_info'](shell='powershell', list_modules=False)
if not powershell_info['installed']:
return False, 'PowerShell not available'
return __virtualname__
def is_installed(kb):
'''
Check if a specific KB is installed.
CLI Example:
.. code-block:: bash
salt '*' win_wusa.is_installed KB123456
'''
get_hotfix_result = __salt__['cmd.powershell_all']('Get-HotFix -Id {0}'.format(kb), ignore_retcode=True)
return get_hotfix_result['retcode'] == 0
def install(path):
'''
Install a KB from a .msu file.
Some KBs will need a reboot, but this function does not manage it.
You may have to manage the reboot yourself after installation.
CLI Example:
.. code-block:: bash
salt '*' win_wusa.install C:/temp/KB123456.msu
'''
return __salt__['cmd.run_all']('wusa.exe {0} /quiet /norestart'.format(path), ignore_retcode=True)
def uninstall(kb):
'''
Uninstall a specific KB.
CLI Example:
.. code-block:: bash
salt '*' win_wusa.uninstall KB123456
'''
return __salt__['cmd.run_all']('wusa.exe /uninstall /kb:{0} /quiet /norestart'.format(kb[2:]), ignore_retcode=True)
def list_kbs():
'''
Return a list of dictionaries, one dictionary for each installed KB.
The HotFixID key contains the ID of the KB.
CLI Example:
.. code-block:: bash
salt '*' win_wusa.list_kbs
'''
return __salt__['cmd.powershell']('Get-HotFix')
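The three commands above compose naturally: check whether the KB is present, install the ``.msu`` if it is not, and handle any reboot separately. A minimal sketch of that flow driven from Python on the minion via ``salt.client.Caller`` (the KB number and file path are hypothetical):
.. code-block:: python

    # Minimal sketch: install a KB on the local Windows minion if missing.
    # 'KB123456' and the .msu path are placeholder examples.
    import salt.client

    caller = salt.client.Caller()
    if not caller.cmd('win_wusa.is_installed', 'KB123456'):
        result = caller.cmd('win_wusa.install', r'C:\temp\KB123456.msu')
        # wusa.exe exits with 3010 (ERROR_SUCCESS_REBOOT_REQUIRED) when a
        # reboot is still needed
        print(result['retcode'], result.get('stderr', ''))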

View File

@ -502,6 +502,8 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
'''
# timeout all the futures
self.timeout_futures()
# clear local_client objects to disconnect event publisher's IOStream connections
del self.saltclients
def on_connection_close(self):
'''
@ -931,14 +933,27 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
'''
Dispatch local client commands
'''
# Generate jid before triggering a job to subscribe all returns from minions
chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts)
# Generate jid and find all minions before triggering a job to subscribe all returns from minions
chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid']
minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob')))
def subscribe_minion(minion):
salt_evt = self.application.event_listener.get_event(
self,
tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion),
matcher=EventListener.exact_matcher)
syndic_evt = self.application.event_listener.get_event(
self,
tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion),
matcher=EventListener.exact_matcher)
return salt_evt, syndic_evt
# start listening for the event before we fire the job to avoid races
events = [
self.application.event_listener.get_event(self, tag='salt/job/'+chunk['jid']),
self.application.event_listener.get_event(self, tag='syndic/job/'+chunk['jid']),
]
events = []
for minion in minions:
salt_evt, syndic_evt = subscribe_minion(minion)
events.append(salt_evt)
events.append(syndic_evt)
f_call = self._format_call_run_job_async(chunk)
# fire a job off
@ -954,6 +969,12 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
pass
raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.')
# get_event for missing minion
for minion in list(set(pub_data['minions']) - set(minions)):
salt_evt, syndic_evt = subscribe_minion(minion)
events.append(salt_evt)
events.append(syndic_evt)
# Map of minion_id -> returned for all minions we think we need to wait on
minions = {m: False for m in pub_data['minions']}
@ -1008,7 +1029,10 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
cancel_inflight_futures()
raise tornado.gen.Return(chunk_ret)
continue
f_result = f.result()
if f in events:
events.remove(f)
# if this is a start, then we need to add it to the pile
if f_result['tag'].endswith('/new'):
for minion_id in f_result['data']['minions']:
@ -1018,7 +1042,6 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
chunk_ret[f_result['data']['id']] = f_result['data']['return']
# clear finished event future
minions[f_result['data']['id']] = True
# if there are no more minions to wait for, then we are done
if not more_todo() and min_wait_time.done():
cancel_inflight_futures()
@ -1027,11 +1050,6 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
except TimeoutException:
pass
if f == events[0]:
events[0] = self.application.event_listener.get_event(self, tag='salt/job/'+chunk['jid'])
else:
events[1] = self.application.event_listener.get_event(self, tag='syndic/job/'+chunk['jid'])
@tornado.gen.coroutine
def job_not_running(self, jid, tgt, tgt_type, minions, is_finished):
'''

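The change above replaces the two broad ``salt/job/<jid>`` subscriptions with one exact-match subscription per expected minion, registered before the job is fired so no return can slip past the listener. A small sketch of the tag scheme being assumed (the jid and minion IDs are made up):
.. code-block:: python

    # Build the exact event tags the handler waits on for a single job:
    # one salt/ and one syndic/ return tag per minion, as in the diff above.
    def return_tags(jid, minions):
        tags = []
        for minion in minions:
            tags.append('salt/job/{0}/ret/{1}'.format(jid, minion))
            tags.append('syndic/job/{0}/ret/{1}'.format(jid, minion))
        return tags

    print(return_tags('20181227120049000000', ['web01', 'db01']))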
View File

@ -327,6 +327,29 @@ mountpoint to ``web/`` (and restart the ``salt-master`` daemon).
:conf_master:`git_pillar_includes` is not disabled.
- Content from mounted git_pillar repos can only be referenced by a top
file in the same pillar environment.
- Salt versions prior to 2018.3.4 ignore the ``root`` parameter when
``mountpoint`` is set.
.. _git-pillar-all_saltenvs:
all_saltenvs
~~~~~~~~~~~~
.. versionadded:: 2018.3.4
When ``__env__`` is specified as the branch name, the ``all_saltenvs`` per-remote configuration parameter overrides the logic Salt uses to map branches/tags to pillar environments (i.e. pillarenvs). This allows a single branch/tag to appear in all saltenvs. Example:
.. code-block:: yaml
ext_pillar:
- git:
- __env__ https://mydomain.tld/top.git
- all_saltenvs: master
- __env__ https://mydomain.tld/pillar-nginx.git:
- mountpoint: web/server/
- __env__ https://mydomain.tld/pillar-appdata.git:
- mountpoint: web/server/
'''
from __future__ import absolute_import, print_function, unicode_literals
@ -346,7 +369,7 @@ from salt.pillar import Pillar
from salt.ext import six
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
PER_REMOTE_ONLY = ('name', 'mountpoint')
PER_REMOTE_ONLY = ('name', 'mountpoint', 'all_saltenvs')
GLOBAL_ONLY = ('base', 'branch')
# Set up logging

View File

@ -1,8 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=================
Nodegroups Pillar
=================

View File

@ -364,8 +364,8 @@ def key_value_to_tree(data):
for flatkey, value in six.iteritems(data):
t = tree
keys = flatkey.split(__opts__['pepa_delimiter'])
for key in keys:
if key == keys[-1]:
for i, key in enumerate(keys, 1):
if i == len(keys):
t[key] = value
else:
t = t.setdefault(key, {})
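The switch to ``enumerate`` above means only the final path segment stores the value; the old ``key == keys[-1]`` comparison also fired on any intermediate segment that happened to equal the last one. A standalone sketch of the corrected loop, with ``'..'`` standing in for the configured ``pepa_delimiter``:
.. code-block:: python

    # Standalone version of the loop above; '..' stands in for pepa_delimiter.
    def key_value_to_tree(data, delimiter='..'):
        tree = {}
        for flatkey, value in data.items():
            t = tree
            keys = flatkey.split(delimiter)
            for i, key in enumerate(keys, 1):
                if i == len(keys):
                    t[key] = value               # last segment: store the value
                else:
                    t = t.setdefault(key, {})    # intermediate segment: descend
        return tree

    print(key_value_to_tree({'region..host..region': 'eu-west'}))
    # {'region': {'host': {'region': 'eu-west'}}}
    # The old keys[-1] comparison would instead have produced
    # {'region': 'eu-west', 'host': {'region': 'eu-west'}}.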

View File

@ -29,19 +29,20 @@ def __virtual__():
def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Node definitions path will be retrieved from args - or set to default -
then added to 'salt_data' dict that is passed to the 'get_pillars' function.
'salt_data' dict is a convenient way to pass all the required datas to the function
It contains:
- __opts__
- __salt__
- __grains__
- __pillar__
- minion_id
- path
If successfull the function will return a pillar dict for minion_id
Compile pillar data
'''
# Node definitions path will be retrieved from args (or set to default),
# then added to 'salt_data' dict that is passed to the 'get_pillars'
# function. The dictionary contains:
# - __opts__
# - __salt__
# - __grains__
# - __pillar__
# - minion_id
# - path
#
# If successful, the function will return a pillar dict for minion_id.
# If path has not been set, make a default
for i in args:
if 'path' not in i:

View File

@ -47,8 +47,8 @@ execution functions, grains, pillar, etc. They are:
``/srv/salt/foo/bar/baz.sls``, then ``__sls__`` in that file will be
``foo.bar.baz``.
The global context ``data`` (same as context ``{{ data }}`` for states written
with Jinja + YAML). The following YAML + Jinja state declaration:
When writing a reactor SLS file, the global context ``data`` (the same ``{{ data }}``
context available to states written with Jinja + YAML) is available. The following YAML + Jinja state declaration:
.. code-block:: jinja

View File

@ -137,7 +137,10 @@ def _fetch_option(cfg, ret_config, virtualname, attr_name):
if not ret_config:
# Using the default configuration key
if isinstance(cfg, dict):
return c_cfg.get(attr_name, cfg.get(default_cfg_key))
if default_cfg_key in cfg:
return cfg[default_cfg_key]
else:
return c_cfg.get(attr_name)
else:
return c_cfg.get(attr_name, cfg(default_cfg_key))

View File

@ -1736,14 +1736,15 @@ class State(object):
try:
ret = self.states[cdata['full']](*cdata['args'],
**cdata['kwargs'])
except Exception:
except Exception as exc:
log.debug('An exception occurred in this state: %s', exc,
exc_info_on_loglevel=logging.DEBUG)
trb = traceback.format_exc()
ret = {
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
'comment': 'An exception occurred in this state: {0}'.format(trb)
}
utc_finish_time = datetime.datetime.utcnow()
@ -1919,7 +1920,9 @@ class State(object):
self.states.inject_globals = {}
if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
ret.update(self._run_check_cmd(low))
except Exception:
except Exception as exc:
log.debug('An exception occurred in this state: %s', exc,
exc_info_on_loglevel=logging.DEBUG)
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
@ -1934,8 +1937,7 @@ class State(object):
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
'comment': 'An exception occurred in this state: {0}'.format(trb)
}
finally:
if low.get('__prereq__'):

View File

@ -202,6 +202,12 @@ def set_(name, path):
path
is the location of one of the alternative target files.
(e.g. /usr/bin/less)
.. code-block:: yaml
foo:
alternatives.set:
- path: /usr/bin/foo-2.0
'''
ret = {'name': name,
'path': path,

View File

@ -16,8 +16,6 @@
# limitations under the License.
r'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
Execution of Ansible modules from within states
===============================================

View File

@ -84,12 +84,18 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None, use_literal_
'changes': {},
'comment': ''}
try:
fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id)
except Exception as exc:
ret['result'] = False
ret['comment'] = six.text_type(exc)
return ret
if 'test' in __opts__ and __opts__['test'] is True:
fetch_result = {}
fetch_result['status'] = True
fetch_result['comment'] = 'Artifact would be downloaded from URL: {0}'.format(artifact['artifactory_url'])
fetch_result['changes'] = {}
else:
try:
fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id)
except Exception as exc:
ret['result'] = False
ret['comment'] = six.text_type(exc)
return ret
log.debug('fetch_result = %s', fetch_result)

View File

@ -5,11 +5,6 @@ Management of Gentoo configuration using eselect
A state module to manage Gentoo configuration via eselect
.. code-block:: yaml
profile:
eselect.set:
target: hardened/linux/amd64
'''
# Import Python libs
@ -44,6 +39,11 @@ def set_(name, target, module_parameter=None, action_parameter=None):
action_parameter
additional params passed to the defined action
.. code-block:: yaml
profile:
eselect.set:
- target: hardened/linux/amd64
'''
ret = {'changes': {},
'comment': '',

View File

@ -126,7 +126,6 @@ __virtualname__ = 'etcd'
# Function aliases
__func_alias__ = {
'set_': 'set',
'rm_': 'rm'
}
# Import third party libs
@ -147,12 +146,13 @@ def __virtual__():
def set_(name, value, profile=None, **kwargs):
'''
Set a key in etcd and can be called as ``set``.
Set a key in etcd
name
The etcd key name, for example: ``/foo/bar/baz``.
value
The value the key should contain.
profile
Optional, defaults to ``None``. Sets the etcd profile to use which has
been defined in the Salt Master config.
@ -263,14 +263,16 @@ def directory(name, profile=None, **kwargs):
return rtn
def rm_(name, recurse=False, profile=None, **kwargs):
def rm(name, recurse=False, profile=None, **kwargs):
'''
Deletes a key from etcd. This function is also aliased as ``rm``.
Deletes a key from etcd
name
The etcd key name to remove, for example ``/foo/bar/baz``.
recurse
Optional, defaults to ``False``. If ``True`` performs a recursive delete.
profile
Optional, defaults to ``None``. Sets the etcd profile to use which has
been defined in the Salt Master config.
@ -353,7 +355,7 @@ def mod_watch(name, **kwargs):
# Watch to rm etcd key
if kwargs.get('sfun') in ['wait_rm_key', 'wait_rm']:
return rm_(
return rm(
name,
kwargs.get('profile'))

View File

@ -677,6 +677,7 @@ def _check_directory(name,
group=None,
recurse=False,
mode=None,
file_mode=None,
clean=False,
require=False,
exclude_pat=None,
@ -712,6 +713,7 @@ def _check_directory(name,
if check_files:
for fname in files:
fchange = {}
mode = file_mode
path = os.path.join(root, fname)
stats = __salt__['file.stats'](
path, None, follow_symlinks
@ -720,6 +722,8 @@ def _check_directory(name,
fchange['user'] = user
if group is not None and group != stats.get('group'):
fchange['group'] = group
if mode is not None and mode != stats.get('mode'):
fchange['mode'] = mode
if fchange:
changes[path] = fchange
if check_dirs:
@ -3119,8 +3123,8 @@ def directory(name,
win_perms_reset=win_perms_reset)
else:
presult, pcomment, pchanges = _check_directory(
name, user, group, recurse or [], dir_mode, clean, require,
exclude_pat, max_depth, follow_symlinks)
name, user, group, recurse or [], dir_mode, file_mode, clean,
require, exclude_pat, max_depth, follow_symlinks)
if pchanges:
ret['pchanges'].update(pchanges)

View File

@ -16,6 +16,9 @@ you can specify what ruby version and gemset to target.
'''
from __future__ import absolute_import, unicode_literals, print_function
import salt.utils
import re
import logging
log = logging.getLogger(__name__)
@ -84,10 +87,29 @@ def installed(name, # pylint: disable=C0103
'Use of argument ruby found, but neither rvm or rbenv is installed'
)
gems = __salt__['gem.list'](name, ruby, gem_bin=gem_bin, runas=user)
if name in gems and version is not None and str(version) in gems[name]:
ret['result'] = True
ret['comment'] = 'Gem is already installed.'
return ret
if name in gems and version is not None:
match = re.match(r'(>=|>|<|<=)', version)
if match:
# Grab the comparison
cmpr = match.group()
# Clear out 'default:' and any whitespace
installed_version = re.sub('default: ', '', gems[name][0]).strip()
# Clear out comparison from version and whitespace
desired_version = re.sub(cmpr, '', version).strip()
if salt.utils.compare_versions(installed_version,
cmpr,
desired_version):
ret['result'] = True
ret['comment'] = 'Installed Gem meets version requirements.'
return ret
else:
if str(version) in gems[name]:
ret['result'] = True
ret['comment'] = 'Gem is already installed.'
return ret
elif name in gems and version is None:
ret['result'] = True
ret['comment'] = 'Gem is already installed.'
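The new branch above accepts a requirement such as ``'>= 12.0'`` instead of only an exact version string. A minimal standalone sketch of that comparison, with ``distutils.version.LooseVersion`` standing in for the Salt version-comparison helper:
.. code-block:: python

    # Sketch only: LooseVersion is a stand-in for salt.utils.compare_versions.
    import re
    from distutils.version import LooseVersion

    def gem_version_satisfied(installed, requested):
        # 'installed' as reported by gem.list, e.g. 'default: 12.3.3'
        match = re.match(r'(>=|<=|>|<)', requested)   # longest operators first
        if not match:
            return str(requested) in installed
        cmpr = match.group()
        have = LooseVersion(re.sub('default: ', '', installed).strip())
        want = LooseVersion(re.sub(cmpr, '', requested).strip())
        return {'>=': have >= want, '>': have > want,
                '<': have < want, '<=': have <= want}[cmpr]

    print(gem_version_satisfied('default: 12.3.3', '>= 12.0'))   # True
    print(gem_version_satisfied('default: 12.3.3', '< 12.0'))    # False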

View File

@ -281,8 +281,8 @@ def latest(name,
identity=None,
https_user=None,
https_pass=None,
onlyif=False,
unless=False,
onlyif=None,
unless=None,
refspec_branch='*',
refspec_tag='*',
output_encoding=None,
@ -2197,8 +2197,8 @@ def detached(name,
identity=None,
https_user=None,
https_pass=None,
onlyif=False,
unless=False,
onlyif=None,
unless=None,
output_encoding=None,
**kwargs):
'''
@ -3409,18 +3409,65 @@ def mod_run_check(cmd_kwargs, onlyif, unless):
Otherwise, returns ``True``
'''
cmd_kwargs = copy.deepcopy(cmd_kwargs)
cmd_kwargs['python_shell'] = True
if onlyif:
if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
cmd_kwargs.update({
'use_vt': False,
'bg': False,
'ignore_retcode': True,
'python_shell': True,
})
if onlyif is not None:
if not isinstance(onlyif, list):
onlyif = [onlyif]
for command in onlyif:
if not isinstance(command, six.string_types) and command:
# Boolean or some other non-string which resolves to True
continue
try:
if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0:
# Command exited with a zero retcode
continue
except Exception as exc:
log.exception(
'The following onlyif command raised an error: %s',
command
)
return {
'comment': 'onlyif raised error ({0}), see log for '
'more details'.format(exc),
'result': False
}
return {'comment': 'onlyif condition is false',
'skip_watch': True,
'result': True}
if unless:
if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
if unless is not None:
if not isinstance(unless, list):
unless = [unless]
for command in unless:
if not isinstance(command, six.string_types) and not command:
# Boolean or some other non-string which resolves to False
break
try:
if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0:
# Command exited with a non-zero retcode
break
except Exception as exc:
log.exception(
'The following unless command raised an error: %s',
command
)
return {
'comment': 'unless raised error ({0}), see log for '
'more details'.format(exc),
'result': False
}
else:
return {'comment': 'unless condition is true',
'skip_watch': True,
'result': True}
# No reason to stop, return True
return True

View File

@ -247,6 +247,7 @@ def absent(name, orgname=None, profile='grafana'):
_IGNORED_DASHBOARD_FIELDS = [
'id',
'uid',
'originalTitle',
'version',
]

View File

@ -67,7 +67,7 @@ from salt.ext import six
import salt.utils.validate.net
def present(name, ip): # pylint: disable=C0103
def present(name, ip, clean=False): # pylint: disable=C0103
'''
Ensures that the named host is present with the given ip
@ -75,36 +75,92 @@ def present(name, ip): # pylint: disable=C0103
The host to assign an ip to
ip
The ip addr(s) to apply to the host
The ip addr(s) to apply to the host. Can be a single IP or a list of IP
addresses.
clean : False
Remove any entries which don't match those configured in the ``ip``
option.
.. versionadded:: 2018.3.4
'''
ret = {'name': name,
'changes': {},
'result': None,
'result': None if __opts__['test'] else True,
'comment': ''}
if not isinstance(ip, list):
ip = [ip]
all_hosts = __salt__['hosts.list_hosts']()
comments = []
for _ip in ip:
if __salt__['hosts.has_pair'](_ip, name):
ret['result'] = True
comments.append('Host {0} ({1}) already present'.format(name, _ip))
to_add = set()
to_remove = set()
# First check for IPs not currently in the hosts file
to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
# Now sweep through the hosts file and look for entries matching either the
# IP address(es) or hostname.
for addr, aliases in six.iteritems(all_hosts):
if addr not in ip:
if name in aliases:
# Found match for hostname, but the corresponding IP is not in
# our list, so we need to remove it.
if clean:
to_remove.add((addr, name))
else:
ret.setdefault('warnings', []).append(
'Host {0} present for IP address {1}. To get rid of '
'this warning, either run this state with \'clean\' '
'set to True to remove {0} from {1}, or add {1} to '
'the \'ip\' argument.'.format(name, addr)
)
else:
if __opts__['test']:
comments.append('Host {0} ({1}) needs to be added/updated'.format(name, _ip))
if name in aliases:
# No changes needed for this IP address and hostname
comments.append(
'Host {0} ({1}) already present'.format(name, addr)
)
else:
if salt.utils.validate.net.ipv4_addr(_ip) or salt.utils.validate.net.ipv6_addr(_ip):
if __salt__['hosts.add_host'](_ip, name):
ret['changes'] = {'host': name}
ret['result'] = True
comments.append('Added host {0} ({1})'.format(name, _ip))
else:
ret['result'] = False
comments.append('Failed to set host')
# IP address listed in hosts file, but hostname is not present.
# We will need to add it.
if salt.utils.validate.net.ip_addr(addr):
to_add.add((addr, name))
else:
ret['result'] = False
comments.append('Invalid IP Address for {0} ({1})'.format(name, _ip))
comments.append(
'Invalid IP Address for {0} ({1})'.format(name, addr)
)
for addr, name in to_add:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be added'.format(name, addr)
)
else:
if __salt__['hosts.add_host'](addr, name):
comments.append('Added host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to add host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name)
for addr, name in to_remove:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be removed'.format(name, addr)
)
else:
if __salt__['hosts.rm_host'](addr, name):
comments.append('Removed host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to remove host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name)
ret['comment'] = '\n'.join(comments)
return ret
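The rewritten state above boils down to a set reconciliation: work out which ``(ip, hostname)`` pairs are missing and, when ``clean`` is set, which stale pairs should be dropped. A standalone sketch of just that planning step (the sample host data is made up):
.. code-block:: python

    # Plan the add/remove sets the state above works from.
    def plan_host_changes(name, desired_ips, all_hosts, clean=False):
        to_add = set((addr, name) for addr in desired_ips
                     if addr not in all_hosts)
        to_remove = set()
        for addr, aliases in all_hosts.items():
            if addr in desired_ips:
                if name not in aliases:
                    to_add.add((addr, name))
            elif name in aliases and clean:
                to_remove.add((addr, name))
        return to_add, to_remove

    current = {'127.0.0.1': ['localhost'], '10.1.0.5': ['web01']}
    print(plan_host_changes('web01', ['10.1.0.7'], current, clean=True))
    # ({('10.1.0.7', 'web01')}, {('10.1.0.5', 'web01')})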

View File

@ -27,6 +27,7 @@ import os.path
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
@ -103,8 +104,9 @@ def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
return ret
# Executing the command.
ticket = __salt__['icinga2.generate_ticket'](name).strip()
if ticket:
ticket_res = __salt__['icinga2.generate_ticket'](name)
ticket = ticket_res['stdout']
if not ticket_res['retcode']:
ret['comment'] = six.text_type(ticket)
if output == 'grain':
@ -140,8 +142,8 @@ def generate_cert(name):
'changes': {},
'result': True,
'comment': ''}
cert = "/etc/icinga2/pki/{0}.crt".format(name)
key = "/etc/icinga2/pki/{0}.key".format(name)
cert = "{0}{1}.crt".format(get_certs_path(), name)
key = "{0}{1}.key".format(get_certs_path(), name)
# Checking if execution is needed.
if os.path.isfile(cert) and os.path.isfile(key):
@ -154,7 +156,7 @@ def generate_cert(name):
# Executing the command.
cert_save = __salt__['icinga2.generate_cert'](name)
if not cert_save:
if not cert_save['retcode']:
ret['comment'] = "Certificate and key generated"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
@ -175,7 +177,7 @@ def save_cert(name, master):
'changes': {},
'result': True,
'comment': ''}
cert = "/etc/icinga2/pki/trusted-master.crt"
cert = "{0}trusted-master.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
@ -188,7 +190,7 @@ def save_cert(name, master):
# Executing the command.
cert_save = __salt__['icinga2.save_cert'](name, master)
if not cert_save:
if not cert_save['retcode']:
ret['comment'] = "Certificate for icinga2 master saved"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
return ret
@ -214,7 +216,7 @@ def request_cert(name, master, ticket, port="5665"):
'changes': {},
'result': True,
'comment': ''}
cert = "/etc/icinga2/pki/ca.crt"
cert = "{0}ca.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
@ -227,12 +229,12 @@ def request_cert(name, master, ticket, port="5665"):
# Executing the command.
cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
if not cert_request:
if not cert_request['retcode']:
ret['comment'] = "Certificate request from icinga2 master executed"
ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
return ret
ret['comment'] = "FAILED. Certificate requested failed with exit code: {0}".format(cert_request)
ret['comment'] = "FAILED. Certificate request failed with output: {0}".format(cert_request['stdout'])
ret['result'] = False
return ret
@ -254,8 +256,8 @@ def node_setup(name, master, ticket):
'changes': {},
'result': True,
'comment': ''}
cert = "/etc/icinga2/pki/{0}.crt.orig".format(name)
key = "/etc/icinga2/pki/{0}.key.orig".format(name)
cert = "{0}{1}.crt.orig".format(get_certs_path(), name)
key = "{0}{1}.key.orig".format(get_certs_path(), name)
# Checking if execution is needed.
if os.path.isfile(cert) and os.path.isfile(key):
@ -268,11 +270,11 @@ def node_setup(name, master, ticket):
# Executing the command.
node_setup = __salt__['icinga2.node_setup'](name, master, ticket)
if not node_setup:
if not node_setup['retcode']:
ret['comment'] = "Node setup executed."
ret['changes']['cert'] = "Node setup finished successfully."
return ret
ret['comment'] = "FAILED. Node setup failed with exit code: {0}".format(node_setup)
ret['comment'] = "FAILED. Node setup failed with output: {0}".format(node_setup['stdout'])
ret['result'] = False
return ret

View File

@ -62,20 +62,20 @@ def set_(name, key, value, setting=None, conf_file=_DEFAULT_CONF):
.. code-block:: yaml
logrotate-rotate:
logrotate.set:
- key: rotate
- value: 2
logrotate.set:
- key: rotate
- value: 2
Example of usage specifying all available arguments:
.. code-block:: yaml
logrotate-wtmp-rotate:
logrotate.set:
- key: /var/log/wtmp
- value: rotate
- setting: 2
- conf_file: /etc/logrotate.conf
logrotate.set:
- key: /var/log/wtmp
- value: rotate
- setting: 2
- conf_file: /etc/logrotate.conf
'''
ret = {'name': name,
'changes': dict(),

View File

@ -9,12 +9,30 @@ all interfaces are ignored unless specified.
.. note::
Prior to version 2014.1.0, only RedHat-based systems (RHEL,
CentOS, Scientific Linux, etc.) are supported. Support for Debian/Ubuntu is
new in 2014.1.0 and should be considered experimental.
RedHat-based systems (RHEL, CentOS, Scientific, etc.)
have been supported since version 2014.1.0.
Debian-based systems (Debian, Ubuntu, etc.) have been
supported since version 2017.7.0. The following options
are not supported: ipaddr_start and ipaddr_end.
Other platforms are not yet supported.
.. note::
On Debian-based systems, networking configuration can be specified
in `/etc/network/interfaces` or via included files such as (by default)
`/etc/network/interfaces.d/*`. This can be problematic for configuration
management. It is recommended to use either `file.managed` *or*
`network.managed`.
If using `network.managed`, it can be useful to ensure `interfaces.d/`
is empty. This can be done using:
/etc/network/interfaces.d:
file.directory:
- clean: True
.. code-block:: yaml
system:
@ -31,9 +49,17 @@ all interfaces are ignored unless specified.
network.managed:
- enabled: True
- type: eth
- proto: none
- ipaddr: 10.1.0.1
- proto: static
- ipaddr: 10.1.0.7
- netmask: 255.255.255.0
- gateway: 10.1.0.1
- enable_ipv6: true
- ipv6proto: static
- ipv6ipaddrs:
- 2001:db8:dead:beef::3/64
- 2001:db8:dead:beef::7/64
- ipv6gateway: 2001:db8:dead:beef::1
- ipv6netmask: 64
- dns:
- 8.8.8.8
- 8.8.4.4
@ -121,12 +147,11 @@ all interfaces are ignored unless specified.
- type: bond
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- mode: active-backup
- mode: gre
- proto: static
- dns:
- 8.8.8.8
- 8.8.4.4
- ipv6:
- enabled: False
- slaves: eth2 eth3
- require:
@ -202,6 +227,62 @@ all interfaces are ignored unless specified.
- require:
- network: eth4
eth6:
network.managed:
- type: eth
- noifupdown: True
# IPv4
- proto: static
- ipaddr: 192.168.4.9
- netmask: 255.255.255.0
- gateway: 192.168.4.1
- enable_ipv6: True
# IPv6
- ipv6proto: static
- ipv6ipaddr: 2001:db8:dead:c0::3
- ipv6netmask: 64
- ipv6gateway: 2001:db8:dead:c0::1
# override shared; makes those options v4-only
- ipv6ttl: 15
# Shared
- mtu: 1480
- ttl: 18
- dns:
- 8.8.8.8
- 8.8.4.4
eth7:
- type: eth
- proto: static
- ipaddr: 10.1.0.7
- netmask: 255.255.255.0
- gateway: 10.1.0.1
- enable_ipv6: True
- ipv6proto: static
- ipv6ipaddr: 2001:db8:dead:beef::3
- ipv6netmask: 64
- ipv6gateway: 2001:db8:dead:beef::1
- noifupdown: True
eth8:
network.managed:
- enabled: True
- type: eth
- proto: static
- enable_ipv6: true
- ipv6proto: static
- ipv6ipaddrs:
- 2001:db8:dead:beef::3/64
- 2001:db8:dead:beef::7/64
- ipv6gateway: 2001:db8:dead:beef::1
- ipv6netmask: 64
- dns:
- 8.8.8.8
- 8.8.4.4
system:
network.system:
- enabled: True
@ -217,17 +298,11 @@ all interfaces are ignored unless specified.
network.managed:
- name: lo
- type: eth
- proto: loopback
- onboot: yes
- userctl: no
- ipv6_autoconf: no
- enable_ipv6: true
- ipaddrs:
- 127.0.0.1/8
- 10.1.0.4/32
- 10.1.0.12/32
- ipv6addrs:
- fc00::1/128
- fc00::100/128
.. note::
Apply changes to hostname immediately.

View File

@ -422,9 +422,16 @@ def running(name,
else:
before_toggle_enable_status = True
unmask_ret = {'comment': ''}
if unmask:
unmask_ret = unmasked(name, unmask_runtime)
# See if the service is already running
if before_toggle_status:
ret['comment'] = 'The service {0} is already running'.format(name)
ret['comment'] = '\n'.join(
[_f for _f in ['The service {0} is already running'.format(name),
unmask_ret['comment']] if _f]
)
if enable is True and not before_toggle_enable_status:
ret.update(_enable(name, None, **kwargs))
elif enable is False and before_toggle_enable_status:
@ -434,7 +441,9 @@ def running(name,
# Run the tests
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Service {0} is set to start'.format(name)
ret['comment'] = '\n'.join(
[_f for _f in ['Service {0} is set to start'.format(name),
unmask_ret['comment']] if _f])
return ret
# Conditionally add systemd-specific args to call to service.start
@ -494,6 +503,9 @@ def running(name,
.format(ret['comment'], init_delay)
)
if unmask:
ret['comment'] = '\n'.join([ret['comment'], unmask_ret['comment']])
return ret

View File

@ -338,6 +338,7 @@ def dead(name,
else:
# process name doesn't exist
ret['comment'] = "Service {0} doesn't exist".format(name)
return ret
if is_stopped is True:
ret['comment'] = "Service {0} is not running".format(name)

View File

@ -141,13 +141,13 @@ def _changes(name,
change['empty_password'] = True
if date is not None and lshad['lstchg'] != date:
change['date'] = date
if mindays and mindays is not 0 and lshad['min'] != mindays:
if mindays is not None and lshad['min'] != mindays:
change['mindays'] = mindays
if maxdays and maxdays is not 999999 and lshad['max'] != maxdays:
if maxdays is not None and lshad['max'] != maxdays:
change['maxdays'] = maxdays
if inactdays and inactdays is not 0 and lshad['inact'] != inactdays:
if inactdays is not None and lshad['inact'] != inactdays:
change['inactdays'] = inactdays
if warndays and warndays is not 7 and lshad['warn'] != warndays:
if warndays is not None and lshad['warn'] != warndays:
change['warndays'] = warndays
if expire and lshad['expire'] != expire:
change['expire'] = expire

View File

@ -252,9 +252,18 @@ def set_(name,
if policy_data and policy_data['output_section'] in current_policy:
for policy_name, policy_setting in six.iteritems(policy_data['requested_policy']):
currently_set = False
# Check case-sensitive match first (faster)
if policy_name in current_policy[policy_data['output_section']]:
currently_set = True
pol_id = policy_name
# Check case insensitive
elif policy_name.lower() in (k.lower() for k in current_policy[policy_data['output_section']]):
for p_name in current_policy[policy_data['output_section']]:
if policy_name.lower() == p_name.lower():
currently_set = True
pol_id = policy_name
break
# Check aliases
else:
for alias in policy_data['policy_lookup'][policy_name]['policy_aliases']:
log.debug('checking alias %s', alias)
@ -308,13 +317,13 @@ def set_(name,
policy_changes.append(policy_name)
else:
if additional_policy_comments:
ret['comment'] = '"{0}" is already set ({1}).\n'.format(policy_name, ', '.join(additional_policy_comments))
ret['comment'] = '"{0}" is already set ({1})\n'.format(policy_name, ', '.join(additional_policy_comments))
else:
ret['comment'] = '"{0}" is already set.\n'.format(policy_name) + ret['comment']
ret['comment'] = '"{0}" is already set\n'.format(policy_name) + ret['comment']
else:
log.debug('%s current setting matches '
'the requested setting', policy_name)
ret['comment'] = '"{0}" is already set.\n'.format(policy_name) + ret['comment']
ret['comment'] = '"{0}" is already set\n'.format(policy_name) + ret['comment']
else:
policy_changes.append(policy_name)
log.debug('policy %s is not set, we will configure it',
@ -322,7 +331,7 @@ def set_(name,
if __opts__['test']:
if policy_changes:
ret['result'] = None
ret['comment'] = 'The following policies are set to change:\n{0}.'.format(
ret['comment'] = 'The following policies are set to change:\n{0}'.format(
'\n'.join(policy_changes))
else:
ret['comment'] = 'All specified policies are properly configured'
@ -334,13 +343,17 @@ def set_(name,
adml_language=adml_language)
if _ret:
ret['result'] = _ret
ret['comment'] = 'The following policies changed:\n{0}.'.format(
'\n'.join(policy_changes))
ret['changes'] = salt.utils.dictdiffer.deep_diff(
current_policy,
__salt__['lgpo.get'](policy_class=policy_class,
adml_language=adml_language,
hierarchical_return=False))
if ret['changes']:
ret['comment'] = 'The following policies changed:\n{0}' \
''.format('\n'.join(policy_changes))
else:
ret['comment'] = 'The following policies are in the correct state:\n{0}' \
''.format('\n'.join(policy_changes))
else:
ret['result'] = False
ret['comment'] = 'Errors occurred while attempting to configure policies: {0}'.format(_ret)

115
salt/states/win_wusa.py Normal file
View File

@ -0,0 +1,115 @@
# -*- coding: utf-8 -*-
'''
Microsoft Updates (KB) Management
This module provides the ability to enforce KB installations
from files (.msu), without WSUS.
.. versionadded:: Neon
'''
# Import python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.platform
import salt.utils.url
import salt.exceptions
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'win_wusa'
def __virtual__():
'''
Load only on Windows
'''
if not salt.utils.platform.is_windows():
return False, 'Only available on Windows systems'
return __virtualname__
def installed(name, source):
'''
Enforce the installed state of a KB
name
Name of the Windows KB ("KB123456")
source
Source of .msu file corresponding to the KB
'''
ret = {
'name': name,
'changes': {},
'result': False,
'comment': '',
}
# Start with basic error-checking. Do all the passed parameters make sense
# and agree with each other?
if not name or not source:
raise salt.exceptions.SaltInvocationError(
'Arguments "name" and "source" are mandatory.')
# Check the current state of the system. Does anything need to change?
current_state = __salt__['win_wusa.is_installed'](name)
if current_state:
ret['result'] = True
ret['comment'] = 'KB already installed'
return ret
# The state of the system does need to be changed. Check if we're running
# in ``test=true`` mode.
if __opts__['test'] is True:
ret['comment'] = 'The KB "{0}" will be installed.'.format(name)
ret['changes'] = {
'old': current_state,
'new': True,
}
# Return ``None`` when running with ``test=true``.
ret['result'] = None
return ret
try:
result = __states__['file.cached'](source,
skip_verify=True,
saltenv=__env__)
except Exception as exc:
msg = 'Failed to cache {0}: {1}'.format(
salt.utils.url.redact_http_basic_auth(source),
exc.__str__())
log.exception(msg)
ret['comment'] = msg
return ret
if result['result']:
# Get the path of the file in the minion cache
cached = __salt__['cp.is_cached'](source, saltenv=__env__)
else:
log.debug(
'failed to download %s',
salt.utils.url.redact_http_basic_auth(source)
)
return result
# Finally, make the actual change and return the result.
new_state = __salt__['win_wusa.install'](cached)
ret['comment'] = 'The KB "{0}" was installed!'.format(name)
ret['changes'] = {
'old': current_state,
'new': new_state,
}
ret['result'] = True
return ret

View File

@ -127,7 +127,6 @@ log = logging.getLogger(__name__)
__func_alias__ = {
'import_': 'import',
'export_': 'export',
}
# Define the state's virtual name
@ -623,7 +622,7 @@ def halted(name, graceful=True):
return ret
def export_(name, path, replace=False):
def export(name, path, replace=False):
'''
Export a zones configuration
@ -760,6 +759,11 @@ def import_(name, path, mode='import', nodataset=False, brand_opts=None):
``install``: will import and then try to install the zone
``attach``: will import and then try to attach of the zone
.. code-block:: yaml
omipkg1:
zone.import:
- path: /foo/bar/baz
'''
ret = {'name': name,
'changes': {},

View File

@ -5,7 +5,8 @@
{% if interface.hwaddress %} hwaddress {{interface.hwaddress}}
{%endif%}{% if interface.vlan_raw_device %} vlan-raw-device {{interface.vlan_raw_device}}
{%endif%}{% if interface.address %} address {{interface.address}}
{%endif%}{% if interface.netmask %} netmask {{interface.netmask}}
{%endif%}{% if interface.addresses %}{%for addr in interface.addresses %} address {{addr}}
{%endfor%}{%endif%}{% if interface.netmask %} netmask {{interface.netmask}}
{%endif%}{% if interface.broadcast %} broadcast {{interface.broadcast}}
{%endif%}{% if interface.metric %} metric {{interface.metric}}
{%endif%}{% if interface.gateway %} gateway {{interface.gateway}}
@ -30,8 +31,12 @@
{%endif%}{% if interface.unit %} unit {{interface.unit}}
{%endif%}{% if interface.options %} options {{interface.options}}
{%endif%}{% if interface.master %} bond-master {{interface.master}}
{%endif%}{% if interface.dns_nameservers %} dns-nameservers {%for item in interface.dns_nameservers %}{{item}} {%endfor%}
{%endif%}{% if interface.dns_search %} dns-search {% for item in interface.dns_search %}{{item}} {%endfor%}
{%endif%}{% if interface.dns_nameservers %} dns-nameservers {%
if interface.dns_nameservers is string %}{{ interface.dns_nameservers }}{%
else %}{{ interface.dns_nameservers|join(" ") }}{% endif %}
{%endif%}{% if interface.dns_search %} dns-search {%
if interface.dns_search is string %}{{interface.dns_search }}{%
else %}{{ interface.dns_search|join(" ") }}{% endif %}
{%endif%}{% if interface.ethtool %}{%for item in interface.ethtool_keys %} {{item}} {{interface.ethtool[item]}}
{%endfor%}{%endif%}{% if interface.bonding %}{%for item in interface.bonding_keys %} bond-{{item}} {{interface.bonding[item]}}
{%endfor%}{%endif%}{% if interface.bridging %}{%for item in interface.bridging_keys %} bridge_{{item}} {{interface.bridging[item]}}
@ -69,7 +74,8 @@
{%endif%}{% if interface.dhcp %} dhcp {{interface.dhcp}}{# END V6ONLOPTS #}
{%endif%}{% if interface.vlan_raw_device %} vlan-raw-device {{interface.vlan_raw_device}}
{%endif%}{% if interface.address %} address {{interface.address}}
{%endif%}{% if interface.netmask %} netmask {{interface.netmask}}
{%endif%}{% if interface.addresses %}{% for addr in interface.addresses %} address {{addr}}
{%endfor%}{%endif%}{% if interface.netmask %} netmask {{interface.netmask}}
{%endif%}{% if interface.broadcast %} broadcast {{interface.broadcast}}
{%endif%}{% if interface.metric %} metric {{interface.metric}}
{%endif%}{% if interface.gateway %} gateway {{interface.gateway}}
@ -94,8 +100,12 @@
{%endif%}{% if interface.unit %} unit {{interface.unit}}
{%endif%}{% if interface.options %} options {{interface.options}}
{%endif%}{% if interface.master %} bond-master {{interface.master}}
{%endif%}{% if interface.dns_nameservers %} dns-nameservers {%for item in interface.dns_nameservers %}{{item}} {%endfor%}
{%endif%}{% if interface.dns_search %} dns-search {% for item in interface.dns_search %}{{item}} {%endfor%}
{%endif%}{% if interface.dns_nameservers %} dns-nameservers {%
if interface.dns_nameservers is string %}{{ interface.dns_nameservers }}{%
else %}{{ interface.dns_nameservers|join(" ") }}{% endif %}
{%endif%}{% if interface.dns_search %} dns-search {%
if interface.dns_search is string %}{{interface.dns_search }}{%
else %}{{ interface.dns_search|join(" ") }}{% endif %}
{%endif%}{% if interface.ethtool %}{%for item in interface.ethtool_keys %} {{item}} {{interface.ethtool[item]}}
{%endfor%}{%endif%}{% if interface.bonding %}{%for item in interface.bonding_keys %} bond-{{item}} {{interface.bonding[item]}}
{%endfor%}{%endif%}{% if interface.bridging %}{%for item in interface.bridging_keys %} bridge_{{item}} {{interface.bridging[item]}}

View File

@ -108,7 +108,7 @@ Saltclass Examples
``<saltclass_path>/nodes/lausanne/qls.node1.yml``
.. code-block:: yaml
.. code-block:: jinja
environment: base
@ -228,19 +228,20 @@ def __virtual__():
def top(**kwargs):
'''
Node definitions path will be retrieved from __opts__ - or set to default -
then added to 'salt_data' dict that is passed to the 'get_tops' function.
'salt_data' dict is a convenient way to pass all the required datas to the function
It contains:
- __opts__
- empty __salt__
- __grains__
- empty __pillar__
- minion_id
- path
If successfull the function will return a top dict for minion_id
Compile tops
'''
# Node definitions path will be retrieved from args (or set to default),
# then added to 'salt_data' dict that is passed to the 'get_tops'
# function. The dictionary contains:
# - __opts__
# - __salt__
# - __grains__
# - __pillar__
# - minion_id
# - path
#
# If successful, the function will return a top dict for minion_id.
# If path has not been set, make a default
_opts = __opts__['master_tops']['saltclass']
if 'path' not in _opts:

View File

@ -13,6 +13,7 @@ import signal
import hashlib
import logging
import weakref
import threading
from random import randint
# Import Salt Libs
@ -760,6 +761,9 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
Encapsulate synchronous operations for a publisher channel
'''
_sock_data = threading.local()
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
@ -795,9 +799,11 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_sock.setsockopt(zmq.LINGER, -1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
@ -860,15 +866,14 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
raise exc
except KeyboardInterrupt:
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
log.trace('Publish daemon caught Keyboard interrupt, tearing down')
# Cleanly close the sockets if we're shutting down
if pub_sock.closed is False:
pub_sock.close()
if pull_sock.closed is False:
pull_sock.close()
if context.closed is False:
context.term()
def pre_fork(self, process_manager, kwargs=None):
'''
@ -880,23 +885,29 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
@property
def pub_sock(self):
'''
Publish "load" to minions
:param dict load: A load to be sent across the wire to minions
This thread's zmq publisher socket. This socket is stored on the class
so that multiple instantiations in the same thread will re-use a single
zmq socket.
'''
payload = {'enc': 'aes'}
try:
return self._sock_data.sock
except AttributeError:
pass
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
def pub_connect(self):
'''
Create and connect this thread's zmq socket. If a publisher socket
already exists "pub_close" is called before creating and connecting a
new socket.
'''
if self.pub_sock:
self.pub_close()
ctx = zmq.Context.instance()
self._sock_data.sock = ctx.socket(zmq.PUSH)
self.pub_sock.setsockopt(zmq.LINGER, -1)
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_publish_pull', 4514)
@ -905,7 +916,33 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
log.debug("Connecting to pub server: %s", pull_uri)
self.pub_sock.connect(pull_uri)
return self._sock_data.sock
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock')
def publish(self, load):
'''
Publish "load" to minions. This sends the load to the publisher daemon
process, which does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
@ -928,12 +965,11 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
pub_sock.send(payload)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
log.debug('Sent payload to publish daemon.')
pub_sock.close()
context.term()
class AsyncReqMessageClientPool(salt.transport.MessageClientPool):
'''

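The core of the change above is caching one PUSH socket per thread on a ``threading.local`` so that repeated ``publish()`` calls stop creating and tearing down a zmq context every time. The pattern in isolation (the "socket" here is just a placeholder object, not a real zmq socket):
.. code-block:: python

    import threading

    class PerThreadPublisher(object):
        # Class-level storage; each thread sees its own 'sock' attribute.
        _sock_data = threading.local()

        def _connect(self):
            # Stand-in for creating and connecting a zmq.PUSH socket.
            return object()

        @property
        def pub_sock(self):
            return getattr(self._sock_data, 'sock', None)

        def pub_connect(self):
            if self.pub_sock is not None:
                self.pub_close()
            self._sock_data.sock = self._connect()
            return self._sock_data.sock

        def pub_close(self):
            if hasattr(self._sock_data, 'sock'):
                delattr(self._sock_data, 'sock')

        def publish(self, load):
            # Re-use this thread's cached socket; connect lazily on first use.
            if self.pub_sock is None:
                self.pub_connect()
            return self.pub_sock   # real code would serialize and send 'load'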
View File

@ -5,9 +5,16 @@ Decorators for salt.state
:codeauthor: :email:`Bo Maryniuk (bo@suse.de)`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.stringutils
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
class OutputUnifier(object):
def __init__(self, *policies):
@ -24,12 +31,14 @@ class OutputUnifier(object):
for pls in self.policies:
try:
result = pls(result)
except Exception as ex:
except Exception as exc:
log.debug('An exception occurred in this state: %s', exc,
exc_info_on_loglevel=logging.DEBUG)
result = {
'result': False,
'name': 'later',
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(ex)
'comment': 'An exception occurred in this state: {0}'.format(exc)
}
return result
return _func
@ -75,7 +84,9 @@ class OutputUnifier(object):
:return:
'''
if isinstance(result.get('comment'), list):
result['comment'] = '\n'.join([str(elm) for elm in result['comment']])
result['comment'] = u'\n'.join([
salt.utils.stringutils.to_unicode(elm) for elm in result['comment']
])
if result.get('result') is not None:
result['result'] = bool(result['result'])

View File

@ -15,8 +15,7 @@ except ImportError:
# Import 3rd-party libs
import copy
import logging
from salt.ext import six
from salt.serializers.yamlex import merge_recursive as _yamlex_merge_recursive
import salt.ext.six as six
log = logging.getLogger(__name__)
@ -94,6 +93,7 @@ def merge_recurse(obj_a, obj_b, merge_lists=False):
def merge_aggregate(obj_a, obj_b):
from salt.serializers.yamlex import merge_recursive as _yamlex_merge_recursive
return _yamlex_merge_recursive(obj_a, obj_b, level=1)

View File

@ -205,10 +205,22 @@ def rename(src, dst):
os.rename(src, dst)
def process_read_exception(exc, path):
def process_read_exception(exc, path, ignore=None):
'''
Common code for raising exceptions when reading a file fails
The ignore argument can be an iterable of integer error codes (or a single
integer error code) that should be ignored.
'''
if ignore is not None:
if isinstance(ignore, six.integer_types):
ignore = (ignore,)
else:
ignore = ()
if exc.errno in ignore:
return
if exc.errno == errno.ENOENT:
raise CommandExecutionError('{0} does not exist'.format(path))
elif exc.errno == errno.EACCES:

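With the new ``ignore`` argument a caller can treat selected errnos as benign instead of having them turned into an exception. A short usage sketch (the path is hypothetical and assumed optional):
.. code-block:: python

    import errno

    import salt.utils.files

    path = '/etc/example/optional.conf'   # hypothetical optional file
    data = ''
    try:
        with salt.utils.files.fopen(path) as fh_:
            data = fh_.read()
    except (IOError, OSError) as exc:
        # ENOENT is tolerated and simply returns; other errors still raise.
        salt.utils.files.process_read_exception(exc, path, ignore=errno.ENOENT)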
View File

@ -584,10 +584,20 @@ class GitProvider(object):
'''
cleaned = []
cmd_str = 'git remote prune origin'
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env = os.environ.copy()
env[b"LANGUAGE"] = b"C"
env[b"LC_ALL"] = b"C"
cmd = subprocess.Popen(
shlex.split(cmd_str),
close_fds=not salt.utils.platform.is_windows(),
cwd=os.path.dirname(self.gitdir),
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = cmd.communicate()[0]
@ -984,6 +994,11 @@ class GitProvider(object):
Resolve dynamically-set branch
'''
if self.role == 'git_pillar' and self.branch == '__env__':
try:
return self.all_saltenvs
except AttributeError:
# all_saltenvs not configured for this remote
pass
target = self.opts.get('pillarenv') \
or self.opts.get('saltenv') \
or 'base'
@ -1598,6 +1613,10 @@ class Pygit2(GitProvider):
will let the calling function know whether or not a new repo was
initialized by this function.
'''
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
home = os.path.expanduser('~')
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
@ -1606,17 +1625,7 @@ class Pygit2(GitProvider):
else:
# Repo cachedir exists, try to attach
try:
try:
self.repo = pygit2.Repository(self.cachedir)
except GitError as exc:
import pwd
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
if "Error stat'ing config file" not in six.text_type(exc):
raise
home = pwd.getpwnam(salt.utils.user.get_user()).pw_dir
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
self.repo = pygit2.Repository(self.cachedir)
self.repo = pygit2.Repository(self.cachedir)
except KeyError:
log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
return new
@ -2977,7 +2986,11 @@ class GitPillar(GitBase):
cachedir = self.do_checkout(repo)
if cachedir is not None:
# Figure out which environment this remote should be assigned
if repo.env:
if repo.branch == '__env__' and hasattr(repo, 'all_saltenvs'):
env = self.opts.get('pillarenv') \
or self.opts.get('saltenv') \
or self.opts.get('git_pillar_base')
elif repo.env:
env = repo.env
else:
env = 'base' if repo.branch == repo.base else repo.get_checkout_target()
@ -2994,6 +3007,7 @@ class GitPillar(GitBase):
points at the correct path
'''
lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint)
lcachedest = salt.utils.path.join(repo.cachedir, repo.root()).rstrip(os.sep)
wipe_linkdir = False
create_link = False
try:
@ -3027,11 +3041,11 @@ class GitPillar(GitBase):
)
wipe_linkdir = True
else:
if ldest != repo.cachedir:
if ldest != lcachedest:
log.debug(
'Destination of %s (%s) does not match '
'the expected value (%s)',
lcachelink, ldest, repo.cachedir
lcachelink, ldest, lcachedest
)
# Since we know that the parent dirs of the
# link are set up properly, all we need to do
@ -3076,16 +3090,16 @@ class GitPillar(GitBase):
if create_link:
try:
os.symlink(repo.cachedir, lcachelink)
os.symlink(lcachedest, lcachelink)
log.debug(
'Successfully linked %s to cachedir %s',
lcachelink, repo.cachedir
lcachelink, lcachedest
)
return True
except OSError as exc:
log.error(
'Failed to create symlink to %s at path %s: %s',
repo.cachedir, lcachelink, exc.__str__()
lcachedest, lcachelink, exc.__str__()
)
return False
except GitLockError:

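Forcing ``LC_ALL``/``LANGUAGE`` to ``C`` before shelling out, as done above, keeps git's messages in plain English so the prune output can be parsed reliably. The trick in isolation (the working directory is a made-up cachedir):
.. code-block:: python

    import os
    import subprocess

    env = os.environ.copy()
    env['LANGUAGE'] = 'C'
    env['LC_ALL'] = 'C'
    proc = subprocess.Popen(
        ['git', 'remote', 'prune', 'origin'],
        cwd='/var/cache/salt/master/gitfs/example',   # hypothetical cachedir
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output = proc.communicate()[0].decode('utf-8', 'replace')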
28
salt/utils/icinga2.py Normal file
View File

@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
'''
Icinga2 Common Utils
====================
This module provides common functionality for the icinga2 execution module and state.
.. versionadded:: 2018.8.3
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
# Import Salt libs
import salt.utils.path
log = logging.getLogger(__name__)
def get_certs_path():
icinga2_output = __salt__['cmd.run_all']([salt.utils.path.which('icinga2'), "--version"], python_shell=False)
version = re.search(r'r\d+\.\d+', icinga2_output['stdout']).group(0)
# Return new certs path for icinga2 >= 2.8
if int(version.split('.')[1]) >= 8:
return '/var/lib/icinga2/certs/'
# Keep backwards compatibility with older icinga2
return '/etc/icinga2/pki/'
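The helper above keys the certificate directory off the ``rX.Y`` token in the ``icinga2 --version`` banner. A sketch of that parsing run against a made-up banner string rather than the real binary:
.. code-block:: python

    import re

    # Hypothetical banner; the real string comes from `icinga2 --version`.
    banner = 'icinga2 - The Icinga 2 network monitoring daemon (version: r2.8.4-1)'
    version = re.search(r'r\d+\.\d+', banner).group(0)      # 'r2.8'
    certs_path = ('/var/lib/icinga2/certs/'
                  if int(version.split('.')[1]) >= 8
                  else '/etc/icinga2/pki/')
    print(certs_path)    # /var/lib/icinga2/certs/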

View File

@ -103,10 +103,12 @@ def store_job(opts, load, event=None, mminion=None):
log.error(emsg)
raise KeyError(emsg)
try:
mminion.returners[savefstr](load['jid'], load)
except KeyError as e:
log.error("Load does not contain 'jid': %s", e)
if job_cache != 'local_cache':
try:
mminion.returners[savefstr](load['jid'], load)
except KeyError as e:
log.error("Load does not contain 'jid': %s", e)
mminion.returners[fstr](load)
if (opts.get('job_cache_store_endtime')

View File

@ -11,6 +11,7 @@ import subprocess
import os
import plistlib
import time
import xml.parsers.expat
# Import Salt Libs
import salt.modules.cmdmod
@ -40,6 +41,11 @@ __salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
}
if six.PY2:
class InvalidFileException(Exception):
pass
plistlib.InvalidFileException = InvalidFileException
def __virtual__():
'''
@ -301,6 +307,12 @@ def launchctl(sub_cmd, *args, **kwargs):
def _available_services(refresh=False):
'''
This is a helper function for getting the available macOS services.
The strategy is to look through the known system locations for
launchd plist files, parse them, and use their information for
populating the list of services. Services can run without a plist
file present, but normally services which have an automated startup
will have a plist file, so this is a minor compromise.
'''
try:
if __context__['available_services'] and not refresh:
@ -316,6 +328,15 @@ def _available_services(refresh=False):
'/System/Library/LaunchAgents',
'/System/Library/LaunchDaemons',
]
try:
for user in os.listdir('/Users/'):
agent_path = '/Users/{}/Library/LaunchAgents'.format(user)
if os.path.isdir(agent_path):
launchd_paths.append(agent_path)
except OSError:
pass
_available_services = dict()
for launch_dir in launchd_paths:
for root, dirs, files in salt.utils.path.os_walk(launch_dir):
@ -328,39 +349,59 @@ def _available_services(refresh=False):
# Follow symbolic links of files in _launchd_paths
file_path = os.path.join(root, file_name)
true_path = os.path.realpath(file_path)
log.trace('Gathering service info for %s', true_path)
# ignore broken symlinks
if not os.path.exists(true_path):
continue
try:
# This assumes most of the plist files
# will be already in XML format
plist = plistlib.readPlist(true_path)
if six.PY2:
# py2 plistlib can't read binary plists, and
# uses a different API than py3.
plist = plistlib.readPlist(true_path)
else:
with salt.utils.files.fopen(true_path, 'rb') as handle:
plist = plistlib.load(handle)
except Exception:
# If plistlib is unable to read the file we'll need to use
# the system provided plutil program to do the conversion
except plistlib.InvalidFileException:
# Raised in python3 if the file is not XML.
# There's nothing we can do; move on to the next one.
msg = 'Unable to parse "%s" as it is invalid XML: InvalidFileException.'
logging.warning(msg, true_path)
continue
except xml.parsers.expat.ExpatError:
# Raised by py2 for all errors.
# Raised by py3 if the file is XML, but with errors.
if six.PY3:
# There's an error in the XML, so move on.
msg = 'Unable to parse "%s" as it is invalid XML: xml.parsers.expat.ExpatError.'
logging.warning(msg, true_path)
continue
# Use the system provided plutil program to attempt
# conversion from binary.
cmd = '/usr/bin/plutil -convert xml1 -o - -- "{0}"'.format(
true_path)
plist_xml = __salt__['cmd.run'](cmd)
if six.PY2:
try:
plist_xml = __salt__['cmd.run'](cmd)
plist = plistlib.readPlistFromString(plist_xml)
else:
plist = plistlib.loads(
salt.utils.stringutils.to_bytes(plist_xml))
except xml.parsers.expat.ExpatError:
# There's still an error in the XML, so move on.
msg = 'Unable to parse "%s" as it is invalid XML: xml.parsers.expat.ExpatError.'
logging.warning(msg, true_path)
continue
try:
_available_services[plist.Label.lower()] = {
'file_name': file_name,
'file_path': true_path,
'plist': plist}
except AttributeError:
# Handle malformed plist files
_available_services[os.path.basename(file_name).lower()] = {
# not all launchd plists contain a Label key
_available_services[plist['Label'].lower()] = {
'file_name': file_name,
'file_path': true_path,
'plist': plist}
except KeyError:
log.debug('Service %s does not contain a'
' Label key. Skipping.', true_path)
continue
# Put this in __context__ because this is a time-consuming function;
# see https://github.com/saltstack/salt/issues/48414 for background.
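To make the new parse-then-convert flow easier to follow, here is a minimal standalone sketch of the same idea; it is illustrative only: the load_plist name is hypothetical, and it shells out to plutil directly instead of going through __salt__['cmd.run'].

# Illustrative sketch only (Python 3, macOS with /usr/bin/plutil available).
import plistlib
import subprocess
import xml.parsers.expat


def load_plist(path):
    '''Parse a launchd plist, converting it with plutil if plistlib fails.'''
    try:
        with open(path, 'rb') as handle:
            return plistlib.load(handle)
    except (plistlib.InvalidFileException, xml.parsers.expat.ExpatError):
        # plistlib could not read the file directly; ask plutil to convert
        # it to XML and try again.
        converted = subprocess.run(
            ['/usr/bin/plutil', '-convert', 'xml1', '-o', '-', '--', path],
            stdout=subprocess.PIPE, check=False).stdout
        try:
            return plistlib.loads(converted)
        except (plistlib.InvalidFileException, xml.parsers.expat.ExpatError):
            return None  # still unreadable; caller should skip this service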

View File

@@ -39,6 +39,98 @@ from salt.utils.zeromq import zmq
log = logging.getLogger(__name__)
def get_running_jobs(opts):
'''
Return the running jobs on this minion
'''
ret = []
proc_dir = os.path.join(opts['cachedir'], 'proc')
if not os.path.isdir(proc_dir):
return ret
for fn_ in os.listdir(proc_dir):
path = os.path.join(proc_dir, fn_)
try:
data = _read_proc_file(path, opts)
if data is not None:
ret.append(data)
except (IOError, OSError):
# proc files may be removed at any time during this process by
# the master process that is executing the JID in question, so
# we must ignore ENOENT during this process
log.trace('%s removed during processing by master process', path)
return ret
def _read_proc_file(path, opts):
'''
Return a dict of JID metadata, or None
'''
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(path, 'rb') as fp_:
buf = fp_.read()
fp_.close()
if buf:
data = serial.loads(buf)
else:
# Proc file is empty, remove
try:
os.remove(path)
except IOError:
log.debug('Unable to remove proc file %s.', path)
return None
if not isinstance(data, dict):
# Invalid serial object
return None
if not salt.utils.process.os_is_running(data['pid']):
# The process is no longer running, clear out the file and
# continue
try:
os.remove(path)
except IOError:
log.debug('Unable to remove proc file %s.', path)
return None
if not _check_cmdline(data):
pid = data.get('pid')
if pid:
log.warning(
'PID %s exists but does not appear to be a salt process.', pid
)
try:
os.remove(path)
except IOError:
log.debug('Unable to remove proc file %s.', path)
return None
return data
def _check_cmdline(data):
'''
In some cases, where a very large number of processes are being created on
a system, a PID can get recycled or assigned to a non-Salt process.
On Linux this function checks /proc to make sure the PID being checked
actually belongs to a Salt process.
For non-Linux systems we punt and just return True.
'''
if not salt.utils.platform.is_linux():
return True
pid = data.get('pid')
if not pid:
return False
if not os.path.isdir('/proc'):
return True
path = os.path.join('/proc/{0}/cmdline'.format(pid))
if not os.path.isfile(path):
return False
try:
with salt.utils.files.fopen(path, 'rb') as fp_:
return b'salt' in fp_.read()
except (OSError, IOError):
return False
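A brief usage sketch for the new get_running_jobs helper; the opts dict below is a stripped-down stand-in for real master opts, and the cache path is only an example.

# Illustrative only: a minimal opts dict standing in for master opts.
import salt.utils.master

opts = {'cachedir': '/var/cache/salt/master'}
for job in salt.utils.master.get_running_jobs(opts):
    # Each entry is the deserialized JID metadata read from a proc file.
    print(job.get('jid'), job.get('fun'))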
class MasterPillarUtil(object):
'''
Helper utility for easy access to targeted minion grain and
@@ -721,6 +813,7 @@ def get_values_of_matching_keys(pattern_dict, user_name):
ret.extend(pattern_dict[expr])
return ret
# test code for the ConCache class
if __name__ == '__main__':

View File

@@ -42,6 +42,9 @@ def running(opts):
def cache_jobs(opts, jid, ret):
'''
Write job information to cache
'''
serial = salt.payload.Serial(opts=opts)
fn_ = os.path.join(
@@ -73,7 +76,7 @@ def _read_proc_file(path, opts):
try:
os.remove(path)
except IOError:
pass
log.debug('Unable to remove proc file %s.', path)
return None
if not isinstance(data, dict):
# Invalid serial object
@@ -84,7 +87,7 @@ def _read_proc_file(path, opts):
try:
os.remove(path)
except IOError:
pass
log.debug('Unable to remove proc file %s.', path)
return None
if opts.get('multiprocessing'):
if data.get('pid') == pid:
@@ -94,7 +97,7 @@ def _read_proc_file(path, opts):
try:
os.remove(path)
except IOError:
pass
log.debug('Unable to remove proc file %s.', path)
return None
if data.get('jid') == current_thread:
return None
@@ -102,7 +105,7 @@ def _read_proc_file(path, opts):
try:
os.remove(path)
except IOError:
pass
log.debug('Unable to remove proc file %s.', path)
return None
if not _check_cmdline(data):
@@ -114,7 +117,7 @@ def _read_proc_file(path, opts):
try:
os.remove(path)
except IOError:
pass
log.debug('Unable to remove proc file %s.', path)
return None
return data

View File

@@ -245,8 +245,12 @@ class CkMinions(object):
try:
if self.opts['key_cache'] and os.path.exists(pki_cache_fn):
log.debug('Returning cached minion list')
with salt.utils.files.fopen(pki_cache_fn) as fn_:
return self.serial.load(fn_)
if six.PY2:
with salt.utils.files.fopen(pki_cache_fn) as fn_:
return self.serial.load(fn_)
else:
with salt.utils.files.fopen(pki_cache_fn, mode='rb') as fn_:
return self.serial.load(fn_)
else:
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)):

View File

@@ -32,6 +32,7 @@ import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.master
import salt.utils.minion
import salt.utils.platform
import salt.utils.process
@@ -47,6 +48,10 @@ import salt.log.setup as log_setup
import salt.defaults.exitcodes
from salt.utils.odict import OrderedDict
from salt.exceptions import (
SaltInvocationError
)
# Import 3rd-party libs
from salt.ext import six
@@ -173,7 +178,11 @@ class Schedule(object):
data['run'] = True
if 'jid_include' not in data or data['jid_include']:
jobcount = 0
for job in salt.utils.minion.running(self.opts):
if self.opts['__role'] == 'master':
current_jobs = salt.utils.master.get_running_jobs(self.opts)
else:
current_jobs = salt.utils.minion.running(self.opts)
for job in current_jobs:
if 'schedule' in job:
log.debug(
'schedule.handle_func: Checking job against fun '
@@ -609,6 +618,7 @@ class Schedule(object):
log.warning('schedule: The metadata parameter must be '
'specified as a dictionary. Ignoring.')
data_returner = data.get('returner', None)
salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
if not self.standalone:
@@ -626,6 +636,22 @@ class Schedule(object):
# TODO: Make it readable! Split to funcs, remove nested try-except-finally sections.
try:
minion_blackout_violation = False
if self.opts.get('pillar', {}).get('minion_blackout', False):
whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
elif self.opts.get('grains', {}).get('minion_blackout', False):
whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
ret['pid'] = os.getpid()
if not self.standalone:
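The blackout handling above boils down to a small check; the following standalone sketch restates it (the check_blackout name is hypothetical and not part of Salt).

# Illustrative sketch of the blackout rule used above; not Salt code.
from salt.exceptions import SaltInvocationError


def check_blackout(opts, func):
    '''Raise if the minion is blacked out and ``func`` is not whitelisted.'''
    conf = opts.get('pillar', {})
    if not conf.get('minion_blackout', False):
        conf = opts.get('grains', {})
    if conf.get('minion_blackout', False):
        whitelist = conf.get('minion_blackout_whitelist', [])
        if func != 'saltutil.refresh_pillar' and func not in whitelist:
            raise SaltInvocationError(
                "Minion in blackout mode. Set 'minion_blackout' to False "
                "in pillar or grains to resume operations.")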
@@ -704,6 +730,7 @@ class Schedule(object):
self.functions[mod_name].__globals__[global_key] = value
self.functions.pack['__context__']['retcode'] = 0
ret['return'] = self.functions[func](*args, **kwargs)
if not self.standalone:
@@ -713,7 +740,6 @@ class Schedule(object):
ret['success'] = True
data_returner = data.get('returner', None)
if data_returner or self.schedule_returner:
if 'return_config' in data:
ret['ret_config'] = data['return_config']

View File

@@ -40,8 +40,8 @@ class TimedProc(object):
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
if six.PY2 and kwargs.get('shell', False):
args = salt.utils.stringutils.to_bytes(args)
if kwargs.get('shell', False):
args = salt.utils.data.decode(args, to_str=True)
try:
self.process = subprocess.Popen(args, **kwargs)
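A pure-Python sketch of the normalization this change relies on, turning any bytes arguments into native strings before they reach subprocess.Popen with shell=True; this is illustrative and not the actual salt.utils.data.decode helper.

def to_native_strings(args):
    '''Recursively decode bytes to str so shell=True receives text arguments.'''
    if isinstance(args, bytes):
        return args.decode('utf-8', 'replace')
    if isinstance(args, (list, tuple)):
        return type(args)(to_native_strings(arg) for arg in args)
    return args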

View File

@@ -81,6 +81,14 @@ def ipv6_addr(addr):
return __ip_addr(addr, socket.AF_INET6)
def ip_addr(addr):
'''
Returns True if the IPv4 or IPv6 address (and optional subnet) is valid,
otherwise returns False.
'''
return ipv4_addr(addr) or ipv6_addr(addr)
def netmask(mask):
'''
Returns True if the value passed is a valid netmask, otherwise returns False
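Hypothetical usage of the new ip_addr helper; the addresses below are examples only.

import salt.utils.validate.net as validate_net

validate_net.ip_addr('10.0.0.1')        # True  (IPv4)
validate_net.ip_addr('10.0.0.0/24')     # True  (IPv4 with subnet)
validate_net.ip_addr('fe80::1')         # True  (IPv6)
validate_net.ip_addr('not-an-address')  # False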

View File

@@ -0,0 +1,578 @@
# -*- coding: utf-8 -*-
r'''
A salt util for modifying firewall settings.
.. versionadded:: 2018.3.4
.. versionadded:: Fluorine
This util allows you to modify firewall settings in the local group policy in
addition to the normal firewall settings. Parameters are taken from the
netsh advfirewall prompt.
.. note::
More information can be found in the advfirewall context in netsh. This can
be accessed by opening a netsh prompt. At a command prompt type the following:
c:\>netsh
netsh>advfirewall
netsh advfirewall>set help
netsh advfirewall>set domain help
Usage:
.. code-block:: python
import salt.utils.win_lgpo_netsh
# Get the inbound/outbound firewall settings for connections on the
# local domain profile
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy')
# Get the inbound/outbound firewall settings for connections on the
# domain profile as defined by local group policy
salt.utils.win_lgpo_netsh.get_settings(profile='domain',
section='firewallpolicy',
store='lgpo')
# Get all firewall settings for connections on the domain profile
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain')
# Get all firewall settings for connections on the domain profile as
# defined by local group policy
salt.utils.win_lgpo_netsh.get_all_settings(profile='domain', store='lgpo')
# Get all firewall settings for all profiles
salt.utils.win_lgpo_netsh.get_all_settings()
# Get all firewall settings for all profiles as defined by local group
# policy
salt.utils.win_lgpo_netsh.get_all_settings(store='lgpo')
# Set the inbound setting for the domain profile to block inbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound')
# Set the outbound setting for the domain profile to allow outbound
# connections
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
outbound='allowoutbound')
# Set inbound/outbound settings for the domain profile in the group
# policy to block inbound and allow outbound
salt.utils.win_lgpo_netsh.set_firewall_settings(profile='domain',
inbound='blockinbound',
outbound='allowoutbound',
store='lgpo')
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from textwrap import dedent
import logging
import os
import re
import socket
import tempfile
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__hostname__ = socket.gethostname()
def _netsh_file(content):
'''
helper function to get the results of ``netsh -f content.txt``
Running ``netsh`` will drop you into a ``netsh`` prompt where you can issue
``netsh`` commands. You can put a series of commands in an external file and
run them as if from a ``netsh`` prompt using the ``-f`` switch. That's what
this function does.
Args:
content (str):
The contents of the file that will be run by the ``netsh -f``
command
Returns:
str: The text returned by the netsh command
'''
with tempfile.NamedTemporaryFile(mode='w',
prefix='salt-',
suffix='.netsh',
delete=False) as fp:
fp.write(content)
try:
log.debug('{0}:\n{1}'.format(fp.name, content))
return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)
finally:
os.remove(fp.name)
def _netsh_command(command, store):
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
# set the store for local or lgpo
if store.lower() == 'local':
netsh_script = dedent('''\
advfirewall
set store local
{0}
'''.format(command))
else:
netsh_script = dedent('''\
advfirewall
set store gpo = {0}
{1}
'''.format(__hostname__, command))
return _netsh_file(content=netsh_script).splitlines()
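For illustration, calling the private _netsh_command helper looks like this; it requires Windows with netsh on PATH, and the generated script shown in the comment assumes the local store.

# Illustrative only; requires Windows with netsh available.
import salt.utils.win_lgpo_netsh as netsh

# Runs "netsh -f <tempfile>" where the temp file contains:
#   advfirewall
#   set store local
#   show domainprofile state
lines = netsh._netsh_command(command='show domainprofile state', store='local')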
def get_settings(profile, section, store='local'):
'''
Get the firewall property from the specified profile in the specified store
as returned by ``netsh advfirewall``.
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
section (str):
The property to query within the selected profile. Valid options
are:
- firewallpolicy : inbound/outbound behavior
- logging : firewall logging settings
- settings : firewall properties
- state : firewall state (on | off)
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the properties for the specified profile
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# validate input
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if section.lower() not in ('state', 'firewallpolicy', 'settings', 'logging'):
raise ValueError('Incorrect section: {0}'.format(section))
if store.lower() not in ('local', 'lgpo'):
raise ValueError('Incorrect store: {0}'.format(store))
command = 'show {0}profile {1}'.format(profile, section)
# run it
results = _netsh_command(command=command, store=store)
# sample output:
# Domain Profile Settings:
# ----------------------------------------------------------------------
# LocalFirewallRules N/A (GPO-store only)
# LocalConSecRules N/A (GPO-store only)
# InboundUserNotification Disable
# RemoteManagement Disable
# UnicastResponseToMulticast Enable
# if it's less than 3 lines it failed
if len(results) < 3:
raise CommandExecutionError('Invalid results: {0}'.format(results))
ret = {}
# Skip the first three lines. Add everything else to a dictionary
for line in results[3:]:
ret.update(dict(list(zip(*[iter(re.split(r"\s{2,}", line))]*2))))
# Remove spaces from the values so that `Not Configured` is detected
# correctly
for item in ret:
ret[item] = ret[item].replace(' ', '')
# special handling for firewallpolicy
if section == 'firewallpolicy':
inbound, outbound = ret['Firewall Policy'].split(',')
return {'Inbound': inbound, 'Outbound': outbound}
return ret
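A short usage sketch for get_settings; the return values shown in the comments are examples of the shape, not guaranteed output.

# Illustrative only; requires Windows, and admin rights for the lgpo store.
import salt.utils.win_lgpo_netsh as netsh

# Inbound/outbound policy for the domain profile in the local store,
# e.g. {'Inbound': 'BlockInbound', 'Outbound': 'AllowOutbound'}
policy = netsh.get_settings(profile='domain', section='firewallpolicy')

# Firewall state for the public profile as defined by local group policy
state = netsh.get_settings(profile='public', section='state', store='lgpo')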
def get_all_settings(profile, store='local'):
'''
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
'''
ret = dict()
ret.update(get_settings(profile=profile, section='state', store=store))
ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
ret.update(get_settings(profile=profile, section='settings', store=store))
ret.update(get_settings(profile=profile, section='logging', store=store))
return ret
def get_all_profiles(store='local'):
'''
Gets all properties for all profiles in the specified store
Args:
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings for each profile
'''
return {
'Domain Profile': get_all_settings(profile='domain', store=store),
'Private Profile': get_all_settings(profile='private', store=store),
'Public Profile': get_all_settings(profile='public', store=store)
}
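Hypothetical usage of get_all_profiles; the 'State' key in the loop is assumed from the sample netsh output above and may differ.

# Illustrative only; requires Windows with netsh available.
import salt.utils.win_lgpo_netsh as netsh

profiles = netsh.get_all_profiles(store='lgpo')
for name, settings in profiles.items():
    print(name, settings.get('State'))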
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
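A usage sketch showing the single-direction case that the function handles by reading the current value for the other direction first.

# Illustrative only; changing firewall policy requires admin rights.
import salt.utils.win_lgpo_netsh as netsh

# Only inbound is given, so the current outbound value is fetched and
# re-applied, because netsh sets both directions in one command.
netsh.set_firewall_settings(profile='private', inbound='blockinbound')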
def set_logging_settings(profile, setting, value, store='local'):
'''
Configure logging settings for the Windows firewall.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767 (KB)
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('allowedconnections',
'droppedconnections',
'filename',
'maxfilesize'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if setting.lower() in ('allowedconnections', 'droppedconnections'):
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# TODO: Consider adding something like the following to validate filename
# https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
if setting.lower() == 'maxfilesize':
if value.lower() != 'notconfigured':
# Must be a number between 1 and 32767
try:
int(value)
except ValueError:
raise ValueError('Incorrect value: {0}'.format(value))
if not 1 <= int(value) <= 32767:
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile logging {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
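Hypothetical calls to set_logging_settings; the values are examples only.

# Illustrative only; requires Windows and admin rights.
import salt.utils.win_lgpo_netsh as netsh

netsh.set_logging_settings(profile='domain', setting='droppedconnections',
                           value='enable', store='lgpo')
netsh.set_logging_settings(profile='domain', setting='maxfilesize',
                           value='16384')  # size in KB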
def set_settings(profile, setting, value, store='local'):
'''
Configure firewall settings.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The firewall setting to configure. Valid options are:
- localfirewallrules
- localconsecrules
- inboundusernotification
- remotemanagement
- unicastresponsetomulticast
value (str):
The value to apply to the setting. Valid options are
- enable
- disable
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if setting.lower() not in ('localfirewallrules',
'localconsecrules',
'inboundusernotification',
'remotemanagement',
'unicastresponsetomulticast'):
raise ValueError('Incorrect setting: {0}'.format(setting))
if value.lower() not in ('enable', 'disable', 'notconfigured'):
raise ValueError('Incorrect value: {0}'.format(value))
# Run the command
command = 'set {0}profile settings {1} {2}'.format(profile, setting, value)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
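A hypothetical call to set_settings, shown for the group-policy store.

# Illustrative only; requires Windows and admin rights.
import salt.utils.win_lgpo_netsh as netsh

netsh.set_settings(profile='public', setting='remotemanagement',
                   value='disable', store='lgpo')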
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if state.lower() not in ('on', 'off', 'notconfigured'):
raise ValueError('Incorrect state: {0}'.format(state))
# Run the command
command = 'set {0}profile state {1}'.format(profile, state)
results = _netsh_command(command=command, store=store)
# A successful run should return an empty list
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True
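Hypothetical calls to set_state; the values are examples only.

# Illustrative only; requires Windows and admin rights.
import salt.utils.win_lgpo_netsh as netsh

netsh.set_state(profile='domain', state='on')
netsh.set_state(profile='public', state='notconfigured', store='lgpo')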

View File

@@ -271,7 +271,7 @@ def make_inheritable(token):
win32con.DUPLICATE_SAME_ACCESS)
def runas_system(cmd, username, password):
def runas_system(cmd, username, password, cwd=None):
# This only works when running as SYSTEM, for example when salt is running as a service
# Check for a domain
@@ -310,8 +310,8 @@ def runas_system(cmd, username, password):
except win32security.error as exc:
# User doesn't have admin, use existing token
if exc[0] == winerror.ERROR_NO_SUCH_LOGON_SESSION \
or exc[0] == winerror.ERROR_PRIVILEGE_NOT_HELD:
if exc.winerror == winerror.ERROR_NO_SUCH_LOGON_SESSION \
or exc.winerror == winerror.ERROR_PRIVILEGE_NOT_HELD:
elevated_token = token
else:
raise
@@ -352,7 +352,7 @@ def runas_system(cmd, username, password):
1,
0,
user_environment,
None,
cwd,
startup_info)
hProcess, hThread, PId, TId = \
@@ -397,7 +397,7 @@ def runas(cmd, username, password, cwd=None):
# This only works when not running under the system account
# Debug mode for example
if salt.utils.win_functions.get_current_user() == 'SYSTEM':
return runas_system(cmd, username, password)
return runas_system(cmd, username, password, cwd)
# Create a pipe to set as stdout in the child. The write handle needs to be
# inheritable.
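A hedged usage sketch of the updated runas signature with the new cwd argument; the account name, password, and working directory are placeholders.

# Illustrative only; requires Windows, pywin32, and a real local account.
import salt.utils.win_runas as win_runas

result = win_runas.runas(cmd='hostname', username='localuser',
                         password='hunter2', cwd='C:\\Temp')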

Some files were not shown because too many files have changed in this diff.