Merge branch 'develop' into win_fix_renderers_tests

This commit is contained in:
Shane Lee 2017-03-02 10:29:36 -07:00 committed by GitHub
commit eb6d317289
832 changed files with 10105 additions and 8882 deletions

7
.mention-bot Normal file
View File

@ -0,0 +1,7 @@
{
"skipTitle": "Merge forward",
"delayed": true,
"delayedUntil": "2h",
"userBlacklist": []
}

View File

@ -24,7 +24,8 @@ load-plugins=saltpylint.pep8,
saltpylint.fileperms,
saltpylint.py3modernize,
saltpylint.smartup,
saltpylint.minpyver
saltpylint.minpyver,
saltpylint.salttesting
# Use multiple processes to speed up Pylint.
# Don't bump this value on PyLint 1.4.0 - Known bug that ignores the passed --rcfile

View File

@ -21,7 +21,8 @@ load-plugins=saltpylint.pep8,
saltpylint.fileperms,
saltpylint.py3modernize,
saltpylint.smartup,
saltpylint.minpyver
saltpylint.minpyver,
saltpylint.salttesting
# Use multiple processes to speed up Pylint.
# Don't bump this value on PyLint 1.4.0 - Known bug that ignores the passed --rcfile

View File

@ -549,11 +549,11 @@
#default_top: base
# The hash_type is the hash to use when discovering the hash of a file on
# the master server. The default is md5 but sha1, sha224, sha256, sha384
# and sha512 are also supported.
# the master server. The default is sha256, but md5, sha1, sha224, sha384 and
# sha512 are also supported.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Prior to changing this value, the master should be stopped and all Salt
# caches should be cleared.

View File

@ -161,7 +161,7 @@
# strip_colors: False
# Backup files that are replaced by file.managed and file.recurse under
# 'cachedir'/file_backups relative to their original location and appended
# 'cachedir'/file_backup relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:
@ -574,14 +574,11 @@
#fileserver_limit_traversal: False
# The hash_type is the hash to use when discovering the hash of a file on
# the local fileserver. The default is md5, but sha1, sha224, sha256, sha384
# the local fileserver. The default is sha256, but md5, sha1, sha224, sha384
# and sha512 are also supported.
#
# WARNING: While md5 and sha1 are also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Warning: Prior to changing this value, the minion should be stopped and all
# Salt caches should be cleared.
@ -652,6 +649,10 @@
###########################################
# Disable multiprocessing support, by default when a minion receives a
# publication a new process is spawned and the command is executed therein.
#
# WARNING: Disabling multiprocessing may result in substantial slowdowns
# when processing large pillars. See https://github.com/saltstack/salt/issues/38758
# for a full explanation.
#multiprocessing: True

View File

@ -119,7 +119,7 @@
# strip_colors: False
# Backup files that are replaced by file.managed and file.recurse under
# 'cachedir'/file_backups relative to their original location and appended
# 'cachedir'/file_backup relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:

View File

@ -244,7 +244,7 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2016.11.2' # latest release
latest_release = '2016.11.3' # latest release
previous_release = '2016.3.5' # latest release from previous branch
previous_release_dir = '2016.3' # path on web server for previous branch
next_release = '' # next release

View File

@ -6093,17 +6093,17 @@ fileserver_list_cache_time: 5
.UNINDENT
.SS \fBhash_type\fP
.sp
Default: \fBmd5\fP
Default: \fBsha256\fP
.sp
The hash_type is the hash to use when discovering the hash of a file on
the master server. The default is md5, but sha1, sha224, sha256, sha384, and
the master server. The default is sha256, but md5, sha1, sha224, sha384, and
sha512 are also supported.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
hash_type: md5
hash_type: sha256
.ft P
.fi
.UNINDENT
@ -10539,17 +10539,17 @@ fileserver_limit_traversal: False
.UNINDENT
.SS \fBhash_type\fP
.sp
Default: \fBmd5\fP
Default: \fBsha256\fP
.sp
The hash_type is the hash to use when discovering the hash of a file on the
local fileserver. The default is md5, but sha1, sha224, sha256, sha384, and
local fileserver. The default is sha256, but md5, sha1, sha224, sha384, and
sha512 are also supported.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
hash_type: md5
hash_type: sha256
.ft P
.fi
.UNINDENT
@ -11776,11 +11776,11 @@ event that an error is introduced in the latest revision of the repo.
#default_top: base
# The hash_type is the hash to use when discovering the hash of a file on
# the master server. The default is md5 but sha1, sha224, sha256, sha384
# and sha512 are also supported.
# the master server. The default is sha256, but md5, sha1, sha224, sha384 and
# sha512 are also supported.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Prior to changing this value, the master should be stopped and all Salt
# caches should be cleared.
@ -12459,7 +12459,7 @@ event that an error is introduced in the latest revision of the repo.
# strip_colors: False
# Backup files that are replaced by file.managed and file.recurse under
# \(aqcachedir\(aq/file_backups relative to their original location and appended
# \(aqcachedir\(aq/file_backup relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:
@ -12867,14 +12867,11 @@ event that an error is introduced in the latest revision of the repo.
#fileserver_limit_traversal: False
# The hash_type is the hash to use when discovering the hash of a file on
# the local fileserver. The default is md5, but sha1, sha224, sha256, sha384
# the local fileserver. The default is sha256, but md5, sha1, sha224, sha384
# and sha512 are also supported.
#
# WARNING: While md5 and sha1 are also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Warning: Prior to changing this value, the minion should be stopped and all
# Salt caches should be cleared.
@ -292069,7 +292066,7 @@ the test method:
.nf
.ft C
import integration
from salttesting.helpers import destructiveTest
from tests.support.helpers import destructiveTest
class PkgTest(integration.ModuleCase):
@destructiveTest
@ -292165,7 +292162,7 @@ can be used
.nf
.ft C
# Import logging handler
from salttesting.helpers import TestsLoggingHandler
from tests.support.helpers import TestsLoggingHandler
# .. inside test
with TestsLoggingHandler() as handler:

View File

@ -282,6 +282,7 @@ Valid options:
- pillar
- utils
- sdb
- cache
.. conf_master:: module_dirs
@ -1604,6 +1605,25 @@ on a large number of minions.
fileserver_list_cache_time: 5
.. conf_master:: fileserver_verify_config
``fileserver_verify_config``
------------------------------
.. versionadded:: Nitrogen
Default: ``True``
By default, as the master starts it performs some sanity checks on the
configured fileserver backends. If any of these sanity checks fail (such as
when an invalid configuration is used), the master daemon will abort.
To skip these sanity checks, set this option to ``False``.
.. code-block:: yaml
fileserver_verify_config: False
.. conf_master:: hash_type
``hash_type``
@ -3013,6 +3033,29 @@ they were created by a different master.
.. __: http://www.gluster.org/
.. conf_master:: git_pillar_includes
``git_pillar_includes``
***********************
.. versionadded:: Nitrogen
Default: ``True``
Normally, when processing :ref:`git_pillar remotes
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
section in the ``ext_pillar`` configuration refers to the same pillar
environment, then each repo in a given environment will have access to the
other repos' files to be referenced in their top files. However, it may be
desirable to disable this behavior. If so, set this value to ``False``.
For a more detailed examination of how includes work, see :ref:`this
explanation <git-pillar-multiple-remotes>` from the git_pillar documentation.
.. code-block:: yaml
git_pillar_includes: False
.. _git-ext-pillar-auth-opts:
Git External Pillar Authentication Options
@ -3144,6 +3187,25 @@ configured both globally and for individual remotes.
- '+refs/pull/*/head:refs/remotes/origin/pr/*'
- '+refs/pull/*/merge:refs/remotes/origin/merge/*'
.. conf_master:: git_pillar_verify_config
``git_pillar_verify_config``
----------------------------
.. versionadded:: Nitrogen
Default: ``True``
By default, as the master starts it performs some sanity checks on the
configured git_pillar repositories. If any of these sanity checks fail (such as
when an invalid configuration is used), the master daemon will abort.
To skip these sanity checks, set this option to ``False``.
.. code-block:: yaml
git_pillar_verify_config: False
.. _pillar-merging-opts:
Pillar Merging Options

View File

@ -81,9 +81,10 @@ The option can also be set to a list of masters, enabling
``ipv6``
--------
Default: ``False``
Default: ``None``
Whether the master should be connected over IPv6.
Whether the master should be connected over IPv6. By default, the Salt minion
will try to automatically detect IPv6 connectivity to the master.
.. code-block:: yaml
@ -1778,13 +1779,15 @@ the environment setting, but for pillar instead of states.
.. code-block:: yaml
pillarenv: None
pillarenv: dev
.. conf_minion:: pillarenv_from_saltenv
``pillarenv_from_saltenv``
--------------------------
.. versionadded:: Nitrogen
Default: ``False``
When set to ``True``, the :conf_minion:`pillarenv` value will assume the value

View File

@ -25,6 +25,7 @@ environments are not isolated from each other. This allows for logical
isolation of environments by the engineer using Salt, but also allows
for information to be used in multiple environments.
.. _file-roots-directory-overlay:
Directory Overlay
=================
@ -81,4 +82,4 @@ enable running Salt states without a Salt master. To use the local file server
interface, copy the file server data to the minion and set the file_roots
option on the minion to point to the directories copied from the master.
Once the minion ``file_roots`` option has been set, change the ``file_client``
option to local to make sure that the local file server interface is used.
option to local to make sure that the local file server interface is used.

View File

@ -419,3 +419,14 @@ and bug resolution. See the :ref:`Labels and Milestones
.. _`Closing issues via commit message`: https://help.github.com/articles/closing-issues-via-commit-messages
.. _`git format-patch`: https://www.kernel.org/pub/software/scm/git/docs/git-format-patch.html
.. _salt-users: https://groups.google.com/forum/#!forum/salt-users
Mentionbot
==========
SaltStack runs a mention-bot which notifies contributors who might be able
to help review incoming pull-requests based on their past contribution to
files which are being changed.
If you do not wish to receive these notifications, please add your GitHub
handle to the blacklist line in the `.mention-bot` file located in the
root of the Salt repository.

View File

@ -429,7 +429,7 @@ Test Helpers
------------
Several Salt-specific helpers are available. A full list is available by inspecting
functions exported in `salttesting.helpers`.
functions exported in `tests.support.helpers`.
`@expensiveTest` -- Designates a test which typically requires a relatively costly
external resource, like a cloud virtual machine. This decorator is not normally
@ -453,10 +453,10 @@ the test will be skipped if the binaries are not all present on the system.
`@skip_if_not_root` -- If the test is not executed as root, it will be skipped.
`@with_system_user` -- Creates and optionally destroys a system user within a test case.
See implementation details in `salttesting.helpers` for details.
See implementation details in `tests.support.helpers` for details.
`@with_system_group` -- Creates and optionally destroys a system group within a test case.
See implementation details in `salttesting.helpers` for details.
See implementation details in `tests.support.helpers` for details.
`@with_system_user_and_group` -- Creates and optionally destroys a system user and group
within a test case. See implementation details in `salttesting.helpers` for details.
within a test case. See implementation details in `tests.support.helpers` for details.

View File

@ -272,7 +272,7 @@ Now the workhorse method ``run_function`` can be used to test a module:
.. code-block:: python
import os
import integration
import tests.integration as integration
class TestModuleTest(integration.ModuleCase):
@ -312,7 +312,7 @@ Validating the shell commands can be done via shell tests:
import shutil
import tempfile
import integration
import tests.integration as integration
class KeyTest(integration.ShellCase):
'''
@ -345,7 +345,7 @@ Testing salt-ssh functionality can be done using the SSHCase test class:
.. code-block:: python
import integration
import tests.integration as integration
class SSHGrainsTest(integration.SSHCase):
'''
@ -370,7 +370,7 @@ on a minion event bus.
.. code-block:: python
import integration
import tests.integration as integration
class TestEvent(integration.SaltEventAssertsMixin):
'''
@ -392,7 +392,7 @@ Testing Salt's Syndic can be done via the SyndicCase test class:
.. code-block:: python
import integration
import tests.integration as integration
class TestSyndic(integration.SyndicCase):
'''
@ -438,11 +438,9 @@ to test states:
import shutil
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import tests.integration as integration
# Import salt libs
import integration
import salt.utils
HFILE = os.path.join(integration.TMP, 'hosts')
@ -506,8 +504,8 @@ the test method:
.. code-block:: python
import integration
from salttesting.helpers import destructiveTest
import tests.integration as integration
from tests.support.helpers import destructiveTest
class DestructiveExampleModuleTest(integration.ModuleCase):
'''
@ -628,7 +626,7 @@ the test function:
.. code-block:: python
from salttesting.helpers import expensiveTest
from tests.support.helpers import expensiveTest
@expensiveTest
def test_instance(self):

View File

@ -122,14 +122,13 @@ Most commonly, the following imports are necessary to create a unit test:
.. code-block:: python
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from tests.support.unit import skipIf, TestCase
If you need mock support to your tests, please also import:
.. code-block:: python
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
Evaluating Truth
@ -183,13 +182,13 @@ additional imports for MagicMock:
.. code-block:: python
# Import Salt Testing libs
from salttesting import TestCase
from tests.support.unit import TestCase
# Import Salt execution module to test
from salt.modules import db
# Import Mock libraries
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
# Create test case class and inherit from Salt's customized TestCase
# Skip this test case if we don't have access to mock!
@ -259,7 +258,7 @@ we might write the skeleton for testing ``fib.py``:
.. code-block:: python
# Import Salt Testing libs
from salttesting import TestCase
from tests.support.unit import TestCase
# Import Salt execution module to test
from salt.modules import fib
@ -339,17 +338,14 @@ will also redefine the ``__salt__`` dictionary such that it only contains
from salt.modules import linux_sysctl
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Globals
linux_sysctl.__salt__ = {}
@ -368,11 +364,6 @@ will also redefine the ``__salt__`` dictionary such that it only contains
with patch.dict(linux_sysctl.__salt__, {'cmd.run': mock_cmd}):
self.assertEqual(linux_sysctl.get('net.ipv4.ip_forward'), 1)
if __name__ == '__main__':
from integration import run_tests
run_tests(LinuxSysctlTestCase, needs_daemon=False)
Since ``get()`` has only one raise or return statement and that statement is a
success condition, the test function is simply named ``test_get()``. As
described, the single function call parameter, ``name`` is mocked with
@ -450,17 +441,14 @@ with.
from salt.exceptions import CommandExecutionError
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Globals
linux_sysctl.__salt__ = {}
@ -510,7 +498,3 @@ with.
with patch.dict(linux_sysctl.__salt__, {'cmd.run_all': mock_cmd}):
self.assertEqual(linux_sysctl.assign(
'net.ipv4.ip_forward', 1), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(LinuxSysctlTestCase, needs_daemon=False)

View File

@ -2,129 +2,19 @@
Solaris
=======
Salt was added to the OpenCSW package repository in September of 2012 by Romeo
Theriault <romeot@hawaii.edu> at version 0.10.2 of Salt. It has mainly been
tested on Solaris 10 (sparc), though it is built for and has been tested
minimally on Solaris 10 (x86), Solaris 9 (sparc/x86) and 11 (sparc/x86).
(Please let me know if you're using it on these platforms!) Most of the testing
has also just focused on the minion, though it has verified that the master
starts up successfully on Solaris 10.
Salt is known to work on Solaris but community packages are unmaintained.
Comments and patches for better support on these platforms are very welcome.
It is possible to install Salt on Solaris by using `setuptools`.
As of version 0.10.4, Solaris is well supported under salt, with all of the
following working well:
1. remote execution
2. grain detection
3. service control with SMF
4. 'pkg' states with 'pkgadd' and 'pkgutil' modules
5. cron modules/states
6. user and group modules/states
7. shadow password management modules/states
Salt is dependent on the following additional packages. These will
automatically be installed as dependencies of the ``py_salt`` package:
- py_yaml
- py_pyzmq
- py_jinja2
- py_msgpack_python
- py_m2crypto
- py_crypto
- python
Installation
============
To install Salt from the OpenCSW package repository you first need to install
`pkgutil`_ assuming you don't already have it installed:
On Solaris 10:
For example, to install the develop version of salt:
.. code-block:: bash
pkgadd -d http://get.opencsw.org/now
On Solaris 9:
.. code-block:: bash
wget http://mirror.opencsw.org/opencsw/pkgutil.pkg
pkgadd -d pkgutil.pkg all
Once pkgutil is installed you'll need to edit it's config file
``/etc/opt/csw/pkgutil.conf`` to point it at the unstable catalog:
.. code-block:: diff
- #mirror=http://mirror.opencsw.org/opencsw/testing
+ mirror=http://mirror.opencsw.org/opencsw/unstable
OK, time to install salt.
.. code-block:: bash
# Update the catalog
root> /opt/csw/bin/pkgutil -U
# Install salt
root> /opt/csw/bin/pkgutil -i -y py_salt
Minion Configuration
====================
Now that salt is installed you can find it's configuration files in
``/etc/opt/csw/salt/``.
You'll want to edit the minion config file to set the name of your salt master
server:
.. code-block:: diff
- #master: salt
+ master: your-salt-server
If you would like to use `pkgutil`_ as the default package provider for your
Solaris minions, you can do so using the :conf_minion:`providers` option in the
minion config file.
You can now start the salt minion like so:
On Solaris 10:
.. code-block:: bash
svcadm enable salt-minion
git clone https://github.com/saltstack/salt
cd salt
sudo python setup.py install --force
On Solaris 9:
.. note::
.. code-block:: bash
/etc/init.d/salt-minion start
You should now be able to log onto the salt master and check to see if the
salt-minion key is awaiting acceptance:
.. code-block:: bash
salt-key -l un
Accept the key:
.. code-block:: bash
salt-key -a <your-salt-minion>
Run a simple test against the minion:
.. code-block:: bash
salt '<your-salt-minion>' test.ping
Troubleshooting
===============
Logs are in ``/var/log/salt``
.. _pkgutil: http://www.opencsw.org/manual/for-administrators/getting-started.html
SaltStack does offer commercial support for Solaris which includes packages.

View File

@ -335,6 +335,7 @@ updated pillar data, but :py:func:`pillar.item <salt.modules.pillar.item>`,
<salt.modules.pillar.raw>` will not see this data unless refreshed using
:py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`.
.. _pillar-environments:
How Pillar Environments Are Handled
===================================

View File

@ -10,17 +10,152 @@ Changes for v2016.11.2..v2016.11.3
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2017-02-16T16:48:51Z*
*Generated at: 2017-02-22T23:01:16Z*
Statistics:
- Total Merges: **126**
- Total Issue references: **75**
- Total PR references: **199**
- Total Merges: **139**
- Total Issue references: **78**
- Total PR references: **217**
Changes:
- **PR** `#39536`_: (*twangboy*) Namespace 'status' functions in 'win_status'
@ *2017-02-21T23:45:31Z*
- **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows.
| refs: `#39536`_
* 40f72db Merge pull request `#39536`_ from twangboy/fix_win_status
* d5453e2 Remove unused import (lint)
* 837c32e Remove list2cmdline
* c258cb3 Streamline wmic command returns for easier parsing
* 6d2cf81 Fix 'ping_master' function
* d946d10 Namespace 'status' functions in 'win_status'
- **PR** `#39534`_: (*rallytime*) Fix breakage in aptpkg and dpkg execution modules
@ *2017-02-21T20:31:15Z*
- **PR** `#39418`_: (*anlutro*) Allow aptpkg.info_installed on package names that aren't installed
| refs: `#39534`_
* dc8f578 Merge pull request `#39534`_ from rallytime/fix-pkg-function-specs
* d34a8fe Fix breakage in aptpkg and dpkg execution modules
* 1d0d7b2 Upgrade SaltTesting to run test suite for 2016.11 and add SaltPyLint (`#39521`_)
- **ISSUE** `#34712`_: (*richardscollin*) Salt Test Suite Error - develop
| refs: `#37366`_
- **PR** `#39521`_: (*vutny*) Upgrade SaltTesting to run test suite for 2016.11 and add SaltPyLint
- **PR** `#37366`_: (*eradman*) dev_python*.txt: use current SaltTesting and SaltPyLint modules
| refs: `#39521`_
- **PR** `#39370`_: (*twangboy*) Gate win_osinfo and winservice
@ *2017-02-17T23:53:58Z*
* e4c7168 Merge pull request `#39370`_ from twangboy/gate_win_utils
* 167cdb3 Gate windows specific imports, add __virtual__
* e67387d Add option to return a Non instantiated class
* 315b0cc Clarify return value for win_osinfo
* 994314e Fix more docs
* 2bbe3cb Fix some docs
* 4103563 Merge branch 'gate_win_utils' of https://github.com/twangboy/salt into gate_win_utils
* 24c1bd0 Remove extra newlines
* 82a86ce Add helper function for winservice
* 0051b5a Put the win_osinfo classes in a helper function
* 4e08534 Gate win_osinfo and winservice better
- **PR** `#39486`_: (*twangboy*) Remove orphaned function list_configurable_policies
@ *2017-02-17T22:21:50Z*
* a3e71b6 Merge pull request `#39486`_ from twangboy/win_remove_orphaned
* 1328055 Remove orphaned function list_configurable_policies
- **PR** `#39418`_: (*anlutro*) Allow aptpkg.info_installed on package names that aren't installed
| refs: `#39534`_
@ *2017-02-17T18:34:19Z*
* 87b269f Merge pull request `#39418`_ from alprs/fix-aptpkg_info_nonexistent_pkg
* 246bf1e add failhard argument to various apt pkg functions
- **PR** `#39438`_: (*mirceaulinic*) file.get_managed: refetch source when file hashsum is changed
@ *2017-02-17T17:58:29Z*
* e816d6c Merge pull request `#39438`_ from cloudflare/fix_39422
* 8453800 file.get_managed: refetch cached file when hashsum chnaged
- **PR** `#39432`_: (*dmaziuk*) Quick and dirty fix for GECOS fields with more than 3 commas
@ *2017-02-17T17:57:30Z*
- **ISSUE** `#39203`_: (*dmaziuk*) salt.users gecos field
| refs: `#39432`_ `#39432`_
* a5fe8f0 Merge pull request `#39432`_ from dmaziuk/issue39203
* 41c0463 Remove #
* 4f877c6 Quick and dirty fix for GECOS fields with more than 3 commas
- **PR** `#39484`_: (*corywright*) The Reactor docs should use pillar='{}' instead of 'pillar={}'
@ *2017-02-17T17:50:57Z*
* 3665229 Merge pull request `#39484`_ from corywright/fix-reactor-docs-pillar-keyword-args
* cc90d0d The Reactor docs should use pillar='{}' instead of 'pillar={}'
- **PR** `#39456`_: (*twangboy*) Add salt icon to buildenv directory
@ *2017-02-16T22:47:58Z*
* 2e3a9c5 Merge pull request `#39456`_ from twangboy/win_fix_icon
* 8dd915d Add salt icon to buildenv directory
- **PR** `#39462`_: (*twangboy*) Use url_path instead of url_data.path
@ *2017-02-16T22:44:18Z*
* 63adc03 Merge pull request `#39462`_ from twangboy/win_fix_fileclient
* a96bc13 Use url_path instead of url_data.path
- **PR** `#39458`_: (*rallytime*) Fix more warnings in doc build
@ *2017-02-16T21:45:52Z*
* e9b034f Merge pull request `#39458`_ from rallytime/fixup-more-doc-build-warnings
* e698bc3 Fix more warnings in doc build
- **PR** `#39437`_: (*sakateka*) Fixes about saltfile
@ *2017-02-16T20:32:15Z*
* e4f8c2b Merge pull request `#39437`_ from sakateka/fixes_about_saltfile
* ab68524 less pylint: salt/utils/parsers.py
* 9e7d9dc Revert "pylint: salt/utils/parsers.py"
* f3f129c document ~/.salt/Saltfile
* 33f3614 pylint: salt/utils/parsers.py
* 0f36e10 expand config_dir and '~/.salt/Saltfile' as last resort
* 1acf00d add 2016.11.3 changelog to release notes (`#39451`_)
- **PR** `#39451`_: (*Ch3LL*) add 2016.11.3 changelog to release notes
- **PR** `#39448`_: (*gtmanfred*) Add release notes for cisco proxy minions added in Carbon
@ *2017-02-16T17:29:48Z*
- **ISSUE** `#38032`_: (*meggiebot*) Add missing Carbon docs
| refs: `#39448`_
* 8e2cbd2 Merge pull request `#39448`_ from gtmanfred/2016.11
* 3172e88 Add release notes for cisco proxy minions added in Carbon
- **PR** `#39428`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11
@ *2017-02-16T00:01:15Z*
@ -846,6 +981,7 @@ Changes:
* a7fc02e Ungate the status.py module and raise unsupported errors in functions not executeable on Windows. (`#39005`_)
- **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows.
| refs: `#39536`_
- **PR** `#39012`_: (*terminalmage*) Fix "invalid lexer" errors in docs build
@ *2017-01-28T06:47:45Z*
@ -1252,6 +1388,7 @@ Changes:
.. _`#33890`: https://github.com/saltstack/salt/issues/33890
.. _`#34280`: https://github.com/saltstack/salt/pull/34280
.. _`#34551`: https://github.com/saltstack/salt/issues/34551
.. _`#34712`: https://github.com/saltstack/salt/issues/34712
.. _`#34780`: https://github.com/saltstack/salt/issues/34780
.. _`#35055`: https://github.com/saltstack/salt/pull/35055
.. _`#35777`: https://github.com/saltstack/salt/issues/35777
@ -1264,12 +1401,14 @@ Changes:
.. _`#37174`: https://github.com/saltstack/salt/issues/37174
.. _`#37262`: https://github.com/saltstack/salt/pull/37262
.. _`#37338`: https://github.com/saltstack/salt/pull/37338
.. _`#37366`: https://github.com/saltstack/salt/pull/37366
.. _`#37375`: https://github.com/saltstack/salt/pull/37375
.. _`#37413`: https://github.com/saltstack/salt/issues/37413
.. _`#37632`: https://github.com/saltstack/salt/pull/37632
.. _`#37864`: https://github.com/saltstack/salt/pull/37864
.. _`#37938`: https://github.com/saltstack/salt/issues/37938
.. _`#38003`: https://github.com/saltstack/salt/issues/38003
.. _`#38032`: https://github.com/saltstack/salt/issues/38032
.. _`#38081`: https://github.com/saltstack/salt/issues/38081
.. _`#38100`: https://github.com/saltstack/salt/issues/38100
.. _`#38165`: https://github.com/saltstack/salt/pull/38165
@ -1435,6 +1574,7 @@ Changes:
.. _`#39198`: https://github.com/saltstack/salt/pull/39198
.. _`#39199`: https://github.com/saltstack/salt/pull/39199
.. _`#39202`: https://github.com/saltstack/salt/pull/39202
.. _`#39203`: https://github.com/saltstack/salt/issues/39203
.. _`#39206`: https://github.com/saltstack/salt/pull/39206
.. _`#39209`: https://github.com/saltstack/salt/pull/39209
.. _`#39210`: https://github.com/saltstack/salt/pull/39210
@ -1484,16 +1624,31 @@ Changes:
.. _`#39362`: https://github.com/saltstack/salt/pull/39362
.. _`#39364`: https://github.com/saltstack/salt/pull/39364
.. _`#39369`: https://github.com/saltstack/salt/pull/39369
.. _`#39370`: https://github.com/saltstack/salt/pull/39370
.. _`#39378`: https://github.com/saltstack/salt/pull/39378
.. _`#39379`: https://github.com/saltstack/salt/pull/39379
.. _`#39380`: https://github.com/saltstack/salt/pull/39380
.. _`#39392`: https://github.com/saltstack/salt/pull/39392
.. _`#39400`: https://github.com/saltstack/salt/pull/39400
.. _`#39409`: https://github.com/saltstack/salt/pull/39409
.. _`#39418`: https://github.com/saltstack/salt/pull/39418
.. _`#39419`: https://github.com/saltstack/salt/pull/39419
.. _`#39424`: https://github.com/saltstack/salt/pull/39424
.. _`#39428`: https://github.com/saltstack/salt/pull/39428
.. _`#39429`: https://github.com/saltstack/salt/pull/39429
.. _`#39432`: https://github.com/saltstack/salt/pull/39432
.. _`#39437`: https://github.com/saltstack/salt/pull/39437
.. _`#39438`: https://github.com/saltstack/salt/pull/39438
.. _`#39448`: https://github.com/saltstack/salt/pull/39448
.. _`#39451`: https://github.com/saltstack/salt/pull/39451
.. _`#39456`: https://github.com/saltstack/salt/pull/39456
.. _`#39458`: https://github.com/saltstack/salt/pull/39458
.. _`#39462`: https://github.com/saltstack/salt/pull/39462
.. _`#39484`: https://github.com/saltstack/salt/pull/39484
.. _`#39486`: https://github.com/saltstack/salt/pull/39486
.. _`#39521`: https://github.com/saltstack/salt/pull/39521
.. _`#39534`: https://github.com/saltstack/salt/pull/39534
.. _`#39536`: https://github.com/saltstack/salt/pull/39536
.. _`bp-36336`: https://github.com/saltstack/salt/pull/36336
.. _`bp-37338`: https://github.com/saltstack/salt/pull/37338
.. _`bp-37375`: https://github.com/saltstack/salt/pull/37375

View File

@ -52,9 +52,21 @@ Grains Changes
may need to be adjusted to account for this change.
- Add ability to specify disk backing mode in the VMWare salt cloud profile.
State Module Changes
====================
- The :py:func:`service.running <salt.states.service.running>` and
:py:func:`service.dead <salt.states.service.dead>` states now support a
``no_block`` argument which, when set to ``True`` on systemd minions, will
start/stop the service using the ``--no-block`` flag in the ``systemctl``
command. On non-systemd minions, a warning will be issued.
Execution Module Changes
========================
- Several functions in the :mod:`systemd <salt.modules.systemd>` execution
module have gained a ``no_block`` argument, which when set to ``True`` will
use ``--no-block`` in the ``systemctl`` command.
- In the :mod:`solarisips <salt.modules.solarisips>` ``pkg`` module, the
default value for the ``refresh`` argument to the ``list_upgrades`` function
has been changed from ``False`` to ``True``. This makes the function more
@ -144,6 +156,11 @@ feature works can be found :ref:`here <gitfs-custom-refspecs>` in the GitFS
Walkthrough. The git_pillar and winrepo versions of this feature work the same
as their GitFS counterpart.
git_pillar "mountpoints" Feature Added
======================================
See :ref:`here <git-pillar-mountpoints>` for detailed documentation.
``dockerng`` State/Execution Module Renamed to ``docker``
=========================================================

View File

@ -107,6 +107,144 @@ A comma-separated list of optional packages that are recommended to be
installed with the package. This list is displayed in an informational message
when the package is installed to SPM.
files
~~~~~
A files section can be added, to specify a list of files to add to the SPM.
Such a section might look like:
.. code-block:: yaml
files:
- _pillar
- FORMULA
- _runners
- d|mymodule/index.rst
- r|README.rst
When ``files`` are specified, then only those files will be added to the SPM,
regardless of what other files exist in the directory. They will also be added
in the order specified, which is useful if you have a need to lay down files in
a specific order.
As can be seen in the example above, you may also tag files as being a specific
type. This is done by pre-pending a filename with its type, followed by a pipe
(``|``) character. The above example contains a document file and a readme. The
available file types are:
* ``c``: config file
* ``d``: documentation file
* ``g``: ghost file (i.e. the file contents are not included in the package payload)
* ``l``: license file
* ``r``: readme file
* ``s``: SLS file
* ``m``: Salt module
The first 5 of these types (``c``, ``d``, ``g``, ``l``, ``r``) will be placed in
``/usr/share/salt/spm/`` by default. This can be changed by setting an
``spm_share_dir`` value in your ``/etc/salt/spm`` configuration file.
The last two types (``s`` and ``m``) are currently ignored, but they are
reserved for future use.
Pre and Post States
-------------------
It is possible to run Salt states before and after installing a package by
using pre and post states. The following sections may be declared in a
``FORMULA``:
* ``pre_local_state``
* ``pre_tgt_state``
* ``post_local_state``
* ``post_tgt_state``
Sections with ``pre`` in their name are evaluated before a package is installed
and sections with ``post`` are evaluated after a package is installed. ``local``
states are evaluated before ``tgt`` states.
Each of these sections needs to be evaluated as text, rather than as YAML.
Consider the following block:
.. code-block::
pre_local_state: >
echo test > /tmp/spmtest:
cmd:
- run
Note that this declaration uses ``>`` after ``pre_local_state``. This is a YAML
marker that marks the next multi-line block as text, including newlines. It is
important to use this marker whenever declaring ``pre`` or ``post`` states, so
that the text following it can be evaluated properly.
local States
~~~~~~~~~~~~
``local`` states are evaluated locally; this is analogous to issuing a state
run using a ``salt-call --local`` command. These commands will be issued on the
local machine running the ``spm`` command, whether that machine is a master or
a minion.
``local`` states do not require any special arguments, but they must still use
the ``>`` marker to denote that the state is evaluated as text, not a data
structure.
.. code-block::
pre_local_state: >
echo test > /tmp/spmtest:
cmd:
- run
tgt States
~~~~~~~~~~
``tgt`` states are issued against a remote target. This is analogous to issuing
a state using the ``salt`` command. As such it requires that the machine that
the ``spm`` command is running on is a master.
Because ``tgt`` states require that a target be specified, their code blocks
are a little different. Consider the following state:
.. code-block::
pre_tgt_state:
tgt: '*'
data: >
echo test > /tmp/spmtest:
cmd:
- run
With ``tgt`` states, the state data is placed under a ``data`` section, inside
the ``*_tgt_state`` code block. The target is of course specified as a ``tgt``
and you may also optionally specify a ``tgt_type`` (the default is ``glob``).
You still need to use the ``>`` marker, but this time it follows the ``data``
line, rather than the ``*_tgt_state`` line.
Templating States
~~~~~~~~~~~~~~~~~
The reason that state data must be evaluated as text rather than a data
structure is because that state data is first processed through the rendering
engine, as it would be with a standard state run.
This means that you can use Jinja or any other supported renderer inside of
Salt. All formula variables are available to the renderer, so you can reference
``FORMULA`` data inside your state if you need to:
.. code-block::
pre_tgt_state:
tgt: '*'
data: >
echo {{ name }} > /tmp/spmtest:
cmd:
- run
You may also declare your own variables inside the ``FORMULA``. If SPM doesn't
recognize them then it will ignore them, so there are no restrictions on
variable names, outside of avoiding reserved words.
By default the renderer is set to ``yaml_jinja``. You may change this by
changing the ``renderer`` setting in the ``FORMULA`` itself.
Building a Package
------------------
Once a ``FORMULA`` file has been created, it is placed into the root of the

View File

@ -33,7 +33,7 @@ be called to execute a function, or a specific kernel.
Calling via a grain is done by passing the -G option to salt, specifying
a grain and a glob expression to match the value of the grain. The syntax for
the target is the grain key followed by a globexpression: "os:Arch*".
the target is the grain key followed by a glob expression: "os:Arch*".
.. code-block:: bash
@ -48,7 +48,7 @@ grains.item salt function:
salt '*' grains.items
more info on using targeting with grains can be found :ref:`here
More info on using targeting with grains can be found :ref:`here
<targeting-grains>`.
Compound Targeting
@ -73,7 +73,7 @@ is used with ``G@`` as well as a regular expression with ``E@``. The
``webser*`` target does not need to be prefaced with a target type specifier
because it is a glob.
more info on using compound targeting can be found :ref:`here
More info on using compound targeting can be found :ref:`here
<targeting-compound>`.
Node Group Targeting
@ -94,6 +94,10 @@ shorthand for having to type out complicated compound expressions.
 group2: 'G@os:Debian and foo.domain.com'
 group3: 'G@os:Debian and N@group1'
Advanced Targeting Methods
==========================
There are many ways to target individual minions or groups of minions in Salt:
.. toctree::

View File

@ -57,8 +57,13 @@ The minimal `ssl` option in the minion configuration file looks like this:
.. code-block:: yaml
ssl: True
# Versions below 2016.11.4:
ssl: {}
Specific options can be sent to the minion also, as defined in the Python
`ssl.wrap_socket` function.
.. note::
While setting the ssl_version is not required, we recommend it. Some older

View File

@ -379,7 +379,7 @@ the test method:
.. code-block:: python
import integration
from salttesting.helpers import destructiveTest
from tests.support.helpers import destructiveTest
class PkgTest(integration.ModuleCase):
@destructiveTest
@ -462,7 +462,7 @@ can be used
.. code-block:: python
# Import logging handler
from salttesting.helpers import TestsLoggingHandler
from tests.support.helpers import TestsLoggingHandler
# .. inside test
with TestsLoggingHandler() as handler:

View File

@ -81,9 +81,3 @@ levels of symlinks (defaults to 64), an error is always raised.
For some functions, this behavior is different to the behavior on Unix
platforms. In general, avoid symlink loops on either platform.
Modifying security properties (ACLs) on files
=============================================
There is no support in Salt for modifying ACLs, and therefore no support for
changing file permissions, besides modifying the owner/user.

View File

@ -11,6 +11,7 @@ elif __file__:
ROOT_DIR=application_path.split("bin/appdata")[0]
# Copied from syspaths.py
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt')
CONFIG_DIR = os.path.join(ROOT_DIR, 'etc')
CACHE_DIR = os.path.join(ROOT_DIR, 'var', 'cache', 'salt')
SOCK_DIR = os.path.join(ROOT_DIR, 'var', 'run', 'salt')

View File

@ -5,5 +5,7 @@ apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltTesting
SaltPyLint>=v2017.2.22
SaltPyLint>=v2017.2.29
GitPython>=0.3
pytest
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View File

@ -5,5 +5,7 @@ apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltTesting
SaltPyLint>=v2017.2.22
SaltPyLint>=v2017.2.29
GitPython>=0.3
pytest
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View File

@ -58,7 +58,8 @@ def auth(username, password):
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
result = salt.utils.http.query(url, method='POST', data=data, status=True,
decode=True)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
if result['dict'] is not None:

View File

@ -67,6 +67,26 @@ def __validate__(config):
return True, 'Valid beacon configuration'
def _enforce_txt_record_maxlen(key, value):
'''
Enforces the TXT record maximum length of 255 characters.
TXT record length includes key, value, and '='.
:param str key: Key of the TXT record
:param str value: Value of the TXT record
:rtype: str
:return: The value of the TXT record. It may be truncated if it exceeds
the maximum permitted length. In case of truncation, '...' is
appended to indicate that the entire value is not present.
'''
# Add 1 for '=' seperator between key and value
if len(key) + len(value) + 1 > 255:
# 255 - 3 ('...') - 1 ('=') = 251
return value[:251 - len(key)] + '...'
return value
def beacon(config):
'''
Broadcast values via zeroconf
@ -158,11 +178,11 @@ def beacon(config):
grain_value = grain_value[grain_index]
else:
grain_value = ','.join(grain_value)
txt[item] = grain_value
txt[item] = _enforce_txt_record_maxlen(item, grain_value)
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
changes[str('txt.' + item)] = txt[item]
else:
txt[item] = config['txt'][item]
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
if not LAST_GRAINS:
changes[str('txt.' + item)] = txt[item]

View File

@ -60,6 +60,26 @@ def __validate__(config):
return True, 'Valid beacon configuration'
def _enforce_txt_record_maxlen(key, value):
'''
Enforces the TXT record maximum length of 255 characters.
TXT record length includes key, value, and '='.
:param str key: Key of the TXT record
:param str value: Value of the TXT record
:rtype: str
:return: The value of the TXT record. It may be truncated if it exceeds
the maximum permitted length. In case of truncation, '...' is
appended to indicate that the entire value is not present.
'''
# Add 1 for '=' seperator between key and value
if len(key) + len(value) + 1 > 255:
# 255 - 3 ('...') - 1 ('=') = 251
return value[:251 - len(key)] + '...'
return value
def beacon(config):
'''
Broadcast values via zeroconf
@ -152,11 +172,11 @@ def beacon(config):
grain_value = grain_value[grain_index]
else:
grain_value = ','.join(grain_value)
txt[item] = grain_value
txt[item] = _enforce_txt_record_maxlen(item, grain_value)
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
changes[str('txt.' + item)] = txt[item]
else:
txt[item] = config['txt'][item]
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
if not LAST_GRAINS:
changes[str('txt.' + item)] = txt[item]

View File

@ -143,8 +143,8 @@ class Cache(object):
added by the driver itself.
:return:
Return a python object fetched from the cache or None if the given
path or key not found.
Return a python object fetched from the cache or an empty dict if
the given path or key not found.
:raises SaltCacheError:
Raises an exception if cache driver detected an error accessing data

View File

@ -104,7 +104,7 @@ def fetch(bank, key):
try:
_, value = api.kv.get(c_key)
if value is None:
return value
return {}
return __context__['serial'].loads(value['Value'])
except Exception as exc:
raise SaltCacheError(

View File

@ -64,7 +64,7 @@ def fetch(bank, key, cachedir):
key_file = os.path.join(cachedir, os.path.normpath(bank), '{0}.p'.format(key))
if not os.path.isfile(key_file):
log.debug('Cache file "%s" does not exist', key_file)
return None
return {}
try:
with salt.utils.fopen(key_file, 'rb') as fh_:
return __context__['serial'].load(fh_)

View File

@ -310,7 +310,7 @@ def fetch(bank, key):
log.error(mesg)
raise SaltCacheError(mesg)
if redis_value is None:
return redis_value
return {}
return __context__['serial'].loads(redis_value)

View File

@ -58,21 +58,21 @@ class Batch(object):
# Broadcast to targets
fret = set()
nret = set()
try:
for ret in ping_gen:
if ('minions' and 'jid') in ret:
for minion in ret['minions']:
nret.add(minion)
continue
else:
for ret in ping_gen:
if ('minions' and 'jid') in ret:
for minion in ret['minions']:
nret.add(minion)
continue
else:
try:
m = next(six.iterkeys(ret))
if m is not None:
fret.add(m)
return (list(fret), ping_gen, nret.difference(fret))
except StopIteration:
if not self.quiet:
print_cli('No minions matched the target.')
return list(fret), ping_gen
except StopIteration:
if not self.quiet:
print_cli('No minions matched the target.')
break
if m is not None:
fret.add(m)
return (list(fret), ping_gen, nret.difference(fret))
def get_bnum(self):
'''

View File

@ -39,7 +39,15 @@ class SaltRun(parsers.SaltRunOptionParser):
pr = activate_profile(profiling_enabled)
try:
ret = runner.run()
if isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
# In older versions ret['data']['retcode'] was used
# for signaling the return code. This has been
# changed for the orchestrate runner, but external
# runners might still use it. For this reason, we
# also check ret['data']['retcode'] if
# ret['retcode'] is not available.
if isinstance(ret, dict) and 'retcode' in ret:
self.exit(ret['retcode'])
elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
self.exit(ret['data']['retcode'])
finally:
output_profile(

View File

@ -370,21 +370,14 @@ class SyncClientMixin(object):
args = low['arg']
if 'kwarg' not in low:
if f_call is None:
f_call = salt.utils.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
kwargs = f_call.get('kwargs', {})
# throw a warning for the badly formed low data if we found
# kwargs using the old mechanism
if kwargs:
salt.utils.warn_until(
'Nitrogen',
'kwargs must be passed inside the low under "kwargs"'
)
log.critical(
'kwargs must be passed inside the low data within the '
'\'kwarg\' key. See usage of '
'salt.utils.args.parse_input() and '
'salt.minion.load_args_and_kwargs() elsewhere in the '
'codebase.'
)
kwargs = {}
else:
kwargs = low['kwarg']

View File

@ -583,6 +583,8 @@ VALID_OPTS = {
'git_pillar_pubkey': str,
'git_pillar_passphrase': str,
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
'gitfs_remotes': list,
'gitfs_mountpoint': str,
'gitfs_root': str,
@ -684,6 +686,7 @@ VALID_OPTS = {
'fileserver_followsymlinks': bool,
'fileserver_ignoresymlinks': bool,
'fileserver_limit_traversal': bool,
'fileserver_verify_config': bool,
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
# higher than the system default in order to account for the way zeromq consumes file handles.
@ -1095,6 +1098,7 @@ DEFAULT_MINION_OPTS = {
'git_pillar_pubkey': '',
'git_pillar_passphrase': '',
'git_pillar_refspecs': _DFLT_REFSPECS,
'git_pillar_includes': True,
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
@ -1135,7 +1139,7 @@ DEFAULT_MINION_OPTS = {
'mine_interval': 60,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'ipv6': None,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
'tcp_pull_port': 4511,
@ -1316,6 +1320,8 @@ DEFAULT_MASTER_OPTS = {
'git_pillar_pubkey': '',
'git_pillar_passphrase': '',
'git_pillar_refspecs': _DFLT_REFSPECS,
'git_pillar_includes': True,
'git_pillar_verify_config': True,
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
@ -1390,6 +1396,7 @@ DEFAULT_MASTER_OPTS = {
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'fileserver_limit_traversal': False,
'fileserver_verify_config': True,
'max_open_files': 100000,
'hash_type': 'sha256',
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'master'),
@ -1618,6 +1625,7 @@ DEFAULT_SPM_OPTS = {
# If set, spm_node_type will be either master or minion, but they should
# NOT be a default
'spm_node_type': '',
'spm_share_dir': os.path.join(salt.syspaths.SHARE_DIR, 'spm'),
# <---- Salt master settings overridden by SPM ----------------------
}

View File

@ -29,6 +29,7 @@ import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
@ -37,7 +38,7 @@ import salt.utils.gzip_util
import salt.utils.jid
from salt.pillar import git_pillar
from salt.utils.event import tagify
from salt.exceptions import SaltMasterError
from salt.exceptions import FileserverConfigError, SaltMasterError
# Import 3rd-party libs
import salt.ext.six as six
@ -87,12 +88,19 @@ def init_git_pillar(opts):
)
else:
# New git_pillar code
pillar = salt.utils.gitfs.GitPillar(opts)
pillar.init_remotes(
opts_dict['git'],
git_pillar.PER_REMOTE_OVERRIDES
)
ret.append(pillar)
try:
pillar = salt.utils.gitfs.GitPillar(opts)
pillar.init_remotes(
opts_dict['git'],
git_pillar.PER_REMOTE_OVERRIDES,
git_pillar.PER_REMOTE_ONLY
)
ret.append(pillar)
except FileserverConfigError:
if opts.get('git_pillar_verify_config', True):
raise
else:
log.critical('Could not initialize git_pillar')
return ret
@ -844,7 +852,7 @@ class RemoteFuncs(object):
opts = {}
opts.update(self.opts)
opts.update({'fun': load['fun'],
'arg': load['arg'],
'arg': salt.utils.args.parse_input(load['arg']),
'id': load['id'],
'doc': False,
'conf_file': self.opts['conf_file']})
@ -894,7 +902,7 @@ class RemoteFuncs(object):
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'arg': salt.utils.args.parse_input(load['arg']),
'tgt_type': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
@ -947,7 +955,7 @@ class RemoteFuncs(object):
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'arg': salt.utils.args.parse_input(load['arg']),
'tgt_type': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
@ -1523,7 +1531,7 @@ class LocalFuncs(object):
'tgt': load['tgt'],
'user': load['user'],
'fun': load['fun'],
'arg': load['arg'],
'arg': salt.utils.args.parse_input(load['arg']),
'minions': minions,
}
@ -1575,7 +1583,7 @@ class LocalFuncs(object):
# way that won't have a negative impact.
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'arg': salt.utils.args.parse_input(load['arg']),
'tgt': load['tgt'],
'jid': load['jid'],
'ret': load['ret'],

View File

@ -28,6 +28,29 @@ prefaced with a ``!``.
list_commands:
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
:configuration: Example configuration using groups
.. versionadded: Nitrogen
engines:
slack:
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'
control: True
groups:
gods:
users:
- garethgreenaway
commands:
- test.ping
- cmd.run
- list_jobs
- list_commands
aliases:
list_jobs:
cmd: jobs.list_jobs
list_commands:
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
:depends: slackclient
'''
@ -86,10 +109,18 @@ def start(token,
valid_commands=None,
control=False,
trigger="!",
groups=None,
tag='salt/engines/slack'):
'''
Listen to Slack events and forward them to Salt
'''
if valid_users is None:
valid_users = []
if valid_commands is None:
valid_commands = []
if __opts__.get('__role') == 'master':
fire_master = salt.utils.event.get_master_event(
__opts__,
@ -136,6 +167,21 @@ def start(token,
if _text:
if _text.startswith(trigger) and control:
# Get groups
if groups:
for group in groups:
if 'users' in groups[group]:
# Add users to valid_users
valid_users.extend(groups[group]['users'])
# Add commands to valid_commands
if 'commands' in groups[group]:
valid_commands.extend(groups[group]['commands'])
# Add group aliases to aliases
if 'aliases' in groups[group]:
aliases.update(groups[group]['aliases'])
# Ensure the user is allowed to run commands
if valid_users:
log.debug('{0} {1}'.format(all_users, _m['user']))
@ -165,18 +211,18 @@ def start(token,
args = []
kwargs = {}
# Ensure the command is allowed
if valid_commands:
if cmd not in valid_commands:
channel.send_message('Using {0} is not allowed.'.format(cmd))
return
# Evaluate aliases
if aliases and isinstance(aliases, dict) and cmd in aliases.keys():
cmdline = aliases[cmd].get('cmd')
cmdline = salt.utils.shlex_split(cmdline)
cmd = cmdline[0]
# Ensure the command is allowed
if valid_commands:
if cmd not in valid_commands:
channel.send_message('{0} is not allowed to use command {1}.'.format(all_users[_m['user']], cmd))
return
# Parse args and kwargs
if len(cmdline) > 1:
for item in cmdline[1:]:

View File

@ -1340,8 +1340,7 @@ class FSClient(RemoteClient):
the FSChan object
'''
def __init__(self, opts): # pylint: disable=W0231
self.opts = opts
self.utils = salt.loader.utils(opts)
Client.__init__(self, opts) # pylint: disable=W0233
self.channel = salt.fileserver.FSChan(opts)
self.auth = DumbAuth()

View File

@ -688,7 +688,7 @@ def _virtual(osdata):
break
else:
if osdata['kernel'] not in skip_cmds:
log.warning(
log.debug(
'All tools for virtual hardware identification failed to '
'execute because they do not exist on the system running this '
'instance or the user does not have the necessary permissions '

View File

@ -653,17 +653,16 @@ def grains(opts, force_refresh=False, proxy=None):
except (IOError, OSError):
pass
else:
if force_refresh:
log.debug('Grains refresh requested. Refreshing grains.')
else:
log.debug('Grains cache last modified {0} seconds ago and '
'cache expiration is set to {1}. '
'Grains cache expired. Refreshing.'.format(
grains_cache_age,
opts.get('grains_cache_expiration', 300)
))
log.debug('Grains cache last modified {0} seconds ago and '
'cache expiration is set to {1}. '
'Grains cache expired. Refreshing.'.format(
grains_cache_age,
opts.get('grains_cache_expiration', 300)
))
else:
log.debug('Grains cache file does not exist.')
else:
log.debug('Grains refresh requested. Refreshing grains.')
if opts.get('skip_grains', False):
return {}
@ -1046,7 +1045,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.pack = {} if pack is None else pack
if opts is None:
opts = {}
self.context_dict = salt.utils.context.ContextDict()
threadsafety = not opts.get('multiprocessing')
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.module_dirs = module_dirs

View File

@ -323,11 +323,8 @@ class Maintenance(SignalHandlingMultiprocessingProcess):
for pillar in self.git_pillar:
pillar.update()
except Exception as exc:
log.error(
'Exception \'{0}\' caught while updating git_pillar'
.format(exc),
exc_info_on_loglevel=logging.DEBUG
)
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
@ -458,19 +455,21 @@ class Master(SMaster):
'Cannot change to root directory ({1})'.format(err)
)
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if self.opts.get('fileserver_verify_config', True):
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
@ -483,25 +482,29 @@ class Master(SMaster):
except OSError:
pass
non_legacy_git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
if non_legacy_git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
from salt.pillar.git_pillar \
import PER_REMOTE_OVERRIDES as overrides
for repo in non_legacy_git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
git_pillar.init_remotes(repo['git'], overrides)
except FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if self.opts.get('git_pillar_verify_config', True):
non_legacy_git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
if non_legacy_git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
from salt.pillar.git_pillar \
import PER_REMOTE_OVERRIDES as per_remote_overrides, \
PER_REMOTE_ONLY as per_remote_only
for repo in non_legacy_git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
git_pillar.init_remotes(repo['git'],
per_remote_overrides,
per_remote_only)
except FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:

View File

@ -133,7 +133,7 @@ log = logging.getLogger(__name__)
# 6. Handle publications
def resolve_dns(opts, fallback=True):
def resolve_dns(opts, fallback=True, connect=True):
'''
Resolves the master_ip and master_uri options
'''
@ -150,13 +150,13 @@ def resolve_dns(opts, fallback=True):
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
salt.utils.dns_check(opts['master'], opts['master_port'], True, opts['ipv6'], connect)
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
@ -164,7 +164,7 @@ def resolve_dns(opts, fallback=True):
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
opts['master'], opts['master_port'], True, opts['ipv6'], connect
)
break
except SaltClientError:
@ -285,24 +285,15 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Nitrogen',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
if string_kwarg:
log.critical(
'String kwarg(s) %s passed to '
'salt.minion.load_args_and_kwargs(). This is no longer '
'supported, so the kwarg(s) will be ignored. Arguments '
'passed to salt.minion.load_args_and_kwargs() should be '
'passed to salt.utils.args.parse_input() first to load '
'and condition them properly.', string_kwarg
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
@ -689,7 +680,13 @@ class SMinion(MinionBase):
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
# Ensure that a pillar key is set in the opts, otherwise the loader
# will pack a newly-generated empty dict as the __pillar__ dunder, and
@ -763,7 +760,13 @@ class MasterMinion(object):
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
@ -864,22 +867,25 @@ class MinionManager(MinionBase):
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons()
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler()
yield minion.connect_master()
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
failed = True
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
@ -1020,7 +1026,7 @@ class Minion(MinionBase):
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None):
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
@ -1031,7 +1037,7 @@ class Minion(MinionBase):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
@ -1059,11 +1065,11 @@ class Minion(MinionBase):
self.schedule.returners = self.returners
@tornado.gen.coroutine
def connect_master(self):
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@ -2626,6 +2632,7 @@ class SyndicManager(MinionBase):
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
@ -2634,7 +2641,7 @@ class SyndicManager(MinionBase):
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
@ -2644,6 +2651,7 @@ class SyndicManager(MinionBase):
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
failed = True
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
@ -2652,6 +2660,7 @@ class SyndicManager(MinionBase):
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
failed = True
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)

View File

@ -408,6 +408,10 @@ def _get_cron_date_time(**kwargs):
value = str(kwargs.get(param, '1')).lower()
if value == 'random':
ret[param] = str(random.sample(range_max[param], 1)[0])
elif len(value.split(':')) == 2:
cron_range = sorted(value.split(':'))
start, end = int(cron_range[0]), int(cron_range[1])
ret[param] = str(random.randint(start, end))
else:
ret[param] = value

View File

@ -183,6 +183,10 @@ Functions
- :py:func:`docker.disconnect_container_from_network
<salt.modules.docker.disconnect_container_from_network>`
- Salt Functions and States Execution
- :py:func:`docker.call <salt.modules.docker.call>`
- :py:func:`docker.sls <salt.modules.docker.sls>`
- :py:func:`docker.sls_build <salt.modules.docker.sls_build>`
.. _docker-execution-driver:
@ -571,6 +575,17 @@ VALID_CREATE_OPTS = {
'devices': {
'path': 'HostConfig:Devices',
},
'sysctls': {
'path': 'HostConfig:Sysctls',
'min_docker': (1, 12, 0),
'default': {},
},
'ulimits': {
'path': 'HostConfig:Ulimits',
'min_docker': (1, 6, 0),
'min_docker_py': (1, 2, 0),
'default': [],
},
}
@ -831,10 +846,10 @@ def _get_client(timeout=None):
'Docker machine {0} failed: {1}'.format(docker_machine, exc))
try:
__context__['docker.client'] = docker.Client(**client_kwargs)
except AttributeError:
# docker-py 2.0 renamed this client attribute
__context__['docker.client'] = docker.APIClient(**client_kwargs)
except AttributeError:
__context__['docker.client'] = docker.Client(**client_kwargs)
def _get_md5(name, path):
@ -1928,6 +1943,81 @@ def _validate_input(kwargs,
else:
kwargs['labels'] = salt.utils.repack_dictlist(kwargs['labels'])
def _valid_sysctls():  # pylint: disable=unused-variable
    '''
    Validate and normalize the ``sysctls`` argument.

    Accepts either a dict mapping sysctl names to string values, or a
    list of ``key=value`` strings which is repacked into a dict.
    Raises SaltInvocationError on malformed items, duplicate keys, or
    non-string values.
    '''
    if kwargs.get('sysctls') is None:
        # Nothing passed, nothing to validate
        return
    if isinstance(kwargs['sysctls'], list):
        repacked_sysctl = {}
        for sysctl_var in kwargs['sysctls']:
            try:
                # Split on the first '=' only so values containing '='
                # survive. AttributeError means the item was not a
                # string; ValueError means it contained no '=' at all.
                # Both are invalid definitions.
                key, val = sysctl_var.split('=', 1)
            except (AttributeError, ValueError):
                raise SaltInvocationError(
                    'Invalid sysctl variable definition \'{0}\''
                    .format(sysctl_var)
                )
            else:
                if key in repacked_sysctl:
                    raise SaltInvocationError(
                        'Duplicate sysctl variable \'{0}\''
                        .format(key)
                    )
                if not isinstance(val, six.string_types):
                    raise SaltInvocationError(
                        'sysctl values must be strings {key}=\'{val}\''
                        .format(key=key, val=val))
                repacked_sysctl[key] = val
        kwargs['sysctls'] = repacked_sysctl
    elif isinstance(kwargs['sysctls'], dict):
        for key, val in six.iteritems(kwargs['sysctls']):
            if not isinstance(val, six.string_types):
                # Message fixed: it is the *values* that must be
                # strings, not dicts.
                raise SaltInvocationError('sysctl values must be strings')
    else:
        # Neither a list nor a dict
        raise SaltInvocationError(
            'Invalid sysctls configuration.'
        )
def _valid_ulimits():  # pylint: disable=unused-variable
    '''
    Validate and normalize the ``ulimits`` argument.

    Accepts either a list of dicts already in the Docker API format
    (passed through untouched), or a string / list of strings of the
    form ``name=soft:hard`` (or ``name=limit`` to use the same value
    for both limits), which is repacked into the API format.
    '''
    if kwargs.get('ulimits') is None:
        # No need to validate
        return
    err = (
        'Invalid ulimits configuration. See the documentation for proper '
        'usage.'
    )
    try:
        _valid_dictlist('ulimits')
        # If this was successful then assume the correct API value was
        # passed on the CLI and do not proceed with validation.
        return
    except SaltInvocationError:
        pass
    try:
        _valid_stringlist('ulimits')
    except SaltInvocationError:
        raise SaltInvocationError(err)

    new_ulimits = []
    for ulimit in kwargs['ulimits']:
        try:
            # A string with no '=' raises ValueError on unpacking;
            # report it as a usage error rather than letting the raw
            # traceback escape.
            ulimit_name, comps = ulimit.strip().split('=', 1)
        except ValueError:
            raise SaltInvocationError(err)
        try:
            comps = [int(x) for x in comps.split(':', 1)]
        except ValueError:
            raise SaltInvocationError(err)
        if len(comps) == 1:
            # Single limit given: use it for both soft and hard
            comps *= 2
        soft_limit, hard_limit = comps
        new_ulimits.append({'Name': ulimit_name,
                            'Soft': soft_limit,
                            'Hard': hard_limit})
    kwargs['ulimits'] = new_ulimits
# And now, the actual logic to perform the validation
if 'docker.docker_version' not in __context__:
# Have to call this func using the __salt__ dunder (instead of just
@ -5793,7 +5883,17 @@ def _gather_pillar(pillarenv, pillar_override, **grains):
def call(name, function, *args, **kwargs):
'''
Executes a salt function inside a container
Executes a Salt function inside a running container
.. versionadded:: 2016.11.0
The container does not need to have Salt installed, but Python is required.
name
Container name or ID
function
Salt execution module function
CLI Example:
@ -5801,11 +5901,7 @@ def call(name, function, *args, **kwargs):
salt myminion docker.call test.ping
salt myminion test.arg arg1 arg2 key1=val1
The container does not need to have Salt installed, but Python
is required.
.. versionadded:: 2016.11.0
salt myminion dockerng.call compassionate_mirzakhani test.arg arg1 arg2 key1=val1
'''
# where to put the salt-thin
@ -5862,11 +5958,23 @@ def call(name, function, *args, **kwargs):
def sls(name, mods=None, saltenv='base', **kwargs):
'''
Apply the highstate defined by the specified modules.
Apply the states defined by the specified SLS modules to the running
container
For example, if your master defines the states ``web`` and ``rails``, you
can apply them to a container:
states by doing:
.. versionadded:: 2016.11.0
The container does not need to have Salt installed, but Python is required.
name
Container name or ID
mods : None
A string containing comma-separated list of SLS with defined states to
apply to the container.
saltenv : base
Specify the environment from which to retrieve the SLS indicated by the
`mods` parameter.
CLI Example:
@ -5874,10 +5982,6 @@ def sls(name, mods=None, saltenv='base', **kwargs):
salt myminion docker.sls compassionate_mirzakhani mods=rails,web
The container does not need to have Salt installed, but Python
is required.
.. versionadded:: 2016.11.0
'''
mods = [item.strip() for item in mods.split(',')] if mods else []
@ -5936,11 +6040,25 @@ def sls(name, mods=None, saltenv='base', **kwargs):
def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
dryrun=False, **kwargs):
'''
Build a docker image using the specified sls modules and base image.
Build a Docker image using the specified SLS modules on top of base image
For example, if your master defines the states ``web`` and ``rails``, you
can build a docker image inside myminion that results of applying those
states by doing:
.. versionadded:: 2016.11.0
The base image does not need to have Salt installed, but Python is required.
name
Image name to be built and committed
base : opensuse/python
Name or ID of the base image
mods : None
A string containing comma-separated list of SLS with defined states to
apply to the base image.
saltenv : base
Specify the environment from which to retrieve the SLS indicated by the
`mods` parameter.
base
the base image
@ -5966,12 +6084,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
salt myminion docker.sls_build imgname base=mybase mods=rails,web
The base image does not need to have Salt installed, but Python
is required.
.. versionadded:: 2016.11.0
'''
create_kwargs = salt.utils.clean_kwargs(**copy.deepcopy(kwargs))
for key in ('image', 'name', 'cmd', 'interactive', 'tty'):
try:
@ -5981,7 +6094,6 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
# start a new container
ret = create(image=base,
name=name,
cmd='sleep infinity',
interactive=True, tty=True,
**create_kwargs)
@ -5994,13 +6106,12 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
# fail if the state was not successful
if not dryrun and not salt.utils.check_state_result(ret):
raise CommandExecutionError(ret)
if dryrun is False:
ret = commit(id_, name)
finally:
stop(id_)
if dryrun:
rm_(id_)
return ret
return commit(id_, name)
return ret
def get_client_args():
@ -6040,11 +6151,15 @@ def get_client_args():
except AttributeError:
try:
endpoint_config_args = \
_argspec(docker.utils.create_endpoint_config).args
_argspec(docker.utils.utils.create_endpoint_config).args
except AttributeError:
raise CommandExecutionError(
'Failed to get create_host_config argspec'
)
try:
endpoint_config_args = \
_argspec(docker.utils.create_endpoint_config).args
except AttributeError:
raise CommandExecutionError(
'Failed to get create_endpoint_config argspec'
)
for arglist in (config_args, host_config_args, endpoint_config_args):
try:

View File

@ -101,6 +101,8 @@ def _get_instance(hosts=None, profile=None):
use_ssl = _profile.get('use_ssl', False)
ca_certs = _profile.get('ca_certs', False)
verify_certs = _profile.get('verify_certs', False)
username = _profile.get('username', None)
password = _profile.get('password', None)
if not hosts:
hosts = ['127.0.0.1:9200']
@ -114,6 +116,14 @@ def _get_instance(hosts=None, profile=None):
ca_certs=ca_certs,
verify_certs=verify_certs,
)
elif username and password:
es = elasticsearch.Elasticsearch(
hosts,
use_ssl=use_ssl,
ca_certs=ca_certs,
verify_certs=verify_certs,
http_auth=(username, password)
)
else:
# Custom connection class to use requests module with proxies
class ProxyConnection(RequestsHttpConnection):

View File

@ -687,31 +687,54 @@ def check_hash(path, file_hash):
'''
Check if a file matches the given hash string
Returns true if the hash matched, otherwise false. Raises ValueError if
the hash was not formatted correctly.
Returns ``True`` if the hash matches, otherwise ``False``.
path
A file path
Path to a file local to the minion.
hash
A string in the form <hash_type>:<hash_value>. For example:
``md5:e138491e9d5b97023cea823fe17bac22``
The hash to check against the file specified in the ``path`` argument.
For versions 2016.11.4 and newer, the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>:<hash_value>`` (e.g.
``md5:e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab md5:<md5sum>
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22
'''
path = os.path.expanduser(path)
hash_parts = file_hash.split(':', 1)
if len(hash_parts) != 2:
# Support "=" for backward compatibility.
hash_parts = file_hash.split('=', 1)
if len(hash_parts) != 2:
raise ValueError('Bad hash format: \'{0}\''.format(file_hash))
hash_form, hash_value = hash_parts
return get_hash(path, hash_form) == hash_value
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')
for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):

View File

@ -210,7 +210,7 @@ def setvals(grains, destructive=False, refresh=True):
Defaults to False.
refresh
Refresh minion grains using saltutil.sync_grains.
Refresh modules and pillar after adding the new grains.
Defaults to True.
CLI Example:
@ -308,7 +308,7 @@ def setval(key, val, destructive=False, refresh=True):
Defaults to False.
refresh
Refresh minion grains using saltutil.sync_grains.
Refresh modules and pillar after adding the new grain.
Defaults to True.
CLI Example:

View File

@ -156,6 +156,67 @@ def db_remove(name, user=None, password=None, host=None, port=None, authdb=None)
return True
def _version(mdb):
return mdb.command('buildInfo')['version']
def version(user=None, password=None, host=None, port=None, database='admin', authdb=None):
    '''
    Get MongoDB instance version

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.version <user> <password> <host> <port> <database>
    '''
    conn = _connect(user, password, host, port, authdb=authdb)
    if not conn:
        err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port)
        log.error(err_msg)
        return (False, err_msg)

    try:
        mdb = pymongo.database.Database(conn, database)
        return _version(mdb)
    except pymongo.errors.PyMongoError as err:
        # Fixed copy/paste error in the log message: this function
        # retrieves the server version, it does not list users.
        log.error(
            'Getting MongoDB version failed with error: {0}'.format(
                str(err)
            )
        )
        # NOTE(review): returns a bare string here but a (False, msg)
        # tuple on connection failure — inconsistent, but preserved
        # because callers may depend on it; confirm before unifying.
        return str(err)
def user_find(name, user=None, password=None, host=None, port=None,
              database='admin', authdb=None):
    '''
    Look up a single MongoDB user by name via the ``usersInfo``
    command and return the matching user documents.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb>
    '''
    connection = _connect(user, password, host, port, authdb=authdb)
    if not connection:
        message = "Failed to connect to MongoDB database {0}:{1}".format(host, port)
        log.error(message)
        return (False, message)

    target_db = pymongo.database.Database(connection, database)
    try:
        result = target_db.command("usersInfo", name)
    except pymongo.errors.PyMongoError as err:
        log.error(
            'Listing users failed with error: {0}'.format(
                str(err)
            )
        )
        return (False, str(err))
    return result["users"]
def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None):
'''
List users of a Mongodb database
@ -175,20 +236,20 @@ def user_list(user=None, password=None, host=None, port=None, database='admin',
mdb = pymongo.database.Database(conn, database)
output = []
mongodb_version = mdb.eval('db.version()')
mongodb_version = _version(mdb)
if LooseVersion(mongodb_version) >= LooseVersion('2.6'):
for user in mdb.eval('db.getUsers()'):
output.append([
('user', user['user']),
('roles', user['roles'])
])
for user in mdb.command('usersInfo')['users']:
output.append(
{'user': user['user'],
'roles': user['roles']}
)
else:
for user in mdb.system.users.find():
output.append([
('user', user['user']),
('readOnly', user.get('readOnly', 'None'))
])
output.append(
{'user': user['user'],
'readOnly': user.get('readOnly', 'None')}
)
return output
except pymongo.errors.PyMongoError as err:
@ -224,7 +285,7 @@ def user_exists(name, user=None, password=None, host=None, port=None,
def user_create(name, passwd, user=None, password=None, host=None, port=None,
database='admin', authdb=None):
database='admin', authdb=None, roles=None):
'''
Create a Mongodb user
@ -232,16 +293,19 @@ def user_create(name, passwd, user=None, password=None, host=None, port=None,
.. code-block:: bash
salt '*' mongodb.user_create <name> <user> <password> <host> <port> <database>
salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database>
'''
conn = _connect(user, password, host, port, authdb=authdb)
if not conn:
return 'Failed to connect to mongo database'
if not roles:
roles = []
try:
log.info('Creating user {0}'.format(name))
mdb = pymongo.database.Database(conn, database)
mdb.add_user(name, passwd)
mdb.add_user(name, passwd, roles=roles)
except pymongo.errors.PyMongoError as err:
log.error(
'Creating database {0} failed with error: {1}'.format(
@ -347,7 +411,7 @@ def user_grant_roles(name, roles, database, user=None, password=None, host=None,
try:
log.info('Granting roles {0} to user {1}'.format(roles, name))
mdb = pymongo.database.Database(conn, database)
mdb.eval("db.grantRolesToUser('{0}', {1})".format(name, roles))
mdb.command("grantRolesToUser", name, roles=roles)
except pymongo.errors.PyMongoError as err:
log.error(
'Granting roles {0} to user {1} failed with error: {2}'.format(
@ -386,7 +450,7 @@ def user_revoke_roles(name, roles, database, user=None, password=None, host=None
try:
log.info('Revoking roles {0} from user {1}'.format(roles, name))
mdb = pymongo.database.Database(conn, database)
mdb.eval("db.revokeRolesFromUser('{0}', {1})".format(name, roles))
mdb.command("revokeRolesFromUser", name, roles=roles)
except pymongo.errors.PyMongoError as err:
log.error(
'Revoking roles {0} from user {1} failed with error: {2}'.format(

View File

@ -621,19 +621,22 @@ def query(database, query, **connection_args):
orig_conv = MySQLdb.converters.conversions
conv_iter = iter(orig_conv)
conv = dict(zip(conv_iter, [str] * len(orig_conv)))
# some converters are lists, do not break theses
conv[FIELD_TYPE.BLOB] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.STRING] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.VAR_STRING] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.VARCHAR] = [
(FLAG.BINARY, str),
]
conv_mysqldb = {'MYSQLDB': True}
if conv_mysqldb.get(MySQLdb.__package__.upper()):
conv[FIELD_TYPE.BLOB] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.STRING] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.VAR_STRING] = [
(FLAG.BINARY, str),
]
conv[FIELD_TYPE.VARCHAR] = [
(FLAG.BINARY, str),
]
connection_args.update({'connection_db': database, 'connection_conv': conv})
dbc = _connect(**connection_args)

View File

@ -78,6 +78,7 @@ def xccdf(params):
error = None
upload_dir = None
action = None
returncode = None
try:
parser = _ArgumentParser()
@ -92,14 +93,17 @@ def xccdf(params):
tempdir = tempfile.mkdtemp()
proc = Popen(
shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
(stdoutdata, stderrdata) = proc.communicate()
(stdoutdata, error) = proc.communicate()
success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
returncode = proc.returncode
if success:
caller = Caller()
caller.cmd('cp.push_dir', tempdir)
shutil.rmtree(tempdir, ignore_errors=True)
upload_dir = tempdir
else:
error = stderrdata
return dict(success=success, upload_dir=upload_dir, error=error)
return dict(
success=success,
upload_dir=upload_dir,
error=error,
returncode=returncode)

View File

@ -38,7 +38,7 @@ from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
REPO_REGEXP = r'^#?\s*(src|src/gz)\s+[^\s<>]+\s+[^\s<>]+'
REPO_REGEXP = r'^#?\s*(src|src/gz)\s+([^\s<>]+|"[^<>]+")\s+[^\s<>]+'
OPKG_CONFDIR = '/etc/opkg'
ATTR_MAP = {
'Architecture': 'arch',
@ -993,7 +993,7 @@ def list_repos():
line = line[1:]
else:
repo['enabled'] = True
cols = line.strip().split()
cols = salt.utils.shlex_split(line.strip())
if cols[0] in 'src':
repo['compressed'] = False
else:
@ -1038,7 +1038,7 @@ def _del_repo_from_file(alias, filepath):
if regex.search(line):
if line.startswith('#'):
line = line[1:]
cols = line.strip().split()
cols = salt.utils.shlex_split(line.strip())
if alias != cols[1]:
output.append(line)
with open(filepath, 'w') as fhandle:
@ -1051,7 +1051,11 @@ def _add_new_repo(alias, uri, compressed, enabled=True):
'''
repostr = '# ' if not enabled else ''
repostr += 'src/gz ' if compressed else 'src '
repostr += alias + ' ' + uri + '\n'
if ' ' in alias:
repostr += '"' + alias + '" '
else:
repostr += alias + ' '
repostr += uri + '\n'
conffile = os.path.join(OPKG_CONFDIR, alias + '.conf')
with open(conffile, 'a') as fhandle:
@ -1065,7 +1069,8 @@ def _mod_repo_in_file(alias, repostr, filepath):
with open(filepath) as fhandle:
output = []
for line in fhandle:
if alias not in line:
cols = salt.utils.shlex_split(line.strip())
if alias not in cols:
output.append(line)
else:
output.append(repostr + '\n')
@ -1161,7 +1166,11 @@ def mod_repo(alias, **kwargs):
repostr += 'src/gz ' if kwargs['compressed'] else 'src'
else:
repostr += 'src/gz' if source['compressed'] else 'src'
repostr += ' {0}'.format(kwargs['alias'] if 'alias' in kwargs else alias)
repo_alias = kwargs['alias'] if 'alias' in kwargs else alias
if ' ' in repo_alias:
repostr += ' "{0}"'.format(repo_alias)
else:
repostr += ' {0}'.format(repo_alias)
repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri'])
_mod_repo_in_file(alias, repostr, source['file'])
elif uri and source['uri'] == uri:

View File

@ -359,8 +359,9 @@ def build_schedule_item(name, **kwargs):
else:
schedule[name]['splay'] = kwargs['splay']
for item in ['range', 'when', 'once', 'once_fmt', 'cron', 'returner',
'return_config', 'return_kwargs', 'until', 'run_on_start']:
for item in ['range', 'when', 'once', 'once_fmt', 'cron',
'returner', 'after', 'return_config', 'return_kwargs',
'until', 'run_on_start']:
if item in kwargs:
schedule[name][item] = kwargs[item]

View File

@ -53,7 +53,7 @@ def __virtual__():
if __grains__['kernel'] != 'Linux':
return (False, 'Non Linux OSes are not supported')
# SUSE >=12.0 uses systemd
if __grains__.get('os_family', '') == 'SUSE':
if __grains__.get('os_family', '') == 'Suse':
try:
# osrelease might be in decimal format (e.g. "12.1"), or for
# SLES might include service pack (e.g. "11 SP3"), so split on

View File

@ -421,8 +421,10 @@ def reload_modules():
salt '*' sys.reload_modules
'''
# This is handled inside the minion.py file, the function is caught before
# it ever gets here
# This function is actually handled inside the minion.py file, the function
# is caught before it ever gets here. Therefore, the docstring above is
# only for the online docs, and ANY CHANGES made to it must also be made in
# each of the gen_modules() funcs in minion.py.
return True

View File

@ -274,7 +274,7 @@ def _runlevel():
return ret
def _systemctl_cmd(action, name=None, systemd_scope=False):
def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False):
'''
Build a systemctl command line. Treat unit names without one
of the valid suffixes as a service.
@ -285,6 +285,8 @@ def _systemctl_cmd(action, name=None, systemd_scope=False):
and __salt__['config.get']('systemd.scope', True):
ret.extend(['systemd-run', '--scope'])
ret.append('systemctl')
if no_block:
ret.append('--no-block')
if isinstance(action, six.string_types):
action = shlex.split(action)
ret.extend(action)
@ -731,7 +733,7 @@ def masked(name, runtime=False):
return False
def start(name):
def start(name, no_block=False):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@ -746,6 +748,11 @@ def start(name):
Start the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
@ -755,11 +762,11 @@ def start(name):
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](
_systemctl_cmd('start', name, systemd_scope=True),
_systemctl_cmd('start', name, systemd_scope=True, no_block=no_block),
python_shell=False) == 0
def stop(name):
def stop(name, no_block=False):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@ -774,6 +781,11 @@ def stop(name):
Stop the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
@ -782,11 +794,11 @@ def stop(name):
'''
_check_for_unit_changes(name)
return __salt__['cmd.retcode'](
_systemctl_cmd('stop', name, systemd_scope=True),
_systemctl_cmd('stop', name, systemd_scope=True, no_block=no_block),
python_shell=False) == 0
def restart(name):
def restart(name, no_block=False):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@ -801,6 +813,11 @@ def restart(name):
Restart the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
@ -810,11 +827,11 @@ def restart(name):
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](
_systemctl_cmd('restart', name, systemd_scope=True),
_systemctl_cmd('restart', name, systemd_scope=True, no_block=no_block),
python_shell=False) == 0
def reload_(name):
def reload_(name, no_block=False):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@ -829,6 +846,11 @@ def reload_(name):
Reload the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
@ -838,11 +860,11 @@ def reload_(name):
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](
_systemctl_cmd('reload', name, systemd_scope=True),
_systemctl_cmd('reload', name, systemd_scope=True, no_block=no_block),
python_shell=False) == 0
def force_reload(name):
def force_reload(name, no_block=True):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
@ -859,6 +881,11 @@ def force_reload(name):
Force-reload the specified service with systemd
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
@ -868,7 +895,8 @@ def force_reload(name):
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](
_systemctl_cmd('force-reload', name, systemd_scope=True),
_systemctl_cmd('force-reload', name,
systemd_scope=True, no_block=no_block),
python_shell=False) == 0
@ -908,12 +936,18 @@ def enable(name, **kwargs): # pylint: disable=unused-argument
Enable the named service to start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
no_block = kwargs.pop('no_block', False)
_check_for_unit_changes(name)
unmask(name)
if name in _get_sysv_services():
@ -930,7 +964,7 @@ def enable(name, **kwargs): # pylint: disable=unused-argument
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('enable', name, systemd_scope=True),
_systemctl_cmd('enable', name, systemd_scope=True, no_block=no_block),
python_shell=False,
ignore_retcode=True) == 0
@ -952,12 +986,18 @@ def disable(name, **kwargs): # pylint: disable=unused-argument
Disable the named service to not start when the system boots
no_block : False
Set to ``True`` to start the service using ``--no-block``.
.. versionadded:: Nitrogen
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
no_block = kwargs.pop('no_block', False)
_check_for_unit_changes(name)
if name in _get_sysv_services():
cmd = []
@ -973,9 +1013,9 @@ def disable(name, **kwargs): # pylint: disable=unused-argument
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('disable', name, systemd_scope=True),
_systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block),
python_shell=False,
ignore_recode=True) == 0
ignore_retcode=True) == 0
# The unused kwargs argument is required to maintain consistency with the API

View File

@ -45,9 +45,13 @@ def list_():
'''
result = __salt__['cmd.run']('tuned-adm list').splitlines()
# Remove "Available profiles:"
result.pop(0)
# Remove "Current active profile:.*"
result.pop()
result = [i.lstrip('- ') for i in result]
# Output can be : " - <profile name> - <description>" (v2.7.1)
# or " - <profile name> " (v2.4.1)
result = [i.split('-')[1].strip() for i in result]
return result

View File

@ -38,8 +38,25 @@ Functions to interact with Hashicorp Vault.
Grains are also available, for example like this:
``my-policies/{grains[os]}``
Optional. If policies is not configured, saltstack/minions and
saltstack/{minion} are used as defaults.
If a template contains a grain which evaluates to a list, it will be
expanded into multiple policies. For example, given the template
``saltstack/by-role/{grains[roles]}``, and a minion having these grains:
.. code-block: yaml
grains:
roles:
- web
- database
The minion will have the policies ``saltstack/by-role/web`` and
``saltstack/by-role/database``. Note however that list members which do
not have simple string representations, such as dictionaries or objects,
do not work and will throw an exception. Strings and numbers are
examples of types which work well.
Optional. If policies is not configured, ``saltstack/minions`` and
``saltstack/{minion}`` are used as defaults.
Add this segment to the master configuration file, or

View File

@ -175,7 +175,7 @@ import salt.utils.vmware
import salt.utils.http
from salt.utils import dictupdate
from salt.exceptions import CommandExecutionError, VMwareSaltError
from salt.utils.decorators import depends
from salt.utils.decorators import depends, ignores_kwargs
from salt.utils import clean_kwargs
# Import Third Party Libs
@ -377,7 +377,7 @@ def disconnect(service_instance):
@depends(HAS_ESX_CLI)
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None):
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Run an ESXCLI command directly on the host or list of hosts.
@ -408,6 +408,9 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
CLI Example:
.. code-block:: bash
@ -428,7 +431,7 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
ret.update({esxi_host: {'Error': response.get('stdout')}})
else:
@ -436,7 +439,8 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
ret.update({host: {'Error': response.get('stdout')}})
else:
@ -446,7 +450,7 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
@depends(HAS_ESX_CLI)
def get_coredump_network_config(host, username, password, protocol=None, port=None, esxi_hosts=None):
def get_coredump_network_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Retrieve information on ESXi or vCenter network dump collection and
format it into a dictionary.
@ -472,6 +476,9 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: A dictionary with the network configuration, or, if getting
the network config failed, a an error message retrieved from the
standard cmd.run_all dictionary, per host.
@ -497,7 +504,7 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
ret.update({esxi_host: {'Error': response.get('stdout')}})
else:
@ -506,7 +513,8 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
ret.update({host: {'Error': response.get('stdout')}})
else:
@ -518,7 +526,7 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
@depends(HAS_ESX_CLI)
def coredump_network_enable(host, username, password, enabled, protocol=None, port=None, esxi_hosts=None):
def coredump_network_enable(host, username, password, enabled, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Enable or disable ESXi core dump collection. Returns ``True`` if coredump is enabled
and returns ``False`` if core dump is not enabled. If there was an error, the error
@ -548,6 +556,9 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
CLI Example:
.. code-block:: bash
@ -573,7 +584,7 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
ret.update({esxi_host: {'Error': response.get('stdout')}})
else:
@ -582,7 +593,8 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
ret.update({host: {'Error': response.get('stdout')}})
else:
@ -600,7 +612,8 @@ def set_coredump_network_config(host,
port=None,
host_vnic='vmk0',
dump_port=6500,
esxi_hosts=None):
esxi_hosts=None,
credstore=None):
'''
Set the network parameters for a network coredump collection.
@ -637,6 +650,9 @@ def set_coredump_network_config(host,
dump_port
TCP port to use for the dump, defaults to ``6500``.
credstore
Optionally set to path to the credential store file.
:return: A standard cmd.run_all dictionary with a `success` key added, per host.
`success` will be True if the set succeeded, False otherwise.
@ -662,7 +678,7 @@ def set_coredump_network_config(host,
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
response['success'] = False
else:
@ -673,7 +689,8 @@ def set_coredump_network_config(host,
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
response['success'] = False
else:
@ -684,7 +701,7 @@ def set_coredump_network_config(host,
@depends(HAS_ESX_CLI)
def get_firewall_status(host, username, password, protocol=None, port=None, esxi_hosts=None):
def get_firewall_status(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Show status of all firewall rule sets.
@ -709,6 +726,9 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: Nested dictionary with two toplevel keys ``rulesets`` and ``success``
``success`` will be True or False depending on query success
``rulesets`` will list the rulesets and their statuses if ``success``
@ -735,7 +755,7 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
if response['retcode'] != 0:
ret.update({esxi_host: {'Error': response['stdout'],
'success': False,
@ -746,7 +766,8 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
if response['retcode'] != 0:
ret.update({host: {'Error': response['stdout'],
'success': False,
@ -766,7 +787,8 @@ def enable_firewall_ruleset(host,
ruleset_name,
protocol=None,
port=None,
esxi_hosts=None):
esxi_hosts=None,
credstore=None):
'''
Enable or disable an ESXi firewall rule set.
@ -797,6 +819,9 @@ def enable_firewall_ruleset(host,
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: A standard cmd.run_all dictionary, per host.
CLI Example:
@ -822,19 +847,20 @@ def enable_firewall_ruleset(host,
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
ret.update({esxi_host: response})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
ret.update({host: response})
return ret
@depends(HAS_ESX_CLI)
def syslog_service_reload(host, username, password, protocol=None, port=None, esxi_hosts=None):
def syslog_service_reload(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Reload the syslog service so it will pick up any changes.
@ -859,6 +885,9 @@ def syslog_service_reload(host, username, password, protocol=None, port=None, es
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: A standard cmd.run_all dictionary. This dictionary will at least
have a `retcode` key. If `retcode` is 0 the command was successful.
@ -883,12 +912,13 @@ def syslog_service_reload(host, username, password, protocol=None, port=None, es
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
ret.update({esxi_host: response})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
ret.update({host: response})
return ret
@ -904,7 +934,8 @@ def set_syslog_config(host,
port=None,
firewall=True,
reset_service=True,
esxi_hosts=None):
esxi_hosts=None,
credstore=None):
'''
Set the specified syslog configuration parameter. By default, this function will
reset the syslog service after the configuration is set.
@ -950,6 +981,9 @@ def set_syslog_config(host,
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: Dictionary with a top-level key of 'success' which indicates
if all the parameters were reset, and individual keys
for each parameter indicating which succeeded or failed, per host.
@ -980,7 +1014,7 @@ def set_syslog_config(host,
response = enable_firewall_ruleset(host, username, password,
ruleset_enable=True, ruleset_name='syslog',
protocol=protocol, port=port,
esxi_hosts=[esxi_host]).get(esxi_host)
esxi_hosts=[esxi_host], credstore=credstore).get(esxi_host)
if response['retcode'] != 0:
ret.update({esxi_host: {'enable_firewall': {'message': response['stdout'],
'success': False}}})
@ -990,7 +1024,8 @@ def set_syslog_config(host,
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = enable_firewall_ruleset(host, username, password,
ruleset_enable=True, ruleset_name='syslog',
protocol=protocol, port=port).get(host)
protocol=protocol, port=port,
credstore=credstore).get(host)
if response['retcode'] != 0:
ret.update({host: {'enable_firewall': {'message': response['stdout'],
'success': False}}})
@ -1005,7 +1040,8 @@ def set_syslog_config(host,
for esxi_host in esxi_hosts:
response = _set_syslog_config_helper(host, username, password, syslog_config,
config_value, protocol=protocol, port=port,
reset_service=reset_service, esxi_host=esxi_host)
reset_service=reset_service, esxi_host=esxi_host,
credstore=credstore)
# Ensure we don't overwrite any dictionary data already set
# By updating the esxi_host directly.
if ret.get(esxi_host) is None:
@ -1016,7 +1052,7 @@ def set_syslog_config(host,
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = _set_syslog_config_helper(host, username, password, syslog_config,
config_value, protocol=protocol, port=port,
reset_service=reset_service)
reset_service=reset_service, credstore=credstore)
# Ensure we don't overwrite any dictionary data already set
# By updating the host directly.
if ret.get(host) is None:
@ -1027,7 +1063,7 @@ def set_syslog_config(host,
@depends(HAS_ESX_CLI)
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None):
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
'''
Retrieve the syslog configuration.
@ -1052,6 +1088,9 @@ def get_syslog_config(host, username, password, protocol=None, port=None, esxi_h
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: Dictionary with keys and values corresponding to the
syslog configuration, per host.
@ -1076,13 +1115,14 @@ def get_syslog_config(host, username, password, protocol=None, port=None, esxi_h
for esxi_host in esxi_hosts:
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
# format the response stdout into something useful
ret.update({esxi_host: _format_syslog_config(response)})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
# format the response stdout into something useful
ret.update({host: _format_syslog_config(response)})
@ -1096,7 +1136,8 @@ def reset_syslog_config(host,
protocol=None,
port=None,
syslog_config=None,
esxi_hosts=None):
esxi_hosts=None,
credstore=None):
'''
Reset the syslog service to its default settings.
@ -1129,6 +1170,9 @@ def reset_syslog_config(host,
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
on a list of one or more ESXi machines.
credstore
Optionally set to path to the credential store file.
:return: Dictionary with a top-level key of 'success' which indicates
if all the parameters were reset, and individual keys
for each parameter indicating which succeeded or failed, per host.
@ -1170,18 +1214,20 @@ def reset_syslog_config(host,
response_dict = _reset_syslog_config_params(host, username, password,
cmd, resets, valid_resets,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
ret.update({esxi_host: response_dict})
else:
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
response_dict = _reset_syslog_config_params(host, username, password,
cmd, resets, valid_resets,
protocol=protocol, port=port)
protocol=protocol, port=port,
credstore=credstore)
ret.update({host: response_dict})
return ret
@ignores_kwargs('credstore')
def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None,
protocol=None, port=None, certificate_verify=False):
'''
@ -1253,6 +1299,7 @@ def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None,
return ret
@ignores_kwargs('credstore')
def get_ssh_key(host,
username,
password,
@ -1310,6 +1357,7 @@ def get_ssh_key(host,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
'''
Get the date/time information for a given host or list of host_names.
@ -1368,6 +1416,7 @@ def get_host_datetime(host, username, password, protocol=None, port=None, host_n
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_ntp_config(host, username, password, protocol=None, port=None, host_names=None):
'''
Get the NTP configuration information for a given host or list of host_names.
@ -1425,6 +1474,7 @@ def get_ntp_config(host, username, password, protocol=None, port=None, host_name
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_service_policy(host, username, password, service_name, protocol=None, port=None, host_names=None):
'''
Get the service name's policy for a given host or list of hosts.
@ -1531,6 +1581,7 @@ def get_service_policy(host, username, password, service_name, protocol=None, po
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_service_running(host, username, password, service_name, protocol=None, port=None, host_names=None):
'''
Get the service name's running state for a given host or list of hosts.
@ -1637,6 +1688,7 @@ def get_service_running(host, username, password, service_name, protocol=None, p
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_vmotion_enabled(host, username, password, protocol=None, port=None, host_names=None):
'''
Get the VMotion enabled status for a given host or a list of host_names. Returns ``True``
@ -1698,6 +1750,7 @@ def get_vmotion_enabled(host, username, password, protocol=None, port=None, host
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_vsan_enabled(host, username, password, protocol=None, port=None, host_names=None):
'''
Get the VSAN enabled status for a given host or a list of host_names. Returns ``True``
@ -1764,6 +1817,7 @@ def get_vsan_enabled(host, username, password, protocol=None, port=None, host_na
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, host_names=None):
'''
Returns a list of VSAN-eligible disks for a given host or list of host_names.
@ -1859,6 +1913,7 @@ def test_vcenter_connection(service_instance=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def system_info(host, username, password, protocol=None, port=None):
'''
Return system information about a VMware environment.
@ -1899,6 +1954,7 @@ def system_info(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_datacenters(host, username, password, protocol=None, port=None):
'''
Returns a list of datacenters for the the specified host.
@ -1936,6 +1992,7 @@ def list_datacenters(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_clusters(host, username, password, protocol=None, port=None):
'''
Returns a list of clusters for the the specified host.
@ -1973,6 +2030,7 @@ def list_clusters(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_datastore_clusters(host, username, password, protocol=None, port=None):
'''
Returns a list of datastore clusters for the the specified host.
@ -2009,6 +2067,7 @@ def list_datastore_clusters(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_datastores(host, username, password, protocol=None, port=None):
'''
Returns a list of datastores for the the specified host.
@ -2045,6 +2104,7 @@ def list_datastores(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_hosts(host, username, password, protocol=None, port=None):
'''
Returns a list of hosts for the the specified VMware environment.
@ -2081,6 +2141,7 @@ def list_hosts(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_resourcepools(host, username, password, protocol=None, port=None):
'''
Returns a list of resource pools for the the specified host.
@ -2117,6 +2178,7 @@ def list_resourcepools(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_networks(host, username, password, protocol=None, port=None):
'''
Returns a list of networks for the the specified host.
@ -2153,6 +2215,7 @@ def list_networks(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_vms(host, username, password, protocol=None, port=None):
'''
Returns a list of VMs for the the specified host.
@ -2189,6 +2252,7 @@ def list_vms(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_folders(host, username, password, protocol=None, port=None):
'''
Returns a list of folders for the the specified host.
@ -2225,6 +2289,7 @@ def list_folders(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_dvs(host, username, password, protocol=None, port=None):
'''
Returns a list of distributed virtual switches for the the specified host.
@ -2261,6 +2326,7 @@ def list_dvs(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_vapps(host, username, password, protocol=None, port=None):
'''
Returns a list of vApps for the the specified host.
@ -2298,6 +2364,7 @@ def list_vapps(host, username, password, protocol=None, port=None):
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_ssds(host, username, password, protocol=None, port=None, host_names=None):
'''
Returns a list of SSDs for the given host or list of host_names.
@ -2358,6 +2425,7 @@ def list_ssds(host, username, password, protocol=None, port=None, host_names=Non
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def list_non_ssds(host, username, password, protocol=None, port=None, host_names=None):
'''
Returns a list of Non-SSD disks for the given host or list of host_names.
@ -2425,6 +2493,7 @@ def list_non_ssds(host, username, password, protocol=None, port=None, host_names
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def set_ntp_config(host, username, password, ntp_servers, protocol=None, port=None, host_names=None):
'''
Set NTP configuration for a given host of list of host_names.
@ -2504,6 +2573,7 @@ def set_ntp_config(host, username, password, ntp_servers, protocol=None, port=No
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_start(host,
username,
password,
@ -2614,6 +2684,7 @@ def service_start(host,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_stop(host,
username,
password,
@ -2724,6 +2795,7 @@ def service_stop(host,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def service_restart(host,
username,
password,
@ -2834,6 +2906,7 @@ def service_restart(host,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def set_service_policy(host,
username,
password,
@ -2962,6 +3035,7 @@ def set_service_policy(host,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def update_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
'''
Update the date/time on the given host or list of host_names. This function should be
@ -3028,6 +3102,7 @@ def update_host_datetime(host, username, password, protocol=None, port=None, hos
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def update_host_password(host, username, password, new_password, protocol=None, port=None):
'''
Update the password for a given host.
@ -3090,6 +3165,7 @@ def update_host_password(host, username, password, new_password, protocol=None,
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vmotion_disable(host, username, password, protocol=None, port=None, host_names=None):
'''
Disable vMotion for a given host or list of host_names.
@ -3158,6 +3234,7 @@ def vmotion_disable(host, username, password, protocol=None, port=None, host_nam
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vmotion_enable(host, username, password, protocol=None, port=None, host_names=None, device='vmk0'):
'''
Enable vMotion for a given host or list of host_names.
@ -3230,6 +3307,7 @@ def vmotion_enable(host, username, password, protocol=None, port=None, host_name
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
'''
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
@ -3333,6 +3411,7 @@ def vsan_add_disks(host, username, password, protocol=None, port=None, host_name
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vsan_disable(host, username, password, protocol=None, port=None, host_names=None):
'''
Disable VSAN for a given host or list of host_names.
@ -3417,6 +3496,7 @@ def vsan_disable(host, username, password, protocol=None, port=None, host_names=
@depends(HAS_PYVMOMI)
@ignores_kwargs('credstore')
def vsan_enable(host, username, password, protocol=None, port=None, host_names=None):
'''
Enable VSAN for a given host or list of host_names.
@ -3724,7 +3804,7 @@ def _get_vsan_eligible_disks(service_instance, host, host_names):
def _reset_syslog_config_params(host, username, password, cmd, resets, valid_resets,
protocol=None, port=None, esxi_host=None):
protocol=None, port=None, esxi_host=None, credstore=None):
'''
Helper function for reset_syslog_config that resets the config and populates the return dictionary.
'''
@ -3738,7 +3818,7 @@ def _reset_syslog_config_params(host, username, password, cmd, resets, valid_res
if reset_param in valid_resets:
ret = salt.utils.vmware.esxcli(host, username, password, cmd + reset_param,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
ret_dict[reset_param] = {}
ret_dict[reset_param]['success'] = ret['retcode'] == 0
if ret['retcode'] != 0:
@ -3757,7 +3837,7 @@ def _reset_syslog_config_params(host, username, password, cmd, resets, valid_res
def _set_syslog_config_helper(host, username, password, syslog_config, config_value,
protocol=None, port=None, reset_service=None, esxi_host=None):
protocol=None, port=None, reset_service=None, esxi_host=None, credstore=None):
'''
Helper function for set_syslog_config that sets the config and populates the return dictionary.
'''
@ -3773,7 +3853,7 @@ def _set_syslog_config_helper(host, username, password, syslog_config, config_va
response = salt.utils.vmware.esxcli(host, username, password, cmd,
protocol=protocol, port=port,
esxi_host=esxi_host)
esxi_host=esxi_host, credstore=credstore)
# Update the return dictionary for success or error messages.
if response['retcode'] != 0:
@ -3791,12 +3871,13 @@ def _set_syslog_config_helper(host, username, password, syslog_config, config_va
host_name = host
response = syslog_service_reload(host, username, password,
protocol=protocol, port=port,
esxi_hosts=esxi_host).get(host_name)
esxi_hosts=esxi_host, credstore=credstore).get(host_name)
ret_dict.update({'syslog_restart': {'success': response['retcode'] == 0}})
return ret_dict
@ignores_kwargs('credstore')
def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name,
dvs_name, target_portgroup_name, uplink_portgroup_name,
protocol=None, port=None, host_names=None):

View File

@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
'''
Manage information about files on the minion, set/read user, group
data
data, modify the ACL of files/directories
:depends: - win32api
- win32file
- win32security
- win32con
- salt.utils.win_dacl
'''
from __future__ import absolute_import
@ -40,17 +41,6 @@ import salt.utils.atomicfile # do not remove, used in imported file.py function
from salt.exceptions import CommandExecutionError, SaltInvocationError
# pylint: enable=W0611
# Import third party libs
try:
import win32api
import win32file
import win32security
import win32con
from pywintypes import error as pywinerror
HAS_WINDOWS_MODULES = True
except ImportError:
HAS_WINDOWS_MODULES = False
# Import salt libs
import salt.utils
from salt.modules.file import (check_hash, # pylint: disable=W0611
@ -68,6 +58,14 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
from salt.utils import namespaced_function as _namespaced_function
HAS_WINDOWS_MODULES = False
if salt.utils.is_windows():
import win32api
import win32file
import win32con
from pywintypes import error as pywinerror
HAS_WINDOWS_MODULES = True
HAS_WIN_DACL = False
if salt.utils.is_windows():
import salt.utils.win_dacl
@ -195,92 +193,6 @@ def _resolve_symlink(path, max_depth=64):
return path
def _change_privilege_state(privilege_name, enable):
'''
Change the state, either enable or disable, of the named privilege for this
process.
If the change fails, an exception will be raised. If successful, it returns
True.
'''
log.debug(
'{0} the privilege {1} for this process.'.format(
'Enabling' if enable else 'Disabling',
privilege_name
)
)
# this is a pseudo-handle that doesn't need to be closed
hProc = win32api.GetCurrentProcess()
hToken = None
try:
hToken = win32security.OpenProcessToken(
hProc,
win32security.TOKEN_QUERY | win32security.TOKEN_ADJUST_PRIVILEGES
)
privilege = win32security.LookupPrivilegeValue(None, privilege_name)
if enable:
privilege_attrs = win32security.SE_PRIVILEGE_ENABLED
else:
# a value of 0 disables a privilege (there's no constant for it)
privilege_attrs = 0
# check that the handle has the requested privilege
token_privileges = dict(win32security.GetTokenInformation(
hToken, win32security.TokenPrivileges))
if privilege not in token_privileges:
if enable:
raise SaltInvocationError(
'The requested privilege {0} is not available for this '
'process (check Salt user privileges).'.format(privilege_name))
else: # disable a privilege this process does not have
log.debug(
'Cannot disable privilege {0} because this process '
'does not have that privilege.'.format(privilege_name)
)
return True
else:
# check if the privilege is already in the requested state
if token_privileges[privilege] == privilege_attrs:
log.debug(
'The requested privilege {0} is already in the '
'requested state.'.format(privilege_name)
)
return True
changes = win32security.AdjustTokenPrivileges(
hToken,
False,
[(privilege, privilege_attrs)]
)
finally:
if hToken:
win32api.CloseHandle(hToken)
if not bool(changes):
raise SaltInvocationError(
'Could not {0} the {1} privilege for this process'.format(
'enable' if enable else 'remove',
privilege_name
)
)
else:
return True
def _enable_privilege(privilege_name):
'''
Enables the named privilege for this process.
'''
return _change_privilege_state(privilege_name, True)
def _disable_privilege(privilege_name):
'''
Disables the named privilege for this process.
'''
return _change_privilege_state(privilege_name, False)
def gid_to_group(gid):
'''
Convert the group id to the group name on this system
@ -293,6 +205,12 @@ def gid_to_group(gid):
instead; an info level log entry will be generated if this function is used
directly.
Args:
gid (str): The gid of the group
Returns:
str: The name of the group
CLI Example:
.. code-block:: bash
@ -319,6 +237,12 @@ def group_to_gid(group):
instead; an info level log entry will be generated if this function is used
directly.
Args:
group (str): The name of the group
Returns:
str: The gid of the group
CLI Example:
.. code-block:: bash
@ -330,7 +254,10 @@ def group_to_gid(group):
log.info('The function {0} should not be used on Windows systems; '
'see function docs for details.'.format(func_name))
return _user_to_uid(group)
if group is None:
return ''
return salt.utils.win_dacl.get_sid_string(group)
def get_pgid(path, follow_symlinks=True):
@ -344,6 +271,16 @@ def get_pgid(path, follow_symlinks=True):
Ensure you know what you are doing before using this function.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The gid of the primary group
CLI Example:
.. code-block:: bash
@ -351,7 +288,7 @@ def get_pgid(path, follow_symlinks=True):
salt '*' file.get_pgid c:\\temp\\test.txt
'''
if not os.path.exists(path):
return False
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
@ -361,25 +298,8 @@ def get_pgid(path, follow_symlinks=True):
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
try:
secdesc = win32security.GetFileSecurity(
path, win32security.GROUP_SECURITY_INFORMATION
)
# Not all filesystems mountable within windows
# have SecurityDescriptor's. For instance, some mounted
# SAMBA shares, or VirtualBox's shared folders. If we
# can't load a file descriptor for the file, we default
# to "Everyone" - http://support.microsoft.com/kb/243330
except MemoryError:
# generic memory error (win2k3+)
return 'S-1-1-0'
except pywinerror as exc:
# Incorrect function error (win2k8+)
if exc.winerror == 1 or exc.winerror == 50:
return 'S-1-1-0'
raise
group_sid = secdesc.GetSecurityDescriptorGroup()
return win32security.ConvertSidToStringSid(group_sid)
group_name = salt.utils.win_dacl.get_primary_group(path)
return salt.utils.win_dacl.get_sid_string(group_name)
def get_pgroup(path, follow_symlinks=True):
@ -398,6 +318,16 @@ def get_pgroup(path, follow_symlinks=True):
means no value was returned. To be certain, use the `get_pgid` function
which will return the SID, including for the system 'None' group.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The name of the primary group
CLI Example:
.. code-block:: bash
@ -426,6 +356,16 @@ def get_gid(path, follow_symlinks=True):
If you do actually want to access the 'primary group' of a file, use
`file.get_pgid`.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The gid of the owner
CLI Example:
.. code-block:: bash
@ -460,6 +400,16 @@ def get_group(path, follow_symlinks=True):
If you do actually want to access the 'primary group' of a file, use
`file.get_pgroup`.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The name of the owner
CLI Example:
.. code-block:: bash
@ -479,6 +429,12 @@ def uid_to_user(uid):
'''
Convert a uid to a user name
Args:
uid (str): The user id to lookup
Returns:
str: The name of the user
CLI Example:
.. code-block:: bash
@ -488,24 +444,19 @@ def uid_to_user(uid):
if uid is None or uid == '':
return ''
sid = win32security.GetBinarySid(uid)
try:
name, domain, account_type = win32security.LookupAccountSid(None, sid)
return name
except pywinerror as exc:
# if user does not exist...
# 1332 = No mapping between account names and security IDs was carried
# out.
if exc.winerror == 1332:
return ''
else:
raise
return salt.utils.win_dacl.get_name(uid)
def user_to_uid(user):
'''
Convert user name to a uid
Args:
user (str): The user to lookup
Returns:
str: The user id of the user
CLI Example:
.. code-block:: bash
@ -514,28 +465,8 @@ def user_to_uid(user):
'''
if user is None:
user = salt.utils.get_user()
return _user_to_uid(user)
def _user_to_uid(user):
'''
Convert user name to a uid
'''
if user is None or user == '':
return ''
try:
sid, domain, account_type = win32security.LookupAccountName(None, user)
except pywinerror as exc:
# if user does not exist...
# 1332 = No mapping between account names and security IDs was carried
# out.
if exc.winerror == 1332:
return ''
else:
raise
return win32security.ConvertSidToStringSid(sid)
return salt.utils.win_dacl.get_sid_string(user)
def get_uid(path, follow_symlinks=True):
@ -545,6 +476,17 @@ def get_uid(path, follow_symlinks=True):
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
@ -553,7 +495,7 @@ def get_uid(path, follow_symlinks=True):
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
'''
if not os.path.exists(path):
return False
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
@ -562,20 +504,9 @@ def get_uid(path, follow_symlinks=True):
# supported on Windows Vista or later.
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
try:
secdesc = win32security.GetFileSecurity(
path, win32security.OWNER_SECURITY_INFORMATION
)
except MemoryError:
# generic memory error (win2k3+)
return 'S-1-1-0'
except pywinerror as exc:
# Incorrect function error (win2k8+)
if exc.winerror == 1 or exc.winerror == 50:
return 'S-1-1-0'
raise
owner_sid = secdesc.GetSecurityDescriptorOwner()
return win32security.ConvertSidToStringSid(owner_sid)
owner_sid = salt.utils.win_dacl.get_owner(path)
return salt.utils.win_dacl.get_sid_string(owner_sid)
def get_user(path, follow_symlinks=True):
@ -585,6 +516,17 @@ def get_user(path, follow_symlinks=True):
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The name of the owner
CLI Example:
.. code-block:: bash
@ -592,7 +534,18 @@ def get_user(path, follow_symlinks=True):
salt '*' file.get_user c:\\temp\\test.txt
salt '*' file.get_user c:\\temp\\test.txt follow_symlinks=False
'''
return uid_to_user(get_uid(path, follow_symlinks))
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
# pointing to. This behavior is *different* to *nix, therefore the symlink
# is first resolved manually if necessary. Remember symlinks are only
# supported on Windows Vista or later.
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
return salt.utils.win_dacl.get_owner(path)
def get_mode(path):
@ -602,6 +555,12 @@ def get_mode(path):
Right now we're just returning None because Windows' doesn't have a mode
like Linux
Args:
path (str): The path to the file or directory
Returns:
None
CLI Example:
.. code-block:: bash
@ -609,7 +568,7 @@ def get_mode(path):
salt '*' file.get_mode /etc/passwd
'''
if not os.path.exists(path):
return ''
raise CommandExecutionError('Path not found: {0}'.format(path))
func_name = '{0}.get_mode'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
@ -639,6 +598,15 @@ def lchown(path, user, group=None, pgroup=None):
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
@ -650,13 +618,11 @@ def lchown(path, user, group=None, pgroup=None):
if group:
func_name = '{0}.lchown'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The group parameter has no effect when using {0} on Windows '
'systems; see function docs for details.'.format(func_name))
log.debug(
'win_file.py {0} Ignoring the group parameter for {1}'.format(
func_name, path
)
)
log.info('The group parameter has no effect when using {0} on '
'Windows systems; see function docs for details.'
''.format(func_name))
log.debug('win_file.py {0} Ignoring the group parameter for {1}'
''.format(func_name, path))
group = None
return chown(path, user, group, pgroup, follow_symlinks=False)
@ -676,9 +642,17 @@ def chown(path, user, group=None, pgroup=None, follow_symlinks=True):
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
bool: True if successful, otherwise error
CLI Example:
@ -689,72 +663,26 @@ def chown(path, user, group=None, pgroup=None, follow_symlinks=True):
salt '*' file.chown c:\\temp\\test.txt myusername "pgroup='None'"
'''
# the group parameter is not used; only provided for API compatibility
if group:
if group is not None:
func_name = '{0}.chown'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The group parameter has no effect when using {0} on Windows '
'systems; see function docs for details.'.format(func_name))
log.debug(
'win_file.py {0} Ignoring the group parameter for {1}'.format(
func_name, path
)
)
group = None
err = ''
# get SID object for user
try:
userSID, domainName, objectType = win32security.LookupAccountName(None, user)
except pywinerror:
err += 'User does not exist\n'
if pgroup:
# get SID object for group
try:
groupSID, domainName, objectType = win32security.LookupAccountName(None, pgroup)
except pywinerror:
err += 'Group does not exist\n'
else:
groupSID = None
if not os.path.exists(path):
err += 'File not found'
if err:
return err
log.info('The group parameter has no effect when using {0} on '
'Windows systems; see function docs for details.'
''.format(func_name))
log.debug('win_file.py {0} Ignoring the group parameter for {1}'
''.format(func_name, path))
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
privilege_enabled = False
try:
privilege_enabled = _enable_privilege(win32security.SE_RESTORE_NAME)
if pgroup:
# set owner and group
win32security.SetNamedSecurityInfo(
path,
win32security.SE_FILE_OBJECT,
win32security.OWNER_SECURITY_INFORMATION + win32security.GROUP_SECURITY_INFORMATION,
userSID,
groupSID,
None,
None
)
else:
# set owner only
win32security.SetNamedSecurityInfo(
path,
win32security.SE_FILE_OBJECT,
win32security.OWNER_SECURITY_INFORMATION,
userSID,
None,
None,
None
)
finally:
if privilege_enabled:
_disable_privilege(win32security.SE_RESTORE_NAME)
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
return None
salt.utils.win_dacl.set_owner(path, user)
if pgroup:
salt.utils.win_dacl.set_primary_group(path, pgroup)
return True
def chpgrp(path, group):
@ -768,9 +696,12 @@ def chpgrp(path, group):
Ensure you know what you are doing before using this function.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
@ -779,42 +710,7 @@ def chpgrp(path, group):
salt '*' file.chpgrp c:\\temp\\test.txt Administrators
salt '*' file.chpgrp c:\\temp\\test.txt "'None'"
'''
if group is None:
raise SaltInvocationError("The group value was specified as None and "
"is invalid. If you mean the built-in None "
"group, specify the group in lowercase, e.g. "
"'none'.")
err = ''
# get SID object for group
try:
groupSID, domainName, objectType = win32security.LookupAccountName(None, group)
except pywinerror:
err += 'Group does not exist\n'
if not os.path.exists(path):
err += 'File not found\n'
if err:
return err
# set group
privilege_enabled = False
try:
privilege_enabled = _enable_privilege(win32security.SE_RESTORE_NAME)
win32security.SetNamedSecurityInfo(
path,
win32security.SE_FILE_OBJECT,
win32security.GROUP_SECURITY_INFORMATION,
None,
groupSID,
None,
None
)
finally:
if privilege_enabled:
_disable_privilege(win32security.SE_RESTORE_NAME)
return None
return salt.utils.win_dacl.set_primary_group(path, group)
def chgrp(path, group):
@ -833,8 +729,17 @@ def chgrp(path, group):
this function is superfluous and will generate an info level log entry if
used directly.
If you do actually want to set the 'primary group' of a file, use `file
.chpgrp`.
If you do actually want to set the 'primary group' of a file, use ``file
.chpgrp``.
To set group permissions use ``file.set_perms``
Args:
path (str): The path to the file or directory
group (str): The group (unused)
Returns:
None
CLI Example:
@ -866,18 +771,31 @@ def stats(path, hash_type='sha256', follow_symlinks=True):
compatibility with Unix behavior. If the 'primary group' is required, it
can be accessed in the `pgroup` and `pgid` properties.
Args:
path (str): The path to the file or directory
hash_type (str): The type of hash to return
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
dict: A dictionary of file/directory stats
CLI Example:
.. code-block:: bash
salt '*' file.stats /etc/passwd
'''
ret = {}
if not os.path.exists(path):
return ret
raise CommandExecutionError('Path not found: {0}'.format(path))
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
pstat = os.stat(path)
ret = {}
ret['inode'] = pstat.st_ino
# don't need to resolve symlinks again because we've already done that
ret['uid'] = get_uid(path, follow_symlinks=False)
@ -918,17 +836,20 @@ def get_attributes(path):
Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt
'''
err = ''
if not os.path.exists(path):
err += 'File not found\n'
if err:
return err
raise CommandExecutionError('Path not found: {0}'.format(path))
# set up dictionary for attribute values
attributes = {}
@ -979,6 +900,21 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
Set file attributes for a file. Note that the normal attribute
means that all others are false. So setting it will clear all others.
Args:
path (str): The path to the file or directory
archive (bool): Sets the archive attribute. Default is None
hidden (bool): Sets the hidden attribute. Default is None
normal (bool):
Resets the file attributes. Cannot be used in conjunction with any
other attribute. Default is None
notIndexed (bool): Sets the indexed attribute. Default is None
readonly (bool): Sets the readonly attribute. Default is None
system (bool): Sets the system attribute. Default is None
temporary (bool): Sets the temporary attribute. Default is None
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
@ -986,18 +922,19 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
salt '*' file.set_attributes c:\\temp\\a.txt normal=True
salt '*' file.set_attributes c:\\temp\\a.txt readonly=True hidden=True
'''
err = ''
if not os.path.exists(path):
err += 'File not found\n'
raise CommandExecutionError('Path not found: {0}'.format(path))
if normal:
if archive or hidden or notIndexed or readonly or system or temporary:
err += 'Normal attribute may not be used with any other attributes\n'
else:
return win32file.SetFileAttributes(path, 128)
if err:
return err
raise CommandExecutionError(
'Normal attribute may not be used with any other attributes')
ret = win32file.SetFileAttributes(path, 128)
return True if ret is None else False
# Get current attributes
intAttributes = win32file.GetFileAttributes(path)
# individually set or clear bits for appropriate attributes
if archive is not None:
if archive:
@ -1029,7 +966,9 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
intAttributes |= 0x100
else:
intAttributes &= 0xFEFF
return win32file.SetFileAttributes(path, intAttributes)
ret = win32file.SetFileAttributes(path, intAttributes)
return True if ret is None else False
def set_mode(path, mode):
@ -1039,6 +978,13 @@ def set_mode(path, mode):
This just calls get_mode, which returns None because we don't use mode on
Windows
Args:
path: The path to the file or directory
mode: The mode (not used)
Returns:
None
CLI Example:
.. code-block:: bash
@ -1058,12 +1004,12 @@ def remove(path, force=False):
'''
Remove the named file or directory
:param str path: The path to the file or directory to remove.
Args:
path (str): The path to the file or directory to remove.
force (bool): Remove even if marked Read-Only. Default is False
:param bool force: Remove even if marked Read-Only
:return: True if successful, False if unsuccessful
:rtype: bool
Returns:
bool: True if successful, False if unsuccessful
CLI Example:
@ -1079,7 +1025,7 @@ def remove(path, force=False):
# Does the file/folder exists
if not os.path.exists(path):
return 'File/Folder not found: {0}'.format(path)
raise CommandExecutionError('Path not found: {0}'.format(path))
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
@ -1127,6 +1073,13 @@ def symlink(src, link):
exception - invalid symlinks cannot be created. The source path must exist.
If it doesn't, an error will be raised.
Args:
src (str): The path to a file or directory
link (str): The path to the link
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
@ -1191,6 +1144,12 @@ def is_link(path):
is not a symlink, however, the error raised will be a SaltInvocationError,
not an OSError.
Args:
path (str): The path to a file or directory
Returns:
bool: True if path is a symlink, otherwise False
CLI Example:
.. code-block:: bash
@ -1281,6 +1240,12 @@ def readlink(path):
not a symlink, however, the error raised will be a SaltInvocationError, not
an OSError.
Args:
path (str): The path to the symlink
Returns:
str: The path that the symlink points to
CLI Example:
.. code-block:: bash
@ -1954,30 +1919,37 @@ def set_perms(path, grant_perms=None, deny_perms=None, inheritance=True):
path (str): The full path to the directory.
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter,
ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
To see a list of available attributes and applies to settings see
the documentation for salt.utils.win_dacl
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful, otherwise raise an error

View File

@ -15,6 +15,7 @@ from __future__ import absolute_import, unicode_literals
import json
import logging
import os
import decimal
# Import salt libs
from salt.ext.six.moves import range
@ -108,6 +109,22 @@ def _list_certs(certificate_store='My'):
return ret
def _iisVersion():
    '''
    Return the version of IIS installed on this Windows host.

    Reads MajorVersion/MinorVersion from the registry key
    ``HKLM:\\SOFTWARE\\Microsoft\\InetStp`` via PowerShell (``_srvmgr``)
    and combines them into a single ``decimal.Decimal`` (e.g. ``7.5``,
    ``8.0``) so callers can compare versions numerically.

    Returns:
        decimal.Decimal: The IIS version, or ``-1`` (int) if the
        PowerShell output could not be parsed as JSON.
    '''
    pscmd = []
    pscmd.append(r"Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\InetStp\\")
    pscmd.append(' | Select-Object MajorVersion, MinorVersion')

    # _srvmgr executes the assembled command in PowerShell;
    # return_json=True requests JSON on stdout for parsing below.
    cmd_ret = _srvmgr(pscmd, return_json=True)

    try:
        items = json.loads(cmd_ret['stdout'], strict=False)
    except ValueError:
        log.error('Unable to parse return data as Json.')
        return -1

    # Combine major and minor into one comparable number, e.g. 7 and 5 -> 7.5
    return decimal.Decimal("{0}.{1}".format(items[0]['MajorVersion'], items[0]['MinorVersion']))
def _srvmgr(cmd, return_json=False):
'''
Execute a powershell command from the WebAdministration PS module.
@ -765,6 +782,11 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
'''
name = str(name).upper()
binding_info = _get_binding_info(hostheader, ipaddress, port)
if _iisVersion() < 8:
# IIS 7.5 and earlier don't support SNI for HTTPS, therefore cert bindings don't contain the host header
binding_info = binding_info.rpartition(':')[0] + ':'
binding_path = r"IIS:\SslBindings\{0}".format(binding_info.replace(':', '!'))
if sslflags not in _VALID_SSL_FLAGS:
@ -801,10 +823,19 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
log.error('Certificate not present: {0}'.format(name))
return False
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(binding_path),
'-Thumbprint', "'{0}'".format(name),
'-SSLFlags', '{0}'.format(sslflags)]
if _iisVersion() < 8:
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
# Modify IP spec to IIS 7.5 format
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(iis7path),
'-Thumbprint', "'{0}'".format(name)]
else:
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(binding_path),
'-Thumbprint', "'{0}'".format(name),
'-SSLFlags', '{0}'.format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
@ -824,6 +855,7 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
return True
log.error('Unable to create certificate binding: {0}'.format(name))
return False

View File

@ -481,9 +481,28 @@ def _refresh_db_conditional(saltenv, **kwargs):
def refresh_db(**kwargs):
'''
Fectches metadata files and calls :py:func:`pkg.genrepo
Fetches metadata files and calls :py:func:`pkg.genrepo
<salt.modules.win_pkg.genrepo>` to compile updated repository metadata.
Kwargs:
saltenv (str): Salt environment. Default: ``base``
verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
Returns:
dict: A dictionary containing the results of the database refresh.
.. Warning::
When calling this command from a state using `module.run` be sure to
pass `failhard: False`. Otherwise the state will report failure if it
encounters a bad software definition file.
CLI Example:
.. code-block:: bash

View File

@ -25,7 +25,7 @@ Example output::
'''
from __future__ import absolute_import
# Import python libs
import collections
import salt.utils.odict
from numbers import Number
# Import salt libs
@ -130,7 +130,7 @@ class NestDisplay(object):
)
# respect key ordering of ordered dicts
if isinstance(ret, collections.OrderedDict):
if isinstance(ret, salt.utils.odict.OrderedDict):
keys = ret.keys()
else:
keys = sorted(ret)

View File

@ -10,6 +10,8 @@ import os
import collections
import logging
import tornado.gen
import sys
import traceback
# Import salt libs
import salt.loader
@ -403,20 +405,21 @@ class Pillar(object):
# Gather initial top files
try:
if self.opts['pillarenv']:
tops[self.opts['pillarenv']] = [
compile_template(
self.client.cache_file(
self.opts['state_top'],
self.opts['pillarenv']
),
self.rend,
self.opts['renderer'],
self.opts['renderer_blacklist'],
self.opts['renderer_whitelist'],
self.opts['pillarenv'],
_pillar_rend=True,
)
]
top = self.client.cache_file(
self.opts['state_top'], self.opts['pillarenv'])
if top:
tops[self.opts['pillarenv']] = [
compile_template(
top,
self.rend,
self.opts['renderer'],
self.opts['renderer_blacklist'],
self.opts['renderer_whitelist'],
self.opts['pillarenv'],
_pillar_rend=True,
)
]
else:
for saltenv in self._get_envs():
if self.opts.get('pillar_source_merging_strategy', None) == "none":
@ -817,8 +820,16 @@ class Pillar(object):
pillar_dirs,
key)
except Exception as exc:
errors.append('Failed to load ext_pillar {0}: {1}'.format(
key, exc))
errors.append(
'Failed to load ext_pillar {0}: {1}'.format(
key,
exc.__str__(),
)
)
log.error(
'Execption caught loading ext_pillar \'%s\':\n%s',
key, ''.join(traceback.format_tb(sys.exc_info()[2]))
)
if ext:
pillar = merge(
pillar,

View File

@ -298,6 +298,171 @@ instead of ``gitfs`` (e.g. :conf_master:`git_pillar_pubkey`,
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
.. _git-pillar-multiple-repos:
How Multiple Remotes Are Handled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As noted above, multiple remotes can be included in the same ``git`` ext_pillar
configuration. Consider the following:
.. code-block:: yaml
my_etcd_config:
etcd.host: 127.0.0.1
etcd.port: 4001
ext_pillar:
- etcd: my_etcd_config
- git:
- master https://mydomain.tld/foo.git:
- root: pillar
- master https://mydomain.tld/bar.git
- master https://mydomain.tld/baz.git
- dev https://mydomain.tld/qux.git
- git:
- master https://mydomain.tld/abc.git
- dev https://mydomain.tld/123.git
To understand how pillar data from these repos will be compiled, it's important
to know how Salt will process them. The following points should be kept in
mind:
1. Each ext_pillar is called separately from the others. So, in the above
example, the :mod:`etcd <salt.pillar.etcd>` ext_pillar will be evaluated
first, with the first group of git_pillar remotes evaluated next (and merged
into the etcd pillar data). Lastly, the second group of git_pillar remotes
will be evaluated, and then merged into the ext_pillar data evaluated before
it.
2. Within a single group of git_pillar remotes, each remote will be evaluated in
order, with results merged together as each remote is evaluated.
.. note::
Prior to the Nitrogen release, remotes would be evaluated in a
non-deterministic order.
3. By default, when a repo is evaluated, other remotes which share its pillar
environment will have their files made available to the remote being
processed.
The first point should be straightforward enough, but the second and third
could use some additional clarification.
First, point #2. In the first group of git_pillar remotes, the top file and
pillar SLS files in the ``foo`` remote will be evaluated first. The ``bar``
remote will be evaluated next, and its results will be merged into the pillar
data compiled when the ``foo`` remote was evaluated. As the subsequent remotes
are evaluated, their data will be merged in the same fashion.
But wait, don't these repositories belong to more than one pillar environment?
Well, yes. The default method of generating pillar data compiles pillar data
from all environments. This behavior can be overridden using a ``pillarenv``.
Setting a :conf_minion:`pillarenv` in the minion config file will make that
minion tell the master to ignore any pillar data from environments which don't
match that pillarenv. A pillarenv can also be specified for a given minion or
set of minions when :mod:`running states <salt.modules.state>`, by using the
``pillarenv`` argument. The CLI pillarenv will override one set in the minion
config file. So, assuming that a pillarenv of ``base`` was set for a minion, it
would not get any of the pillar variables configured in the ``qux`` remote,
since that remote is assigned to the ``dev`` environment. The only way to get
its pillar data would be to specify a pillarenv of ``dev``, which would mean
that it would then ignore any items from the ``base`` pillarenv. A more
detailed explanation of pillar environments can be found :ref:`here
<pillar-environments>`.
Moving on to point #3, and looking at the example ext_pillar configuration, as
the ``foo`` remote is evaluated, it will also have access to the files from the
``bar`` and ``baz`` remotes, since all three are assigned to the ``base``
pillar environment. So, if an SLS file referenced by the ``foo`` remote's top
file does not exist in the ``foo`` remote, it will be searched for in the
``bar`` remote, followed by the ``baz`` remote. When it comes time to evaluate
the ``bar`` remote, SLS files referenced by the ``bar`` remote's top file will
first be looked for in the ``bar`` remote, followed by ``foo``, and ``baz``,
and when the ``baz`` remote is processed, SLS files will be looked for in
``baz``, followed by ``foo`` and ``bar``. This "failover" logic is called a
:ref:`directory overlay <file-roots-directory-overlay>`, and it is also used by
:conf_master:`file_roots` and :conf_minion:`pillar_roots`. The ordering of which
remote is checked for SLS files is determined by the order they are listed.
First the remote being processed is checked, then the others that share the
same environment are checked. However, before the Nitrogen release, since
evaluation was unordered, the remote being processed would be checked, followed
in no specific order by the other repos which share the same environment.
Beginning with the Nitrogen release, this behavior of git_pillar remotes having
access to files in other repos which share the same environment can be disabled
by setting :conf_master:`git_pillar_includes` to ``False``. If this is done,
then all git_pillar remotes will only have access to their own SLS files.
Another way of ensuring that a git_pillar remote will not have access to SLS
files from other git_pillar remotes which share the same pillar environment is
to put them in a separate ``git`` section under ``ext_pillar``. Look again at
the example configuration above. In the second group of git_pillar remotes, the
``abc`` remote would not have access to the SLS files from the ``foo``,
``bar``, and ``baz`` remotes, and vice-versa.
.. _git-pillar-mountpoints:
Mountpoints
~~~~~~~~~~~
.. versionadded:: Nitrogen
Assume the following pillar top file:
.. code-block:: yaml
base:
'web*':
- common
- web.server.nginx
- web.server.appdata
Now, assume that you would like to configure the ``web.server.nginx`` and
``web.server.appdata`` SLS files in separate repos. This could be done using
the following ext_pillar configuration (assuming that
:conf_master:`git_pillar_includes` has not been set to ``False``):
.. code-block:: yaml
ext_pillar:
- git:
- master https://mydomain.tld/pillar-common.git
- master https://mydomain.tld/pillar-nginx.git
- master https://mydomain.tld/pillar-appdata.git
However, in order to get the files in the second and third git_pillar remotes
to work, you would need to first create the directory structure underneath it
(i.e. place them underneath ``web/server/`` in the repository). This also makes
it tedious to reorganize the configuration, as changing ``web.server.nginx`` to
``web.nginx`` in the top file would require you to also move the SLS files in
the ``pillar-nginx`` up a directory level.
For these reasons, much like gitfs, git_pillar now supports a "mountpoint"
feature. Using the following ext_pillar configuration, the SLS files in the
second and third git_pillar remotes can be placed in the root of the git
repository:
.. code-block:: yaml
ext_pillar:
- git:
- master https://mydomain.tld/pillar-common.git
- master https://mydomain.tld/pillar-nginx.git:
- mountpoint: web/server/
- master https://mydomain.tld/pillar-appdata.git:
- mountpoint: web/server/
Now, if the top file changed the SLS target from ``web.server.nginx`` to
``web.nginx``, instead
of reorganizing the git repository, you would just need to adjust the
mountpoint to ``web/`` (and restart the ``salt-master`` daemon).
.. note::
- Leading and trailing slashes on the mountpoints are optional.
- Use of the ``mountpoint`` feature requires that
:conf_master:`git_pillar_includes` is not disabled.
- Content from mounted git_pillar repos can only be referenced by a top
file in the same pillar environment.
'''
from __future__ import absolute_import
@ -325,6 +490,7 @@ except ImportError:
# pylint: enable=import-error
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
PER_REMOTE_ONLY = ('name', 'mountpoint')
# Set up logging
log = logging.getLogger(__name__)
@ -378,7 +544,7 @@ def ext_pillar(minion_id, repo, pillar_dirs):
opts['pillar_roots'] = {}
opts['__git_pillar'] = True
pillar = salt.utils.gitfs.GitPillar(opts)
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES)
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
if __opts__.get('__role') == 'minion':
# If masterless, fetch the remotes. We'll need to remove this once
# we make the minion daemon able to run standalone.
@ -394,21 +560,37 @@ def ext_pillar(minion_id, repo, pillar_dirs):
False
)
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
log.debug(
'git_pillar is processing pillar SLS from {0} for pillar '
'env \'{1}\''.format(pillar_dir, env)
)
all_dirs = [d for (d, e) in six.iteritems(pillar.pillar_dirs)
if env == e]
if pillar_dir in pillar.pillar_linked_dirs:
log.debug(
'git_pillar is skipping processing on %s as it is a '
'mounted repo', pillar_dir
)
continue
else:
log.debug(
'git_pillar is processing pillar SLS from %s for pillar '
'env \'%s\'', pillar_dir, env
)
# Ensure that the current pillar_dir is first in the list, so that
# the pillar top.sls is sourced from the correct location.
pillar_roots = [pillar_dir]
pillar_roots.extend([x for x in all_dirs if x != pillar_dir])
if env == '__env__':
env = opts.get('pillarenv') \
or opts.get('environment') \
or opts.get('git_pillar_base')
log.debug('__env__ maps to %s', env)
pillar_roots = [pillar_dir]
if __opts__['git_pillar_includes']:
# Add the rest of the pillar_dirs in this environment to the
# list, excluding the current pillar_dir being processed. This
# is because it was already specified above as the first in the
# list, so that its top file is sourced from the correct
# location and not from another git_pillar remote.
pillar_roots.extend(
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
if env == e and d != pillar_dir]
)
opts['pillar_roots'] = {env: pillar_roots}
local_pillar = Pillar(opts, __grains__, minion_id, env)
@ -449,9 +631,10 @@ class _LegacyGitPillar(object):
self.repo = git.Repo.init(rp_)
except (git.exc.NoSuchPathError,
git.exc.InvalidGitRepositoryError) as exc:
log.error('GitPython exception caught while '
'initializing the repo: {0}. Maybe '
'git is not available.'.format(exc))
log.error(
'GitPython exception caught while initializing the repo: %s. '
'Maybe the git CLI program is not available.', exc
)
except Exception as exc:
log.exception('Undefined exception in git pillar. '
'This may be a bug should be reported to the '
@ -502,8 +685,10 @@ class _LegacyGitPillar(object):
log.debug('Legacy git_pillar: Updating \'%s\'', self.rp_location)
self.repo.git.fetch()
except git.exc.GitCommandError as exc:
log.error('Unable to fetch the latest changes from remote '
'{0}: {1}'.format(self.rp_location, exc))
log.error(
'Unable to fetch the latest changes from remote %s: %s',
self.rp_location, exc
)
return False
try:

View File

@ -10,19 +10,19 @@ ext_pillar, simply ported to use mako instead of jinja2 for templating.
It supports the following features:
- multiple config files that are mako templates with support for ``pillar``,
``__grains__``, ``__salt__``, ``__opts__`` objects
``__grains__``, ``__salt__``, ``__opts__`` objects.
- a config file renders as an ordered list of files. Unless absolute, the paths
of these files are relative to the current config file - if absolute, they
will be treated literally.
- this list of files are read in ordered as mako templates with support for
``stack``, ``pillar``, ``__grains__``, ``__salt__``, ``__opts__`` objects
- all these rendered files are then parsed as ``yaml``
- then all yaml dicts are merged in order with support for the following
- this list of files are read in order as mako templates with support for
``stack``, ``pillar``, ``__grains__``, ``__salt__``, ``__opts__`` objects.
- all these rendered files are then parsed as ``yaml``.
- then all yaml dicts are merged in order, with support for the following.
merging strategies: ``merge-first``, ``merge-last``, ``remove``, and
``overwrite``
``overwrite``.
- stack config files can be matched based on ``pillar``, ``grains``, or
``opts`` values, which make it possible to support kind of self-contained
environments
environments.
Configuration in Salt
---------------------
@ -41,7 +41,7 @@ MakoStack config file like below:
.. code:: yaml
ext_pillar:
- stack: /path/to/stack.cfg
- makostack: /path/to/stack.cfg
List of config files
~~~~~~~~~~~~~~~~~~~~
@ -51,7 +51,7 @@ You can also provide a list of config files:
.. code:: yaml
ext_pillar:
- stack:
- makostack:
- /path/to/stack1.cfg
- /path/to/stack2.cfg
@ -67,7 +67,7 @@ Here is an example of such a configuration, which should speak by itself:
.. code:: yaml
ext_pillar:
- stack:
- makostack:
pillar:environment:
dev: /path/to/dev/stack.cfg
prod: /path/to/prod/stack.cfg
@ -78,7 +78,6 @@ Here is an example of such a configuration, which should speak by itself:
opts:custom:opt:
value: /path/to/stack0.cfg
Grafting data from files to arbitrary namespaces
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -431,7 +430,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs):
else:
namespace = None
if not os.path.isfile(cfg):
log.warning('Ignoring pillar stack cfg "{0}": '
log.warning('Ignoring Stack cfg "{0}": '
'file does not exist'.format(cfg))
continue
stack = _process_stack_cfg(cfg, stack, minion_id, pillar, namespace)
@ -459,18 +458,19 @@ def _process_stack_cfg(cfg, stack, minion_id, pillar, namespace):
pillar=pillar, stack=stack)
obj = yaml.safe_load(p)
if not isinstance(obj, dict):
log.info('Ignoring pillar stack template "{0}": Can\'t parse '
log.info('Ignoring Stack template "{0}": Can\'t parse '
'as a valid yaml dictionary'.format(path))
continue
if namespace:
for sub in namespace.split(':')[::-1]:
obj = {sub: obj}
stack = _merge_dict(stack, obj)
log.info('Stack template {0} parsed'.format(path))
except exceptions.TopLevelLookupException as e:
log.info('Stack template "{0}" not found.'.format(path))
continue
except Exception as e:
log.info('Ignoring pillar stack template "{0}":'.format(path))
log.info('Ignoring Stack template "{0}":'.format(path))
log.info('{0}'.format(exceptions.text_error_template().render()))
continue
return stack

View File

@ -105,6 +105,7 @@ look like this:
- first_password
- second_password
- third_password
credstore: <path to credential store>
proxytype
^^^^^^^^^
@ -168,6 +169,18 @@ port
If the ESXi host is not using the default port, set this value to an
alternate port. Default is ``443``.
credstore
^^^^^^^^^
If the ESXi host is using an untrusted SSL certificate, set this value to
the file path where the credential store is located. This file is passed to
``esxcli``. Default is ``<HOME>/.vmware/credstore/vicredentials.xml`` on Linux
and ``<APPDATA>/VMware/credstore/vicredentials.xml`` on Windows.
.. note::
``HOME`` variable is sometimes not set for processes running as system
services. If you want to rely on the default credential store location,
make sure ``HOME`` is set for the proxy process.
Salt Proxy
----------
@ -321,6 +334,7 @@ def init(opts):
DETAILS['password'] = password
DETAILS['protocol'] = opts['proxy'].get('protocol', 'https')
DETAILS['port'] = opts['proxy'].get('port', '443')
DETAILS['credstore'] = opts['proxy'].get('credstore')
def grains():
@ -401,6 +415,7 @@ def ch_config(cmd, *args, **kwargs):
kwargs['password'] = DETAILS['password']
kwargs['port'] = DETAILS['port']
kwargs['protocol'] = DETAILS['protocol']
kwargs['credstore'] = DETAILS['credstore']
if 'vsphere.' + cmd not in __salt__:
return {'retcode': -1, 'message': 'vsphere.' + cmd + ' is not available.'}

View File

@ -45,6 +45,8 @@ from contextlib import contextmanager
import json
import sys
# import salt libs
import salt.ext.six as six
from salt.exceptions import SaltInvocationError, SaltMasterError
try:
@ -237,7 +239,7 @@ def pop(queue, quantity=1):
'''
Pop one or more or all items from the queue return them.
'''
cmd = 'SELECT data FROM {0}'.format(queue)
cmd = 'SELECT id, data FROM {0}'.format(queue)
if quantity != 'all':
try:
quantity = int(quantity)
@ -247,17 +249,18 @@ def pop(queue, quantity=1):
raise SaltInvocationError(error_txt)
cmd = ''.join([cmd, ' LIMIT {0};'.format(quantity)])
log.debug('SQL Query: {0}'.format(cmd))
items = []
with _conn(commit=True) as cur:
items = []
cur.execute(cmd)
result = cur.fetchall()
if len(result) > 0:
items = [item[0] for item in result]
itemlist = "','".join(items)
del_cmd = '''DELETE FROM {0} WHERE data IN ('{1}');'''.format(
queue, itemlist)
ids = [six.text_type(item[0]) for item in result]
items = [item[1] for item in result]
idlist = "','".join(ids)
del_cmd = '''DELETE FROM {0} WHERE id IN ('{1}');'''.format(
queue, idlist)
log.debug('SQL Query: {0}'.format(del_cmd))
cur.execute(del_cmd)
return [json.loads(x) for x in items]
return items

View File

@ -239,6 +239,9 @@ def save_minions(jid, minions, syndic_id=None):
'''
Save/update the serialized list of minions for a given job
'''
# Ensure we have a list for Python 3 compatability
minions = list(minions)
log.debug(
'Adding minions for job %s%s: %s',
jid,

View File

@ -475,8 +475,8 @@ def exit_success(jid, ext_source=None):
ext_source=ext_source
)
minions = data['Minions']
result = data['Result']
minions = data.get('Minions', [])
result = data.get('Result', {})
for minion in minions:
if minion in result and 'return' in result[minion]:
@ -484,6 +484,9 @@ def exit_success(jid, ext_source=None):
else:
ret[minion] = False
for minion in result:
if 'return' in result[minion] and result[minion]['return']:
ret[minion] = True
return ret

View File

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
'''
General management and processing of queues.
============================================
This runner facilitates interacting with various queue backends such as the
included sqlite3 queue or the planned AWS SQS and Redis queues
@ -25,6 +26,42 @@ reactor is set up to match on event tags for a specific queue and then take
infrastructure actions on those minion IDs. These actions might be to delete
the minion's key from the master, use salt-cloud to destroy the vm, or some
other custom action.
Queued runners
==============
Using the Salt Queues, references to the commandline arguments of other runners
can be saved to be processed later. The queue runners require a queue backend
that can store json data (default: :mod:`pgjsonb <salt.queues.pgjsonb_queue>`).
Once the queue is setup, the `runner_queue` will need to be configured.
.. code-block:: yaml
runner_queue:
queue: runners
backend: pgjsonb
.. note:: only the queue is required, this defaults to using pgjsonb
Once this is set, then the following can be added to the scheduler on the
master and it will run the specified amount of commands per time period.
.. code-block:: yaml
schedule:
runner queue:
schedule:
function: saltutil.runner
minutes: 1
args:
- queue.process_runner
kwargs:
quantity: 2
The above configuration will pop 2 runner jobs off the runner queue, and then
run them. And it will do this every minute, unless there are any jobs that are
still running from the last time the process_runner task was executed.
'''
# Import python libs
@ -33,8 +70,8 @@ from __future__ import absolute_import
# Import salt libs
import salt.loader
import salt.utils.event
from salt.utils.event import tagify
import salt.ext.six as six
from salt.utils.event import get_event, tagify
from salt.exceptions import SaltInvocationError
@ -172,7 +209,7 @@ def process_queue(queue, quantity=1, backend='sqlite'):
salt-run queue.process_queue myqueue all backend=sqlite
'''
# get ready to send an event
event = salt.utils.event.get_event(
event = get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
@ -190,3 +227,81 @@ def process_queue(queue, quantity=1, backend='sqlite'):
'queue': queue,
}
event.fire_event(data, tagify([queue, 'process'], prefix='queue'))
return data
def __get_queue_opts(queue=None, backend=None):
'''
Get consistent opts for the queued runners
'''
if queue is None:
queue = __opts__.get('runner_queue', {}).get('queue')
if backend is None:
backend = __opts__.get('runner_queue', {}).get('backend', 'pgjsonb')
return {'backend': backend,
'queue': queue}
def insert_runner(fun, args=None, kwargs=None, queue=None, backend=None):
'''
Insert a reference to a runner into the queue so that it can be run later.
fun
The runner function that is going to be run
args
list or comma-seperated string of args to send to fun
kwargs
dictionary of keyword arguments to send to fun
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.insert_runner test.stdout_print
salt-run queue.insert_runner event.send test_insert_runner kwargs='{"data": {"foo": "bar"}}'
'''
if args is None:
args = []
elif isinstance(args, six.string_types):
args = args.split(',')
if kwargs is None:
kwargs = {}
queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
data = {'fun': fun, 'args': args, 'kwargs': kwargs}
return insert(items=data, **queue_kwargs)
def process_runner(quantity=1, queue=None, backend=None):
'''
Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5
'''
queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
data = process_queue(quantity=quantity, **queue_kwargs)
for job in data['items']:
__salt__[job['fun']](*job['args'], **job['kwargs'])

View File

@ -55,6 +55,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
ret['pillar'] = sync_pillar(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['utils'] = sync_utils(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['sdb'] = sync_sdb(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['cache'] = sync_cache(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
return ret
@ -396,3 +397,29 @@ def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
return salt.utils.extmods.sync(__opts__, 'sdb', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: Nitrogen
Sync utils modules from ``salt://_cache`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-seperated list of modules to sync
extmod_blacklist : None
comma-seperated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_cache
'''
return salt.utils.extmods.sync(__opts__, 'cache', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]

View File

@ -83,9 +83,9 @@ def orchestrate(mods,
ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
res = salt.utils.check_state_result(ret['data'])
if res:
ret['data']['retcode'] = 0
ret['retcode'] = 0
else:
ret['data']['retcode'] = 1
ret['retcode'] = 1
return ret
# Aliases for orchestrate runner

View File

@ -11,6 +11,7 @@ documented in the execution module docs.
from __future__ import absolute_import
import base64
import logging
import string
import requests
import salt.crypt
@ -117,7 +118,61 @@ def _get_policies(minion_id, config):
policies = []
for pattern in policy_patterns:
policies.append(pattern.format(**mappings))
try:
for expanded_pattern in _expand_pattern_lists(pattern, **mappings):
policies.append(expanded_pattern.format(**mappings))
except KeyError:
log.warning('Could not resolve policy pattern {0}'.format(pattern))
log.debug('{0} policies: {1}'.format(minion_id, policies))
return policies
def _expand_pattern_lists(pattern, **mappings):
'''
Expands the pattern for any list-valued mappings, such that for any list of
length N in the mappings present in the pattern, N copies of the pattern are
returned, each with an element of the list substituted.
pattern:
A pattern to expand, for example ``by-role/{grains[roles]}``
mappings:
A dictionary of variables that can be expanded into the pattern.
Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains
.. code-block:: yaml
grains:
roles:
- web
- database
This function will expand into two patterns,
``[by-role/web, by-role/database]``.
Note that this method does not expand any non-list patterns.
'''
expanded_patterns = []
f = string.Formatter()
'''
This function uses a string.Formatter to get all the formatting tokens from
the pattern, then recursively replaces tokens whose expanded value is a
list. For a list with N items, it will create N new pattern strings and
then continue with the next token. In practice this is expected to not be
very expensive, since patterns will typically involve a handful of lists at
most.
''' # pylint: disable=W0105
for (_, field_name, _, _) in f.parse(pattern):
if field_name is None:
continue
(value, _) = f.get_field(field_name, None, mappings)
if isinstance(value, list):
token = '{{{0}}}'.format(field_name)
expanded = [pattern.replace(token, str(elem)) for elem in value]
for expanded_item in expanded:
result = _expand_pattern_lists(expanded_item, **mappings)
expanded_patterns += result
return expanded_patterns
return [pattern]

View File

@ -20,6 +20,7 @@ import grp
import sys
# Import Salt libs
import salt.client
import salt.config
import salt.loader
import salt.cache
@ -31,10 +32,20 @@ from salt.ext.six import string_types
from salt.ext.six.moves import input
from salt.ext.six.moves import zip
from salt.ext.six.moves import filter
from salt.template import compile_template
# Get logging started
log = logging.getLogger(__name__)
FILE_TYPES = ('c', 'd', 'g', 'l', 'r', 's', 'm')
# c: config file
# d: documentation file
# g: ghost file (i.e. the file contents are not included in the package payload)
# l: license file
# r: readme file
# s: SLS file
# m: Salt module
class SPMException(Exception):
'''
@ -220,6 +231,10 @@ class SPMClient(object):
if len(args) < 2:
raise SPMInvocationError('A package must be specified')
caller_opts = self.opts.copy()
caller_opts['file_client'] = 'local'
self.caller = salt.client.Caller(mopts=caller_opts)
self.client = salt.client.get_local_client(self.opts['conf_file'])
cache = salt.cache.Cache(self.opts)
packages = args[1:]
@ -342,8 +357,10 @@ class SPMClient(object):
else:
response = http.query(dl_url, text=True)
with salt.utils.fopen(out_file, 'w') as outf:
outf.write(response.get("text"))
outf.write(response.get('text'))
# First we download everything, then we install
for package in dl_list:
# Kick off the install
self._install_indv_pkg(package, out_file)
return
@ -445,6 +462,7 @@ class SPMClient(object):
raise SPMPackageError('Invalid package: the {0} was not found'.format(field))
pkg_files = formula_tar.getmembers()
# First pass: check for files that already exist
existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)
@ -456,6 +474,22 @@ class SPMClient(object):
# We've decided to install
self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)
# Run the pre_local_state script, if present
if 'pre_local_state' in formula_def:
high_data = self._render(formula_def['pre_local_state'], formula_def)
ret = self.caller.cmd('state.high', data=high_data)
if 'pre_tgt_state' in formula_def:
log.debug('Executing pre_tgt_state script')
high_data = self._render(formula_def['pre_tgt_state']['data'], formula_def)
tgt = formula_def['pre_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['pre_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
# No defaults for this in config.py; default to the current running
# user and group
uid = self.opts.get('spm_uid', os.getuid())
@ -493,6 +527,23 @@ class SPMClient(object):
digest,
self.db_conn)
# Run the post_local_state script, if present
if 'post_local_state' in formula_def:
log.debug('Executing post_local_state script')
high_data = self._render(formula_def['post_local_state'], formula_def)
self.caller.cmd('state.high', data=high_data)
if 'post_tgt_state' in formula_def:
log.debug('Executing post_tgt_state script')
high_data = self._render(formula_def['post_tgt_state']['data'], formula_def)
tgt = formula_def['post_tgt_state']['tgt']
ret = self.client.run_job(
tgt=formula_def['post_tgt_state']['tgt'],
fun='state.high',
tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
timout=self.opts['timeout'],
data=high_data,
)
formula_tar.close()
def _resolve_deps(self, formula_def):
@ -943,12 +994,31 @@ class SPMClient(object):
formula_tar = tarfile.open(out_path, 'w:bz2')
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path))
@ -967,6 +1037,27 @@ class SPMClient(object):
return None
return member
def _render(self, data, formula_def):
'''
Render a [pre|post]_local_state or [pre|post]_tgt_state script
'''
# FORMULA can contain a renderer option
renderer = formula_def.get('renderer', self.opts.get('renderer', 'yaml_jinja'))
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
template_vars = formula_def.copy()
template_vars['opts'] = self.opts.copy()
return compile_template(
':string:',
rend,
renderer,
blacklist,
whitelist,
input_data=data,
**template_vars
)
class SPMUserInterface(object):
'''

View File

@ -13,6 +13,14 @@ import salt.syspaths
# Get logging started
log = logging.getLogger(__name__)
FILE_TYPES = ('c', 'd', 'g', 'l', 'r', 's', 'm')
# c: config file
# d: documentation file
# g: ghost file (i.e. the file contents are not included in the package payload)
# l: license file
# r: readme file
# s: SLS file
# m: Salt module
def init(**kwargs):
@ -100,10 +108,23 @@ def install_file(package, formula_tar, member, formula_def, conn=None):
tld = formula_def.get('top_level_dir', package)
new_name = member.name.replace('{0}/'.format(package), '', 1)
if not new_name.startswith(tld) and not new_name.startswith('_') and not new_name.startswith('pillar.example'):
if not new_name.startswith(tld) and not new_name.startswith('_') and not \
new_name.startswith('pillar.example') and not new_name.startswith('README'):
log.debug('{0} not in top level directory, not installing'.format(new_name))
return False
for line in formula_def.get('files', []):
tag = ''
for ftype in FILE_TYPES:
if line.startswith('{0}|'.format(ftype)):
tag = line.split('|', 1)[0]
line = line.split('|', 1)[1]
if tag and new_name == line:
if tag in ('c', 'd', 'g', 'l', 'r'):
out_path = __opts__['spm_share_dir']
elif tag in ('s', 'm'):
pass
if new_name.startswith('{0}/_'.format(package)):
if node_type in ('master', 'minion'):
# Module files are distributed via extmods directory

View File

@ -493,17 +493,19 @@ def present(
if not instance_ids:
instance_ids = []
if instance_names:
# AWS borks on adding instances in "non-running" states, so filter 'em out.
running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
for n in instance_names:
instance_ids += __salt__['boto_ec2.find_instances'](name=n, region=region,
key=key, keyid=keyid,
profile=profile)
profile=profile,
in_states=running_states)
# Backwards compat: Only touch attached instances if requested (e.g. if some are defined).
if instance_ids:
if __opts__['test']:
if __salt__['boto_elb.set_instances'](name, instance_ids, True, region,
key, keyid, profile):
ret['comment'] += ' ELB {0} instances would be updated.'.format(name)
ret['result'] = None
return ret
_ret = __salt__['boto_elb.set_instances'](name, instance_ids, False, region,
key, keyid, profile)
@ -562,7 +564,6 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
else:
if __opts__['test']:
ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new)
ret['result'] = None
return ret
state = __salt__['boto_elb.register_instances'](name,
instances,
@ -674,7 +675,6 @@ def _elb_present(
if not exists:
if __opts__['test']:
ret['comment'] = 'ELB {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_elb.create'](name=name,
availability_zones=availability_zones,
@ -777,7 +777,6 @@ def _listeners_present(
else:
msg.append('Listeners already set on ELB {0}.'.format(name))
ret['comment'] = ' '.join(msg)
ret['result'] = None
return ret
if to_delete:
@ -840,7 +839,6 @@ def _security_groups_present(
if __opts__['test']:
msg = 'ELB {0} set to have security groups modified.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
changed = __salt__['boto_elb.apply_security_groups'](
name, security_groups, region, key, keyid, profile
@ -904,7 +902,6 @@ def _attributes_present(
if attrs_to_set:
if __opts__['test']:
ret['comment'] = 'ELB {0} set to have attributes set.'.format(name)
ret['result'] = None
return ret
was_set = __salt__['boto_elb.set_attributes'](name, attributes,
region, key, keyid,
@ -947,7 +944,6 @@ def _health_check_present(
if need_to_set:
if __opts__['test']:
msg = 'ELB {0} set to have health check set.'.format(name)
ret['result'] = True
ret['comment'] = msg
return ret
was_set = __salt__['boto_elb.set_health_check'](name, health_check,
@ -997,7 +993,6 @@ def _zones_present(
if __opts__['test']:
msg = 'ELB {0} to have availability zones set.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
if to_enable:
enabled = __salt__['boto_elb.enable_availability_zones'](name,
@ -1069,7 +1064,6 @@ def _subnets_present(
if __opts__['test']:
msg = 'ELB {0} to have subnets set.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
if to_enable:
attached = __salt__['boto_elb.attach_subnets'](name, to_enable,
@ -1283,15 +1277,12 @@ def _policies_present(
msg.append('Policy {0} added.'.format(policy))
for policy in to_delete:
msg.append('Policy {0} deleted.'.format(policy))
ret['result'] = None
else:
msg.append('Policies already set on ELB {0}.'.format(name))
for listener in listeners_to_update:
msg.append('Listener {0} policies updated.'.format(listener))
ret['result'] = None
for backend in backends_to_update:
msg.append('Backend {0} policies updated.'.format(backend))
ret['result'] = None
ret['comment'] = ' '.join(msg)
return ret
@ -1412,7 +1403,6 @@ def absent(
if exists:
if __opts__['test']:
ret['comment'] = 'ELB {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_elb.delete'](name, region, key, keyid,
profile)
@ -1458,7 +1448,6 @@ def _tags_present(name,
msg = 'The following tag{0} set to be removed: {1}.'.format(
('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove))
ret['comment'] = ' '.join([ret['comment'], msg])
ret['result'] = None
else:
_ret = __salt__['boto_elb.delete_tags'](
name,
@ -1483,13 +1472,11 @@ def _tags_present(name,
('s are' if len(tags_to_add.keys()) > 1 else ' is'),
', '.join(tags_to_add.keys()))
ret['comment'] = ' '. join([ret['comment'], msg])
ret['result'] = None
if tags_to_update:
msg = 'The following tag {0} set to be updated: {1}.'.format(
('values are' if len(tags_to_update.keys()) > 1 else 'value is'),
', '.join(tags_to_update.keys()))
ret['comment'] = ' '.join([ret['comment'], msg])
ret['result'] = None
else:
all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
_ret = __salt__['boto_elb.set_tags'](

View File

@ -269,11 +269,11 @@ def _role_present(
if not policy_document:
policy = __salt__['boto_iam.build_policy'](region, key, keyid,
profile)
if role['assume_role_policy_document'] != policy:
if _sort_policy(role['assume_role_policy_document']) != _sort_policy(policy):
update_needed = True
_policy_document = policy
else:
if role['assume_role_policy_document'] != policy_document:
if _sort_policy(role['assume_role_policy_document']) != _sort_policy(policy_document):
update_needed = True
_policy_document = policy_document
if update_needed:
@ -357,6 +357,19 @@ def _instance_profile_associated(
return ret
def _sort_policy(doc):
# List-type sub-items in policies don't happen to be order-sensitive, but
# compare operations will render them unequal, leading to non-idempotent
# state runs. We'll sort any list-type subitems before comparison to reduce
# the likelihood of false negatives.
if isinstance(doc, list):
return sorted([_sort_policy(i) for i in doc])
elif isinstance(doc, dict):
return dict([(k, _sort_policy(v)) for k, v in doc.items()])
else:
return doc
def _policies_present(
name,
policies=None,

View File

@ -1279,7 +1279,7 @@ def route_table_absent(name, region=None,
def nat_gateway_present(name, subnet_name=None, subnet_id=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None, allocation_id=None):
'''
Ensure a nat gateway exists within the specified subnet
@ -1304,6 +1304,10 @@ def nat_gateway_present(name, subnet_name=None, subnet_id=None,
Id of the subnet within which the nat gateway should exist.
Either subnet_name or subnet_id must be provided.
allocation_id
If specified, the elastic IP address referenced by the ID is
associated with the gateway. Otherwise, a new allocation_id is created and used.
region
Region to connect to.
@ -1337,7 +1341,8 @@ def nat_gateway_present(name, subnet_name=None, subnet_id=None,
r = __salt__['boto_vpc.create_nat_gateway'](subnet_name=subnet_name,
subnet_id=subnet_id,
region=region, key=key,
keyid=keyid, profile=profile)
keyid=keyid, profile=profile,
allocation_id=allocation_id)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create nat gateway: {0}.'.format(r['error']['message'])

View File

@ -32,7 +32,7 @@ from salt.modules.docker import (
STOP_TIMEOUT,
VALID_CREATE_OPTS,
_validate_input,
_get_repo_tag
_get_repo_tag,
)
# pylint: enable=no-name-in-module,import-error
import salt.utils
@ -100,7 +100,7 @@ def _prep_input(kwargs):
configure in an SLS file as a dictlist. If the data type is a string, then
skip repacking and let _validate_input() try to sort it out.
'''
for kwarg in ('environment', 'lxc_conf'):
for kwarg in ('environment', 'lxc_conf', 'sysctls'):
kwarg_value = kwargs.get(kwarg)
if kwarg_value is not None \
and not isinstance(kwarg_value, six.string_types):
@ -223,6 +223,7 @@ def _compare(actual, create_kwargs, defaults_from_image):
ret.update({item: {'old': actual_ports,
'new': desired_ports}})
continue
elif item == 'volumes':
if actual_data is None:
actual_data = []
@ -390,6 +391,7 @@ def _compare(actual, create_kwargs, defaults_from_image):
# sometimes `[]`. We have to deal with it.
if bool(actual_data) != bool(data):
ret.update({item: {'old': actual_data, 'new': data}})
elif item == 'labels':
if actual_data is None:
actual_data = {}
@ -416,6 +418,7 @@ def _compare(actual, create_kwargs, defaults_from_image):
if data != actual_data:
ret.update({item: {'old': actual_data, 'new': data}})
continue
elif item == 'devices':
if data:
keys = ['PathOnHost', 'PathInContainer', 'CgroupPermissions']
@ -433,6 +436,35 @@ def _compare(actual, create_kwargs, defaults_from_image):
if data != actual_data:
ret.update({item: {'old': actual_data, 'new': data}})
continue
elif item == 'sysctls':
if actual_data is None:
actual_data = []
actual_sysctls = {}
for sysctl_var in actual_data:
try:
key, val = sysctl_var.split('=', 1)
except (AttributeError, ValueError):
log.warning(
'Unexpected sysctl variable in inspect '
'output {0}'.format(sysctl_var)
)
continue
else:
actual_sysctls[key] = val
log.trace('dockerng.running ({0}): munged actual value: {1}'
.format(item, actual_sysctls))
sysctls_diff = {}
for key in data:
actual_val = actual_sysctls.get(key)
if data[key] != actual_val:
sysctls_ptr = sysctls_diff.setdefault(item, {})
sysctls_ptr.setdefault('old', {})[key] = actual_val
sysctls_ptr.setdefault('new', {})[key] = data[key]
if sysctls_diff:
ret.update(sysctls_diff)
continue
elif item == 'security_opt':
if actual_data is None:
actual_data = []
@ -448,6 +480,7 @@ def _compare(actual, create_kwargs, defaults_from_image):
ret.update({item: {'old': actual_data,
'new': desired_data}})
continue
elif item in ('cmd', 'command', 'entrypoint'):
if (actual_data is None and item not in create_kwargs and
_image_get(config['image_path'])):
@ -511,6 +544,9 @@ def image_present(name,
insecure_registry=False,
client_timeout=CLIENT_TIMEOUT,
dockerfile=None,
sls=None,
base='opensuse/python',
saltenv='base',
**kwargs):
'''
Ensure that an image is present. The image can either be pulled from a
@ -546,7 +582,7 @@ def image_present(name,
- build: /home/myuser/docker/myimage
- dockerfile: Dockerfile.alternative
.. versionadded:: develop
.. versionadded:: 2016.11.0
The image will be built using :py:func:`docker.build
<salt.modules.docker.build>` and the specified image name and tag
@ -575,7 +611,33 @@ def image_present(name,
Allows for an alternative Dockerfile to be specified. Path to alternative
Dockerfile is relative to the build path for the Docker container.
.. versionadded:: develop
.. versionadded:: 2016.11.0
sls
Allow for building images with ``dockerng.sls_build`` by specifying the
sls files to build with. This can be a list or comma-separated string.
.. code-block:: yaml
myuser/myimage:mytag:
dockerng.image_present:
- sls:
- webapp1
- webapp2
- base: centos
- saltenv: base
.. versionadded:: Nitrogen
base
Base image with which to start ``dockerng.sls_build``
.. versionadded:: Nitrogen
saltenv
environment from which to pull sls files for ``dockerng.sls_build``.
.. versionadded:: Nitrogen
'''
ret = {'name': name,
'changes': {},
@ -605,7 +667,7 @@ def image_present(name,
else:
image_info = None
if build:
if build or sls:
action = 'built'
elif load:
action = 'loaded'
@ -632,6 +694,23 @@ def image_present(name,
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
ret['changes'] = image_update
elif sls:
if isinstance(sls, list):
sls = ','.join(sls)
try:
image_update = __salt__['dockerng.sls_build'](name=image,
base=base,
mods=sls,
saltenv=saltenv)
except Exception as exc:
ret['comment'] = (
'Encountered error using sls {0} for building {1}: {2}'
.format(sls, image, exc)
)
return ret
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
ret['changes'] = image_update
elif load:
try:
image_update = __salt__['docker.load'](path=load, image=image)
@ -1521,6 +1600,29 @@ def running(name,
This option requires Docker 1.5.0 or newer.
ulimits
List of ulimits. These limits should be passed in
the format ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard
limit being optional.
.. code-block:: yaml
foo:
dockerng.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
Ulimits can be passed as a YAML list instead of a comma-separated list:
.. code-block:: yaml
foo:
dockerng.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
- nproc=60
labels
Add Metadata to the container. Can be a list of strings/dictionaries
or a dictionary of strings (keys and values).
@ -1600,6 +1702,34 @@ def running(name,
`Configure logging drivers`_ documentation for more information.
.. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/
sysctls
Either a list of variable/value mappings, or a list of strings in the
format ``VARNAME=value``. The below two examples are equivalent:
.. code-block:: yaml
foo:
docker.running:
- image: bar/baz:latest
- sysctls:
- VAR1: value
- VAR2: value
.. code-block:: yaml
foo:
docker.running:
- image: bar/baz:latest
- sysctls:
- VAR1=value
- VAR2=value
.. note::
Values must be strings. Otherwise it will be considered
as an error.
'''
ret = {'name': name,
'changes': {},

View File

@ -283,7 +283,8 @@ import salt.utils.files
import salt.utils.templates
import salt.utils.url
from salt.utils.locales import sdecode
from salt.exceptions import CommandExecutionError
from salt.exceptions import CommandExecutionError, SaltInvocationError
if salt.utils.is_windows():
import salt.utils.win_dacl
@ -1121,7 +1122,10 @@ def symlink(
mode
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows
on Windows.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
'''
name = os.path.expanduser(name)
@ -1446,7 +1450,7 @@ def managed(name,
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
@ -1585,7 +1589,10 @@ def managed(name,
is running as on the minion. On Windows, this is ignored
mode
The mode to set on this file, e.g. ``644``, ``0775``, or ``4664``.
The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
.. note::
This option is **not** supported on Windows.
@ -1620,6 +1627,9 @@ def managed(name,
will be assigned permissions by adding the execute bit to the mode of
the files.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
replace : True
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
@ -2433,11 +2443,17 @@ def directory(name,
dir_mode / mode
The permissions mode to set any directories created. Not supported on
Windows
Windows.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
file_mode
The permissions mode to set any files created if 'mode' is run in
'recurse'. This defaults to dir_mode. Not supported on Windows
'recurse'. This defaults to dir_mode. Not supported on Windows.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
makedirs
If the directory is located in a path without a parent directory, then
@ -2875,14 +2891,19 @@ def recurse(name,
salt is running as on the minion. On Windows, this is ignored
dir_mode
The mode to set on any directories created.
The permissions mode to set on any directories created.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
.. note::
This option is **not** supported on Windows.
file_mode
The mode to set on any files created.
Windows
The permissions mode to set on any files created.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
.. note::
This option is **not** supported on Windows.
@ -2896,7 +2917,10 @@ def recurse(name,
incompatible with the ``contents`` options.
sym_mode
The mode to set on any symlink created.
The permissions mode to set on any symlink created.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
.. note::
This option is **not** supported on Windows.
@ -3542,8 +3566,7 @@ def line(name, content=None, match=None, mode=None, location=None,
if not name:
return _error(ret, 'Must provide name to file.line')
if create and not os.path.isfile(name):
managed(name, create=create, user=user, group=group, mode=file_mode)
managed(name, create=create, user=user, group=group, mode=file_mode)
check_res, check_msg = _check_file(name)
if not check_res:
@ -3822,7 +3845,7 @@ def blockreplace(
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
@ -3831,7 +3854,8 @@ def blockreplace(
.. code-block:: yaml
file_override_example:
file.managed:
file.blockreplace:
- name: /etc/example.conf
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
@ -3856,45 +3880,8 @@ def blockreplace(
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
When using a source hash file the source_hash argument needs to be a
url, the standard download urls are supported, ftp, http, salt etc:
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following is an example of the supported source_hash format:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the sls Data**
Examples:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template
The named templating engine will be used to render the downloaded file.
@ -4349,34 +4336,8 @@ def append(name,
sha1 40
md5 32
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
Debian file type ``*.dsc`` is supported.
Examples:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template
The named templating engine will be used to render the appended-to file.
@ -4790,7 +4751,6 @@ def prepend(name,
def patch(name,
source=None,
hash=None,
options='',
dry_run_first=True,
**kwargs):
@ -4812,11 +4772,13 @@ def patch(name,
salt://spam/eggs. A source is required.
hash
Hash of the patched file. If the hash of the target file matches this
value then the patch is assumed to have been applied. The hash string
is as string in the form <hash_type>:<hash_value>. For example:
md5:e138491e9d5b97023cea823fe17bac22. For more informations, check the
:mod:`file.check_hash <salt.modules.file.check_hash>` module.
The hash of the patched file. If the hash of the target file matches
this value then the patch is assumed to have been applied. For versions
2016.11.4 and newer, the hash can be specified without an accompanying
hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier
releases it is necessary to also specify the hash type in the format
``<hash_type>:<hash_value>`` (e.g.
``md5:e138491e9d5b97023cea823fe17bac22``).
options
Extra options to pass to patch.
@ -4829,7 +4791,7 @@ def patch(name,
by the ``source`` parameter. If not provided, this defaults to the
environment from which the state is being executed.
Usage:
**Usage:**
.. code-block:: yaml
@ -4837,8 +4799,15 @@ def patch(name,
/opt/file.txt:
file.patch:
- source: salt://file.patch
- hash: md5:e138491e9d5b97023cea823fe17bac22
- hash: e138491e9d5b97023cea823fe17bac22
.. note::
For minions running version 2016.11.3 or older, the hash in the example
above would need to be specified with the hash type (i.e.
``md5:e138491e9d5b97023cea823fe17bac22``).
'''
hash_ = kwargs.pop('hash', None)
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
@ -4858,11 +4827,16 @@ def patch(name,
return _error(ret, check_msg)
if not source:
return _error(ret, 'Source is required')
if hash is None:
if hash_ is None:
return _error(ret, 'Hash is required')
if hash and __salt__['file.check_hash'](name, hash):
ret.update(result=True, comment='Patch is already applied')
try:
if hash_ and __salt__['file.check_hash'](name, hash_):
ret['result'] = True
ret['comment'] = 'Patch is already applied'
return ret
except (SaltInvocationError, ValueError) as exc:
ret['comment'] = exc.__str__()
return ret
# get cached file or copy it to cache
@ -4873,9 +4847,8 @@ def patch(name,
return ret
log.debug(
'State patch.applied cached source {0} -> {1}'.format(
source, cached_source_path
)
'State patch.applied cached source %s -> %s',
source, cached_source_path
)
if dry_run_first or __opts__['test']:
@ -4886,20 +4859,18 @@ def patch(name,
ret['comment'] = 'File {0} will be patched'.format(name)
ret['result'] = None
return ret
if ret['changes']['retcode']:
if ret['changes']['retcode'] != 0:
return ret
ret['changes'] = __salt__['file.patch'](
name, cached_source_path, options=options
)
ret['result'] = not ret['changes']['retcode']
if ret['result'] and hash and not __salt__['file.check_hash'](name, hash):
ret.update(
result=False,
comment='File {0} hash mismatch after patch was applied'.format(
name
)
)
ret['result'] = ret['changes']['retcode'] == 0
# No need to check for SaltInvocationError or ValueError this time, since
# these exceptions would have been caught above.
if ret['result'] and hash_ and not __salt__['file.check_hash'](name, hash_):
ret['result'] = False
ret['comment'] = 'Hash mismatch after patch was applied'
return ret
@ -5030,7 +5001,10 @@ def copy(
The permissions to set on the copied file, aka 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows
Not supported on Windows.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
subdir
.. versionadded:: 2015.5.0
@ -5410,6 +5384,9 @@ def serialize(name,
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it's not enforced.
.. note::
This option is **not** supported on Windows.

114
salt/states/loop.py Normal file
View File

@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
'''
Loop state
Allows for looping over execution modules.
.. versionadded:: Nitrogen
.. code-block:: yaml
wait_for_service_to_be_healthy:
loop.until:
- name: boto_elb.get_instance_health
- condition: m_ret[0]['state'] == 'InService'
- period: 5
- timeout: 20
- m_args:
- {{ elb }}
- m_kwargs:
keyid: {{ access_key }}
key: {{ secret_key }}
instances: "{{ instance }}"
.. warning::
This state allows arbitrary python code to be executed through the condition
parameter which is literally evaluated within the state. Please use caution.
'''
# Import python libs
from __future__ import absolute_import
import logging
import time
# Initialize logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'loop'
def __virtual__():
return True
def until(name,
          m_args=None,
          m_kwargs=None,
          condition=None,
          period=0,
          timeout=604800):
    '''
    Loop over an execution module until a condition is met.

    name
        The name of the execution module

    m_args
        The execution module's positional arguments (defaults to no
        positional arguments)

    m_kwargs
        The execution module's keyword arguments (defaults to no keyword
        arguments)

    condition
        The condition which must be met for the loop to break. This
        should contain ``m_ret`` which is the return from the execution
        module.

    period
        The number of seconds to wait between executions

    timeout
        The timeout in seconds
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if name not in __salt__:
        ret['comment'] = 'Cannot find module {0}'.format(name)
        return ret
    if condition is None:
        ret['comment'] = 'An exit condition must be specified'
        return ret
    if not isinstance(period, int):
        ret['comment'] = 'Period must be specified as an integer in seconds'
        return ret
    if not isinstance(timeout, int):
        ret['comment'] = 'Timeout must be specified as an integer in seconds'
        return ret

    # Normalize the None defaults before unpacking below. The original
    # code passed them straight through as ``*m_args, **m_kwargs``, which
    # raised a TypeError whenever either argument was omitted.
    if m_args is None:
        m_args = []
    if m_kwargs is None:
        m_kwargs = {}

    if __opts__['test']:
        ret['comment'] = 'The execution module {0} will be run'.format(name)
        ret['result'] = None
        return ret

    # Compute an absolute deadline once; clearer than the original
    # closure-over-rebound-local ``timed_out()`` helper.
    deadline = time.time() + timeout
    while time.time() < deadline:
        m_ret = __salt__[name](*m_args, **m_kwargs)
        # ``condition`` is arbitrary Python evaluated with ``m_ret`` in
        # scope -- a documented (and warned-about) feature of this state.
        if eval(condition):  # pylint: disable=W0123
            ret['result'] = True
            ret['comment'] = 'Condition {0} was met'.format(condition)
            return ret
        time.sleep(period)

    ret['comment'] = 'Timed out while waiting for condition {0}'.format(condition)
    return ret

View File

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
'''
r'''
Execution of Salt modules from within states
============================================
@ -91,6 +91,23 @@ arguments. For example:
delvol_on_destroy: 'True'
}
Another example that creates a recurring task that runs a batch file on a
Windows system:
.. code-block:: yaml
eventsviewer:
module.run:
- name: task.create_task
- m_name: 'events-viewer'
- user_name: System
- kwargs: {
action_type: 'Execute',
cmd: 'c:\netops\scripts\events_viewer.bat',
trigger_type: 'Daily',
start_date: '2017-1-20',
start_time: '11:59PM'
}
'''
from __future__ import absolute_import

View File

@ -64,6 +64,7 @@ import time
# Import Salt libs
import salt.utils
from salt.utils.args import get_function_argspec as _argspec
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
@ -293,7 +294,12 @@ def _available(name, ret):
return avail
def running(name, enable=None, sig=None, init_delay=None, **kwargs):
def running(name,
enable=None,
sig=None,
init_delay=None,
no_block=False,
**kwargs):
'''
Ensure that the service is running
@ -316,6 +322,11 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs):
for requisite states wherein a dependent state might assume a service
has started but is not yet fully initialized.
no_block : False
**For systemd minions only.** Starts the service using ``--no-block``.
.. versionadded:: Nitrogen
.. note::
``watch`` can be used with service.running to restart a service when
another state changes ( example: a file.managed state that creates the
@ -371,7 +382,16 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs):
if enable is True:
ret.update(_enable(name, False, result=False, **kwargs))
func_ret = __salt__['service.start'](name)
start_kwargs = {}
if no_block:
if 'no_block' in _argspec(__salt__['service.start']).args:
start_kwargs = {'no_block': no_block}
else:
ret.setdefault('warnings', []).append(
'The \'no_block\' argument is not supported on this platform.'
)
func_ret = __salt__['service.start'](name, **start_kwargs)
if not func_ret:
ret['result'] = False
@ -416,7 +436,12 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs):
return ret
def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
def dead(name,
enable=None,
sig=None,
init_delay=None,
no_block=False,
**kwargs):
'''
Ensure that the named service is dead by stopping the service if it is running
@ -434,6 +459,12 @@ def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
init_delay
Add a sleep command (in seconds) before the check to make sure service
is killed.
.. versionadded:: Nitrogen
no_block : False
**For systemd minions only.** Stops the service using ``--no-block``.
.. versionadded:: Nitrogen
'''
ret = {'name': name,
@ -484,7 +515,16 @@ def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
ret['comment'] = 'Service {0} is set to be killed'.format(name)
return ret
func_ret = __salt__['service.stop'](name)
stop_kwargs = {}
if no_block:
if 'no_block' in _argspec(__salt__['service.stop']).args:
stop_kwargs = {'no_block': no_block}
else:
ret.setdefault('warnings', []).append(
'The \'no_block\' argument is not supported on this platform.'
)
func_ret = __salt__['service.stop'](name, **stop_kwargs)
if not func_ret:
ret['result'] = False
ret['comment'] = 'Service {0} failed to die'.format(name)

View File

@ -25,6 +25,20 @@ import os
# Import salt libs
from salt.exceptions import CommandNotFoundError
import salt.utils
# Define the state's virtual name
__virtualname__ = 'ssh_known_hosts'
def __virtual__():
'''
Does not work on Windows, requires ssh module functions
'''
if salt.utils.is_windows():
return False, 'ssh_known_hosts: Does not support Windows'
return __virtualname__
def present(

View File

@ -497,6 +497,9 @@ def present(name,
if key == 'passwd' and not empty_password:
__salt__['shadow.set_password'](name, password)
continue
if key == 'passwd' and empty_password:
log.warn("No password will be set when empty_password=True")
continue
if key == 'date':
__salt__['shadow.set_date'](name, date)
continue

View File

@ -36,7 +36,8 @@ except ImportError:
'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR',
'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR',
'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR',
'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH'):
'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH',
'SHARE_DIR'):
setattr(__generated_syspaths, key, None)
@ -74,6 +75,19 @@ if CONFIG_DIR is None:
else:
CONFIG_DIR = os.path.join(ROOT_DIR, 'etc', 'salt')
SHARE_DIR = __generated_syspaths.SHARE_DIR
if SHARE_DIR is None:
if __PLATFORM.startswith('win'):
SHARE_DIR = os.path.join(ROOT_DIR, 'share')
elif 'freebsd' in __PLATFORM:
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'local', 'share', 'salt')
elif 'netbsd' in __PLATFORM:
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt')
elif 'sunos5' in __PLATFORM:
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt')
else:
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt')
CACHE_DIR = __generated_syspaths.CACHE_DIR
if CACHE_DIR is None:
CACHE_DIR = os.path.join(ROOT_DIR, 'var', 'cache', 'salt')
@ -125,6 +139,7 @@ if SPM_REACTOR_PATH is None:
__all__ = [
'ROOT_DIR',
'SHARE_DIR',
'CONFIG_DIR',
'CACHE_DIR',
'SOCK_DIR',

View File

@ -243,6 +243,7 @@ class AESReqServerMixin(object):
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'act': 'denied',
'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear',
@ -331,6 +332,7 @@ class AESReqServerMixin(object):
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'act': 'denied',
'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear',

View File

@ -351,7 +351,7 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
if (self.opts['ipv6'] is True or ':' in self.opts['master_ip']) and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self._socket.setsockopt(zmq.IPV4ONLY, 0)

View File

@ -751,10 +751,11 @@ def ip_bracket(addr):
return addr
def dns_check(addr, safe=False, ipv6=False):
def dns_check(addr, port, safe=False, ipv6=None, connect=True):
'''
Return the ip resolved by dns, but do not exit on failure, only raise an
exception. Obeys system preference for IPv4/6 address resolution.
Tries to connect to the address before considering it useful.
'''
error = False
lookup = addr
@ -769,18 +770,30 @@ def dns_check(addr, safe=False, ipv6=False):
if not hostnames:
error = True
else:
addr = False
resolved = False
for h in hostnames:
if h[0] == socket.AF_INET:
addr = ip_bracket(h[4][0])
if h[0] == socket.AF_INET and ipv6 is True:
continue
if h[0] == socket.AF_INET6 and ipv6 is False:
continue
if h[0] == socket.AF_INET6 and connect is False and ipv6 is None:
continue
candidate_addr = ip_bracket(h[4][0])
if not connect:
resolved = candidate_addr
s = socket.socket(h[0], socket.SOCK_STREAM)
try:
s.connect((candidate_addr.strip('[]'), port))
s.close()
resolved = candidate_addr
break
elif h[0] == socket.AF_INET6:
if not ipv6:
seen_ipv6 = True
continue
addr = ip_bracket(h[4][0])
break
if not addr:
except socket.error:
pass
if not resolved:
error = True
except TypeError:
err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup)
@ -789,10 +802,7 @@ def dns_check(addr, safe=False, ipv6=False):
error = True
if error:
if seen_ipv6 and not addr:
err = ('DNS lookup of \'{0}\' failed, but ipv6 address ignored. Enable ipv6 in config to use it.').format(lookup)
else:
err = ('DNS lookup of \'{0}\' failed.').format(lookup)
err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr)
if safe:
if salt.log.is_console_configured():
# If logging is not configured it also means that either
@ -801,7 +811,7 @@ def dns_check(addr, safe=False, ipv6=False):
log.error(err)
raise SaltClientError()
raise SaltSystemExit(code=42, msg=err)
return addr
return resolved
def required_module_list(docstring=None):

View File

@ -66,12 +66,15 @@ class ContextDict(collections.MutableMapping):
then allow any children to override the values of the parent.
'''
def __init__(self, **data):
def __init__(self, threadsafe=False, **data):
# state should be thread local, so this object can be threadsafe
self._state = threading.local()
# variable for the overridden data
self._state.data = None
self.global_data = {}
# Threadsafety indicates whether or not we should protect data stored
# in child context dicts from being leaked
self._threadsafe = threadsafe
@property
def active(self):
@ -89,7 +92,7 @@ class ContextDict(collections.MutableMapping):
'''
Clone this context, and return the ChildContextDict
'''
child = ChildContextDict(parent=self, overrides=kwargs)
child = ChildContextDict(parent=self, threadsafe=self._threadsafe, overrides=kwargs)
return child
def __setitem__(self, key, val):
@ -127,19 +130,24 @@ class ChildContextDict(collections.MutableMapping):
'''An overrideable child of ContextDict
'''
def __init__(self, parent, overrides=None):
def __init__(self, parent, overrides=None, threadsafe=False):
self.parent = parent
self._data = {} if overrides is None else overrides
self._old_data = None
# merge self.global_data into self._data
for k, v in six.iteritems(self.parent.global_data):
if k not in self._data:
# A deepcopy is necessary to avoid using the same
# objects in globals as we do in thread local storage.
# Otherwise, changing one would automatically affect
# the other.
self._data[k] = copy.deepcopy(v)
if threadsafe:
for k, v in six.iteritems(self.parent.global_data):
if k not in self._data:
# A deepcopy is necessary to avoid using the same
# objects in globals as we do in thread local storage.
# Otherwise, changing one would automatically affect
# the other.
self._data[k] = copy.deepcopy(v)
else:
for k, v in six.iteritems(self.parent.global_data):
if k not in self._data:
self._data[k] = v
def __setitem__(self, key, val):
self._data[key] = val

View File

@ -588,3 +588,21 @@ class _WithDeprecated(_DeprecationDecorator):
with_deprecated = _WithDeprecated
def ignores_kwargs(*kwarg_names):
    '''
    Decorator factory which filters out unexpected keyword arguments
    before invoking the wrapped function.

    kwarg_names:
        List of argument names to ignore
    '''
    # Function-scope import keeps this block self-contained; wraps()
    # preserves the decorated function's __name__/__doc__ so that
    # introspection and logging see the real function, not the wrapper.
    from functools import wraps

    def _ignores_kwargs(fn):
        @wraps(fn)
        def __ignores_kwargs(*args, **kwargs):
            # Build a filtered copy so the caller's dict is never mutated.
            kwargs_filtered = {k: v for k, v in kwargs.items()
                               if k not in kwarg_names}
            return fn(*args, **kwargs_filtered)
        return __ignores_kwargs
    return _ignores_kwargs

View File

@ -24,6 +24,7 @@ import salt.utils
import salt.utils.itertools
import salt.utils.url
import salt.fileserver
from salt.utils.odict import OrderedDict
from salt.utils.process import os_is_running as pid_exists
from salt.exceptions import (
FileserverConfigError,
@ -85,13 +86,15 @@ try:
GitError = pygit2.errors.GitError
except AttributeError:
GitError = Exception
except Exception as err: # cffi VerificationError also may happen
HAS_PYGIT2 = False # and pygit2 requrests re-compilation
# on a production system (!),
# but cffi might be absent as well!
# Therefore just a generic Exception class.
if not isinstance(err, ImportError):
log.error('Import pygit2 failed: %s', err)
except Exception as exc:
# Exceptions other than ImportError can be raised in cases where there is a
# problem with cffi (such as when python-cffi is upgraded and pygit2 tries
# to rebuild itself against the newer cffi). Therefore, we simply will
# catch a generic exception, and log the exception if it is anything other
# than an ImportError.
HAS_PYGIT2 = False
if not isinstance(exc, ImportError):
log.exception('Failed to import pygit2')
# pylint: enable=import-error
@ -243,10 +246,10 @@ class GitProvider(object):
else:
msg = (
'Invalid {0} configuration parameter \'{1}\' in '
'remote {2}. Valid parameters are: {3}.'.format(
'remote \'{2}\'. Valid parameters are: {3}.'.format(
self.role,
param,
self.url,
self.id,
', '.join(valid_per_remote_params)
)
)
@ -314,11 +317,8 @@ class GitProvider(object):
if not isinstance(self.url, six.string_types):
log.critical(
'Invalid {0} remote \'{1}\'. Remotes must be strings, you '
'may need to enclose the URL in quotes'.format(
self.role,
self.id
)
'Invalid %s remote \'%s\'. Remotes must be strings, you '
'may need to enclose the URL in quotes', self.role, self.id
)
failhard(self.role)
@ -330,6 +330,15 @@ class GitProvider(object):
self.hash = hash_type(self.id).hexdigest()
self.cachedir_basename = getattr(self, 'name', self.hash)
self.cachedir = salt.utils.path_join(cache_root, self.cachedir_basename)
self.linkdir = salt.utils.path_join(cache_root,
'links',
self.cachedir_basename)
try:
# Remove linkdir if it exists
salt.utils.rm_rf(self.linkdir)
except OSError:
pass
if not os.path.isdir(self.cachedir):
os.makedirs(self.cachedir)
@ -340,7 +349,7 @@ class GitProvider(object):
'{2}'.format(self.role, self.id, exc))
if isinstance(self, GitPython):
msg += ' Perhaps git is not available.'
log.critical(msg, exc_info_on_loglevel=logging.DEBUG)
log.critical(msg, exc_info=True)
failhard(self.role)
def _get_envs_from_ref_paths(self, refs):
@ -665,7 +674,7 @@ class GitProvider(object):
self._get_lock_file(lock_type),
exc
)
log.error(msg, exc_info_on_loglevel=logging.DEBUG)
log.error(msg, exc_info=True)
raise GitLockError(exc.errno, msg)
msg = 'Set {0} lock for {1} remote \'{2}\''.format(
lock_type,
@ -1255,7 +1264,7 @@ class Pygit2(GitProvider):
log.error(
'pygit2 was unable to get SHA for %s in %s remote '
'\'%s\'', local_ref, self.role, self.id,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
return None
@ -1333,7 +1342,7 @@ class Pygit2(GitProvider):
'Unable to resolve %s from %s remote \'%s\' '
'to either an annotated or non-annotated tag',
tag_ref, self.role, self.id,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
return None
@ -1349,7 +1358,7 @@ class Pygit2(GitProvider):
log.error(
'Failed to checkout %s from %s remote \'%s\': %s',
tgt_ref, self.role, self.id, exc,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
return None
log.error(
@ -1506,19 +1515,19 @@ class Pygit2(GitProvider):
'You may need to add ssh:// to the repo string or '
'libgit2 must be compiled with libssh2 to support '
'SSH authentication.', self.role, self.id,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
elif 'authentication required but no callback set' in exc_str:
log.error(
'%s remote \'%s\' requires authentication, but no '
'authentication configured', self.role, self.id,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
else:
log.error(
'Error occurred fetching %s remote \'%s\': %s',
self.role, self.id, exc,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
return False
try:
@ -1800,13 +1809,21 @@ class GitBase(object):
Base class for gitfs/git_pillar
'''
def __init__(self, opts, valid_providers=VALID_PROVIDERS, cache_root=None):
'''
IMPORTANT: If specifying a cache_root, understand that this is also
where the remotes will be cloned. A non-default cache_root is only
really designed right now for winrepo, as its repos need to be checked
out into the winrepo locations and not within the cachedir.
'''
self.opts = opts
self.valid_providers = valid_providers
self.get_provider()
if cache_root is not None:
self.cache_root = cache_root
self.cache_root = self.remote_root = cache_root
else:
self.cache_root = salt.utils.path_join(self.opts['cachedir'], self.role)
self.cache_root = salt.utils.path_join(self.opts['cachedir'],
self.role)
self.remote_root = salt.utils.path_join(self.cache_root, 'remotes')
self.env_cache = salt.utils.path_join(self.cache_root, 'envs.p')
self.hash_cachedir = salt.utils.path_join(
self.cache_root, 'hash')
@ -2035,7 +2052,7 @@ class GitBase(object):
log.error(
'Exception caught while fetching %s remote \'%s\': %s',
self.role, repo.id, exc,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
return changed
@ -2317,7 +2334,7 @@ class GitBase(object):
repo.role,
repo.id,
exc,
exc_info_on_loglevel=logging.DEBUG
exc_info=True
)
break
else:
@ -2641,7 +2658,8 @@ class GitPillar(GitBase):
'''
Checkout the targeted branches/tags from the git_pillar remotes
'''
self.pillar_dirs = {}
self.pillar_dirs = OrderedDict()
self.pillar_linked_dirs = []
for repo in self.remotes:
cachedir = self.do_checkout(repo)
if cachedir is not None:
@ -2651,7 +2669,71 @@ class GitPillar(GitBase):
else:
base_branch = self.opts['{0}_base'.format(self.role)]
env = 'base' if repo.branch == base_branch else repo.branch
self.pillar_dirs[cachedir] = env
if repo._mountpoint:
if self.link_mountpoint(repo, cachedir):
self.pillar_dirs[repo.linkdir] = env
self.pillar_linked_dirs.append(repo.linkdir)
else:
self.pillar_dirs[cachedir] = env
def link_mountpoint(self, repo, cachedir):
    '''
    Ensure that the mountpoint is linked to the passed cachedir

    Creates (or repairs) a symlink at <repo.linkdir>/<repo._mountpoint>
    pointing at ``cachedir``, creating the parent directory or removing a
    pre-existing file/dir at that path as needed.

    Returns ``True`` on success, ``False`` on failure. Failures are logged
    rather than raised.
    '''
    lcachelink = salt.utils.path_join(repo.linkdir, repo._mountpoint)
    if not os.path.islink(lcachelink):
        ldirname = os.path.dirname(lcachelink)
        try:
            os.symlink(cachedir, lcachelink)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                # The parent dir does not exist, create it and then
                # re-attempt to create the symlink
                try:
                    os.makedirs(ldirname)
                except OSError as exc:
                    log.error(
                        'Failed to create path %s: %s',
                        ldirname, exc.__str__()
                    )
                    return False
                else:
                    # Bind the new exception here; without "as exc" the
                    # log line below would report the stale ENOENT error
                    # from the first symlink attempt instead of the real
                    # cause of this failure.
                    try:
                        os.symlink(cachedir, lcachelink)
                    except OSError as exc:
                        log.error(
                            'Could not create symlink to %s at path %s: %s',
                            cachedir, lcachelink, exc.__str__()
                        )
                        return False
            elif exc.errno == errno.EEXIST:
                # A file or dir already exists at this path, remove it and
                # then re-attempt to create the symlink
                try:
                    salt.utils.rm_rf(lcachelink)
                except OSError as exc:
                    log.error(
                        'Failed to remove file/dir at path %s: %s',
                        lcachelink, exc.__str__()
                    )
                    return False
                else:
                    # Same re-binding fix as above: log the error from
                    # *this* symlink attempt, not the earlier EEXIST one.
                    try:
                        os.symlink(cachedir, lcachelink)
                    except OSError as exc:
                        log.error(
                            'Could not create symlink to %s at path %s: %s',
                            cachedir, lcachelink, exc.__str__()
                        )
                        return False
            else:
                # Other kind of error encountered
                log.error(
                    'Could not create symlink to %s at path %s: %s',
                    cachedir, lcachelink, exc.__str__()
                )
                return False
    return True
def update(self):
'''

View File

@ -762,7 +762,11 @@ def _render(template, render, renderer, template_dict, opts):
rend = salt.loader.render(opts, {})
blacklist = opts.get('renderer_blacklist')
whitelist = opts.get('renderer_whitelist')
return compile_template(template, rend, renderer, blacklist, whitelist, **template_dict)
ret = compile_template(template, rend, renderer, blacklist, whitelist, **template_dict)
ret = ret.read()
if str(ret).startswith('#!') and not str(ret).startswith('#!/'):
ret = str(ret).split('\n', 1)[1]
return ret
with salt.utils.fopen(template, 'r') as fh_:
return fh_.read()

View File

@ -146,6 +146,14 @@ class MasterPillarUtil(object):
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'data')
if not isinstance(mdata, dict):
log.warning(
'cache.fetch should always return a dict. ReturnedType: {0}, MinionId: {1}'.format(
type(mdata).__name__,
minion_id
)
)
continue
if 'grains' in mdata:
grains[minion_id] = mdata['grains']
if 'pillar' in mdata:

View File

@ -2992,6 +2992,11 @@ class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta,
if self.options.ssh_askpass:
self.options.ssh_passwd = getpass.getpass('Password: ')
for group in self.option_groups:
for option in group.option_list:
if option.dest == 'ssh_passwd':
option.explicit = True
break
def setup_config(self):
    '''
    Load the master configuration from this parser's config file path.

    NOTE(review): delegates to config.master_config; presumably returns
    the parsed master opts dict — confirm against salt.config.
    '''
    return config.master_config(self.get_config_file_path())

View File

@ -916,6 +916,26 @@ class Schedule(object):
'''
Evaluate and execute the schedule
'''
def _splay(splaytime):
    '''
    Calculate splaytime

    ``splaytime`` is either a max value (an int) or a dict with
    ``start``/``end`` bounds. Returns the random splay in seconds, or
    ``None`` when the dict bounds are invalid.
    '''
    result = None
    if not isinstance(splaytime, dict):
        # Plain integer: pick anywhere from 1 up to the given maximum
        result = random.randint(1, splaytime)
    elif splaytime['end'] >= splaytime['start']:
        result = random.randint(splaytime['start'], splaytime['end'])
    else:
        log.error('schedule.handle_func: Invalid Splay, '
                  'end must be larger than start. Ignoring splay.')
    if result:
        log.debug('schedule.handle_func: Adding splay of '
                  '{0} seconds to next run.'.format(result))
    return result
schedule = self.option('schedule')
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
@ -946,10 +966,18 @@ class Schedule(object):
)
if 'name' not in data:
data['name'] = job
# Add up how many seconds between now and then
when = 0
seconds = 0
cron = 0
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
now = int(time.time())
if 'until' in data:
@ -1005,11 +1033,20 @@ class Schedule(object):
continue
if True in [True for item in time_elements if item in data]:
# Add up how many seconds between now and then
seconds += int(data.get('seconds', 0))
seconds += int(data.get('minutes', 0)) * 60
seconds += int(data.get('hours', 0)) * 3600
seconds += int(data.get('days', 0)) * 86400
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + data['_seconds']
if interval < self.loop_interval:
self.loop_interval = interval
elif 'once' in data:
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
@ -1021,10 +1058,8 @@ class Schedule(object):
data['once'], once_fmt)
continue
if now != once:
continue
else:
seconds = 1
if not data['_next_fire_time']:
data['_next_fire_time'] = once
elif 'when' in data:
if not _WHEN_SUPPORTED:
@ -1071,35 +1106,18 @@ class Schedule(object):
when = int(time.mktime(when__.timetuple()))
if when >= now:
_when.append(when)
if data['_splay']:
_when.append(data['_splay'])
_when.sort()
if _when:
# Grab the first element
# which is the next run time
when = _when[0]
# If we're switching to the next run in a list
# ensure the job can run
if '_when' in data and data['_when'] != when:
data['_when_run'] = True
data['_when'] = when
seconds = when - now
# scheduled time is in the past and the run was not triggered before
if seconds < 0 and not data.get('_when_run', False):
continue
if '_when_run' not in data:
data['_when_run'] = True
# Backup the run time
if '_when' not in data:
data['_when'] = when
# A new 'when' ensure _when_run is True
if when > data['_when']:
data['_when'] = when
data['_when_run'] = True
if not data['_next_fire_time'] or \
now > data['_next_fire_time']:
data['_next_fire_time'] = when
else:
continue
@ -1134,91 +1152,44 @@ class Schedule(object):
log.error('Invalid date string. Ignoring')
continue
when = int(time.mktime(when__.timetuple()))
now = int(time.time())
seconds = when - now
# scheduled time is in the past and the run was not triggered before
if seconds < 0 and not data.get('_when_run', False):
continue
if '_when_run' not in data:
data['_when_run'] = True
# Backup the run time
if '_when' not in data:
data['_when'] = when
# A new 'when' ensure _when_run is True
if when > data['_when']:
data['_when'] = when
data['_when_run'] = True
if not data['_next_fire_time'] or \
now > data['_next_fire_time']:
data['_next_fire_time'] = when
elif 'cron' in data:
if not _CRON_SUPPORTED:
log.error('Missing python-croniter. Ignoring job {0}'.format(job))
continue
now = int(time.mktime(datetime.datetime.now().timetuple()))
try:
cron = int(croniter.croniter(data['cron'], now).get_next())
except (ValueError, KeyError):
log.error('Invalid cron string. Ignoring')
continue
seconds = cron - now
if not data['_next_fire_time'] or \
now > data['_next_fire_time']:
try:
data['_next_fire_time'] = int(
croniter.croniter(data['cron'], now).get_next())
except (ValueError, KeyError):
log.error('Invalid cron string. Ignoring')
continue
else:
continue
# Check if the seconds variable is lower than current lowest
# loop interval needed. If it is lower than overwrite variable
# external loops using can then check this variable for how often
# they need to reschedule themselves
# Not used with 'when' parameter, causes run away jobs and CPU
# spikes.
if 'when' not in data:
if seconds < self.loop_interval:
self.loop_interval = seconds
run = False
seconds = data['_next_fire_time'] - now
if data['_splay']:
seconds = data['_splay'] - now
if -1 < seconds <= 0:
run = True
if 'splay' in data:
if 'when' in data:
log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
elif 'cron' in data:
log.error('Unable to use "splay" with "cron" option at this time. Ignoring.')
else:
if '_seconds' not in data:
log.debug('The _seconds parameter is missing, '
'most likely the first run or the schedule '
'has been refreshed refresh.')
if 'seconds' in data:
data['_seconds'] = data['seconds']
else:
data['_seconds'] = 0
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'splay' in data and not data['_splay']:
run = False
splay = _splay(data['splay'])
data['_splay'] = data['_next_fire_time'] + splay
if 'when' in data:
# scheduled time is now or in the past, and the run was triggered before
if seconds <= 0 and data['_when_run']:
data['_when_run'] = False
run = True
elif 'cron' in data:
if seconds == 1:
run = True
else:
if job in self.intervals:
if now - self.intervals[job] >= seconds:
run = True
else:
# If run_on_start is True, the job will run when the Salt
# minion start. If the value is False will run at the next
# scheduled run. Default is True.
if 'run_on_start' in data:
if data['run_on_start']:
run = True
else:
self.intervals[job] = int(time.time())
else:
run = True
if run:
if 'range' in data:
if not _RANGE_SUPPORTED:
log.error('Missing python-dateutil. Ignoring job {0}'.format(job))
@ -1242,7 +1213,7 @@ class Schedule(object):
else:
run = False
else:
if now >= start and now <= end:
if start <= now <= end:
run = True
else:
run = False
@ -1257,30 +1228,8 @@ class Schedule(object):
if not run:
continue
else:
if 'splay' in data:
if 'when' in data:
log.error('Unable to use "splay" with "when" option at this time. Ignoring.')
else:
if isinstance(data['splay'], dict):
if data['splay']['end'] >= data['splay']['start']:
splay = random.randint(data['splay']['start'], data['splay']['end'])
else:
log.error('schedule.handle_func: Invalid Splay, end must be larger than start. \
Ignoring splay.')
splay = None
else:
splay = random.randint(0, data['splay'])
if splay:
log.debug('schedule.handle_func: Adding splay of '
'{0} seconds to next run.'.format(splay))
if 'seconds' in data:
data['seconds'] = data['_seconds'] + splay
else:
data['seconds'] = 0 + splay
log.info('Running scheduled job: {0}'.format(job))
log.info('Running scheduled job: {0}'.format(job))
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
@ -1322,7 +1271,9 @@ class Schedule(object):
if multiprocessing_enabled:
proc.join()
finally:
self.intervals[job] = now
if '_seconds' in data:
data['_next_fire_time'] = now + data['_seconds']
data['_splay'] = None
if salt.utils.is_windows():
# Restore our function references.
self.functions = functions

View File

@ -116,7 +116,7 @@ def __virtual__():
return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None):
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
'''
Shell out and call the specified esxcli commmand, parse the result
and return something sane.
@ -128,6 +128,8 @@ def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None):
:param cmd: esxcli command and arguments
:param esxi_host: If `host` is a vCenter host, then esxi_host is the
ESXi machine on which to execute this command
:param credstore: Optional path to the credential store file
:return: Dictionary
'''
@ -142,6 +144,9 @@ def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None):
if protocol is None:
protocol = 'https'
if credstore:
esx_cmd += ' --credstore \'{0}\''.format(credstore)
if not esxi_host:
# Then we are connecting directly to an ESXi server,
# 'host' points at that server, and esxi_host is a reference to the

Some files were not shown because too many files have changed in this diff Show More