Merge remote-tracking branch 'upstream/develop' into salt_test
Commit: 3a1b6558b6

.mention-bot (new file, 7 lines)
@ -0,0 +1,7 @@
{
  "skipTitle": "Merge forward",
  "delayed": true,
  "delayedUntil": "2h",
  "userBlacklist": []
}
@ -24,7 +24,8 @@ load-plugins=saltpylint.pep8,
saltpylint.fileperms,
saltpylint.py3modernize,
saltpylint.smartup,
saltpylint.minpyver
saltpylint.minpyver,
saltpylint.salttesting

# Use multiple processes to speed up Pylint.
# Don't bump this value on PyLint 1.4.0 - Known bug that ignores the passed --rcfile
@ -70,6 +71,7 @@ disable=W0142,
I0011,
I0012,
W1202,
E1320,
E8402,
E8731
# E8121,
@ -21,7 +21,8 @@ load-plugins=saltpylint.pep8,
saltpylint.fileperms,
saltpylint.py3modernize,
saltpylint.smartup,
saltpylint.minpyver
saltpylint.minpyver,
saltpylint.salttesting

# Use multiple processes to speed up Pylint.
# Don't bump this value on PyLint 1.4.0 - Known bug that ignores the passed --rcfile
@ -71,6 +72,7 @@ disable=R,
E1101,
E1103,
E1136,
E1320,
E8114,
C0102,
C0103,
@ -131,6 +133,7 @@ disable=R,
# E1101 (no-member) [pylint isn't smart enough]
# E1103 (maybe-no-member)
# E1136 (unsubscriptable-object)
# E1320 (un-indexed-curly-braces-error)
# E8114 (indentation-is-not-a-multiple-of-four-comment)
# C0102 (blacklisted-name) [because it activates C0103 too]
# C0103 (invalid-name)
@ -549,11 +549,11 @@
#default_top: base

# The hash_type is the hash to use when discovering the hash of a file on
# the master server. The default is md5 but sha1, sha224, sha256, sha384
# and sha512 are also supported.
# the master server. The default is sha256, but md5, sha1, sha224, sha384 and
# sha512 are also supported.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Prior to changing this value, the master should be stopped and all Salt
# caches should be cleared.
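A hedged illustration of pinning the new default explicitly in the master
configuration file (uncommented, unlike the surrounding sample entries):

    hash_type: sha256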
conf/minion (15 lines changed)
@ -161,7 +161,7 @@
# strip_colors: False

# Backup files that are replaced by file.managed and file.recurse under
# 'cachedir'/file_backups relative to their original location and appended
# 'cachedir'/file_backup relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:
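As a hedged sketch of the two ways this backup behaviour can be enabled (the
target file and source below are illustrative only):

    # In the minion configuration file:
    backup_mode: minion

    # Or per file in a state file:
    /etc/hosts:
      file.managed:
        - source: salt://hosts
        - backup: minion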
@ -574,14 +574,11 @@
#fileserver_limit_traversal: False

# The hash_type is the hash to use when discovering the hash of a file on
# the local fileserver. The default is md5, but sha1, sha224, sha256, sha384
# the local fileserver. The default is sha256, but md5, sha1, sha224, sha384
# and sha512 are also supported.
#
# WARNING: While md5 and sha1 are also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
#
# WARNING: While md5 is also supported, do not use it due to the high chance
# of possible collisions and thus security breach.
# WARNING: While md5 and sha1 are also supported, do not use them due to the
# high chance of possible collisions and thus security breach.
#
# Warning: Prior to changing this value, the minion should be stopped and all
# Salt caches should be cleared.
@ -652,6 +649,10 @@
###########################################
# Disable multiprocessing support, by default when a minion receives a
# publication a new process is spawned and the command is executed therein.
#
# WARNING: Disabling multiprocessing may result in substantial slowdowns
# when processing large pillars. See https://github.com/saltstack/salt/issues/38758
# for a full explanation.
#multiprocessing: True

@ -119,7 +119,7 @@
# strip_colors: False

# Backup files that are replaced by file.managed and file.recurse under
# 'cachedir'/file_backups relative to their original location and appended
# 'cachedir'/file_backup relative to their original location and appended
# with a timestamp. The only valid setting is "minion". Disabled by default.
#
# Alternatively this can be specified for each file in state files:
@ -244,7 +244,7 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'

version = salt.version.__version__
latest_release = '2016.11.2' # latest release
latest_release = '2016.11.3' # latest release
previous_release = '2016.3.5' # latest release from previous branch
previous_release_dir = '2016.3' # path on web server for previous branch
next_release = '' # next release
@ -6093,17 +6093,17 @@ fileserver_list_cache_time: 5
|
||||
.UNINDENT
|
||||
.SS \fBhash_type\fP
|
||||
.sp
|
||||
Default: \fBmd5\fP
|
||||
Default: \fBsha256\fP
|
||||
.sp
|
||||
The hash_type is the hash to use when discovering the hash of a file on
|
||||
the master server. The default is md5, but sha1, sha224, sha256, sha384, and
|
||||
the master server. The default is sha256, but md5, sha1, sha224, sha384, and
|
||||
sha512 are also supported.
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
hash_type: md5
|
||||
hash_type: sha256
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
@ -10539,17 +10539,17 @@ fileserver_limit_traversal: False
|
||||
.UNINDENT
|
||||
.SS \fBhash_type\fP
|
||||
.sp
|
||||
Default: \fBmd5\fP
|
||||
Default: \fBsha256\fP
|
||||
.sp
|
||||
The hash_type is the hash to use when discovering the hash of a file on the
|
||||
local fileserver. The default is md5, but sha1, sha224, sha256, sha384, and
|
||||
local fileserver. The default is sha256, but md5, sha1, sha224, sha384, and
|
||||
sha512 are also supported.
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
hash_type: md5
|
||||
hash_type: sha256
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
@ -11776,11 +11776,11 @@ event that an error is introduced in the latest revision of the repo.
|
||||
#default_top: base
|
||||
|
||||
# The hash_type is the hash to use when discovering the hash of a file on
|
||||
# the master server. The default is md5 but sha1, sha224, sha256, sha384
|
||||
# and sha512 are also supported.
|
||||
# the master server. The default is sha256, but md5, sha1, sha224, sha384 and
|
||||
# sha512 are also supported.
|
||||
#
|
||||
# WARNING: While md5 is also supported, do not use it due to the high chance
|
||||
# of possible collisions and thus security breach.
|
||||
# WARNING: While md5 and sha1 are also supported, do not use them due to the
|
||||
# high chance of possible collisions and thus security breach.
|
||||
#
|
||||
# Prior to changing this value, the master should be stopped and all Salt
|
||||
# caches should be cleared.
|
||||
@ -12459,7 +12459,7 @@ event that an error is introduced in the latest revision of the repo.
|
||||
# strip_colors: False
|
||||
|
||||
# Backup files that are replaced by file.managed and file.recurse under
|
||||
# \(aqcachedir\(aq/file_backups relative to their original location and appended
|
||||
# \(aqcachedir\(aq/file_backup relative to their original location and appended
|
||||
# with a timestamp. The only valid setting is "minion". Disabled by default.
|
||||
#
|
||||
# Alternatively this can be specified for each file in state files:
|
||||
@ -12867,14 +12867,11 @@ event that an error is introduced in the latest revision of the repo.
|
||||
#fileserver_limit_traversal: False
|
||||
|
||||
# The hash_type is the hash to use when discovering the hash of a file on
|
||||
# the local fileserver. The default is md5, but sha1, sha224, sha256, sha384
|
||||
# the local fileserver. The default is sha256, but md5, sha1, sha224, sha384
|
||||
# and sha512 are also supported.
|
||||
#
|
||||
# WARNING: While md5 and sha1 are also supported, do not use it due to the high chance
|
||||
# of possible collisions and thus security breach.
|
||||
#
|
||||
# WARNING: While md5 is also supported, do not use it due to the high chance
|
||||
# of possible collisions and thus security breach.
|
||||
# WARNING: While md5 and sha1 are also supported, do not use them due to the
|
||||
# high chance of possible collisions and thus security breach.
|
||||
#
|
||||
# Warning: Prior to changing this value, the minion should be stopped and all
|
||||
# Salt caches should be cleared.
|
||||
@ -292069,7 +292066,7 @@ the test method:
|
||||
.nf
|
||||
.ft C
|
||||
import integration
|
||||
from salttesting.helpers import destructiveTest
|
||||
from tests.support.helpers import destructiveTest
|
||||
|
||||
class PkgTest(integration.ModuleCase):
|
||||
@destructiveTest
|
||||
@ -292165,7 +292162,7 @@ can be used
|
||||
.nf
|
||||
.ft C
|
||||
# Import logging handler
|
||||
from salttesting.helpers import TestsLoggingHandler
|
||||
from tests.support.helpers import TestsLoggingHandler
|
||||
|
||||
# .. inside test
|
||||
with TestsLoggingHandler() as handler:
|
||||
|
@ -282,6 +282,7 @@ Valid options:
- pillar
- utils
- sdb
- cache

.. conf_master:: module_dirs
@ -1604,6 +1605,25 @@ on a large number of minions.

    fileserver_list_cache_time: 5

.. conf_master:: fileserver_verify_config

``fileserver_verify_config``
----------------------------

.. versionadded:: Nitrogen

Default: ``True``

By default, as the master starts it performs some sanity checks on the
configured fileserver backends. If any of these sanity checks fail (such as
when an invalid configuration is used), the master daemon will abort.

To skip these sanity checks, set this option to ``False``.

.. code-block:: yaml

    fileserver_verify_config: False

.. conf_master:: hash_type

``hash_type``
@ -3013,6 +3033,29 @@ they were created by a different master.

.. __: http://www.gluster.org/

.. conf_master:: git_pillar_includes

``git_pillar_includes``
***********************

.. versionadded:: Nitrogen

Default: ``True``

Normally, when processing :ref:`git_pillar remotes
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
section in the ``ext_pillar`` configuration refers to the same pillar
environment, then each repo in a given environment will have access to the
other repos' files to be referenced in their top files. However, it may be
desirable to disable this behavior. If so, set this value to ``False``.

For a more detailed examination of how includes work, see :ref:`this
explanation <git-pillar-multiple-remotes>` from the git_pillar documentation.

.. code-block:: yaml

    git_pillar_includes: False

.. _git-ext-pillar-auth-opts:

Git External Pillar Authentication Options
@ -3144,6 +3187,25 @@ configured both globally and for individual remotes.
      - '+refs/pull/*/head:refs/remotes/origin/pr/*'
      - '+refs/pull/*/merge:refs/remotes/origin/merge/*'

.. conf_master:: git_pillar_verify_config

``git_pillar_verify_config``
----------------------------

.. versionadded:: Nitrogen

Default: ``True``

By default, as the master starts it performs some sanity checks on the
configured git_pillar repositories. If any of these sanity checks fail (such as
when an invalid configuration is used), the master daemon will abort.

To skip these sanity checks, set this option to ``False``.

.. code-block:: yaml

    git_pillar_verify_config: False

.. _pillar-merging-opts:

Pillar Merging Options
@ -81,9 +81,10 @@ The option can also be set to a list of masters, enabling

``ipv6``
--------

Default: ``False``
Default: ``None``

Whether the master should be connected over IPv6.
Whether the master should be connected over IPv6. By default, the salt minion
will try to automatically detect IPv6 connectivity to the master.

.. code-block:: yaml
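
    # Illustrative value completing the example truncated by the hunk above
    ipv6: True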
@ -1778,13 +1779,15 @@ the environment setting, but for pillar instead of states.

.. code-block:: yaml

    pillarenv: None
    pillarenv: dev

.. conf_minion:: pillarenv_from_saltenv

``pillarenv_from_saltenv``
--------------------------

.. versionadded:: Nitrogen

Default: ``False``

When set to ``True``, the :conf_minion:`pillarenv` value will assume the value
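The hunk above ends mid-sentence; as a hedged illustration of the option it
introduces, enabling it in the minion configuration file looks like:

.. code-block:: yaml

    pillarenv_from_saltenv: True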
@ -25,6 +25,7 @@ environments are not isolated from each other. This allows for logical
isolation of environments by the engineer using Salt, but also allows
for information to be used in multiple environments.

.. _file-roots-directory-overlay:

Directory Overlay
=================
@ -81,4 +82,4 @@ enable running Salt states without a Salt master. To use the local file server
interface, copy the file server data to the minion and set the file_roots
option on the minion to point to the directories copied from the master.
Once the minion ``file_roots`` option has been set, change the ``file_client``
option to local to make sure that the local file server interface is used.
option to local to make sure that the local file server interface is used.
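A minimal, hedged sketch of that masterless setup in the minion configuration
(the file_roots path is illustrative):

.. code-block:: yaml

    file_client: local
    file_roots:
      base:
        - /srv/salt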
@ -419,3 +419,14 @@ and bug resolution. See the :ref:`Labels and Milestones
.. _`Closing issues via commit message`: https://help.github.com/articles/closing-issues-via-commit-messages
.. _`git format-patch`: https://www.kernel.org/pub/software/scm/git/docs/git-format-patch.html
.. _salt-users: https://groups.google.com/forum/#!forum/salt-users

Mentionbot
==========

SaltStack runs a mention-bot which notifies contributors who might be able
to help review incoming pull-requests based on their past contribution to
files which are being changed.

If you do not wish to receive these notifications, please add your GitHub
handle to the blacklist line in the `.mention-bot` file located in the
root of the Salt repository.
@ -29,7 +29,7 @@ Creates a new execution module within salt/modules/{{module_name}}.py
module_unit
^^^^^^^^^^^

Creates a new execution module unit test suite within tests/unit/modules/{{module_name}}_test.py
Creates a new execution module unit test suite within tests/unit/modules/test_{{module_name}}.py

state
^^^^^
@ -39,7 +39,7 @@ Creates a new state module within salt/states/{{module_name}}.py
state_unit
^^^^^^^^^^

Creates a new state module unit test suite within tests/unit/states/{{module_name}}_test.py
Creates a new state module unit test suite within tests/unit/states/test_{{module_name}}.py


Adding templates
@ -103,4 +103,4 @@ salt.utils.extend module
========================

.. automodule:: salt.utils.extend
    :members:
    :members:
@ -429,7 +429,7 @@ Test Helpers
------------

Several Salt-specific helpers are available. A full list is available by inspecting
functions exported in `salttesting.helpers`.
functions exported in `tests.support.helpers`.

`@expensiveTest` -- Designates a test which typically requires a relatively costly
external resource, like a cloud virtual machine. This decorator is not normally
@ -453,10 +453,10 @@ the test will be skipped if the binaries are not all present on the system.
`@skip_if_not_root` -- If the test is not executed as root, it will be skipped.

`@with_system_user` -- Creates and optionally destroys a system user within a test case.
See implementation details in `salttesting.helpers` for details.
See implementation details in `tests.support.helpers` for details.

`@with_system_group` -- Creates and optionally destroys a system group within a test case.
See implementation details in `salttesting.helpers` for details.
See implementation details in `tests.support.helpers` for details.

`@with_system_user_and_group` -- Creates and optionally destroys a system user and group
within a test case. See implementation details in `salttesting.helpers` for details.
within a test case. See implementation details in `tests.support.helpers` for details.
@ -272,7 +272,7 @@ Now the workhorse method ``run_function`` can be used to test a module:
|
||||
.. code-block:: python
|
||||
|
||||
import os
|
||||
import integration
|
||||
import tests.integration as integration
|
||||
|
||||
|
||||
class TestModuleTest(integration.ModuleCase):
|
||||
@ -312,7 +312,7 @@ Validating the shell commands can be done via shell tests:
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
import integration
|
||||
import tests.integration as integration
|
||||
|
||||
class KeyTest(integration.ShellCase):
|
||||
'''
|
||||
@ -345,7 +345,7 @@ Testing salt-ssh functionality can be done using the SSHCase test class:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import integration
|
||||
import tests.integration as integration
|
||||
|
||||
class SSHGrainsTest(integration.SSHCase):
|
||||
'''
|
||||
@ -370,7 +370,7 @@ on a minion event bus.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import integration
|
||||
import tests.integration as integration
|
||||
|
||||
class TestEvent(integration.SaltEventAssertsMixin):
|
||||
'''
|
||||
@ -392,7 +392,7 @@ Testing Salt's Syndic can be done via the SyndicCase test class:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import integration
|
||||
import tests.integration as integration
|
||||
|
||||
class TestSyndic(integration.SyndicCase):
|
||||
'''
|
||||
@ -438,11 +438,9 @@ to test states:
|
||||
import shutil
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
ensure_in_syspath('../../')
|
||||
import tests.integration as integration
|
||||
|
||||
# Import salt libs
|
||||
import integration
|
||||
import salt.utils
|
||||
|
||||
HFILE = os.path.join(integration.TMP, 'hosts')
|
||||
@ -506,8 +504,8 @@ the test method:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import integration
|
||||
from salttesting.helpers import destructiveTest
|
||||
import tests.integration as integration
|
||||
from tests.support.helpers import destructiveTest
|
||||
|
||||
class DestructiveExampleModuleTest(integration.ModuleCase):
|
||||
'''
|
||||
@ -628,7 +626,7 @@ the test function:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from salttesting.helpers import expensiveTest
|
||||
from tests.support.helpers import expensiveTest
|
||||
|
||||
@expensiveTest
|
||||
def test_instance(self):
|
||||
|
@ -122,14 +122,13 @@ Most commonly, the following imports are necessary to create a unit test:
|
||||
.. code-block:: python
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from tests.support.unit import skipIf, TestCase
|
||||
|
||||
If you need mock support to your tests, please also import:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
|
||||
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
|
||||
|
||||
|
||||
Evaluating Truth
|
||||
@ -183,13 +182,13 @@ additional imports for MagicMock:
|
||||
.. code-block:: python
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import TestCase
|
||||
from tests.support.unit import TestCase
|
||||
|
||||
# Import Salt execution module to test
|
||||
from salt.modules import db
|
||||
|
||||
# Import Mock libraries
|
||||
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
|
||||
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch, call
|
||||
|
||||
# Create test case class and inherit from Salt's customized TestCase
|
||||
# Skip this test case if we don't have access to mock!
|
||||
@ -259,7 +258,7 @@ we might write the skeleton for testing ``fib.py``:
|
||||
.. code-block:: python
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import TestCase
|
||||
from tests.support.unit import TestCase
|
||||
|
||||
# Import Salt execution module to test
|
||||
from salt.modules import fib
|
||||
@ -339,17 +338,14 @@ will also redefine the ``__salt__`` dictionary such that it only contains
|
||||
from salt.modules import linux_sysctl
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salttesting.mock import (
|
||||
from tests.support.unit import skipIf, TestCase
|
||||
from tests.support.mock import (
|
||||
MagicMock,
|
||||
patch,
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON
|
||||
)
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Globals
|
||||
linux_sysctl.__salt__ = {}
|
||||
|
||||
@ -368,11 +364,6 @@ will also redefine the ``__salt__`` dictionary such that it only contains
|
||||
with patch.dict(linux_sysctl.__salt__, {'cmd.run': mock_cmd}):
|
||||
self.assertEqual(linux_sysctl.get('net.ipv4.ip_forward'), 1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(LinuxSysctlTestCase, needs_daemon=False)
|
||||
|
||||
Since ``get()`` has only one raise or return statement and that statement is a
|
||||
success condition, the test function is simply named ``test_get()``. As
|
||||
described, the single function call parameter, ``name`` is mocked with
|
||||
@ -450,17 +441,14 @@ with.
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salttesting.mock import (
|
||||
from tests.support.unit import skipIf, TestCase
|
||||
from tests.support.mock import (
|
||||
MagicMock,
|
||||
patch,
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON
|
||||
)
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Globals
|
||||
linux_sysctl.__salt__ = {}
|
||||
|
||||
@ -510,7 +498,3 @@ with.
|
||||
with patch.dict(linux_sysctl.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(linux_sysctl.assign(
|
||||
'net.ipv4.ip_forward', 1), ret)
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(LinuxSysctlTestCase, needs_daemon=False)
|
||||
|
@ -2,129 +2,19 @@
|
||||
Solaris
|
||||
=======
|
||||
|
||||
Salt was added to the OpenCSW package repository in September of 2012 by Romeo
|
||||
Theriault <romeot@hawaii.edu> at version 0.10.2 of Salt. It has mainly been
|
||||
tested on Solaris 10 (sparc), though it is built for and has been tested
|
||||
minimally on Solaris 10 (x86), Solaris 9 (sparc/x86) and 11 (sparc/x86).
|
||||
(Please let me know if you're using it on these platforms!) Most of the testing
|
||||
has also just focused on the minion, though it has verified that the master
|
||||
starts up successfully on Solaris 10.
|
||||
Salt is known to work on Solaris but community packages are unmaintained.
|
||||
|
||||
Comments and patches for better support on these platforms is very welcome.
|
||||
It is possible to install Salt on Solaris by using `setuptools`.
|
||||
|
||||
As of version 0.10.4, Solaris is well supported under salt, with all of the
|
||||
following working well:
|
||||
|
||||
1. remote execution
|
||||
2. grain detection
|
||||
3. service control with SMF
|
||||
4. 'pkg' states with 'pkgadd' and 'pkgutil' modules
|
||||
5. cron modules/states
|
||||
6. user and group modules/states
|
||||
7. shadow password management modules/states
|
||||
|
||||
Salt is dependent on the following additional packages. These will
|
||||
automatically be installed as dependencies of the ``py_salt`` package:
|
||||
|
||||
- py_yaml
|
||||
- py_pyzmq
|
||||
- py_jinja2
|
||||
- py_msgpack_python
|
||||
- py_m2crypto
|
||||
- py_crypto
|
||||
- python
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
To install Salt from the OpenCSW package repository you first need to install
|
||||
`pkgutil`_ assuming you don't already have it installed:
|
||||
|
||||
On Solaris 10:
|
||||
For example, to install the develop version of salt:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pkgadd -d http://get.opencsw.org/now
|
||||
|
||||
On Solaris 9:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget http://mirror.opencsw.org/opencsw/pkgutil.pkg
|
||||
pkgadd -d pkgutil.pkg all
|
||||
|
||||
Once pkgutil is installed you'll need to edit its config file
|
||||
``/etc/opt/csw/pkgutil.conf`` to point it at the unstable catalog:
|
||||
|
||||
.. code-block:: diff
|
||||
|
||||
- #mirror=http://mirror.opencsw.org/opencsw/testing
|
||||
+ mirror=http://mirror.opencsw.org/opencsw/unstable
|
||||
|
||||
OK, time to install salt.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Update the catalog
|
||||
root> /opt/csw/bin/pkgutil -U
|
||||
# Install salt
|
||||
root> /opt/csw/bin/pkgutil -i -y py_salt
|
||||
|
||||
Minion Configuration
|
||||
====================
|
||||
|
||||
Now that salt is installed you can find its configuration files in
|
||||
``/etc/opt/csw/salt/``.
|
||||
|
||||
You'll want to edit the minion config file to set the name of your salt master
|
||||
server:
|
||||
|
||||
.. code-block:: diff
|
||||
|
||||
- #master: salt
|
||||
+ master: your-salt-server
|
||||
|
||||
If you would like to use `pkgutil`_ as the default package provider for your
|
||||
Solaris minions, you can do so using the :conf_minion:`providers` option in the
|
||||
minion config file.
|
||||
|
||||
You can now start the salt minion like so:
|
||||
|
||||
On Solaris 10:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
svcadm enable salt-minion
|
||||
git clone https://github.com/saltstack/salt
|
||||
cd salt
|
||||
sudo python setup.py install --force
|
||||
|
||||
|
||||
On Solaris 9:
|
||||
.. note::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
/etc/init.d/salt-minion start
|
||||
|
||||
You should now be able to log onto the salt master and check to see if the
|
||||
salt-minion key is awaiting acceptance:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-key -l un
|
||||
|
||||
Accept the key:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-key -a <your-salt-minion>
|
||||
|
||||
Run a simple test against the minion:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '<your-salt-minion>' test.ping
|
||||
|
||||
Troubleshooting
|
||||
===============
|
||||
|
||||
Logs are in ``/var/log/salt``
|
||||
|
||||
.. _pkgutil: http://www.opencsw.org/manual/for-administrators/getting-started.html
|
||||
SaltStack does offer commercial support for Solaris, which includes packages.
|
||||
|
@ -89,12 +89,33 @@ A simpler returner, such as Slack or HipChat, requires:
|
||||
Step 2: Configure the Returner
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
After you understand the configuration and have the external system ready, add
|
||||
the returner configuration settings to the Salt Minion configuration file for
|
||||
the External Job Cache, or to the Salt Master configuration file for the Master
|
||||
Job Cache.
|
||||
After you understand the configuration and have the external system ready, the
|
||||
configuration requirements must be declared.
|
||||
|
||||
For example, MySQL requires:
|
||||
External Job Cache
|
||||
""""""""""""""""""
|
||||
|
||||
The returner configuration settings can be declared in the Salt Minion
|
||||
configuration file, the Minion's pillar data, or the Minion's grains.
|
||||
|
||||
If ``external_job_cache`` configuration settings are specified in more than
|
||||
one place, the options are retrieved in the following order. The first
|
||||
configuration location that is found is the one that will be used.
|
||||
|
||||
- Minion configuration file
|
||||
- Minion's grains
|
||||
- Minion's pillar data
|
||||
|
||||
Master Job Cache
|
||||
""""""""""""""""
|
||||
|
||||
The returner configuration settings for the Master Job Cache should be
|
||||
declared in the Salt Master's configuration file.
|
||||
|
||||
Configuration File Examples
"""""""""""""""""""""""""""

MySQL requires:

.. code-block:: yaml
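
    # Hedged example values; the actual settings depend on your MySQL server
    mysql.host: 'salt'
    mysql.user: 'salt'
    mysql.pass: 'salt'
    mysql.db: 'salt'
    mysql.port: 3306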
|
@ -31,8 +31,10 @@ Mine Functions
|
||||
To enable the Salt Mine the ``mine_functions`` option needs to be applied to a
|
||||
Minion. This option can be applied via the Minion's configuration file, or the
|
||||
Minion's Pillar. The ``mine_functions`` option dictates what functions are
|
||||
being executed and allows for arguments to be passed in. If no arguments are
|
||||
passed, an empty list must be added:
|
||||
being executed and allows for arguments to be passed in. The list of
functions is available in :py:mod:`salt.module`. If no arguments
are passed, an empty list must be added, as in the ``test.ping`` function in
the example below:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -42,6 +44,11 @@ passed, an empty list must be added:
|
||||
interface: eth0
|
||||
cidr: '10.0.0.0/8'
|
||||
|
||||
In the example above :py:mod:`salt.modules.network.ip_addrs` has additional
filters to help narrow down the results. In the above example, IP addresses
are only returned if they are on an eth0 interface and in the 10.0.0.0/8 IP
range.
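As a consolidated, hedged sketch of the ``mine_functions`` block discussed
above (interface and CIDR values are illustrative):

.. code-block:: yaml

    mine_functions:
      test.ping: []
      network.ip_addrs:
        interface: eth0
        cidr: '10.0.0.0/8'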
|
||||
Mine Functions Aliases
|
||||
----------------------
|
||||
|
||||
@ -152,12 +159,47 @@ to add them to the pool of load balanced servers.
|
||||
mine_functions:
|
||||
network.ip_addrs: [eth0]
|
||||
|
||||
Then trigger the minions to refresh their pillar data by running:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' saltutil.refresh_pillar
|
||||
|
||||
Verify that the results are showing up in the pillar on the minions by
|
||||
executing the following and checking for ``network.ip_addrs`` in the output:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' pillar.items
|
||||
|
||||
Which should show that the function is present on the minion, but not include
|
||||
the output:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
minion1.example.com:
|
||||
----------
|
||||
mine_functions:
|
||||
----------
|
||||
network.ip_addrs:
|
||||
- eth0
|
||||
|
||||
Mine data is typically only updated on the master every 60 minutes; this can
be modified by setting:
|
||||
|
||||
:file:`/etc/salt/minion.d/mine.conf`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
mine_interval: 5
|
||||
|
||||
To force the mine data to update immediately run:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' mine.update
|
||||
|
||||
Setup the :py:mod:`salt.states.file.managed` state in
|
||||
:file:`/srv/salt/haproxy.sls`:
|
||||
|
||||
.. code-block:: yaml
|
||||
@ -168,7 +210,7 @@ to add them to the pool of load balanced servers.
|
||||
- source: salt://haproxy_config
|
||||
- template: jinja
|
||||
|
||||
:file:`/srv/salt/haproxy_config`:
|
||||
Create the Jinja template in :file:`/srv/salt/haproxy_config`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -180,6 +222,8 @@ to add them to the pool of load balanced servers.
|
||||
|
||||
<...file contents snipped...>
|
||||
|
||||
In the above example, ``server`` will be expanded to the ``minion_id``.

.. note::
    The expr_form argument will be renamed to ``tgt_type`` in the Nitrogen
    release of Salt.
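Since the template contents are snipped in the hunk above, here is a hedged
sketch of what a ``haproxy_config`` Jinja loop over mine data could look like
(the target, mine function, and backend line are illustrative):

.. code-block:: yaml

    {% for server, addrs in salt['mine.get']('roles:web', 'network.ip_addrs', expr_form='grain') | dictsort() %}
    server {{ server }} {{ addrs[0] }}:80 check
    {% endfor %}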
@ -335,6 +335,7 @@ updated pillar data, but :py:func:`pillar.item <salt.modules.pillar.item>`,
<salt.modules.pillar.raw>` will not see this data unless refreshed using
:py:func:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`.

.. _pillar-environments:

How Pillar Environments Are Handled
===================================
@ -10,17 +10,152 @@ Changes for v2016.11.2..v2016.11.3
|
||||
|
||||
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
|
||||
|
||||
*Generated at: 2017-02-16T16:48:51Z*
|
||||
*Generated at: 2017-02-22T23:01:16Z*
|
||||
|
||||
Statistics:
|
||||
|
||||
- Total Merges: **126**
|
||||
- Total Issue references: **75**
|
||||
- Total PR references: **199**
|
||||
- Total Merges: **139**
|
||||
- Total Issue references: **78**
|
||||
- Total PR references: **217**
|
||||
|
||||
Changes:
|
||||
|
||||
|
||||
- **PR** `#39536`_: (*twangboy*) Namespace 'status' functions in 'win_status'
|
||||
@ *2017-02-21T23:45:31Z*
|
||||
|
||||
- **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows.
|
||||
| refs: `#39536`_
|
||||
* 40f72db Merge pull request `#39536`_ from twangboy/fix_win_status
|
||||
* d5453e2 Remove unused import (lint)
|
||||
|
||||
* 837c32e Remove list2cmdline
|
||||
|
||||
* c258cb3 Streamline wmic command returns for easier parsing
|
||||
|
||||
* 6d2cf81 Fix 'ping_master' function
|
||||
|
||||
* d946d10 Namespace 'status' functions in 'win_status'
|
||||
|
||||
- **PR** `#39534`_: (*rallytime*) Fix breakage in aptpkg and dpkg execution modules
|
||||
@ *2017-02-21T20:31:15Z*
|
||||
|
||||
- **PR** `#39418`_: (*anlutro*) Allow aptpkg.info_installed on package names that aren't installed
|
||||
| refs: `#39534`_
|
||||
* dc8f578 Merge pull request `#39534`_ from rallytime/fix-pkg-function-specs
|
||||
* d34a8fe Fix breakage in aptpkg and dpkg execution modules
|
||||
|
||||
* 1d0d7b2 Upgrade SaltTesting to run test suite for 2016.11 and add SaltPyLint (`#39521`_)
|
||||
|
||||
- **ISSUE** `#34712`_: (*richardscollin*) Salt Test Suite Error - develop
|
||||
| refs: `#37366`_
|
||||
- **PR** `#39521`_: (*vutny*) Upgrade SaltTesting to run test suite for 2016.11 and add SaltPyLint
|
||||
- **PR** `#37366`_: (*eradman*) dev_python*.txt: use current SaltTesting and SaltPyLint modules
|
||||
| refs: `#39521`_
|
||||
|
||||
- **PR** `#39370`_: (*twangboy*) Gate win_osinfo and winservice
|
||||
@ *2017-02-17T23:53:58Z*
|
||||
|
||||
* e4c7168 Merge pull request `#39370`_ from twangboy/gate_win_utils
|
||||
* 167cdb3 Gate windows specific imports, add __virtual__
|
||||
|
||||
* e67387d Add option to return a Non instantiated class
|
||||
|
||||
* 315b0cc Clarify return value for win_osinfo
|
||||
|
||||
* 994314e Fix more docs
|
||||
|
||||
* 2bbe3cb Fix some docs
|
||||
|
||||
* 4103563 Merge branch 'gate_win_utils' of https://github.com/twangboy/salt into gate_win_utils
|
||||
|
||||
* 24c1bd0 Remove extra newlines
|
||||
|
||||
* 82a86ce Add helper function for winservice
|
||||
|
||||
* 0051b5a Put the win_osinfo classes in a helper function
|
||||
|
||||
* 4e08534 Gate win_osinfo and winservice better
|
||||
|
||||
- **PR** `#39486`_: (*twangboy*) Remove orphaned function list_configurable_policies
|
||||
@ *2017-02-17T22:21:50Z*
|
||||
|
||||
* a3e71b6 Merge pull request `#39486`_ from twangboy/win_remove_orphaned
|
||||
* 1328055 Remove orphaned function list_configurable_policies
|
||||
|
||||
- **PR** `#39418`_: (*anlutro*) Allow aptpkg.info_installed on package names that aren't installed
|
||||
| refs: `#39534`_
|
||||
@ *2017-02-17T18:34:19Z*
|
||||
|
||||
* 87b269f Merge pull request `#39418`_ from alprs/fix-aptpkg_info_nonexistent_pkg
|
||||
* 246bf1e add failhard argument to various apt pkg functions
|
||||
|
||||
- **PR** `#39438`_: (*mirceaulinic*) file.get_managed: refetch source when file hashsum is changed
|
||||
@ *2017-02-17T17:58:29Z*
|
||||
|
||||
* e816d6c Merge pull request `#39438`_ from cloudflare/fix_39422
|
||||
* 8453800 file.get_managed: refetch cached file when hashsum chnaged
|
||||
|
||||
- **PR** `#39432`_: (*dmaziuk*) Quick and dirty fix for GECOS fields with more than 3 commas
|
||||
@ *2017-02-17T17:57:30Z*
|
||||
|
||||
- **ISSUE** `#39203`_: (*dmaziuk*) salt.users gecos field
|
||||
| refs: `#39432`_ `#39432`_
|
||||
* a5fe8f0 Merge pull request `#39432`_ from dmaziuk/issue39203
|
||||
* 41c0463 Remove #
|
||||
|
||||
* 4f877c6 Quick and dirty fix for GECOS fields with more than 3 commas
|
||||
|
||||
- **PR** `#39484`_: (*corywright*) The Reactor docs should use pillar='{}' instead of 'pillar={}'
|
||||
@ *2017-02-17T17:50:57Z*
|
||||
|
||||
* 3665229 Merge pull request `#39484`_ from corywright/fix-reactor-docs-pillar-keyword-args
|
||||
* cc90d0d The Reactor docs should use pillar='{}' instead of 'pillar={}'
|
||||
|
||||
- **PR** `#39456`_: (*twangboy*) Add salt icon to buildenv directory
|
||||
@ *2017-02-16T22:47:58Z*
|
||||
|
||||
* 2e3a9c5 Merge pull request `#39456`_ from twangboy/win_fix_icon
|
||||
* 8dd915d Add salt icon to buildenv directory
|
||||
|
||||
- **PR** `#39462`_: (*twangboy*) Use url_path instead of url_data.path
|
||||
@ *2017-02-16T22:44:18Z*
|
||||
|
||||
* 63adc03 Merge pull request `#39462`_ from twangboy/win_fix_fileclient
|
||||
* a96bc13 Use url_path instead of url_data.path
|
||||
|
||||
- **PR** `#39458`_: (*rallytime*) Fix more warnings in doc build
|
||||
@ *2017-02-16T21:45:52Z*
|
||||
|
||||
* e9b034f Merge pull request `#39458`_ from rallytime/fixup-more-doc-build-warnings
|
||||
* e698bc3 Fix more warnings in doc build
|
||||
|
||||
- **PR** `#39437`_: (*sakateka*) Fixes about saltfile
|
||||
@ *2017-02-16T20:32:15Z*
|
||||
|
||||
* e4f8c2b Merge pull request `#39437`_ from sakateka/fixes_about_saltfile
|
||||
* ab68524 less pylint: salt/utils/parsers.py
|
||||
|
||||
* 9e7d9dc Revert "pylint: salt/utils/parsers.py"
|
||||
|
||||
* f3f129c document ~/.salt/Saltfile
|
||||
|
||||
* 33f3614 pylint: salt/utils/parsers.py
|
||||
|
||||
* 0f36e10 expand config_dir and '~/.salt/Saltfile' as last resort
|
||||
|
||||
* 1acf00d add 2016.11.3 changelog to release notes (`#39451`_)
|
||||
|
||||
- **PR** `#39451`_: (*Ch3LL*) add 2016.11.3 changelog to release notes
|
||||
|
||||
- **PR** `#39448`_: (*gtmanfred*) Add release notes for cisco proxy minions added in Carbon
|
||||
@ *2017-02-16T17:29:48Z*
|
||||
|
||||
- **ISSUE** `#38032`_: (*meggiebot*) Add missing Carbon docs
|
||||
| refs: `#39448`_
|
||||
* 8e2cbd2 Merge pull request `#39448`_ from gtmanfred/2016.11
|
||||
* 3172e88 Add release notes for cisco proxy minions added in Carbon
|
||||
|
||||
- **PR** `#39428`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11
|
||||
@ *2017-02-16T00:01:15Z*
|
||||
|
||||
@ -846,6 +981,7 @@ Changes:
|
||||
* a7fc02e Ungate the status.py module and raise unsupported errors in functions not executeable on Windows. (`#39005`_)
|
||||
|
||||
- **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows.
|
||||
| refs: `#39536`_
|
||||
|
||||
- **PR** `#39012`_: (*terminalmage*) Fix "invalid lexer" errors in docs build
|
||||
@ *2017-01-28T06:47:45Z*
|
||||
@ -1252,6 +1388,7 @@ Changes:
|
||||
.. _`#33890`: https://github.com/saltstack/salt/issues/33890
|
||||
.. _`#34280`: https://github.com/saltstack/salt/pull/34280
|
||||
.. _`#34551`: https://github.com/saltstack/salt/issues/34551
|
||||
.. _`#34712`: https://github.com/saltstack/salt/issues/34712
|
||||
.. _`#34780`: https://github.com/saltstack/salt/issues/34780
|
||||
.. _`#35055`: https://github.com/saltstack/salt/pull/35055
|
||||
.. _`#35777`: https://github.com/saltstack/salt/issues/35777
|
||||
@ -1264,12 +1401,14 @@ Changes:
|
||||
.. _`#37174`: https://github.com/saltstack/salt/issues/37174
|
||||
.. _`#37262`: https://github.com/saltstack/salt/pull/37262
|
||||
.. _`#37338`: https://github.com/saltstack/salt/pull/37338
|
||||
.. _`#37366`: https://github.com/saltstack/salt/pull/37366
|
||||
.. _`#37375`: https://github.com/saltstack/salt/pull/37375
|
||||
.. _`#37413`: https://github.com/saltstack/salt/issues/37413
|
||||
.. _`#37632`: https://github.com/saltstack/salt/pull/37632
|
||||
.. _`#37864`: https://github.com/saltstack/salt/pull/37864
|
||||
.. _`#37938`: https://github.com/saltstack/salt/issues/37938
|
||||
.. _`#38003`: https://github.com/saltstack/salt/issues/38003
|
||||
.. _`#38032`: https://github.com/saltstack/salt/issues/38032
|
||||
.. _`#38081`: https://github.com/saltstack/salt/issues/38081
|
||||
.. _`#38100`: https://github.com/saltstack/salt/issues/38100
|
||||
.. _`#38165`: https://github.com/saltstack/salt/pull/38165
|
||||
@ -1435,6 +1574,7 @@ Changes:
|
||||
.. _`#39198`: https://github.com/saltstack/salt/pull/39198
|
||||
.. _`#39199`: https://github.com/saltstack/salt/pull/39199
|
||||
.. _`#39202`: https://github.com/saltstack/salt/pull/39202
|
||||
.. _`#39203`: https://github.com/saltstack/salt/issues/39203
|
||||
.. _`#39206`: https://github.com/saltstack/salt/pull/39206
|
||||
.. _`#39209`: https://github.com/saltstack/salt/pull/39209
|
||||
.. _`#39210`: https://github.com/saltstack/salt/pull/39210
|
||||
@ -1484,16 +1624,31 @@ Changes:
|
||||
.. _`#39362`: https://github.com/saltstack/salt/pull/39362
|
||||
.. _`#39364`: https://github.com/saltstack/salt/pull/39364
|
||||
.. _`#39369`: https://github.com/saltstack/salt/pull/39369
|
||||
.. _`#39370`: https://github.com/saltstack/salt/pull/39370
|
||||
.. _`#39378`: https://github.com/saltstack/salt/pull/39378
|
||||
.. _`#39379`: https://github.com/saltstack/salt/pull/39379
|
||||
.. _`#39380`: https://github.com/saltstack/salt/pull/39380
|
||||
.. _`#39392`: https://github.com/saltstack/salt/pull/39392
|
||||
.. _`#39400`: https://github.com/saltstack/salt/pull/39400
|
||||
.. _`#39409`: https://github.com/saltstack/salt/pull/39409
|
||||
.. _`#39418`: https://github.com/saltstack/salt/pull/39418
|
||||
.. _`#39419`: https://github.com/saltstack/salt/pull/39419
|
||||
.. _`#39424`: https://github.com/saltstack/salt/pull/39424
|
||||
.. _`#39428`: https://github.com/saltstack/salt/pull/39428
|
||||
.. _`#39429`: https://github.com/saltstack/salt/pull/39429
|
||||
.. _`#39432`: https://github.com/saltstack/salt/pull/39432
|
||||
.. _`#39437`: https://github.com/saltstack/salt/pull/39437
|
||||
.. _`#39438`: https://github.com/saltstack/salt/pull/39438
|
||||
.. _`#39448`: https://github.com/saltstack/salt/pull/39448
|
||||
.. _`#39451`: https://github.com/saltstack/salt/pull/39451
|
||||
.. _`#39456`: https://github.com/saltstack/salt/pull/39456
|
||||
.. _`#39458`: https://github.com/saltstack/salt/pull/39458
|
||||
.. _`#39462`: https://github.com/saltstack/salt/pull/39462
|
||||
.. _`#39484`: https://github.com/saltstack/salt/pull/39484
|
||||
.. _`#39486`: https://github.com/saltstack/salt/pull/39486
|
||||
.. _`#39521`: https://github.com/saltstack/salt/pull/39521
|
||||
.. _`#39534`: https://github.com/saltstack/salt/pull/39534
|
||||
.. _`#39536`: https://github.com/saltstack/salt/pull/39536
|
||||
.. _`bp-36336`: https://github.com/saltstack/salt/pull/36336
|
||||
.. _`bp-37338`: https://github.com/saltstack/salt/pull/37338
|
||||
.. _`bp-37375`: https://github.com/saltstack/salt/pull/37375
|
||||
|
@ -52,9 +52,21 @@ Grains Changes
|
||||
may need to be adjusted to account for this change.
|
||||
- Add ability to specify disk backing mode in the VMWare salt cloud profile.
|
||||
|
||||
State Module Changes
====================

- The :py:func:`service.running <salt.states.service.running>` and
  :py:func:`service.dead <salt.states.service.dead>` states now support a
  ``no_block`` argument which, when set to ``True`` on systemd minions, will
  start/stop the service using the ``--no-block`` flag in the ``systemctl``
  command. On non-systemd minions, a warning will be issued; see the sketch
  below.
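A hedged sketch of the new ``no_block`` argument described in the item above
(the service name is illustrative):

.. code-block:: yaml

    nginx:
      service.running:
        - enable: True
        - no_block: True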
Execution Module Changes
|
||||
========================
|
||||
|
||||
- Several functions in the :mod:`systemd <salt.modules.systemd>` execution
|
||||
module have gained a ``no_block`` argument, which when set to ``True`` will
|
||||
use ``--no-block`` in the ``systemctl`` command.
|
||||
- In the :mod:`solarisips <salt.modules.solarisips>` ``pkg`` module, the
|
||||
default value for the ``refresh`` argument to the ``list_upgrades`` function
|
||||
has been changed from ``False`` to ``True``. This makes the function more
|
||||
@ -144,6 +156,11 @@ feature works can be found :ref:`here <gitfs-custom-refspecs>` in the GitFS
|
||||
Walkthrough. The git_pillar and winrepo versions of this feature work the same
|
||||
as their GitFS counterpart.
|
||||
|
||||
git_pillar "mountpoints" Feature Added
|
||||
======================================
|
||||
|
||||
See :ref:`here <git-pillar-mountpoints>` for detailed documentation.
|
||||
|
||||
``dockerng`` State/Execution Module Renamed to ``docker``
|
||||
=========================================================
|
||||
|
||||
|
@ -107,6 +107,144 @@ A comma-separated list of optional packages that are recommended to be
|
||||
installed with the package. This list is displayed in an informational message
|
||||
when the package is installed to SPM.
|
||||
|
||||
files
|
||||
~~~~~
|
||||
A files section can be added, to specify a list of files to add to the SPM.
|
||||
Such a section might look like:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
files:
|
||||
- _pillar
|
||||
- FORMULA
|
||||
- _runners
|
||||
- d|mymodule/index.rst
|
||||
- r|README.rst
|
||||
|
||||
When ``files`` are specified, then only those files will be added to the SPM,
|
||||
regardless of what other files exist in the directory. They will also be added
|
||||
in the order specified, which is useful if you have a need to lay down files in
|
||||
a specific order.
|
||||
|
||||
As can be seen in the example above, you may also tag files as being a specific
|
||||
type. This is done by pre-pending a filename with its type, followed by a pipe
|
||||
(``|``) character. The above example contains a document file and a readme. The
|
||||
available file types are:
|
||||
|
||||
* ``c``: config file
|
||||
* ``d``: documentation file
|
||||
* ``g``: ghost file (i.e. the file contents are not included in the package payload)
|
||||
* ``l``: license file
|
||||
* ``r``: readme file
|
||||
* ``s``: SLS file
|
||||
* ``m``: Salt module
|
||||
|
||||
The first 5 of these types (``c``, ``d``, ``g``, ``l``, ``r``) will be placed in
``/usr/share/salt/spm/`` by default. This can be changed by setting an
``spm_share_dir`` value in your ``/etc/salt/spm`` configuration file.
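As a hedged illustration, relocating those file types could look like this in
``/etc/salt/spm`` (the path is only an example):

.. code-block:: yaml

    spm_share_dir: /opt/share/salt/spm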
The last two types (``s`` and ``m``) are currently ignored, but they are
|
||||
reserved for future use.
|
||||
|
||||
Pre and Post States
|
||||
-------------------
|
||||
It is possible to run Salt states before and after installing a package by
|
||||
using pre and post states. The following sections may be declared in a
|
||||
``FORMULA``:
|
||||
|
||||
* ``pre_local_state``
|
||||
* ``pre_tgt_state``
|
||||
* ``post_local_state``
|
||||
* ``post_tgt_state``
|
||||
|
||||
Sections with ``pre`` in their name are evaluated before a package is installed
|
||||
and sections with ``post`` are evaluated after a package is installed. ``local``
|
||||
states are evaluated before ``tgt`` states.
|
||||
|
||||
Each of these sections needs to be evaluated as text, rather than as YAML.
|
||||
Consider the following block:
|
||||
|
||||
.. code-block::
|
||||
|
||||
pre_local_state: >
|
||||
echo test > /tmp/spmtest:
|
||||
cmd:
|
||||
- run
|
||||
|
||||
Note that this declaration uses ``>`` after ``pre_local_state``. This is a YAML
|
||||
marker that marks the next multi-line block as text, including newlines. It is
|
||||
important to use this marker whenever declaring ``pre`` or ``post`` states, so
|
||||
that the text following it can be evaluated properly.
|
||||
|
||||
local States
|
||||
~~~~~~~~~~~~
|
||||
``local`` states are evaluated locally; this is analogous to issuing a state
|
||||
run using a ``salt-call --local`` command. These commands will be issued on the
|
||||
local machine running the ``spm`` command, whether that machine is a master or
|
||||
a minion.
|
||||
|
||||
``local`` states do not require any special arguments, but they must still use
|
||||
the ``>`` marker to denote that the state is evaluated as text, not a data
|
||||
structure.
|
||||
|
||||
.. code-block::
|
||||
|
||||
pre_local_state: >
|
||||
echo test > /tmp/spmtest:
|
||||
cmd:
|
||||
- run
|
||||
|
||||
tgt States
|
||||
~~~~~~~~~~
|
||||
``tgt`` states are issued against a remote target. This is analogous to issuing
|
||||
a state using the ``salt`` command. As such it requires that the machine that
|
||||
the ``spm`` command is running on is a master.
|
||||
|
||||
Because ``tgt`` states require that a target be specified, their code blocks
|
||||
are a little different. Consider the following state:
|
||||
|
||||
.. code-block::
|
||||
|
||||
pre_tgt_state:
|
||||
tgt: '*'
|
||||
data: >
|
||||
echo test > /tmp/spmtest:
|
||||
cmd:
|
||||
- run
|
||||
|
||||
With ``tgt`` states, the state data is placed under a ``data`` section, inside
|
||||
the ``*_tgt_state`` code block. The target is of course specified as a ``tgt``
|
||||
and you may also optionally specify a ``tgt_type`` (the default is ``glob``).
|
||||
|
||||
You still need to use the ``>`` marker, but this time it follows the ``data``
|
||||
line, rather than the ``*_tgt_state`` line.
|
||||
|
||||
Templating States
|
||||
~~~~~~~~~~~~~~~~~
|
||||
The reason that state data must be evaluated as text rather than a data
|
||||
structure is because that state data is first processed through the rendering
|
||||
engine, as it would be with a standard state run.
|
||||
|
||||
This means that you can use Jinja or any other supported renderer inside of
|
||||
Salt. All formula variables are available to the renderer, so you can reference
|
||||
``FORMULA`` data inside your state if you need to:
|
||||
|
||||
.. code-block::
|
||||
|
||||
pre_tgt_state:
|
||||
tgt: '*'
|
||||
data: >
|
||||
echo {{ name }} > /tmp/spmtest:
|
||||
cmd:
|
||||
- run
|
||||
|
||||
You may also declare your own variables inside the ``FORMULA``. If SPM doesn't
|
||||
recognize them then it will ignore them, so there are no restrictions on
|
||||
variable names, outside of avoiding reserved words.
|
||||
|
||||
By default the renderer is set to ``yaml_jinja``. You may change this by
|
||||
changing the ``renderer`` setting in the ``FORMULA`` itself.
|
||||
|
||||
Building a Package
|
||||
------------------
|
||||
Once a ``FORMULA`` file has been created, it is placed into the root of the
|
||||
|
@ -33,7 +33,7 @@ be called to execute a function, or a specific kernel.
|
||||
|
||||
Calling via a grain is done by passing the -G option to salt, specifying
|
||||
a grain and a glob expression to match the value of the grain. The syntax for
|
||||
the target is the grain key followed by a globexpression: "os:Arch*".
|
||||
the target is the grain key followed by a glob expression: "os:Arch*".
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@ -48,7 +48,7 @@ grains.item salt function:
|
||||
|
||||
salt '*' grains.items
|
||||
|
||||
more info on using targeting with grains can be found :ref:`here
|
||||
More info on using targeting with grains can be found :ref:`here
|
||||
<targeting-grains>`.
|
||||
|
||||
Compound Targeting
|
||||
@ -73,7 +73,7 @@ is used with ``G@`` as well as a regular expression with ``E@``. The
|
||||
``webser*`` target does not need to be prefaced with a target type specifier
|
||||
because it is a glob.
|
||||
|
||||
more info on using compound targeting can be found :ref:`here
|
||||
More info on using compound targeting can be found :ref:`here
|
||||
<targeting-compound>`.
|
||||
|
||||
Node Group Targeting
|
||||
@ -94,6 +94,10 @@ shorthand for having to type out complicated compound expressions.
|
||||
group2: 'G@os:Debian and foo.domain.com'
|
||||
group3: 'G@os:Debian and N@group1'
|
||||
|
||||
|
||||
Advanced Targeting Methods
|
||||
==========================
|
||||
|
||||
There are many ways to target individual minions or groups of minions in Salt:
|
||||
|
||||
.. toctree::
|
||||
|
@ -31,6 +31,46 @@ actual message that we are sending. With this flexible wire protocol we can
|
||||
implement any message semantics that we'd like-- including multiplexed message
|
||||
passing on a single socket.
|
||||
|
||||
TLS Support
|
||||
===========
|
||||
|
||||
.. versionadded:: 2016.11.1
|
||||
|
||||
The TCP transport allows for the master/minion communication to be optionally
|
||||
wrapped in a TLS connection. Enabling this is simple: the master and minion need
to be using the tcp connection, and then the `ssl` option is enabled. The `ssl`
|
||||
option is passed as a dict and corresponds to the options passed to the
|
||||
Python `ssl.wrap_socket <https://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`
|
||||
function.
|
||||
|
||||
A simple setup looks like this, on the Salt Master add the `ssl` option to the
|
||||
master configuration file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ssl:
|
||||
keyfile: <path_to_keyfile>
|
||||
certfile: <path_to_certfile>
|
||||
ssl_version: PROTOCOL_TLSv1_2
|
||||
|
||||
The minimal `ssl` option in the minion configuration file looks like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ssl: True
|
||||
# Versions below 2016.11.4:
|
||||
ssl: {}
|
||||
|
||||
Specific options can be sent to the minion also, as defined in the Python
`ssl.wrap_socket` function.
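As a hedged illustration of passing a specific option on the minion side (the
value mirrors the master example above):

.. code-block:: yaml

    ssl:
      ssl_version: PROTOCOL_TLSv1_2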
|
||||
.. note::
|
||||
|
||||
    While setting the ssl_version is not required, we recommend it. Some older
    versions of Python do not support the latest TLS protocol; if this is the
    case for your version of Python, we strongly recommend upgrading.
|
||||
|
||||
|
||||
Crypto
|
||||
======
|
||||
|
@ -101,11 +101,13 @@ releases pygit2_ 0.20.3 and libgit2_ 0.20.0 is the recommended combination.
|
||||
RedHat Pygit2 Issues
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Around the time of the release of RedHat 7.3, RedHat effectively broke pygit2_
|
||||
by upgrading python-cffi_ to a release incompatible with the version of pygit2_
|
||||
available in their repositories. This prevents Python from importing the
|
||||
pygit2_ module at all, leading to a master that refuses to start, and leaving
|
||||
the following errors in the master log file:
|
||||
The release of RedHat/CentOS 7.3 upgraded both ``python-cffi`` and
|
||||
``http-parser``, both of which are dependencies for pygit2_/libgit2_. Both
|
||||
pygit2_ and libgit2_ (which are from the EPEL repository and not managed
|
||||
directly by RedHat) need to be rebuilt against these updated dependencies.
|
||||
|
||||
The below errors will show up in the master log if an incompatible
|
||||
``python-pygit2`` package is installed:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
@ -114,34 +116,37 @@ the following errors in the master log file:
|
||||
2017-02-10 09:07:34,907 [salt.utils.gitfs ][CRITICAL][11211] No suitable gitfs provider module is installed.
|
||||
2017-02-10 09:07:34,912 [salt.master ][CRITICAL][11211] Master failed pre flight checks, exiting
|
||||
|
||||
This issue has been reported on the `RedHat Bugzilla`_. In the meantime, you
|
||||
can work around it by downgrading python-cffi_. To do this, go to `this page`_
|
||||
and download the appropriate python-cffi_ 0.8.6 RPM. Then copy that RPM to the
|
||||
master and downgrade using the ``rpm`` command. For example:
|
||||
The below errors will show up in the master log if an incompatible ``libgit2``
|
||||
package is installed:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
2017-02-15 18:04:45,211 [salt.utils.gitfs ][ERROR ][6211] Error occurred fetching gitfs remote 'https://foo.com/bar.git': No Content-Type header in response
|
||||
|
||||
As of 15 February 2017, ``python-pygit2`` has been rebuilt and is in the stable
|
||||
EPEL repository. However, ``libgit2`` remains broken (a `bug report`_ has been
|
||||
filed to get it rebuilt).
|
||||
|
||||
In the meantime, you can work around this by downgrading ``http-parser``. To do
|
||||
this, go to `this page`_ and download the appropriate ``http-parser`` RPM for
|
||||
the OS architecture you are using (x86_64, etc.). Then downgrade using the
|
||||
``rpm`` command. For example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# rpm -Uvh --oldpackage python-cffi-0.8.6-1.el7.x86_64.rpm
|
||||
[root@784e8a8c5028 /]# curl --silent -O https://kojipkgs.fedoraproject.org//packages/http-parser/2.0/5.20121128gitcd01361.el7/x86_64/http-parser-2.0-5.20121128gitcd01361.el7.x86_64.rpm
|
||||
[root@784e8a8c5028 /]# rpm -Uvh --oldpackage http-parser-2.0-5.20121128gitcd01361.el7.x86_64.rpm
|
||||
Preparing... ################################# [100%]
|
||||
Updating / installing...
|
||||
1:python-cffi-0.8.6-1.el7 ################################# [ 50%]
|
||||
1:http-parser-2.0-5.20121128gitcd01################################# [ 50%]
|
||||
Cleaning up / removing...
|
||||
2:python-cffi-1.6.0-5.el7 ################################# [100%]
|
||||
# rpm -q python-cffi
|
||||
python-cffi-0.8.6-1.el7.x86_64
|
||||
2:http-parser-2.7.1-3.el7 ################################# [100%]
|
||||
|
||||
To confirm that pygit2_ is now "fixed", you can test trying to import it like so:
|
||||
A restart of the salt-master daemon may be required to allow http(s)
|
||||
repositories to continue to be fetched.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# python -c 'import pygit2'
|
||||
#
|
||||
|
||||
If the command produces no output, then your master should work when you start
|
||||
it again.
|
||||
|
||||
.. _`this page`: https://koji.fedoraproject.org/koji/buildinfo?buildID=569520
|
||||
.. _`RedHat Bugzilla`: https://bugzilla.redhat.com/show_bug.cgi?id=1400668
|
||||
.. _`this page`: https://koji.fedoraproject.org/koji/buildinfo?buildID=703753
|
||||
.. _`bug report`: https://bugzilla.redhat.com/show_bug.cgi?id=1422583
|
||||
|
||||
|
||||
GitPython
|
||||
|
@ -379,7 +379,7 @@ the test method:
|
||||
.. code-block:: python
|
||||
|
||||
import integration
|
||||
from salttesting.helpers import destructiveTest
|
||||
from tests.support.helpers import destructiveTest
|
||||
|
||||
class PkgTest(integration.ModuleCase):
|
||||
@destructiveTest
|
||||
@ -462,7 +462,7 @@ can be used
|
||||
.. code-block:: python
|
||||
|
||||
# Import logging handler
|
||||
from salttesting.helpers import TestsLoggingHandler
|
||||
from tests.support.helpers import TestsLoggingHandler
|
||||
|
||||
# .. inside test
|
||||
with TestsLoggingHandler() as handler:
|
||||
|
@ -81,9 +81,3 @@ levels of symlinks (defaults to 64), an error is always raised.
|
||||
|
||||
For some functions, this behavior is different to the behavior on Unix
|
||||
platforms. In general, avoid symlink loops on either platform.
|
||||
|
||||
|
||||
Modifying security properties (ACLs) on files
|
||||
=============================================
|
||||
There is no support in Salt for modifying ACLs, and therefore no support for
|
||||
changing file permissions, besides modifying the owner/user.
|
||||
|
@ -11,6 +11,7 @@ elif __file__:
|
||||
ROOT_DIR=application_path.split("bin/appdata")[0]
|
||||
|
||||
# Copied from syspaths.py
|
||||
SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt')
|
||||
CONFIG_DIR = os.path.join(ROOT_DIR, 'etc')
|
||||
CACHE_DIR = os.path.join(ROOT_DIR, 'var', 'cache', 'salt')
|
||||
SOCK_DIR = os.path.join(ROOT_DIR, 'var', 'run', 'salt')
|
||||
|
@ -5,5 +5,7 @@ apache-libcloud>=0.14.0
|
||||
boto>=2.32.1
|
||||
boto3>=1.2.1
|
||||
moto>=0.3.6
|
||||
SaltTesting
|
||||
SaltPyLint>=v2017.2.22
|
||||
SaltPyLint>=v2017.2.29
|
||||
GitPython>=0.3
|
||||
pytest
|
||||
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
|
||||
|
@ -5,5 +5,7 @@ apache-libcloud>=0.14.0
|
||||
boto>=2.32.1
|
||||
boto3>=1.2.1
|
||||
moto>=0.3.6
|
||||
SaltTesting
|
||||
SaltPyLint>=v2017.2.22
|
||||
SaltPyLint>=v2017.2.29
|
||||
GitPython>=0.3
|
||||
pytest
|
||||
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
|
||||
|
4
requirements/pytest.txt
Normal file
@ -0,0 +1,4 @@
|
||||
pytest
|
||||
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
|
||||
pytest-helpers-namespace
|
||||
pytest-tempdir
|
@ -58,7 +58,8 @@ def auth(username, password):
|
||||
|
||||
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
|
||||
# for this user
|
||||
result = salt.utils.http.query(url, method='POST', data=data)
|
||||
result = salt.utils.http.query(url, method='POST', data=data, status=True,
|
||||
decode=True)
|
||||
if result['status'] == 200:
|
||||
log.debug('eauth REST call returned 200: {0}'.format(result))
|
||||
if result['dict'] is not None:
|
||||
|
@ -67,6 +67,26 @@ def __validate__(config):
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
def _enforce_txt_record_maxlen(key, value):
|
||||
'''
|
||||
Enforces the TXT record maximum length of 255 characters.
|
||||
TXT record length includes key, value, and '='.
|
||||
|
||||
:param str key: Key of the TXT record
|
||||
:param str value: Value of the TXT record
|
||||
|
||||
:rtype: str
|
||||
:return: The value of the TXT record. It may be truncated if it exceeds
|
||||
the maximum permitted length. In case of truncation, '...' is
|
||||
appended to indicate that the entire value is not present.
|
||||
'''
|
||||
# Add 1 for '=' separator between key and value
|
||||
if len(key) + len(value) + 1 > 255:
|
||||
# 255 - 3 ('...') - 1 ('=') = 251
|
||||
return value[:251 - len(key)] + '...'
|
||||
return value
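# Illustrative usage sketch (not part of this patch): the helper above caps the
# combined "key=value" length at 255 by truncating the value and appending '...'.
example_key = 'osrelease'                            # hypothetical TXT key
example_value = 'x' * 300                            # hypothetical oversized value
example_short = _enforce_txt_record_maxlen(example_key, example_value)
assert len(example_key) + len(example_short) + 1 <= 255
assert example_short.endswith('...')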
|
||||
|
||||
|
||||
def beacon(config):
|
||||
'''
|
||||
Broadcast values via zeroconf
|
||||
@ -158,11 +178,11 @@ def beacon(config):
|
||||
grain_value = grain_value[grain_index]
|
||||
else:
|
||||
grain_value = ','.join(grain_value)
|
||||
txt[item] = grain_value
|
||||
txt[item] = _enforce_txt_record_maxlen(item, grain_value)
|
||||
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
|
||||
changes[str('txt.' + item)] = txt[item]
|
||||
else:
|
||||
txt[item] = config['txt'][item]
|
||||
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
|
||||
|
||||
if not LAST_GRAINS:
|
||||
changes[str('txt.' + item)] = txt[item]
|
||||
|
@ -60,6 +60,26 @@ def __validate__(config):
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
def _enforce_txt_record_maxlen(key, value):
|
||||
'''
|
||||
Enforces the TXT record maximum length of 255 characters.
|
||||
TXT record length includes key, value, and '='.
|
||||
|
||||
:param str key: Key of the TXT record
|
||||
:param str value: Value of the TXT record
|
||||
|
||||
:rtype: str
|
||||
:return: The value of the TXT record. It may be truncated if it exceeds
|
||||
the maximum permitted length. In case of truncation, '...' is
|
||||
appended to indicate that the entire value is not present.
|
||||
'''
|
||||
# Add 1 for '=' separator between key and value
|
||||
if len(key) + len(value) + 1 > 255:
|
||||
# 255 - 3 ('...') - 1 ('=') = 251
|
||||
return value[:251 - len(key)] + '...'
|
||||
return value
|
||||
|
||||
|
||||
def beacon(config):
|
||||
'''
|
||||
Broadcast values via zeroconf
|
||||
@ -152,11 +172,11 @@ def beacon(config):
|
||||
grain_value = grain_value[grain_index]
|
||||
else:
|
||||
grain_value = ','.join(grain_value)
|
||||
txt[item] = grain_value
|
||||
txt[item] = _enforce_txt_record_maxlen(item, grain_value)
|
||||
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
|
||||
changes[str('txt.' + item)] = txt[item]
|
||||
else:
|
||||
txt[item] = config['txt'][item]
|
||||
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
|
||||
|
||||
if not LAST_GRAINS:
|
||||
changes[str('txt.' + item)] = txt[item]
|
||||
|
4
salt/cache/__init__.py
vendored
@ -143,8 +143,8 @@ class Cache(object):
|
||||
added by the driver itself.
|
||||
|
||||
:return:
|
||||
Return a python object fetched from the cache or None if the given
|
||||
path or key not found.
|
||||
Return a python object fetched from the cache or an empty dict if
|
||||
the given path or key not found.
|
||||
|
||||
:raises SaltCacheError:
|
||||
Raises an exception if cache driver detected an error accessing data
|
||||
|
2
salt/cache/consul.py
vendored
@ -104,7 +104,7 @@ def fetch(bank, key):
|
||||
try:
|
||||
_, value = api.kv.get(c_key)
|
||||
if value is None:
|
||||
return value
|
||||
return {}
|
||||
return __context__['serial'].loads(value['Value'])
|
||||
except Exception as exc:
|
||||
raise SaltCacheError(
|
||||
|
2
salt/cache/localfs.py
vendored
@ -64,7 +64,7 @@ def fetch(bank, key, cachedir):
|
||||
key_file = os.path.join(cachedir, os.path.normpath(bank), '{0}.p'.format(key))
|
||||
if not os.path.isfile(key_file):
|
||||
log.debug('Cache file "%s" does not exist', key_file)
|
||||
return None
|
||||
return {}
|
||||
try:
|
||||
with salt.utils.fopen(key_file, 'rb') as fh_:
|
||||
return __context__['serial'].load(fh_)
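# Illustrative caller-side sketch (assumption, not part of this patch): under
# the updated contract the cache drivers return an empty dict for a missing
# key, so callers can test for emptiness instead of None.
import salt.cache

cache = salt.cache.Cache(__opts__)                   # __opts__ assumed available
data = cache.fetch('minions/minion1', 'data')        # hypothetical bank/key
if not data:
    data = {'cached': False}                         # hypothetical fallback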
|
||||
|
2
salt/cache/redis_cache.py
vendored
@ -310,7 +310,7 @@ def fetch(bank, key):
|
||||
log.error(mesg)
|
||||
raise SaltCacheError(mesg)
|
||||
if redis_value is None:
|
||||
return redis_value
|
||||
return {}
|
||||
return __context__['serial'].loads(redis_value)
|
||||
|
||||
|
||||
|
@ -58,21 +58,21 @@ class Batch(object):
|
||||
# Broadcast to targets
|
||||
fret = set()
|
||||
nret = set()
|
||||
try:
|
||||
for ret in ping_gen:
|
||||
if ('minions' and 'jid') in ret:
|
||||
for minion in ret['minions']:
|
||||
nret.add(minion)
|
||||
continue
|
||||
else:
|
||||
for ret in ping_gen:
|
||||
if ('minions' and 'jid') in ret:
|
||||
for minion in ret['minions']:
|
||||
nret.add(minion)
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
m = next(six.iterkeys(ret))
|
||||
if m is not None:
|
||||
fret.add(m)
|
||||
return (list(fret), ping_gen, nret.difference(fret))
|
||||
except StopIteration:
|
||||
if not self.quiet:
|
||||
print_cli('No minions matched the target.')
|
||||
return list(fret), ping_gen
|
||||
except StopIteration:
|
||||
if not self.quiet:
|
||||
print_cli('No minions matched the target.')
|
||||
break
|
||||
if m is not None:
|
||||
fret.add(m)
|
||||
return (list(fret), ping_gen, nret.difference(fret))
|
||||
|
||||
def get_bnum(self):
|
||||
'''
|
||||
|
@ -39,7 +39,15 @@ class SaltRun(parsers.SaltRunOptionParser):
|
||||
pr = activate_profile(profiling_enabled)
|
||||
try:
|
||||
ret = runner.run()
|
||||
if isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
|
||||
# In older versions ret['data']['retcode'] was used
|
||||
# for signaling the return code. This has been
|
||||
# changed for the orchestrate runner, but external
|
||||
# runners might still use it. For this reason, we
|
||||
# also check ret['data']['retcode'] if
|
||||
# ret['retcode'] is not available.
|
||||
if isinstance(ret, dict) and 'retcode' in ret:
|
||||
self.exit(ret['retcode'])
|
||||
elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
|
||||
self.exit(ret['data']['retcode'])
|
||||
finally:
|
||||
output_profile(
|
||||
|
@ -576,6 +576,7 @@ class LocalClient(object):
|
||||
tgt_type='glob',
|
||||
ret='',
|
||||
jid='',
|
||||
full_return=False,
|
||||
kwarg=None,
|
||||
**kwargs):
|
||||
'''
|
||||
@ -664,6 +665,9 @@ class LocalClient(object):
|
||||
|
||||
:param kwarg: A dictionary with keyword arguments for the function.
|
||||
|
||||
:param full_return: Output the job return only (default) or the full
|
||||
return including exit code and other job metadata.
|
||||
|
||||
:param kwargs: Optional keyword arguments.
|
||||
Authentication credentials may be passed when using
|
||||
:conf_master:`external_auth`.
|
||||
@ -714,7 +718,8 @@ class LocalClient(object):
|
||||
|
||||
if fn_ret:
|
||||
for mid, data in six.iteritems(fn_ret):
|
||||
ret[mid] = data.get('ret', {})
|
||||
ret[mid] = (data if full_return
|
||||
else data.get('ret', {}))
|
||||
|
||||
for failed in list(set(pub_data['minions']) ^ set(ret.keys())):
|
||||
ret[failed] = False
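# Illustrative sketch (assumption, not part of this patch): with the new
# full_return flag, cmd() hands back the whole per-minion job document
# (including 'retcode') instead of just the bare 'ret' payload.
import salt.client

local = salt.client.LocalClient()
bare = local.cmd('*', 'test.ping')                    # {'minion1': True, ...}
full = local.cmd('*', 'test.ping', full_return=True)  # {'minion1': {'ret': True, 'retcode': 0, ...}, ...}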
|
||||
|
@ -138,7 +138,7 @@ class SyncClientMixin(object):
|
||||
salt.utils.error.raise_error(**ret['error'])
|
||||
return ret
|
||||
|
||||
def cmd_sync(self, low, timeout=None):
|
||||
def cmd_sync(self, low, timeout=None, full_return=False):
|
||||
'''
|
||||
Execute a runner function synchronously; eauth is respected
|
||||
|
||||
@ -166,7 +166,7 @@ class SyncClientMixin(object):
|
||||
"RunnerClient job '{0}' timed out".format(job['jid']),
|
||||
jid=job['jid'])
|
||||
|
||||
return ret['data']['return']
|
||||
return ret if full_return else ret['data']['return']
|
||||
|
||||
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
|
||||
'''
|
||||
@ -370,21 +370,14 @@ class SyncClientMixin(object):
|
||||
args = low['arg']
|
||||
|
||||
if 'kwarg' not in low:
|
||||
if f_call is None:
|
||||
f_call = salt.utils.format_call(
|
||||
self.functions[fun],
|
||||
low,
|
||||
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
|
||||
)
|
||||
kwargs = f_call.get('kwargs', {})
|
||||
|
||||
# throw a warning for the badly formed low data if we found
|
||||
# kwargs using the old mechanism
|
||||
if kwargs:
|
||||
salt.utils.warn_until(
|
||||
'Nitrogen',
|
||||
'kwargs must be passed inside the low under "kwargs"'
|
||||
)
|
||||
log.critical(
|
||||
'kwargs must be passed inside the low data within the '
|
||||
'\'kwarg\' key. See usage of '
|
||||
'salt.utils.args.parse_input() and '
|
||||
'salt.minion.load_args_and_kwargs() elsewhere in the '
|
||||
'codebase.'
|
||||
)
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = low['kwarg']
|
||||
|
||||
|
@ -68,7 +68,7 @@ def get_url(path, dest, saltenv='base'):
|
||||
'''
|
||||
retrieve a URL
|
||||
'''
|
||||
src = __context__['fileclient'].get_url(
|
||||
src = __context__['fileclient'].cache_file(
|
||||
path,
|
||||
saltenv,
|
||||
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
|
||||
|
@ -1026,6 +1026,13 @@ def destroy(name, call=None):
|
||||
|
||||
instanceId = _get_node(name)['InstanceId']
|
||||
|
||||
# have to stop instance before del it
|
||||
stop_params = {
|
||||
'Action': 'StopInstance',
|
||||
'InstanceId': instanceId
|
||||
}
|
||||
query(stop_params)
|
||||
|
||||
params = {
|
||||
'Action': 'DeleteInstance',
|
||||
'InstanceId': instanceId
|
||||
|
@ -342,7 +342,7 @@ def create(vm_):
|
||||
)
|
||||
|
||||
if ssh_interface == 'private':
|
||||
log.info("ssh_interafce: Setting interface for ssh to 'private'.")
|
||||
log.info("ssh_interface: Setting interface for ssh to 'private'.")
|
||||
kwargs['ssh_interface'] = ssh_interface
|
||||
else:
|
||||
if ssh_interface != 'public':
|
||||
@ -350,7 +350,7 @@ def create(vm_):
|
||||
"The DigitalOcean driver requires ssh_interface to be defined as 'public' or 'private'."
|
||||
)
|
||||
else:
|
||||
log.info("ssh_interafce: Setting interface for ssh to 'public'.")
|
||||
log.info("ssh_interface: Setting interface for ssh to 'public'.")
|
||||
kwargs['ssh_interface'] = ssh_interface
|
||||
|
||||
private_networking = config.get_cloud_config_value(
|
||||
|
@ -245,6 +245,8 @@ def create(vm_):
|
||||
)
|
||||
for private_ip in private:
|
||||
private_ip = preferred_ip(vm_, [private_ip])
|
||||
if private_ip is False:
|
||||
continue
|
||||
if salt.utils.cloud.is_public_ip(private_ip):
|
||||
log.warning('%s is a public IP', private_ip)
|
||||
data.public_ips.append(private_ip)
|
||||
|
@ -2221,14 +2221,21 @@ def query_instance(vm_=None, call=None):
|
||||
|
||||
log.debug('Returned query data: {0}'.format(data))
|
||||
|
||||
if ssh_interface(vm_) == 'public_ips' and 'ipAddress' in data[0]['instancesSet']['item']:
|
||||
log.error(
|
||||
'Public IP not detected.'
|
||||
)
|
||||
return data
|
||||
if ssh_interface(vm_) == 'private_ips' and \
|
||||
'privateIpAddress' in data[0]['instancesSet']['item']:
|
||||
return data
|
||||
if ssh_interface(vm_) == 'public_ips':
|
||||
if 'ipAddress' in data[0]['instancesSet']['item']:
|
||||
return data
|
||||
else:
|
||||
log.error(
|
||||
'Public IP not detected.'
|
||||
)
|
||||
|
||||
if ssh_interface(vm_) == 'private_ips':
|
||||
if 'privateIpAddress' in data[0]['instancesSet']['item']:
|
||||
return data
|
||||
else:
|
||||
log.error(
|
||||
'Private IP not detected.'
|
||||
)
|
||||
|
||||
try:
|
||||
data = salt.utils.cloud.wait_for_ip(
|
||||
@ -2482,6 +2489,31 @@ def wait_for_instance(
|
||||
return vm_
|
||||
|
||||
|
||||
def _validate_key_path_and_mode(key_filename):
|
||||
if key_filename is None:
|
||||
raise SaltCloudSystemExit(
|
||||
'The required \'private_key\' configuration setting is missing from the '
|
||||
'\'ec2\' driver.'
|
||||
)
|
||||
|
||||
if not os.path.exists(key_filename):
|
||||
raise SaltCloudSystemExit(
|
||||
'The EC2 key file \'{0}\' does not exist.\n'.format(
|
||||
key_filename
|
||||
)
|
||||
)
|
||||
|
||||
key_mode = stat.S_IMODE(os.stat(key_filename).st_mode)
|
||||
if key_mode not in (0o400, 0o600):
|
||||
raise SaltCloudSystemExit(
|
||||
'The EC2 key file \'{0}\' needs to be set to mode 0400 or 0600.\n'.format(
|
||||
key_filename
|
||||
)
|
||||
)
|
||||
|
||||
return True
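# Illustrative sketch (not part of this patch): the helper above raises
# SaltCloudSystemExit unless the key file exists and is chmod 0400 or 0600.
import os
import stat

example_key = '/etc/salt/cloud.d/my-ec2-key.pem'     # hypothetical path
os.chmod(example_key, 0o400)                         # bring the mode in range
assert stat.S_IMODE(os.stat(example_key).st_mode) in (0o400, 0o600)
_validate_key_path_and_mode(example_key)             # returns True when valid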
|
||||
|
||||
|
||||
def create(vm_=None, call=None):
|
||||
'''
|
||||
Create a single VM from a data dict
|
||||
@ -2511,32 +2543,10 @@ def create(vm_=None, call=None):
|
||||
key_filename = config.get_cloud_config_value(
|
||||
'private_key', vm_, __opts__, search_global=False, default=None
|
||||
)
|
||||
if deploy or (deploy and win_password == 'auto'):
|
||||
if deploy:
|
||||
# The private_key and keyname settings are only needed for bootstrapping
|
||||
# new instances when deploy is True, or when win_password is set to 'auto'
|
||||
# and deploy is also True.
|
||||
if key_filename is None:
|
||||
raise SaltCloudSystemExit(
|
||||
'The required \'private_key\' configuration setting is missing from the '
|
||||
'\'ec2\' driver.'
|
||||
)
|
||||
|
||||
if not os.path.exists(key_filename):
|
||||
raise SaltCloudSystemExit(
|
||||
'The EC2 key file \'{0}\' does not exist.\n'.format(
|
||||
key_filename
|
||||
)
|
||||
)
|
||||
|
||||
key_mode = str(
|
||||
oct(stat.S_IMODE(os.stat(key_filename).st_mode))
|
||||
)
|
||||
if key_mode not in ('0400', '0600'):
|
||||
raise SaltCloudSystemExit(
|
||||
'The EC2 key file \'{0}\' needs to be set to mode 0400 or 0600.\n'.format(
|
||||
key_filename
|
||||
)
|
||||
)
|
||||
# new instances when deploy is True
|
||||
_validate_key_path_and_mode(key_filename)
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
|
@ -921,6 +921,8 @@ def create(vm_):
|
||||
)
|
||||
for private_ip in private:
|
||||
private_ip = preferred_ip(vm_, [private_ip])
|
||||
if private_ip is False:
|
||||
continue
|
||||
if salt.utils.cloud.is_public_ip(private_ip):
|
||||
log.warning('{0} is a public IP'.format(private_ip))
|
||||
data.public_ips.append(private_ip)
|
||||
|
@ -722,6 +722,8 @@ def create(vm_):
|
||||
)
|
||||
for private_ip in private:
|
||||
private_ip = preferred_ip(vm_, [private_ip])
|
||||
if private_ip is False:
|
||||
continue
|
||||
if salt.utils.cloud.is_public_ip(private_ip):
|
||||
log.warning('{0} is a public IP'.format(private_ip))
|
||||
data.public_ips.append(private_ip)
|
||||
|
@ -53,7 +53,13 @@ from __future__ import absolute_import
|
||||
import logging
|
||||
import pprint
|
||||
import time
|
||||
import packet
|
||||
|
||||
# Import 3rd-party libs
|
||||
try:
|
||||
import packet
|
||||
HAS_PACKET = True
|
||||
except ImportError:
|
||||
HAS_PACKET = False
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.config as config
|
||||
@ -89,6 +95,8 @@ def __virtual__():
|
||||
'''
|
||||
Check for Packet configs.
|
||||
'''
|
||||
if HAS_PACKET is False:
|
||||
return False, 'The packet python library is not installed'
|
||||
if get_configured_provider() is False:
|
||||
return False
|
||||
|
||||
|
@ -290,6 +290,8 @@ def create(vm_):
|
||||
)
|
||||
for private_ip in private:
|
||||
private_ip = preferred_ip(vm_, [private_ip])
|
||||
if private_ip is False:
|
||||
continue
|
||||
if salt.utils.cloud.is_public_ip(private_ip):
|
||||
log.warning('{0} is a public IP'.format(private_ip))
|
||||
data.public_ips.append(private_ip)
|
||||
|
@ -583,6 +583,8 @@ VALID_OPTS = {
|
||||
'git_pillar_pubkey': str,
|
||||
'git_pillar_passphrase': str,
|
||||
'git_pillar_refspecs': list,
|
||||
'git_pillar_includes': bool,
|
||||
'git_pillar_verify_config': bool,
|
||||
'gitfs_remotes': list,
|
||||
'gitfs_mountpoint': str,
|
||||
'gitfs_root': str,
|
||||
@ -684,6 +686,7 @@ VALID_OPTS = {
|
||||
'fileserver_followsymlinks': bool,
|
||||
'fileserver_ignoresymlinks': bool,
|
||||
'fileserver_limit_traversal': bool,
|
||||
'fileserver_verify_config': bool,
|
||||
|
||||
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
|
||||
# higher than the system default in order to account for the way zeromq consumes file handles.
|
||||
@ -976,7 +979,7 @@ VALID_OPTS = {
|
||||
# http://docs.python.org/2/library/ssl.html#ssl.wrap_socket
|
||||
# Note: to set enum arguments values like `cert_reqs` and `ssl_version` use constant names
|
||||
# without ssl module prefix: `CERT_REQUIRED` or `PROTOCOL_SSLv23`.
|
||||
'ssl': (dict, None),
|
||||
'ssl': (dict, bool, type(None)),
|
||||
|
||||
# Controls how a multi-function job returns its data. If this is False,
|
||||
# it will return its data using a dictionary with the function name as
|
||||
@ -1095,6 +1098,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'git_pillar_pubkey': '',
|
||||
'git_pillar_passphrase': '',
|
||||
'git_pillar_refspecs': _DFLT_REFSPECS,
|
||||
'git_pillar_includes': True,
|
||||
'gitfs_remotes': [],
|
||||
'gitfs_mountpoint': '',
|
||||
'gitfs_root': '',
|
||||
@ -1135,7 +1139,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'mine_interval': 60,
|
||||
'ipc_mode': _DFLT_IPC_MODE,
|
||||
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
|
||||
'ipv6': False,
|
||||
'ipv6': None,
|
||||
'file_buffer_size': 262144,
|
||||
'tcp_pub_port': 4510,
|
||||
'tcp_pull_port': 4511,
|
||||
@ -1316,6 +1320,8 @@ DEFAULT_MASTER_OPTS = {
|
||||
'git_pillar_pubkey': '',
|
||||
'git_pillar_passphrase': '',
|
||||
'git_pillar_refspecs': _DFLT_REFSPECS,
|
||||
'git_pillar_includes': True,
|
||||
'git_pillar_verify_config': True,
|
||||
'gitfs_remotes': [],
|
||||
'gitfs_mountpoint': '',
|
||||
'gitfs_root': '',
|
||||
@ -1390,6 +1396,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'fileserver_followsymlinks': True,
|
||||
'fileserver_ignoresymlinks': False,
|
||||
'fileserver_limit_traversal': False,
|
||||
'fileserver_verify_config': True,
|
||||
'max_open_files': 100000,
|
||||
'hash_type': 'sha256',
|
||||
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'master'),
|
||||
@ -1615,6 +1622,10 @@ DEFAULT_SPM_OPTS = {
|
||||
'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
|
||||
'cache': 'localfs',
|
||||
'spm_repo_dups': 'ignore',
|
||||
# If set, spm_node_type will be either master or minion, but they should
|
||||
# NOT be a default
|
||||
'spm_node_type': '',
|
||||
'spm_share_dir': os.path.join(salt.syspaths.SHARE_DIR, 'spm'),
|
||||
# <---- Salt master settings overridden by SPM ----------------------
|
||||
}
|
||||
|
||||
@ -3166,7 +3177,11 @@ def _update_ssl_config(opts):
|
||||
'''
|
||||
Resolves string names to integer constant in ssl configuration.
|
||||
'''
|
||||
if opts['ssl'] is None:
|
||||
if opts['ssl'] in (None, False):
|
||||
opts['ssl'] = None
|
||||
return
|
||||
if opts['ssl'] is True:
|
||||
opts['ssl'] = {}
|
||||
return
|
||||
import ssl
|
||||
for key, prefix in (('cert_reqs', 'CERT_'),
|
||||
@ -3263,7 +3278,7 @@ def apply_minion_config(overrides=None,
|
||||
if 'beacons' not in opts:
|
||||
opts['beacons'] = {}
|
||||
|
||||
if overrides.get('ipc_write_buffer', '') == 'dynamic':
|
||||
if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic':
|
||||
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
|
||||
if 'ipc_write_buffer' not in overrides:
|
||||
opts['ipc_write_buffer'] = 0
|
||||
@ -3348,7 +3363,7 @@ def apply_master_config(overrides=None, defaults=None):
|
||||
)
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
if overrides.get('ipc_write_buffer', '') == 'dynamic':
|
||||
if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic':
|
||||
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
|
||||
if 'ipc_write_buffer' not in overrides:
|
||||
opts['ipc_write_buffer'] = 0
|
||||
@ -3374,7 +3389,7 @@ def apply_master_config(overrides=None, defaults=None):
|
||||
]
|
||||
|
||||
# These can be set to syslog, so, not actual paths on the system
|
||||
for config_key in ('log_file', 'key_logfile'):
|
||||
for config_key in ('log_file', 'key_logfile', 'ssh_log_file'):
|
||||
log_setting = opts.get(config_key, '')
|
||||
if log_setting is None:
|
||||
continue
|
||||
|
@ -29,6 +29,7 @@ import salt.minion
|
||||
import salt.search
|
||||
import salt.key
|
||||
import salt.fileserver
|
||||
import salt.utils.args
|
||||
import salt.utils.atomicfile
|
||||
import salt.utils.event
|
||||
import salt.utils.verify
|
||||
@ -37,7 +38,7 @@ import salt.utils.gzip_util
|
||||
import salt.utils.jid
|
||||
from salt.pillar import git_pillar
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import SaltMasterError
|
||||
from salt.exceptions import FileserverConfigError, SaltMasterError
|
||||
|
||||
# Import 3rd-party libs
|
||||
import salt.ext.six as six
|
||||
@ -87,12 +88,19 @@ def init_git_pillar(opts):
|
||||
)
|
||||
else:
|
||||
# New git_pillar code
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES
|
||||
)
|
||||
ret.append(pillar)
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
raise
|
||||
else:
|
||||
log.critical('Could not initialize git_pillar')
|
||||
return ret
|
||||
|
||||
|
||||
@ -844,7 +852,7 @@ class RemoteFuncs(object):
|
||||
opts = {}
|
||||
opts.update(self.opts)
|
||||
opts.update({'fun': load['fun'],
|
||||
'arg': load['arg'],
|
||||
'arg': salt.utils.args.parse_input(load['arg']),
|
||||
'id': load['id'],
|
||||
'doc': False,
|
||||
'conf_file': self.opts['conf_file']})
|
||||
@ -894,7 +902,7 @@ class RemoteFuncs(object):
|
||||
# Set up the publication payload
|
||||
pub_load = {
|
||||
'fun': load['fun'],
|
||||
'arg': load['arg'],
|
||||
'arg': salt.utils.args.parse_input(load['arg']),
|
||||
'tgt_type': load.get('tgt_type', 'glob'),
|
||||
'tgt': load['tgt'],
|
||||
'ret': load['ret'],
|
||||
@ -947,7 +955,7 @@ class RemoteFuncs(object):
|
||||
# Set up the publication payload
|
||||
pub_load = {
|
||||
'fun': load['fun'],
|
||||
'arg': load['arg'],
|
||||
'arg': salt.utils.args.parse_input(load['arg']),
|
||||
'tgt_type': load.get('tgt_type', 'glob'),
|
||||
'tgt': load['tgt'],
|
||||
'ret': load['ret'],
|
||||
@ -1523,7 +1531,7 @@ class LocalFuncs(object):
|
||||
'tgt': load['tgt'],
|
||||
'user': load['user'],
|
||||
'fun': load['fun'],
|
||||
'arg': load['arg'],
|
||||
'arg': salt.utils.args.parse_input(load['arg']),
|
||||
'minions': minions,
|
||||
}
|
||||
|
||||
@ -1575,7 +1583,7 @@ class LocalFuncs(object):
|
||||
# way that won't have a negative impact.
|
||||
pub_load = {
|
||||
'fun': load['fun'],
|
||||
'arg': load['arg'],
|
||||
'arg': salt.utils.args.parse_input(load['arg']),
|
||||
'tgt': load['tgt'],
|
||||
'jid': load['jid'],
|
||||
'ret': load['ret'],
|
||||
|
@ -28,6 +28,29 @@ prefaced with a ``!``.
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
|
||||
|
||||
:configuration: Example configuration using groups
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
engines:
|
||||
slack:
|
||||
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'
|
||||
control: True
|
||||
groups:
|
||||
gods:
|
||||
users:
|
||||
- garethgreenaway
|
||||
commands:
|
||||
- test.ping
|
||||
- cmd.run
|
||||
- list_jobs
|
||||
- list_commands
|
||||
aliases:
|
||||
list_jobs:
|
||||
cmd: jobs.list_jobs
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
|
||||
|
||||
|
||||
:depends: slackclient
|
||||
'''
|
||||
|
||||
@ -86,10 +109,18 @@ def start(token,
|
||||
valid_commands=None,
|
||||
control=False,
|
||||
trigger="!",
|
||||
groups=None,
|
||||
tag='salt/engines/slack'):
|
||||
'''
|
||||
Listen to Slack events and forward them to Salt
|
||||
'''
|
||||
|
||||
if valid_users is None:
|
||||
valid_users = []
|
||||
|
||||
if valid_commands is None:
|
||||
valid_commands = []
|
||||
|
||||
if __opts__.get('__role') == 'master':
|
||||
fire_master = salt.utils.event.get_master_event(
|
||||
__opts__,
|
||||
@ -136,6 +167,21 @@ def start(token,
|
||||
if _text:
|
||||
if _text.startswith(trigger) and control:
|
||||
|
||||
# Get groups
|
||||
if groups:
|
||||
for group in groups:
|
||||
if 'users' in groups[group]:
|
||||
# Add users to valid_users
|
||||
valid_users.extend(groups[group]['users'])
|
||||
|
||||
# Add commands to valid_commands
|
||||
if 'commands' in groups[group]:
|
||||
valid_commands.extend(groups[group]['commands'])
|
||||
|
||||
# Add group aliases to aliases
|
||||
if 'aliases' in groups[group]:
|
||||
aliases.update(groups[group]['aliases'])
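# Illustrative sketch (assumption, not part of this patch): how a 'groups'
# block from the engine config extends the per-call whitelists, mirroring the
# merge loop above.
example_groups = {
    'gods': {
        'users': ['garethgreenaway'],
        'commands': ['test.ping', 'cmd.run'],
        'aliases': {'list_jobs': {'cmd': 'jobs.list_jobs'}},
    },
}
example_users, example_commands, example_aliases = [], [], {}
for group in example_groups:
    example_users.extend(example_groups[group].get('users', []))
    example_commands.extend(example_groups[group].get('commands', []))
    example_aliases.update(example_groups[group].get('aliases', {}))
# example_users/commands/aliases now include everything granted via the group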
|
||||
|
||||
# Ensure the user is allowed to run commands
|
||||
if valid_users:
|
||||
log.debug('{0} {1}'.format(all_users, _m['user']))
|
||||
@ -165,18 +211,18 @@ def start(token,
|
||||
args = []
|
||||
kwargs = {}
|
||||
|
||||
# Ensure the command is allowed
|
||||
if valid_commands:
|
||||
if cmd not in valid_commands:
|
||||
channel.send_message('Using {0} is not allowed.'.format(cmd))
|
||||
return
|
||||
|
||||
# Evaluate aliases
|
||||
if aliases and isinstance(aliases, dict) and cmd in aliases.keys():
|
||||
cmdline = aliases[cmd].get('cmd')
|
||||
cmdline = salt.utils.shlex_split(cmdline)
|
||||
cmd = cmdline[0]
|
||||
|
||||
# Ensure the command is allowed
|
||||
if valid_commands:
|
||||
if cmd not in valid_commands:
|
||||
channel.send_message('{0} is not allowed to use command {1}.'.format(all_users[_m['user']], cmd))
|
||||
return
|
||||
|
||||
# Parse args and kwargs
|
||||
if len(cmdline) > 1:
|
||||
for item in cmdline[1:]:
|
||||
|
@ -40,6 +40,7 @@ log = logging.getLogger(__name__)
|
||||
def __virtual__():
|
||||
if not __opts__.get('minion_data_cache'):
|
||||
return (False, 'stalekey engine requires minion_data_cache to be enabled')
|
||||
return True
|
||||
|
||||
|
||||
def _get_keys():
|
||||
|
@ -36,12 +36,13 @@ import sys
|
||||
import types
|
||||
|
||||
__author__ = "Benjamin Peterson <benjamin@python.org>"
|
||||
__version__ = "1.9.0"
|
||||
__version__ = "1.10.0"
|
||||
|
||||
|
||||
# Useful for very coarse version differentiation.
|
||||
PY2 = sys.version_info[0] == 2
|
||||
PY3 = sys.version_info[0] == 3
|
||||
PY34 = sys.version_info[0:2] >= (3, 4)
|
||||
|
||||
if PY3:
|
||||
string_types = str,
|
||||
@ -64,6 +65,7 @@ else:
|
||||
else:
|
||||
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
|
||||
class X(object):
|
||||
|
||||
def __len__(self):
|
||||
return 1 << 31
|
||||
try:
|
||||
@ -95,7 +97,7 @@ class _LazyDescr(object):
|
||||
|
||||
def __get__(self, obj, tp):
|
||||
result = self._resolve()
|
||||
setattr(obj, self.name, result) # Invokes __set__.
|
||||
setattr(obj, self.name, result) # Invokes __set__.
|
||||
try:
|
||||
# This is a bit ugly, but it avoids running this again by
|
||||
# removing this descriptor.
|
||||
@ -167,12 +169,14 @@ class MovedAttribute(_LazyDescr):
|
||||
|
||||
|
||||
class _SixMetaPathImporter(object):
|
||||
|
||||
"""
|
||||
A meta path importer to import six.moves and its submodules.
|
||||
|
||||
This class implements a PEP302 finder and loader. It should be compatible
|
||||
with Python 2.5 and all existing versions of Python3
|
||||
"""
|
||||
|
||||
def __init__(self, six_module_name):
|
||||
self.name = six_module_name
|
||||
self.known_modules = {}
|
||||
@ -230,6 +234,7 @@ _importer = _SixMetaPathImporter(__name__)
|
||||
|
||||
|
||||
class _MovedItems(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects"""
|
||||
__path__ = [] # mark as package
|
||||
|
||||
@ -241,8 +246,10 @@ _moved_attributes = [
|
||||
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
|
||||
MovedAttribute("intern", "__builtin__", "sys"),
|
||||
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
|
||||
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
|
||||
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
|
||||
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
|
||||
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
|
||||
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
|
||||
MovedAttribute("reduce", "__builtin__", "functools"),
|
||||
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
|
||||
MovedAttribute("StringIO", "StringIO", "io"),
|
||||
@ -252,7 +259,6 @@ _moved_attributes = [
|
||||
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
|
||||
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
|
||||
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
|
||||
|
||||
MovedModule("builtins", "__builtin__"),
|
||||
MovedModule("configparser", "ConfigParser"),
|
||||
MovedModule("copyreg", "copy_reg"),
|
||||
@ -299,8 +305,13 @@ _moved_attributes = [
|
||||
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
|
||||
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
|
||||
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
|
||||
MovedModule("winreg", "_winreg"),
|
||||
]
|
||||
# Add windows specific modules.
|
||||
if sys.platform == "win32":
|
||||
_moved_attributes += [
|
||||
MovedModule("winreg", "_winreg"),
|
||||
]
|
||||
|
||||
for attr in _moved_attributes:
|
||||
setattr(_MovedItems, attr.name, attr)
|
||||
if isinstance(attr, MovedModule):
|
||||
@ -314,6 +325,7 @@ _importer._add_module(moves, "moves")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_parse(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_parse"""
|
||||
|
||||
|
||||
@ -353,6 +365,7 @@ _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_pa
|
||||
|
||||
|
||||
class Module_six_moves_urllib_error(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_error"""
|
||||
|
||||
|
||||
@ -372,6 +385,7 @@ _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.er
|
||||
|
||||
|
||||
class Module_six_moves_urllib_request(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_request"""
|
||||
|
||||
|
||||
@ -421,6 +435,7 @@ _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.
|
||||
|
||||
|
||||
class Module_six_moves_urllib_response(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_response"""
|
||||
|
||||
|
||||
@ -441,6 +456,7 @@ _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib
|
||||
|
||||
|
||||
class Module_six_moves_urllib_robotparser(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
|
||||
|
||||
|
||||
@ -458,6 +474,7 @@ _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.url
|
||||
|
||||
|
||||
class Module_six_moves_urllib(types.ModuleType):
|
||||
|
||||
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
|
||||
__path__ = [] # mark as package
|
||||
parse = _importer._get_module("moves.urllib_parse")
|
||||
@ -528,6 +545,9 @@ if PY3:
|
||||
|
||||
create_bound_method = types.MethodType
|
||||
|
||||
def create_unbound_method(func, cls):
|
||||
return func
|
||||
|
||||
Iterator = object
|
||||
else:
|
||||
def get_unbound_function(unbound):
|
||||
@ -536,6 +556,9 @@ else:
|
||||
def create_bound_method(func, obj):
|
||||
return types.MethodType(func, obj, obj.__class__)
|
||||
|
||||
def create_unbound_method(func, cls):
|
||||
return types.MethodType(func, None, cls)
|
||||
|
||||
class Iterator(object):
|
||||
|
||||
def next(self):
|
||||
@ -574,16 +597,16 @@ if PY3:
|
||||
viewitems = operator.methodcaller("items")
|
||||
else:
|
||||
def iterkeys(d, **kw):
|
||||
return iter(d.iterkeys(**kw))
|
||||
return d.iterkeys(**kw)
|
||||
|
||||
def itervalues(d, **kw):
|
||||
return iter(d.itervalues(**kw))
|
||||
return d.itervalues(**kw)
|
||||
|
||||
def iteritems(d, **kw):
|
||||
return iter(d.iteritems(**kw))
|
||||
return d.iteritems(**kw)
|
||||
|
||||
def iterlists(d, **kw):
|
||||
return iter(d.iterlists(**kw))
|
||||
return d.iterlists(**kw)
|
||||
|
||||
viewkeys = operator.methodcaller("viewkeys")
|
||||
|
||||
@ -602,15 +625,13 @@ _add_doc(iterlists,
|
||||
if PY3:
|
||||
def b(s):
|
||||
return s.encode("latin-1")
|
||||
|
||||
def u(s):
|
||||
return s
|
||||
unichr = chr
|
||||
if sys.version_info[1] <= 1:
|
||||
def int2byte(i):
|
||||
return bytes((i,))
|
||||
else:
|
||||
# This is about 2x faster than the implementation above on 3.2+
|
||||
int2byte = operator.methodcaller("to_bytes", 1, "big")
|
||||
import struct
|
||||
int2byte = struct.Struct(">B").pack
|
||||
del struct
|
||||
byte2int = operator.itemgetter(0)
|
||||
indexbytes = operator.getitem
|
||||
iterbytes = iter
|
||||
@ -618,18 +639,25 @@ if PY3:
|
||||
StringIO = io.StringIO
|
||||
BytesIO = io.BytesIO
|
||||
_assertCountEqual = "assertCountEqual"
|
||||
_assertRaisesRegex = "assertRaisesRegex"
|
||||
_assertRegex = "assertRegex"
|
||||
if sys.version_info[1] <= 1:
|
||||
_assertRaisesRegex = "assertRaisesRegexp"
|
||||
_assertRegex = "assertRegexpMatches"
|
||||
else:
|
||||
_assertRaisesRegex = "assertRaisesRegex"
|
||||
_assertRegex = "assertRegex"
|
||||
else:
|
||||
def b(s):
|
||||
return s
|
||||
# Workaround for standalone backslash
|
||||
|
||||
def u(s):
|
||||
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
|
||||
unichr = unichr
|
||||
int2byte = chr
|
||||
|
||||
def byte2int(bs):
|
||||
return ord(bs[0])
|
||||
|
||||
def indexbytes(buf, i):
|
||||
return ord(buf[i])
|
||||
iterbytes = functools.partial(itertools.imap, ord)
|
||||
@ -657,7 +685,6 @@ def assertRegex(self, *args, **kwargs):
|
||||
if PY3:
|
||||
exec_ = getattr(moves.builtins, "exec")
|
||||
|
||||
|
||||
def reraise(tp, value, tb=None):
|
||||
if value is None:
|
||||
value = tp()
|
||||
@ -678,7 +705,6 @@ else:
|
||||
_locs_ = _globs_
|
||||
exec("""exec _code_ in _globs_, _locs_""")
|
||||
|
||||
|
||||
exec_("""def reraise(tp, value, tb=None):
|
||||
raise tp, value, tb
|
||||
""")
|
||||
@ -706,13 +732,14 @@ if print_ is None:
|
||||
fp = kwargs.pop("file", sys.stdout)
|
||||
if fp is None:
|
||||
return
|
||||
|
||||
def write(data):
|
||||
if not isinstance(data, basestring):
|
||||
data = str(data)
|
||||
# If the file has an encoding, encode unicode with it.
|
||||
if (isinstance(fp, file) and
|
||||
isinstance(data, unicode) and
|
||||
fp.encoding is not None):
|
||||
isinstance(data, unicode) and
|
||||
fp.encoding is not None):
|
||||
errors = getattr(fp, "errors", None)
|
||||
if errors is None:
|
||||
errors = "strict"
|
||||
@ -755,6 +782,7 @@ if print_ is None:
|
||||
write(end)
|
||||
if sys.version_info[:2] < (3, 3):
|
||||
_print = print_
|
||||
|
||||
def print_(*args, **kwargs):
|
||||
fp = kwargs.get("file", sys.stdout)
|
||||
flush = kwargs.pop("flush", False)
|
||||
@ -775,12 +803,14 @@ if sys.version_info[0:2] < (3, 4):
|
||||
else:
|
||||
wraps = functools.wraps
|
||||
|
||||
|
||||
def with_metaclass(meta, *bases):
|
||||
"""Create a base class with a metaclass."""
|
||||
# This requires a bit of explanation: the basic idea is to make a dummy
|
||||
# metaclass for one level of class instantiation that replaces itself with
|
||||
# the actual metaclass.
|
||||
class metaclass(meta):
|
||||
|
||||
def __new__(cls, name, this_bases, d):
|
||||
return meta(name, bases, d)
|
||||
return type.__new__(metaclass, 'temporary_class', (), {})
|
||||
@ -837,7 +867,7 @@ if sys.meta_path:
|
||||
# the six meta path importer, since the other six instance will have
|
||||
# inserted an importer with different class.
|
||||
if (type(importer).__name__ == "_SixMetaPathImporter" and
|
||||
importer.name == __name__):
|
||||
importer.name == __name__):
|
||||
del sys.meta_path[i]
|
||||
break
|
||||
del i, importer
|
||||
|
@ -11,7 +11,7 @@ import os
|
||||
import string
|
||||
import shutil
|
||||
import ftplib
|
||||
from tornado.httputil import parse_response_start_line, HTTPInputError
|
||||
from tornado.httputil import parse_response_start_line, HTTPHeaders, HTTPInputError
|
||||
|
||||
# Import salt libs
|
||||
from salt.exceptions import (
|
||||
@ -356,9 +356,10 @@ class Client(object):
|
||||
del dirs[:]
|
||||
else:
|
||||
for found_file in files:
|
||||
stripped_root = os.path.relpath(root, path).replace('/', '.')
|
||||
stripped_root = os.path.relpath(root, path)
|
||||
if salt.utils.is_windows():
|
||||
stripped_root = stripped_root.replace('\\', '/')
|
||||
stripped_root = stripped_root.replace('/', '.')
|
||||
if found_file.endswith(('.sls')):
|
||||
if found_file.endswith('init.sls'):
|
||||
if stripped_root.endswith('.'):
|
||||
@ -590,21 +591,45 @@ class Client(object):
|
||||
# Use list here to make it writable inside the on_header callback. Simple bool doesn't
|
||||
# work here: on_header creates a new local variable instead. This could be avoided in
|
||||
# Py3 with 'nonlocal' statement. There is no Py2 alternative for this.
|
||||
write_body = [False]
|
||||
write_body = [None, False, None]
|
||||
|
||||
def on_header(hdr):
|
||||
if write_body[1] is not False and write_body[2] is None:
|
||||
# Try to find out what content type encoding is used if this is a text file
|
||||
write_body[1].parse_line(hdr) # pylint: disable=no-member
|
||||
if 'Content-Type' in write_body[1]:
|
||||
content_type = write_body[1].get('Content-Type') # pylint: disable=no-member
|
||||
if not content_type.startswith('text'):
|
||||
write_body[1] = write_body[2] = False
|
||||
else:
|
||||
encoding = 'utf-8'
|
||||
fields = content_type.split(';')
|
||||
for field in fields:
|
||||
if 'encoding' in field:
|
||||
encoding = field.split('encoding=')[-1]
|
||||
write_body[2] = encoding
|
||||
# We have found our encoding. Stop processing headers.
|
||||
write_body[1] = False
|
||||
|
||||
if write_body[0] is not None:
|
||||
# We already parsed the first line. No need to run the code below again
|
||||
return
|
||||
|
||||
try:
|
||||
hdr = parse_response_start_line(hdr)
|
||||
except HTTPInputError:
|
||||
# Not the first line, do nothing
|
||||
return
|
||||
write_body[0] = hdr.code not in [301, 302, 303, 307]
|
||||
write_body[1] = HTTPHeaders()
|
||||
|
||||
if no_cache:
|
||||
result = []
|
||||
|
||||
def on_chunk(chunk):
|
||||
if write_body[0]:
|
||||
if write_body[2]:
|
||||
chunk = chunk.decode(write_body[2])
|
||||
result.append(chunk)
|
||||
else:
|
||||
dest_tmp = "{0}.part".format(dest)
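# Illustrative sketch (not part of this patch): why write_body is a list. In
# Python 2 a nested callback cannot rebind an outer local (there is no
# 'nonlocal'), but it can mutate a mutable object, so a one-element list works
# as a writable flag shared with the callback.
flag = [False]

def _example_callback(_header_line):
    flag[0] = True          # mutating works; 'flag = True' would bind a new local

_example_callback('HTTP/1.1 200 OK')
assert flag[0] is True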
|
||||
@ -629,7 +654,9 @@ class Client(object):
|
||||
if 'handle' not in query:
|
||||
raise MinionError('Error: {0} reading {1}'.format(query['error'], url))
|
||||
if no_cache:
|
||||
return ''.join(result)
|
||||
if write_body[2]:
|
||||
return six.u('').join(result)
|
||||
return six.b('').join(result)
|
||||
else:
|
||||
destfp.close()
|
||||
destfp = None
|
||||
@ -1339,7 +1366,7 @@ class FSClient(RemoteClient):
|
||||
the FSChan object
|
||||
'''
|
||||
def __init__(self, opts): # pylint: disable=W0231
|
||||
self.opts = opts
|
||||
Client.__init__(self, opts) # pylint: disable=W0233
|
||||
self.channel = salt.fileserver.FSChan(opts)
|
||||
self.auth = DumbAuth()
|
||||
|
||||
|
@ -685,14 +685,14 @@ def _virtual(osdata):
|
||||
grains['virtual'] = 'kvm'
|
||||
elif 'joyent smartdc hvm' in model:
|
||||
grains['virtual'] = 'kvm'
|
||||
break
|
||||
else:
|
||||
if osdata['kernel'] in skip_cmds:
|
||||
log.warning(
|
||||
"The tools 'dmidecode' and 'lspci' failed to "
|
||||
'execute because they do not exist on the system of the user '
|
||||
'running this instance or the user does not have the '
|
||||
'necessary permissions to execute them. Grains output might '
|
||||
'not be accurate.'
|
||||
if osdata['kernel'] not in skip_cmds:
|
||||
log.debug(
|
||||
'All tools for virtual hardware identification failed to '
|
||||
'execute because they do not exist on the system running this '
|
||||
'instance or the user does not have the necessary permissions '
|
||||
'to execute them. Grains output might not be accurate.'
|
||||
)
|
||||
|
||||
choices = ('Linux', 'OpenBSD', 'HP-UX')
|
||||
@ -851,7 +851,7 @@ def _virtual(osdata):
|
||||
grains['virtual_subtype'] = 'Xen Dom0'
|
||||
|
||||
for command in failed_commands:
|
||||
log.warning(
|
||||
log.info(
|
||||
"Although '{0}' was found in path, the current user "
|
||||
'cannot execute it. Grains output might not be '
|
||||
'accurate.'.format(command)
|
||||
|
@ -653,17 +653,16 @@ def grains(opts, force_refresh=False, proxy=None):
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
else:
|
||||
if force_refresh:
|
||||
log.debug('Grains refresh requested. Refreshing grains.')
|
||||
else:
|
||||
log.debug('Grains cache last modified {0} seconds ago and '
|
||||
'cache expiration is set to {1}. '
|
||||
'Grains cache expired. Refreshing.'.format(
|
||||
grains_cache_age,
|
||||
opts.get('grains_cache_expiration', 300)
|
||||
))
|
||||
log.debug('Grains cache last modified {0} seconds ago and '
|
||||
'cache expiration is set to {1}. '
|
||||
'Grains cache expired. Refreshing.'.format(
|
||||
grains_cache_age,
|
||||
opts.get('grains_cache_expiration', 300)
|
||||
))
|
||||
else:
|
||||
log.debug('Grains cache file does not exist.')
|
||||
else:
|
||||
log.debug('Grains refresh requested. Refreshing grains.')
|
||||
|
||||
if opts.get('skip_grains', False):
|
||||
return {}
|
||||
@ -696,11 +695,11 @@ def grains(opts, force_refresh=False, proxy=None):
|
||||
if force_refresh: # if we refresh, lets reload grain modules
|
||||
funcs.clear()
|
||||
# Run core grains
|
||||
for key, fun in six.iteritems(funcs):
|
||||
for key in funcs:
|
||||
if not key.startswith('core.'):
|
||||
continue
|
||||
log.trace('Loading {0} grain'.format(key))
|
||||
ret = fun()
|
||||
ret = funcs[key]()
|
||||
if not isinstance(ret, dict):
|
||||
continue
|
||||
if grains_deep_merge:
|
||||
@ -709,7 +708,7 @@ def grains(opts, force_refresh=False, proxy=None):
|
||||
grains_data.update(ret)
|
||||
|
||||
# Run the rest of the grains
|
||||
for key, fun in six.iteritems(funcs):
|
||||
for key in funcs:
|
||||
if key.startswith('core.') or key == '_errors':
|
||||
continue
|
||||
try:
|
||||
@ -719,17 +718,17 @@ def grains(opts, force_refresh=False, proxy=None):
|
||||
# one parameter. Then the grains can have access to the
|
||||
# proxymodule for retrieving information from the connected
|
||||
# device.
|
||||
if fun.__code__.co_argcount == 1:
|
||||
ret = fun(proxy)
|
||||
if funcs[key].__code__.co_argcount == 1:
|
||||
ret = funcs[key](proxy)
|
||||
else:
|
||||
ret = fun()
|
||||
ret = funcs[key]()
|
||||
except Exception:
|
||||
if is_proxy():
|
||||
log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.')
|
||||
log.critical(
|
||||
'Failed to load grains defined in grain file {0} in '
|
||||
'function {1}, error:\n'.format(
|
||||
key, fun
|
||||
key, funcs[key]
|
||||
),
|
||||
exc_info=True
|
||||
)
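# Illustrative sketch (assumption, not part of this patch): a custom grains
# function that declares a single parameter is handed the proxy object by the
# loader loop above; zero-argument grain functions are called as before.
def example_proxy_grains(proxy):
    '''Hypothetical grain function for proxy minions.'''
    if proxy is None:
        return {}
    # query the connected device through the proxymodule here (device-specific)
    return {'proxy_connected': True}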
|
||||
@ -1046,7 +1045,8 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
self.pack = {} if pack is None else pack
|
||||
if opts is None:
|
||||
opts = {}
|
||||
self.context_dict = salt.utils.context.ContextDict()
|
||||
threadsafety = not opts.get('multiprocessing')
|
||||
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
|
||||
self.opts = self.__prep_mod_opts(opts)
|
||||
|
||||
self.module_dirs = module_dirs
|
||||
@ -1177,10 +1177,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
# The files are added in order of priority, so order *must* be retained.
|
||||
self.file_mapping = salt.utils.odict.OrderedDict()
|
||||
|
||||
for mod_dir in self.module_dirs:
|
||||
for mod_dir in sorted(self.module_dirs):
|
||||
files = []
|
||||
try:
|
||||
files = os.listdir(mod_dir)
|
||||
files = sorted(os.listdir(mod_dir))
|
||||
except OSError:
|
||||
continue # Next mod_dir
|
||||
for filename in files:
|
||||
@ -1611,7 +1611,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
except Exception as exc:
|
||||
error_reason = (
|
||||
'Exception raised when processing __virtual__ function'
|
||||
' for {0}. Module will not be loaded {1}'.format(
|
||||
' for {0}. Module will not be loaded: {1}'.format(
|
||||
module_name, exc))
|
||||
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
|
||||
virtual = None
|
||||
|
@ -323,11 +323,8 @@ class Maintenance(SignalHandlingMultiprocessingProcess):
|
||||
for pillar in self.git_pillar:
|
||||
pillar.update()
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception \'{0}\' caught while updating git_pillar'
|
||||
.format(exc),
|
||||
exc_info_on_loglevel=logging.DEBUG
|
||||
)
|
||||
log.error('Exception caught while updating git_pillar',
|
||||
exc_info=True)
|
||||
|
||||
def handle_schedule(self):
|
||||
'''
|
||||
@ -458,19 +455,21 @@ class Master(SMaster):
|
||||
'Cannot change to root directory ({1})'.format(err)
|
||||
)
|
||||
|
||||
fileserver = salt.fileserver.Fileserver(self.opts)
|
||||
if not fileserver.servers:
|
||||
errors.append(
|
||||
'Failed to load fileserver backends, the configured backends '
|
||||
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
|
||||
)
|
||||
else:
|
||||
# Run init() for all backends which support the function, to
|
||||
# double-check configuration
|
||||
try:
|
||||
fileserver.init()
|
||||
except FileserverConfigError as exc:
|
||||
critical_errors.append('{0}'.format(exc))
|
||||
if self.opts.get('fileserver_verify_config', True):
|
||||
fileserver = salt.fileserver.Fileserver(self.opts)
|
||||
if not fileserver.servers:
|
||||
errors.append(
|
||||
'Failed to load fileserver backends, the configured backends '
|
||||
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
|
||||
)
|
||||
else:
|
||||
# Run init() for all backends which support the function, to
|
||||
# double-check configuration
|
||||
try:
|
||||
fileserver.init()
|
||||
except FileserverConfigError as exc:
|
||||
critical_errors.append('{0}'.format(exc))
|
||||
|
||||
if not self.opts['fileserver_backend']:
|
||||
errors.append('No fileserver backends are configured')
|
||||
|
||||
@ -483,25 +482,29 @@ class Master(SMaster):
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
non_legacy_git_pillars = [
|
||||
x for x in self.opts.get('ext_pillar', [])
|
||||
if 'git' in x
|
||||
and not isinstance(x['git'], six.string_types)
|
||||
]
|
||||
if non_legacy_git_pillars:
|
||||
try:
|
||||
new_opts = copy.deepcopy(self.opts)
|
||||
from salt.pillar.git_pillar \
|
||||
import PER_REMOTE_OVERRIDES as overrides
|
||||
for repo in non_legacy_git_pillars:
|
||||
new_opts['ext_pillar'] = [repo]
|
||||
try:
|
||||
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
|
||||
git_pillar.init_remotes(repo['git'], overrides)
|
||||
except FileserverConfigError as exc:
|
||||
critical_errors.append(exc.strerror)
|
||||
finally:
|
||||
del new_opts
|
||||
if self.opts.get('git_pillar_verify_config', True):
|
||||
non_legacy_git_pillars = [
|
||||
x for x in self.opts.get('ext_pillar', [])
|
||||
if 'git' in x
|
||||
and not isinstance(x['git'], six.string_types)
|
||||
]
|
||||
if non_legacy_git_pillars:
|
||||
try:
|
||||
new_opts = copy.deepcopy(self.opts)
|
||||
from salt.pillar.git_pillar \
|
||||
import PER_REMOTE_OVERRIDES as per_remote_overrides, \
|
||||
PER_REMOTE_ONLY as per_remote_only
|
||||
for repo in non_legacy_git_pillars:
|
||||
new_opts['ext_pillar'] = [repo]
|
||||
try:
|
||||
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
|
||||
git_pillar.init_remotes(repo['git'],
|
||||
per_remote_overrides,
|
||||
per_remote_only)
|
||||
except FileserverConfigError as exc:
|
||||
critical_errors.append(exc.strerror)
|
||||
finally:
|
||||
del new_opts
|
||||
|
||||
if errors or critical_errors:
|
||||
for error in errors:
|
||||
|
@ -133,7 +133,7 @@ log = logging.getLogger(__name__)
|
||||
# 6. Handle publications
|
||||
|
||||
|
||||
def resolve_dns(opts, fallback=True):
|
||||
def resolve_dns(opts, fallback=True, connect=True):
|
||||
'''
|
||||
Resolves the master_ip and master_uri options
|
||||
'''
|
||||
@ -150,13 +150,13 @@ def resolve_dns(opts, fallback=True):
|
||||
if opts['master'] == '':
|
||||
raise SaltSystemExit
|
||||
ret['master_ip'] = \
|
||||
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
|
||||
salt.utils.dns_check(opts['master'], opts['master_port'], True, opts['ipv6'], connect)
|
||||
except SaltClientError:
|
||||
if opts['retry_dns']:
|
||||
while True:
|
||||
import salt.log
|
||||
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
|
||||
'seconds').format(opts['master'], opts['retry_dns'])
|
||||
msg = ('Master hostname: \'{0}\' not found or not responsive. '
|
||||
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
|
||||
if salt.log.setup.is_console_configured():
|
||||
log.error(msg)
|
||||
else:
|
||||
@ -164,7 +164,7 @@ def resolve_dns(opts, fallback=True):
|
||||
time.sleep(opts['retry_dns'])
|
||||
try:
|
||||
ret['master_ip'] = salt.utils.dns_check(
|
||||
opts['master'], True, opts['ipv6']
|
||||
opts['master'], opts['master_port'], True, opts['ipv6'], connect
|
||||
)
|
||||
break
|
||||
except SaltClientError:
|
||||
@ -285,24 +285,15 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
|
||||
# above, that would result in a 2nd call to
|
||||
# salt.utils.cli.yamlify_arg(), which could mangle the input.
|
||||
_args.append(arg)
|
||||
elif string_kwarg:
|
||||
salt.utils.warn_until(
|
||||
'Nitrogen',
|
||||
'The list of function args and kwargs should be parsed '
|
||||
'by salt.utils.args.parse_input() before calling '
|
||||
'salt.minion.load_args_and_kwargs().'
|
||||
if string_kwarg:
|
||||
log.critical(
|
||||
'String kwarg(s) %s passed to '
|
||||
'salt.minion.load_args_and_kwargs(). This is no longer '
|
||||
'supported, so the kwarg(s) will be ignored. Arguments '
|
||||
'passed to salt.minion.load_args_and_kwargs() should be '
|
||||
'passed to salt.utils.args.parse_input() first to load '
|
||||
'and condition them properly.', string_kwarg
|
||||
)
|
||||
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
|
||||
# Function supports **kwargs or is a positional argument to
|
||||
# the function.
|
||||
_kwargs.update(string_kwarg)
|
||||
else:
|
||||
# **kwargs not in argspec and parsed argument name not in
|
||||
# list of positional arguments. This keyword argument is
|
||||
# invalid.
|
||||
for key, val in six.iteritems(string_kwarg):
|
||||
invalid_kwargs.append('{0}={1}'.format(key, val))
|
||||
continue
|
||||
|
||||
# if the arg is a dict with __kwarg__ == True, then its a kwarg
|
||||
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
|
||||
@ -689,7 +680,13 @@ class SMinion(MinionBase):
|
||||
|
||||
def gen_modules(self, initial_load=False):
|
||||
'''
|
||||
Load all of the modules for the minion
|
||||
Tell the minion to reload the execution modules
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' sys.reload_modules
|
||||
'''
|
||||
# Ensure that a pillar key is set in the opts, otherwise the loader
|
||||
# will pack a newly-generated empty dict as the __pillar__ dunder, and
|
||||
@ -763,7 +760,13 @@ class MasterMinion(object):
|
||||
|
||||
def gen_modules(self, initial_load=False):
|
||||
'''
|
||||
Load all of the modules for the minion
|
||||
Tell the minion to reload the execution modules
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' sys.reload_modules
|
||||
'''
|
||||
self.utils = salt.loader.utils(self.opts)
|
||||
self.functions = salt.loader.minion_mods(
|
||||
@ -864,22 +867,25 @@ class MinionManager(MinionBase):
|
||||
'''
|
||||
last = 0 # never have we signed in
|
||||
auth_wait = minion.opts['acceptance_wait_time']
|
||||
failed = False
|
||||
while True:
|
||||
try:
|
||||
if minion.opts.get('beacons_before_connect', False):
|
||||
minion.setup_beacons()
|
||||
if minion.opts.get('scheduler_before_connect', False):
|
||||
minion.setup_scheduler()
|
||||
yield minion.connect_master()
|
||||
yield minion.connect_master(failed=failed)
|
||||
minion.tune_in(start=False)
|
||||
break
|
||||
except SaltClientError as exc:
|
||||
failed = True
|
||||
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
|
||||
last = time.time()
|
||||
if auth_wait < self.max_auth_wait:
|
||||
auth_wait += self.auth_wait
|
||||
yield tornado.gen.sleep(auth_wait) # TODO: log?
|
||||
except Exception as e:
|
||||
failed = True
|
||||
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
|
||||
|
||||
# Multi Master Tune In
|
||||
@ -1020,7 +1026,7 @@ class Minion(MinionBase):
|
||||
time.sleep(1)
|
||||
sys.exit(0)
|
||||
|
||||
def sync_connect_master(self, timeout=None):
|
||||
def sync_connect_master(self, timeout=None, failed=False):
|
||||
'''
|
||||
Block until we are connected to a master
|
||||
'''
|
||||
@ -1031,7 +1037,7 @@ class Minion(MinionBase):
|
||||
self._sync_connect_master_success = True
|
||||
self.io_loop.stop()
|
||||
|
||||
self._connect_master_future = self.connect_master()
|
||||
self._connect_master_future = self.connect_master(failed=failed)
|
||||
# finish connecting to master
|
||||
self._connect_master_future.add_done_callback(on_connect_master_future_done)
|
||||
if timeout:
|
||||
@ -1059,11 +1065,11 @@ class Minion(MinionBase):
|
||||
self.schedule.returners = self.returners
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def connect_master(self):
|
||||
def connect_master(self, failed=False):
|
||||
'''
|
||||
Return a future which will complete when you are connected to a master
|
||||
'''
|
||||
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
|
||||
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
|
||||
yield self._post_master_init(master)
|
||||
|
||||
# TODO: better name...
|
||||
@ -2626,6 +2632,7 @@ class SyndicManager(MinionBase):
|
||||
'''
|
||||
last = 0 # never have we signed in
|
||||
auth_wait = opts['acceptance_wait_time']
|
||||
failed = False
|
||||
while True:
|
||||
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
|
||||
try:
|
||||
@ -2634,7 +2641,7 @@ class SyndicManager(MinionBase):
|
||||
safe=False,
|
||||
io_loop=self.io_loop,
|
||||
)
|
||||
yield syndic.connect_master()
|
||||
yield syndic.connect_master(failed=failed)
|
||||
# set up the syndic to handle publishes (specifically not event forwarding)
|
||||
syndic.tune_in_no_block()
|
||||
|
||||
@ -2644,6 +2651,7 @@ class SyndicManager(MinionBase):
|
||||
log.info('Syndic successfully connected to {0}'.format(opts['master']))
|
||||
break
|
||||
except SaltClientError as exc:
|
||||
failed = True
|
||||
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
|
||||
last = time.time()
|
||||
if auth_wait < self.max_auth_wait:
|
||||
@ -2652,6 +2660,7 @@ class SyndicManager(MinionBase):
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except: # pylint: disable=W0702
|
||||
failed = True
|
||||
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
|
||||
|
||||
raise tornado.gen.Return(syndic)
@ -35,6 +35,7 @@ from salt.exceptions import SaltInvocationError, CommandExecutionError
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.itertools
|
||||
import salt.utils.templates
|
||||
|
||||
# TODO: Check that the passed arguments are correct
|
||||
|
||||
|
@ -4,6 +4,12 @@ Connection module for Amazon CloudTrail
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:depends:
|
||||
- boto
|
||||
- boto3
|
||||
|
||||
The dependencies listed above can be installed via package or pip.
|
||||
|
||||
:configuration: This module accepts explicit Lambda credentials but can also
|
||||
utilize IAM roles assigned to the instance through Instance Profiles.
|
||||
Dynamic credentials are then automatically obtained from AWS API and no
|
||||
@ -39,8 +45,6 @@ Connection module for Amazon CloudTrail
|
||||
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
|
||||
region: us-east-1
|
||||
|
||||
:depends: boto3
|
||||
|
||||
'''
|
||||
# keep lint from choking on _get_conn and _cache_id
|
||||
#pylint: disable=E0602
|
||||
|
@ -4,6 +4,12 @@ Connection module for Amazon IoT
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:depends:
|
||||
- boto
|
||||
- boto3
|
||||
|
||||
The dependencies listed above can be installed via package or pip.
|
||||
|
||||
:configuration: This module accepts explicit Lambda credentials but can also
|
||||
utilize IAM roles assigned to the instance through Instance Profiles.
|
||||
Dynamic credentials are then automatically obtained from AWS API and no
|
||||
@ -39,8 +45,6 @@ Connection module for Amazon IoT
|
||||
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
|
||||
region: us-east-1
|
||||
|
||||
:depends: boto3
|
||||
|
||||
'''
|
||||
# keep lint from choking on _get_conn and _cache_id
|
||||
#pylint: disable=E0602
|
||||
|
@ -4,6 +4,12 @@ Connection module for Amazon Lambda
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:depends:
|
||||
- boto
|
||||
- boto3
|
||||
|
||||
The dependencies listed above can be installed via package or pip.
|
||||
|
||||
:configuration: This module accepts explicit Lambda credentials but can also
|
||||
utilize IAM roles assigned to the instance through Instance Profiles.
|
||||
Dynamic credentials are then automatically obtained from AWS API and no
|
||||
@ -69,8 +75,6 @@ Connection module for Amazon Lambda
|
||||
error:
|
||||
message: error message
|
||||
|
||||
:depends: boto3
|
||||
|
||||
'''
|
||||
# keep lint from choking on _get_conn and _cache_id
|
||||
# pylint: disable=E0602
|
||||
|
@ -4,6 +4,12 @@ Connection module for Amazon S3 Buckets
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:depends:
|
||||
- boto
|
||||
- boto3
|
||||
|
||||
The dependencies listed above can be installed via package or pip.
|
||||
|
||||
:configuration: This module accepts explicit Lambda credentials but can also
|
||||
utilize IAM roles assigned to the instance through Instance Profiles.
|
||||
Dynamic credentials are then automatically obtained from AWS API and no
|
||||
@ -39,8 +45,6 @@ Connection module for Amazon S3 Buckets
|
||||
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
|
||||
region: us-east-1
|
||||
|
||||
:depends: boto3
|
||||
|
||||
'''
|
||||
# keep lint from choking on _get_conn and _cache_id
|
||||
#pylint: disable=E0602
@ -14,6 +14,7 @@ from __future__ import absolute_import
|
||||
import json
|
||||
import logging
|
||||
import distutils.version # pylint: disable=import-error,no-name-in-module
|
||||
import shlex
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
@ -56,6 +57,21 @@ def _check_valid_version():
)


def _construct_bower_command(bower_command):
'''
Create bower command line string
'''
if not bower_command:
raise CommandExecutionError(
'bower_command, e.g. install, must be specified')

cmd = ['bower'] + shlex.split(bower_command)
cmd.extend(['--config.analytics', 'false',
'--config.interactive', 'false',
'--allow-root', '--json'])
return cmd


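A quick sketch of how the new helper is meant to be used by install/uninstall/list: build an argv list once, then append package names so cmd.run_all can run it with python_shell=False (the package name below is hypothetical):

.. code-block:: python

    import shlex

    def construct_bower_command(bower_command):
        # Mirrors _construct_bower_command above: a list, not a shell string.
        cmd = ['bower'] + shlex.split(bower_command)
        cmd.extend(['--config.analytics', 'false',
                    '--config.interactive', 'false',
                    '--allow-root', '--json'])
        return cmd

    cmd = construct_bower_command('install')
    cmd.append('jquery')  # hypothetical package name
    # cmd == ['bower', 'install', '--config.analytics', 'false', ...]
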
def install(pkg,
|
||||
dir,
|
||||
pkgs=None,
|
||||
@ -96,16 +112,12 @@ def install(pkg,
|
||||
'''
|
||||
_check_valid_version()
|
||||
|
||||
cmd = 'bower install'
|
||||
cmd += ' --config.analytics false'
|
||||
cmd += ' --config.interactive false'
|
||||
cmd += ' --allow-root'
|
||||
cmd += ' --json'
|
||||
cmd = _construct_bower_command('install')
|
||||
|
||||
if pkg:
|
||||
cmd += ' "{0}"'.format(pkg)
|
||||
cmd.append(pkg)
|
||||
elif pkgs:
|
||||
cmd += ' "{0}"'.format('" "'.join(pkgs))
|
||||
cmd.extend(pkgs)
|
||||
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=dir,
|
||||
@ -149,12 +161,8 @@ def uninstall(pkg, dir, runas=None, env=None):
|
||||
'''
|
||||
_check_valid_version()
|
||||
|
||||
cmd = 'bower uninstall'
|
||||
cmd += ' --config.analytics false'
|
||||
cmd += ' --config.interactive false'
|
||||
cmd += ' --allow-root'
|
||||
cmd += ' --json'
|
||||
cmd += ' "{0}"'.format(pkg)
|
||||
cmd = _construct_bower_command('uninstall')
|
||||
cmd.append(pkg)
|
||||
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=dir,
|
||||
@ -194,11 +202,8 @@ def list_(dir, runas=None, env=None):
|
||||
'''
|
||||
_check_valid_version()
|
||||
|
||||
cmd = 'bower list --json'
|
||||
cmd += ' --config.analytics false'
|
||||
cmd += ' --config.interactive false'
|
||||
cmd += ' --offline'
|
||||
cmd += ' --allow-root'
|
||||
cmd = _construct_bower_command('list')
|
||||
cmd.append('--offline')
|
||||
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=dir,
|
||||
@ -210,3 +215,44 @@ def list_(dir, runas=None, env=None):
|
||||
raise CommandExecutionError(result['stderr'])
|
||||
|
||||
return json.loads(result['stdout'])['dependencies']
|
||||
|
||||
|
||||
def prune(dir, runas=None, env=None):
|
||||
'''
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
Remove extraneous local Bower packages, i.e. those not referenced in bower.json
|
||||
|
||||
dir
|
||||
The directory whose packages will be pruned
|
||||
|
||||
runas
|
||||
The user to run Bower with
|
||||
|
||||
env
|
||||
Environment variables to set when invoking Bower. Uses the same ``env``
|
||||
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
|
||||
function.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' bower.prune /path/to/project
|
||||
|
||||
'''
|
||||
_check_valid_version()
|
||||
|
||||
cmd = _construct_bower_command('prune')
|
||||
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=dir,
|
||||
runas=runas,
|
||||
env=env,
|
||||
python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
raise CommandExecutionError(result['stderr'])
|
||||
|
||||
# Bower returns an empty dictionary if nothing was pruned
|
||||
return json.loads(result['stdout'])
|
||||
|
@ -408,6 +408,10 @@ def _get_cron_date_time(**kwargs):
value = str(kwargs.get(param, '1')).lower()
if value == 'random':
ret[param] = str(random.sample(range_max[param], 1)[0])
elif len(value.split(':')) == 2:
cron_range = sorted(value.split(':'))
start, end = int(cron_range[0]), int(cron_range[1])
ret[param] = str(random.randint(start, end))
else:
ret[param] = value

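The new branch lets a cron field be given as a ``start:end`` range and picks a random value inside it, alongside the existing ``random`` keyword. A small illustration of the same idea (the field maximum is an assumed value):

.. code-block:: python

    import random

    def pick_cron_value(value, max_for_field=60):
        # 'random' -> anything the field allows; 'a:b' -> random pick in [a, b]
        value = str(value).lower()
        if value == 'random':
            return str(random.randrange(max_for_field))
        if len(value.split(':')) == 2:
            start, end = sorted(int(part) for part in value.split(':'))
            return str(random.randint(start, end))
        return value

    print(pick_cron_value('2:5'))  # e.g. '4'
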
@ -183,6 +183,10 @@ Functions
|
||||
- :py:func:`docker.disconnect_container_from_network
|
||||
<salt.modules.docker.disconnect_container_from_network>`
|
||||
|
||||
- Salt Functions and States Execution
|
||||
- :py:func:`docker.call <salt.modules.docker.call>`
|
||||
- :py:func:`docker.sls <salt.modules.docker.sls>`
|
||||
- :py:func:`docker.sls_build <salt.modules.docker.sls_build>`
|
||||
|
||||
|
||||
.. _docker-execution-driver:
|
||||
@ -191,11 +195,12 @@ Executing Commands Within a Running Container
|
||||
---------------------------------------------
|
||||
|
||||
.. note::
|
||||
With the release of Docker 1.3.1, the Execution Driver has been removed.
|
||||
Starting in Salt 2016.3.6, 2016.11.4, and Nitrogen, Salt defaults to using
|
||||
the ``docker-exec`` driver, however for older Salt releases it will be
|
||||
necessary to set the ``docker.exec_driver`` config option to either
|
||||
``docker-exec`` or ``nsenter`` for Docker versions 1.3.1 and newer.
|
||||
With the release of Docker 1.13.1, the Execution Driver has been removed.
|
||||
Starting in versions 2016.3.6, 2016.11.4, and Nitrogen, Salt defaults to
|
||||
using ``docker exec`` to run commands in containers, however for older Salt
|
||||
releases it will be necessary to set the ``docker.exec_driver`` config
|
||||
option to either ``docker-exec`` or ``nsenter`` for Docker versions 1.13.1
|
||||
and newer.
|
||||
|
||||
Multiple methods exist for executing commands within Docker containers:
|
||||
|
||||
@ -258,7 +263,6 @@ import distutils.version # pylint: disable=import-error,no-name-in-module,unuse
|
||||
import fnmatch
|
||||
import functools
|
||||
import gzip
|
||||
import inspect as inspect_module
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
@ -278,6 +282,7 @@ import subprocess
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
import salt.ext.six as six
|
||||
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
|
||||
from salt.utils.args import get_function_argspec as _argspec
|
||||
import salt.utils
|
||||
import salt.utils.decorators
|
||||
import salt.utils.files
|
||||
@ -292,11 +297,22 @@ import salt.client.ssh.state
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import docker
|
||||
import docker.utils
|
||||
HAS_DOCKER_PY = True
|
||||
except ImportError:
|
||||
HAS_DOCKER_PY = False
|
||||
|
||||
# These next two imports are only necessary to have access to the needed
|
||||
# functions so that we can get argspecs for the container config, host config,
|
||||
# and networking config (see the get_client_args() function).
|
||||
try:
|
||||
import docker.types
|
||||
except ImportError:
|
||||
pass
|
||||
try:
|
||||
import docker.utils
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
try:
|
||||
if six.PY2:
|
||||
import backports.lzma as lzma
|
||||
@ -559,6 +575,17 @@ VALID_CREATE_OPTS = {
|
||||
'devices': {
|
||||
'path': 'HostConfig:Devices',
|
||||
},
|
||||
'sysctls': {
|
||||
'path': 'HostConfig:Sysctls',
|
||||
'min_docker': (1, 12, 0),
|
||||
'default': {},
|
||||
},
|
||||
'ulimits': {
|
||||
'path': 'HostConfig:Ulimits',
|
||||
'min_docker': (1, 6, 0),
|
||||
'min_docker_py': (1, 2, 0),
|
||||
'default': [],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@ -781,6 +808,8 @@ def _get_client(timeout=None):
|
||||
'''
|
||||
if 'docker.client' not in __context__:
|
||||
client_kwargs = {}
|
||||
if timeout is not None:
|
||||
client_kwargs['timeout'] = timeout
|
||||
for key, val in (('base_url', 'docker.url'),
|
||||
('version', 'docker.version')):
|
||||
param = __salt__['config.get'](val, NOTSET)
|
||||
@ -816,14 +845,10 @@ def _get_client(timeout=None):
'Docker machine {0} failed: {1}'.format(docker_machine, exc))

try:
__context__['docker.client'] = docker.Client(**client_kwargs)
except AttributeError:
# docker-py 2.0 renamed this client attribute
__context__['docker.client'] = docker.APIClient(**client_kwargs)

# Set a new timeout if one was passed
if timeout is not None and __context__['docker.client'].timeout != timeout:
__context__['docker.client'].timeout = timeout
except AttributeError:
__context__['docker.client'] = docker.Client(**client_kwargs)


def _get_md5(name, path):
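docker-py 2.0 renamed the low-level ``Client`` class to ``APIClient``, so the hunk above tries the old attribute first and falls back on AttributeError. A minimal sketch of that compatibility shim (the client kwargs are placeholders):

.. code-block:: python

    import docker

    def make_docker_client(**client_kwargs):
        # Older docker-py exposes docker.Client; docker-py >= 2.0 only has
        # docker.APIClient, so catch the AttributeError and fall back.
        try:
            return docker.Client(**client_kwargs)
        except AttributeError:
            return docker.APIClient(**client_kwargs)
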
@ -1917,6 +1942,81 @@ def _validate_input(kwargs,
|
||||
else:
|
||||
kwargs['labels'] = salt.utils.repack_dictlist(kwargs['labels'])
|
||||
|
||||
def _valid_sysctls(): # pylint: disable=unused-variable
|
||||
'''
|
||||
Can be a a dictionary
|
||||
'''
|
||||
if kwargs.get('sysctls') is None:
|
||||
return
|
||||
if isinstance(kwargs['sysctls'], list):
|
||||
repacked_sysctl = {}
|
||||
for sysctl_var in kwargs['sysctls']:
|
||||
try:
|
||||
key, val = sysctl_var.split('=')
|
||||
except AttributeError:
|
||||
raise SaltInvocationError(
|
||||
'Invalid sysctl variable definition \'{0}\''
|
||||
.format(sysctl_var)
|
||||
)
|
||||
else:
|
||||
if key in repacked_sysctl:
|
||||
raise SaltInvocationError(
|
||||
'Duplicate sysctl variable \'{0}\''
|
||||
.format(key)
|
||||
)
|
||||
if not isinstance(val, six.string_types):
|
||||
raise SaltInvocationError(
|
||||
'sysctl values must be strings {key}=\'{val}\''
|
||||
.format(key=key, val=val))
|
||||
repacked_sysctl[key] = val
|
||||
kwargs['sysctls'] = repacked_sysctl
|
||||
elif isinstance(kwargs['sysctls'], dict):
|
||||
for key, val in six.iteritems(kwargs['sysctls']):
|
||||
if not isinstance(val, six.string_types):
|
||||
raise SaltInvocationError('sysctl values must be dicts')
|
||||
elif not isinstance(kwargs['sysctls'], dict):
|
||||
raise SaltInvocationError(
|
||||
'Invalid sysctls configuration.'
|
||||
)
|
||||
|
||||
def _valid_ulimits(): # pylint: disable=unused-variable
|
||||
'''
|
||||
Must be a string or list of strings with bind mount information
|
||||
'''
|
||||
if kwargs.get('ulimits') is None:
|
||||
# No need to validate
|
||||
return
|
||||
err = (
|
||||
'Invalid ulimits configuration. See the documentation for proper '
|
||||
'usage.'
|
||||
)
|
||||
try:
|
||||
_valid_dictlist('ulimits')
|
||||
# If this was successful then assume the correct API value was
|
||||
# passed on on the CLI and do not proceed with validation.
|
||||
return
|
||||
except SaltInvocationError:
|
||||
pass
|
||||
try:
|
||||
_valid_stringlist('ulimits')
|
||||
except SaltInvocationError:
|
||||
raise SaltInvocationError(err)
|
||||
|
||||
new_ulimits = []
|
||||
for ulimit in kwargs['ulimits']:
|
||||
ulimit_name, comps = ulimit.strip().split('=', 1)
|
||||
try:
|
||||
comps = [int(x) for x in comps.split(':', 1)]
|
||||
except ValueError:
|
||||
raise SaltInvocationError(err)
|
||||
if len(comps) == 1:
|
||||
comps *= 2
|
||||
soft_limit, hard_limit = comps
|
||||
new_ulimits.append({'Name': ulimit_name,
|
||||
'Soft': soft_limit,
|
||||
'Hard': hard_limit})
|
||||
kwargs['ulimits'] = new_ulimits
|
||||
|
||||
# And now, the actual logic to perform the validation
|
||||
if 'docker.docker_version' not in __context__:
|
||||
# Have to call this func using the __salt__ dunder (instead of just
|
||||
@ -3180,7 +3280,7 @@ def create(image,
|
||||
# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#create-a-container
|
||||
if salt.utils.version_cmp(version()['ApiVersion'], '1.15') > 0:
|
||||
client = __context__['docker.client']
|
||||
host_config_args = inspect_module.getargspec(docker.utils.create_host_config).args
|
||||
host_config_args = get_client_args()['host_config']
|
||||
create_kwargs['host_config'] = client.create_host_config(
|
||||
**dict((arg, create_kwargs.pop(arg, None)) for arg in host_config_args if arg != 'version')
|
||||
)
|
||||
@ -5782,20 +5882,25 @@ def _gather_pillar(pillarenv, pillar_override, **grains):
|
||||
|
||||
def call(name, function, *args, **kwargs):
|
||||
'''
|
||||
Executes a salt function inside a container
|
||||
Executes a Salt function inside a running container
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
The container does not need to have Salt installed, but Python is required.
|
||||
|
||||
name
|
||||
Container name or ID
|
||||
|
||||
function
|
||||
Salt execution module function
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt myminion docker.call test.ping
|
||||
|
||||
salt myminion test.arg arg1 arg2 key1=val1
|
||||
|
||||
The container does not need to have Salt installed, but Python
|
||||
is required.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
salt myminion dockerng.call compassionate_mirzakhani test.arg arg1 arg2 key1=val1
|
||||
|
||||
'''
|
||||
# where to put the salt-thin
|
||||
@ -5852,11 +5957,23 @@ def call(name, function, *args, **kwargs):
|
||||
|
||||
def sls(name, mods=None, saltenv='base', **kwargs):
|
||||
'''
|
||||
Apply the highstate defined by the specified modules.
|
||||
Apply the states defined by the specified SLS modules to the running
|
||||
container
|
||||
|
||||
For example, if your master defines the states ``web`` and ``rails``, you
|
||||
can apply them to a container:
|
||||
states by doing:
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
The container does not need to have Salt installed, but Python is required.
|
||||
|
||||
name
|
||||
Container name or ID
|
||||
|
||||
mods : None
|
||||
A string containing comma-separated list of SLS with defined states to
|
||||
apply to the container.
|
||||
|
||||
saltenv : base
|
||||
Specify the environment from which to retrieve the SLS indicated by the
|
||||
`mods` parameter.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -5864,10 +5981,6 @@ def sls(name, mods=None, saltenv='base', **kwargs):
|
||||
|
||||
salt myminion docker.sls compassionate_mirzakhani mods=rails,web
|
||||
|
||||
The container does not need to have Salt installed, but Python
|
||||
is required.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
'''
|
||||
mods = [item.strip() for item in mods.split(',')] if mods else []
|
||||
|
||||
@ -5926,11 +6039,25 @@ def sls(name, mods=None, saltenv='base', **kwargs):
|
||||
def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
|
||||
dryrun=False, **kwargs):
|
||||
'''
|
||||
Build a docker image using the specified sls modules and base image.
|
||||
Build a Docker image using the specified SLS modules on top of base image
|
||||
|
||||
For example, if your master defines the states ``web`` and ``rails``, you
|
||||
can build a docker image inside myminion that results of applying those
|
||||
states by doing:
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
The base image does not need to have Salt installed, but Python is required.
|
||||
|
||||
name
|
||||
Image name to be built and committed
|
||||
|
||||
base : opensuse/python
|
||||
Name or ID of the base image
|
||||
|
||||
mods : None
|
||||
A string containing comma-separated list of SLS with defined states to
|
||||
apply to the base image.
|
||||
|
||||
saltenv : base
|
||||
Specify the environment from which to retrieve the SLS indicated by the
|
||||
`mods` parameter.
|
||||
|
||||
base
|
||||
the base image
|
||||
@ -5956,12 +6083,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
|
||||
|
||||
salt myminion docker.sls_build imgname base=mybase mods=rails,web
|
||||
|
||||
The base image does not need to have Salt installed, but Python
|
||||
is required.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
'''
|
||||
|
||||
create_kwargs = salt.utils.clean_kwargs(**copy.deepcopy(kwargs))
|
||||
for key in ('image', 'name', 'cmd', 'interactive', 'tty'):
|
||||
try:
|
||||
@ -5971,7 +6093,6 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
|
||||
|
||||
# start a new container
|
||||
ret = create(image=base,
|
||||
name=name,
|
||||
cmd='sleep infinity',
|
||||
interactive=True, tty=True,
|
||||
**create_kwargs)
|
||||
@ -5984,10 +6105,82 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
|
||||
# fail if the state was not successful
|
||||
if not dryrun and not salt.utils.check_state_result(ret):
|
||||
raise CommandExecutionError(ret)
|
||||
if dryrun is False:
|
||||
ret = commit(id_, name)
|
||||
finally:
|
||||
stop(id_)
|
||||
|
||||
if dryrun:
|
||||
rm_(id_)
|
||||
return ret
|
||||
return commit(id_, name)
|
||||
return ret
|
||||
|
||||
|
||||
def get_client_args():
|
||||
'''
|
||||
.. versionadded:: 2016.3.6,2016.11.4,Nitrogen
|
||||
|
||||
Returns the args for docker-py's `low-level API`_, organized by container
|
||||
config, host config, and networking config.
|
||||
|
||||
.. _`low-level API`: http://docker-py.readthedocs.io/en/stable/api.html
|
||||
salt myminion docker.get_client_args
|
||||
'''
|
||||
try:
|
||||
config_args = _argspec(docker.types.ContainerConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
config_args = _argspec(docker.utils.create_container_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Failed to get create_container_config argspec'
|
||||
)
|
||||
|
||||
try:
|
||||
host_config_args = \
|
||||
_argspec(docker.types.HostConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
host_config_args = _argspec(docker.utils.create_host_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Failed to get create_host_config argspec'
|
||||
)
|
||||
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.types.EndpointConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.utils.utils.create_endpoint_config).args
|
||||
except AttributeError:
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.utils.create_endpoint_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Failed to get create_endpoint_config argspec'
|
||||
)
|
||||
|
||||
for arglist in (config_args, host_config_args, endpoint_config_args):
|
||||
try:
|
||||
# The API version is passed automagically by the API code that
|
||||
# imports these classes/functions and is not an arg that we will be
|
||||
# passing, so remove it if present.
|
||||
arglist.remove('version')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# Remove any args in host or networking config from the main config dict.
|
||||
# This keeps us from accidentally allowing args that have been moved from
|
||||
# the container config to the host config (but are still accepted by
|
||||
# create_container_config so warnings can be issued).
|
||||
for arglist in (host_config_args, endpoint_config_args):
|
||||
for item in arglist:
|
||||
try:
|
||||
config_args.remove(item)
|
||||
except ValueError:
|
||||
# Arg is not in config_args
|
||||
pass
|
||||
|
||||
return {'config': config_args,
|
||||
'host_config': host_config_args,
|
||||
'networking_config': endpoint_config_args}
|
||||
|
@ -101,6 +101,8 @@ def _get_instance(hosts=None, profile=None):
|
||||
use_ssl = _profile.get('use_ssl', False)
|
||||
ca_certs = _profile.get('ca_certs', False)
|
||||
verify_certs = _profile.get('verify_certs', False)
|
||||
username = _profile.get('username', None)
|
||||
password = _profile.get('password', None)
|
||||
|
||||
if not hosts:
|
||||
hosts = ['127.0.0.1:9200']
|
||||
@ -114,6 +116,14 @@ def _get_instance(hosts=None, profile=None):
ca_certs=ca_certs,
verify_certs=verify_certs,
)
elif username and password:
es = elasticsearch.Elasticsearch(
hosts,
use_ssl=use_ssl,
ca_certs=ca_certs,
verify_certs=verify_certs,
http_auth=(username, password)
)
else:
# Custom connection class to use requests module with proxies
class ProxyConnection(RequestsHttpConnection):
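The new branch passes HTTP basic auth to elasticsearch-py when the profile carries a username and password. A hedged sketch of the same client construction (host and credentials are placeholders):

.. code-block:: python

    import elasticsearch

    def get_es(hosts, username=None, password=None, use_ssl=False,
               ca_certs=None, verify_certs=False):
        kwargs = dict(use_ssl=use_ssl, ca_certs=ca_certs,
                      verify_certs=verify_certs)
        if username and password:
            # elasticsearch-py accepts http_auth as a (user, password) tuple
            kwargs['http_auth'] = (username, password)
        return elasticsearch.Elasticsearch(hosts, **kwargs)

    es = get_es(['127.0.0.1:9200'], username='salt', password='hunter2')
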
@ -687,31 +687,54 @@ def check_hash(path, file_hash):
|
||||
'''
|
||||
Check if a file matches the given hash string
|
||||
|
||||
Returns true if the hash matched, otherwise false. Raises ValueError if
|
||||
the hash was not formatted correctly.
|
||||
Returns ``True`` if the hash matches, otherwise ``False``.
|
||||
|
||||
path
|
||||
A file path
|
||||
Path to a file local to the minion.
|
||||
|
||||
hash
|
||||
A string in the form <hash_type>:<hash_value>. For example:
|
||||
``md5:e138491e9d5b97023cea823fe17bac22``
|
||||
The hash to check against the file specified in the ``path`` argument.
|
||||
For versions 2016.11.4 and newer, the hash can be specified without an
|
||||
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
|
||||
but for earlier releases it is necessary to also specify the hash type
|
||||
in the format ``<hash_type>:<hash_value>`` (e.g.
|
||||
``md5:e138491e9d5b97023cea823fe17bac22``).
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.check_hash /etc/fstab md5:<md5sum>
|
||||
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
|
||||
salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22
|
||||
'''
|
||||
path = os.path.expanduser(path)
|
||||
|
||||
hash_parts = file_hash.split(':', 1)
|
||||
if len(hash_parts) != 2:
|
||||
# Support "=" for backward compatibility.
|
||||
hash_parts = file_hash.split('=', 1)
|
||||
if len(hash_parts) != 2:
|
||||
raise ValueError('Bad hash format: \'{0}\''.format(file_hash))
|
||||
hash_form, hash_value = hash_parts
|
||||
return get_hash(path, hash_form) == hash_value
|
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')

for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)

return get_hash(path, hash_type) == hash_value


def find(path, *args, **kwargs):
||||
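The rewritten check_hash infers the hash type from the digest length when no explicit type is given. A small standalone sketch of that approach; the length table is written out by hand here and assumed to match what HASHES_REVMAP holds in the module:

.. code-block:: python

    import hashlib

    # hex digest length -> hash name (assumed values for common algorithms)
    HASHES_REVMAP = {32: 'md5', 40: 'sha1', 56: 'sha224',
                     64: 'sha256', 96: 'sha384', 128: 'sha512'}

    def check_hash(path, file_hash):
        # Accept 'type:digest', 'type=digest', or a bare digest whose type
        # is inferred from its length.
        for sep in (':', '='):
            if sep in file_hash:
                hash_type, hash_value = file_hash.split(sep, 1)
                break
        else:
            hash_value = file_hash
            hash_type = HASHES_REVMAP.get(len(file_hash))
            if hash_type is None:
                raise ValueError('unrecognized hash length')
        with open(path, 'rb') as fh:
            digest = hashlib.new(hash_type, fh.read()).hexdigest()
        return digest == hash_value
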
@ -3657,7 +3680,7 @@ def apply_template_on_contents(
|
||||
opts=__opts__)['data']
|
||||
if six.PY2:
|
||||
contents = contents.encode('utf-8')
|
||||
elif six.PY3:
|
||||
elif six.PY3 and isinstance(contents, bytes):
|
||||
# bytes -> str
|
||||
contents = contents.decode('utf-8')
|
||||
else:
|
||||
@ -5028,6 +5051,9 @@ def makedirs_(path,
|
||||
'''
|
||||
path = os.path.expanduser(path)
|
||||
|
||||
if mode:
|
||||
mode = salt.utils.normalize_mode(mode)
|
||||
|
||||
# walk up the directory structure until we find the first existing
|
||||
# directory
|
||||
dirname = os.path.normpath(os.path.dirname(path))
|
||||
|
@ -201,7 +201,7 @@ def item(*args, **kwargs):
|
||||
return ret
|
||||
|
||||
|
||||
def setvals(grains, destructive=False):
|
||||
def setvals(grains, destructive=False, refresh=True):
|
||||
'''
|
||||
Set new grains values in the grains config file
|
||||
|
||||
@ -209,6 +209,10 @@ def setvals(grains, destructive=False):
|
||||
If an operation results in a key being removed, delete the key, too.
|
||||
Defaults to False.
|
||||
|
||||
refresh
|
||||
Refresh modules and pillar after adding the new grains.
|
||||
Defaults to True.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -284,12 +288,12 @@ def setvals(grains, destructive=False):
|
||||
log.error(msg.format(fn_))
|
||||
if not __opts__.get('local', False):
|
||||
# Sync the grains
|
||||
__salt__['saltutil.sync_grains']()
|
||||
__salt__['saltutil.sync_grains'](refresh=refresh)
|
||||
# Return the grains we just set to confirm everything was OK
|
||||
return new_grains
|
||||
|
||||
|
||||
def setval(key, val, destructive=False):
|
||||
def setval(key, val, destructive=False, refresh=True):
|
||||
'''
|
||||
Set a grains value in the grains config file
|
||||
|
||||
@ -303,6 +307,10 @@ def setval(key, val, destructive=False):
|
||||
If an operation results in a key being removed, delete the key, too.
|
||||
Defaults to False.
|
||||
|
||||
refresh
|
||||
Refresh modules and pillar after adding the new grain.
|
||||
Defaults to True.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -310,7 +318,7 @@ def setval(key, val, destructive=False):
salt '*' grains.setval key val
salt '*' grains.setval key "{'sub-key': 'val', 'sub-key2': 'val2'}"
'''
return setvals({key: val}, destructive)
return setvals({key: val}, destructive, refresh)


def append(key, val, convert=False, delimiter=DEFAULT_TARGET_DELIM):
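Because the new ``refresh`` flag is forwarded straight to saltutil.sync_grains, several grain updates can be batched with the refresh deferred to the last call. An illustrative snippet using the local caller client (grain names and values are made up):

.. code-block:: python

    import salt.client

    caller = salt.client.Caller()
    caller.cmd('grains.setval', 'role', 'web', refresh=False)
    caller.cmd('grains.setval', 'tier', 'frontend', refresh=False)
    caller.cmd('grains.setval', 'deployed', True)  # refresh defaults to True
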
|
@ -35,6 +35,8 @@ try:
|
||||
except ImportError:
|
||||
HAS_INFLUXDB = False
|
||||
|
||||
import collections
|
||||
import json
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -593,7 +595,7 @@ def get_continuous_query(database, name, **client_args):
|
||||
return {}
|
||||
|
||||
|
||||
def create_continuous_query(database, name, query, **client_args):
|
||||
def create_continuous_query(database, name, query, resample_time=None, coverage_period=None, **client_args):
|
||||
'''
|
||||
Create a continuous query.
|
||||
|
||||
@ -604,17 +606,34 @@ def create_continuous_query(database, name, query, **client_args):
|
||||
name
|
||||
Name of the continuous query to create.
|
||||
|
||||
query:
|
||||
query
|
||||
The continuous query string.
|
||||
|
||||
resample_time : None
|
||||
Duration between continuous query resampling.
|
||||
|
||||
coverage_period : None
|
||||
Duration specifying time period per sample.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *' '''
|
||||
client = _client(**client_args)
|
||||
full_query = 'CREATE CONTINUOUS QUERY {0} ON {1} BEGIN {2} END'
|
||||
query = full_query.format(name, database, query)
|
||||
full_query = 'CREATE CONTINUOUS QUERY {name} ON {database}'
|
||||
if resample_time:
|
||||
full_query += ' RESAMPLE EVERY {resample_time}'
|
||||
if coverage_period:
|
||||
full_query += ' FOR {coverage_period}'
|
||||
full_query += ' BEGIN {query} END'
|
||||
query = full_query.format(
|
||||
name=name,
|
||||
database=database,
|
||||
query=query,
|
||||
resample_time=resample_time,
|
||||
coverage_period=coverage_period
|
||||
)
|
||||
client.query(query)
|
||||
return True
|
||||
|
||||
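
With the new parameters, create_continuous_query assembles an InfluxQL statement with optional RESAMPLE EVERY / FOR clauses. A small sketch of the string being built (database, name and durations are made up):

.. code-block:: python

    def build_cq(name, database, query, resample_time=None, coverage_period=None):
        # Mirrors the string assembly in create_continuous_query above.
        full_query = 'CREATE CONTINUOUS QUERY {name} ON {database}'
        if resample_time:
            full_query += ' RESAMPLE EVERY {resample_time}'
        if coverage_period:
            full_query += ' FOR {coverage_period}'
        full_query += ' BEGIN {query} END'
        return full_query.format(name=name, database=database, query=query,
                                 resample_time=resample_time,
                                 coverage_period=coverage_period)

    print(build_cq('cq_month', 'mydb',
                   'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT '
                   'FROM mydb.a_week./.*/ GROUP BY time(5m), *',
                   resample_time='30m'))
    # CREATE CONTINUOUS QUERY cq_month ON mydb RESAMPLE EVERY 30m BEGIN SELECT ... END
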
@ -641,3 +660,36 @@ def drop_continuous_query(database, name, **client_args):
|
||||
query = 'DROP CONTINUOUS QUERY {0} ON {1}'.format(name, database)
|
||||
client.query(query)
|
||||
return True
|
||||
|
||||
|
||||
def _pull_query_results(resultset):
|
||||
'''
|
||||
Parses a ResultSet returned from InfluxDB into a dictionary of results,
|
||||
grouped by series names and optional JSON-encoded grouping tags.
|
||||
'''
|
||||
_results = collections.defaultdict(lambda: {})
|
||||
for _header, _values in resultset.items():
|
||||
_header, _group_tags = _header
|
||||
if _group_tags:
|
||||
_results[_header][json.dumps(_group_tags)] = [_value for _value in _values]
|
||||
else:
|
||||
_results[_header] = [_value for _value in _values]
|
||||
return dict(sorted(_results.items()))
|
||||
|
||||
|
||||
def query(database, query, **client_args):
|
||||
'''
|
||||
Execute a query.
|
||||
|
||||
database
|
||||
Name of the database to query on.
|
||||
|
||||
query
|
||||
InfluxQL query string.
|
||||
'''
|
||||
client = _client(**client_args)
|
||||
_result = client.query(query, database=database)
|
||||
|
||||
if isinstance(_result, collections.Sequence):
|
||||
return [_pull_query_results(_query_result) for _query_result in _result if _query_result]
|
||||
return [_pull_query_results(_result) if _result else {}]
|
||||
|
@ -107,6 +107,11 @@ def _conf(family='ipv4'):
|
||||
elif __grains__['os_family'] == 'SUSE':
|
||||
# SuSE does not seem to use separate files for IPv4 and IPv6
|
||||
return '/etc/sysconfig/scripts/SuSEfirewall2-custom'
|
||||
elif __grains__['os'] == 'Alpine':
|
||||
if family == 'ipv6':
|
||||
return '/etc/iptables/rules6-save'
|
||||
else:
|
||||
return '/etc/iptables/rules-save'
|
||||
else:
|
||||
raise SaltException('Saving iptables to file is not' +
|
||||
' supported on {0}.'.format(__grains__['os']) +
|
||||
|
@ -156,6 +156,67 @@ def db_remove(name, user=None, password=None, host=None, port=None, authdb=None)
|
||||
return True
|
||||
|
||||
|
||||
def _version(mdb):
|
||||
return mdb.command('buildInfo')['version']
|
||||
|
||||
|
||||
def version(user=None, password=None, host=None, port=None, database='admin', authdb=None):
|
||||
'''
|
||||
Get MongoDB instance version
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' mongodb.version <user> <password> <host> <port> <database>
|
||||
'''
|
||||
conn = _connect(user, password, host, port, authdb=authdb)
|
||||
if not conn:
|
||||
err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port)
|
||||
log.error(err_msg)
|
||||
return (False, err_msg)
|
||||
|
||||
try:
|
||||
mdb = pymongo.database.Database(conn, database)
|
||||
return _version(mdb)
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error(
|
||||
'Listing users failed with error: {0}'.format(
|
||||
str(err)
|
||||
)
|
||||
)
|
||||
return str(err)
|
||||
|
||||
|
||||
def user_find(name, user=None, password=None, host=None, port=None,
|
||||
database='admin', authdb=None):
|
||||
'''
|
||||
Get single user from MongoDB
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' mongodb.user_find <name> <user> <password> <host> <port> <database> <authdb>
|
||||
'''
|
||||
conn = _connect(user, password, host, port, authdb=authdb)
|
||||
if not conn:
|
||||
err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port)
|
||||
log.error(err_msg)
|
||||
return (False, err_msg)
|
||||
|
||||
mdb = pymongo.database.Database(conn, database)
|
||||
try:
|
||||
return mdb.command("usersInfo", name)["users"]
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error(
|
||||
'Listing users failed with error: {0}'.format(
|
||||
str(err)
|
||||
)
|
||||
)
|
||||
return (False, str(err))
|
||||
|
||||
|
||||
def user_list(user=None, password=None, host=None, port=None, database='admin', authdb=None):
|
||||
'''
|
||||
List users of a Mongodb database
|
||||
@ -175,20 +236,20 @@ def user_list(user=None, password=None, host=None, port=None, database='admin',
|
||||
mdb = pymongo.database.Database(conn, database)
|
||||
|
||||
output = []
|
||||
mongodb_version = mdb.eval('db.version()')
|
||||
mongodb_version = _version(mdb)
|
||||
|
||||
if LooseVersion(mongodb_version) >= LooseVersion('2.6'):
|
||||
for user in mdb.eval('db.getUsers()'):
|
||||
output.append([
|
||||
('user', user['user']),
|
||||
('roles', user['roles'])
|
||||
])
|
||||
for user in mdb.command('usersInfo')['users']:
|
||||
output.append(
|
||||
{'user': user['user'],
|
||||
'roles': user['roles']}
|
||||
)
|
||||
else:
|
||||
for user in mdb.system.users.find():
|
||||
output.append([
|
||||
('user', user['user']),
|
||||
('readOnly', user.get('readOnly', 'None'))
|
||||
])
|
||||
output.append(
|
||||
{'user': user['user'],
|
||||
'readOnly': user.get('readOnly', 'None')}
|
||||
)
|
||||
return output
|
||||
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
@ -224,7 +285,7 @@ def user_exists(name, user=None, password=None, host=None, port=None,
|
||||
|
||||
|
||||
def user_create(name, passwd, user=None, password=None, host=None, port=None,
|
||||
database='admin', authdb=None):
|
||||
database='admin', authdb=None, roles=None):
|
||||
'''
|
||||
Create a Mongodb user
|
||||
|
||||
@ -232,16 +293,19 @@ def user_create(name, passwd, user=None, password=None, host=None, port=None,

.. code-block:: bash

salt '*' mongodb.user_create <name> <user> <password> <host> <port> <database>
salt '*' mongodb.user_create <user_name> <user_password> <roles> <user> <password> <host> <port> <database>
'''
conn = _connect(user, password, host, port, authdb=authdb)
if not conn:
return 'Failed to connect to mongo database'

if not roles:
roles = []

try:
log.info('Creating user {0}'.format(name))
mdb = pymongo.database.Database(conn, database)
mdb.add_user(name, passwd)
mdb.add_user(name, passwd, roles=roles)
except pymongo.errors.PyMongoError as err:
log.error(
'Creating database {0} failed with error: {1}'.format(
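user_create now accepts a roles list that is handed to PyMongo's add_user. A hedged usage sketch following the older PyMongo API used by this module (connection details and role names are placeholders):

.. code-block:: python

    import pymongo

    def create_user(conn, database, name, passwd, roles=None):
        # roles defaults to an empty list, matching the diff above
        mdb = pymongo.database.Database(conn, database)
        mdb.add_user(name, passwd, roles=roles or [])

    conn = pymongo.MongoClient('127.0.0.1', 27017)  # placeholder host/port
    create_user(conn, 'admin', 'reporting', 's3cret',
                roles=[{'role': 'read', 'db': 'metrics'}])
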
@ -347,7 +411,7 @@ def user_grant_roles(name, roles, database, user=None, password=None, host=None,
|
||||
try:
|
||||
log.info('Granting roles {0} to user {1}'.format(roles, name))
|
||||
mdb = pymongo.database.Database(conn, database)
|
||||
mdb.eval("db.grantRolesToUser('{0}', {1})".format(name, roles))
|
||||
mdb.command("grantRolesToUser", name, roles=roles)
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error(
|
||||
'Granting roles {0} to user {1} failed with error: {2}'.format(
|
||||
@ -386,7 +450,7 @@ def user_revoke_roles(name, roles, database, user=None, password=None, host=None
|
||||
try:
|
||||
log.info('Revoking roles {0} from user {1}'.format(roles, name))
|
||||
mdb = pymongo.database.Database(conn, database)
|
||||
mdb.eval("db.revokeRolesFromUser('{0}', {1})".format(name, roles))
|
||||
mdb.command("revokeRolesFromUser", name, roles=roles)
|
||||
except pymongo.errors.PyMongoError as err:
|
||||
log.error(
|
||||
'Revoking roles {0} from user {1} failed with error: {2}'.format(
|
||||
|
@ -82,7 +82,7 @@ def _active_mountinfo(ret):
|
||||
'root': comps[3],
|
||||
'opts': _resolve_user_group_names(comps[5].split(',')),
|
||||
'fstype': comps[_sep + 1],
|
||||
'device': device_name,
|
||||
'device': device_name.replace('\\040', '\\ '),
|
||||
'alt_device': _list.get(comps[4], None),
|
||||
'superopts': _resolve_user_group_names(comps[_sep + 3].split(',')),
|
||||
'device_uuid': device_uuid,
|
||||
@ -539,7 +539,7 @@ def set_fstab(
|
||||
# preserve arguments for updating
|
||||
entry_args = {
|
||||
'name': name,
|
||||
'device': device,
|
||||
'device': device.replace('\\ ', '\\040'),
|
||||
'fstype': fstype,
|
||||
'opts': opts,
|
||||
'dump': dump,
|
||||
|
@ -621,19 +621,22 @@ def query(database, query, **connection_args):
|
||||
orig_conv = MySQLdb.converters.conversions
|
||||
conv_iter = iter(orig_conv)
|
||||
conv = dict(zip(conv_iter, [str] * len(orig_conv)))
|
||||
|
||||
# some converters are lists, do not break theses
|
||||
conv[FIELD_TYPE.BLOB] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.STRING] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.VAR_STRING] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.VARCHAR] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv_mysqldb = {'MYSQLDB': True}
|
||||
if conv_mysqldb.get(MySQLdb.__package__.upper()):
|
||||
conv[FIELD_TYPE.BLOB] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.STRING] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.VAR_STRING] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
conv[FIELD_TYPE.VARCHAR] = [
|
||||
(FLAG.BINARY, str),
|
||||
]
|
||||
|
||||
connection_args.update({'connection_db': database, 'connection_conv': conv})
|
||||
dbc = _connect(**connection_args)
|
||||
|
109
salt/modules/openscap.py
Normal file
109
salt/modules/openscap.py
Normal file
@ -0,0 +1,109 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
import tempfile
|
||||
import shlex
|
||||
import shutil
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from salt.client import Caller
|
||||
|
||||
|
||||
ArgumentParser = object
|
||||
|
||||
try:
|
||||
import argparse # pylint: disable=minimum-python-version
|
||||
ArgumentParser = argparse.ArgumentParser
|
||||
HAS_ARGPARSE = True
|
||||
except ImportError: # python 2.6
|
||||
HAS_ARGPARSE = False
|
||||
|
||||
|
||||
_XCCDF_MAP = {
|
||||
'eval': {
|
||||
'parser_arguments': [
|
||||
(('--profile',), {'required': True}),
|
||||
],
|
||||
'cmd_pattern': (
|
||||
"oscap xccdf eval "
|
||||
"--oval-results --results results.xml --report report.html "
|
||||
"--profile {0} {1}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
return HAS_ARGPARSE, 'argparse module is required.'
|
||||
|
||||
|
||||
class _ArgumentParser(ArgumentParser):
|
||||
|
||||
def __init__(self, action=None, *args, **kwargs):
|
||||
super(_ArgumentParser, self).__init__(*args, prog='oscap', **kwargs)
|
||||
self.add_argument('action', choices=['eval'])
|
||||
add_arg = None
|
||||
for params, kwparams in _XCCDF_MAP['eval']['parser_arguments']:
|
||||
self.add_argument(*params, **kwparams)
|
||||
|
||||
def error(self, message, *args, **kwargs):
|
||||
raise Exception(message)
|
||||
|
||||
|
||||
_OSCAP_EXIT_CODES_MAP = {
|
||||
0: True, # all rules pass
|
||||
1: False, # there is an error during evaluation
|
||||
2: True # there is at least one rule with either fail or unknown result
|
||||
}
|
||||
|
||||
|
||||
def xccdf(params):
|
||||
'''
|
||||
Run ``oscap xccdf`` commands on minions.
|
||||
It uses cp.push_dir to upload the generated files to the salt master
|
||||
in the master's minion files cachedir
|
||||
(defaults to ``/var/cache/salt/master/minions/minion-id/files``)
|
||||
|
||||
It needs ``file_recv`` set to ``True`` in the master configuration file.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' openscap.xccdf "eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml"
|
||||
'''
|
||||
params = shlex.split(params)
|
||||
policy = params[-1]
|
||||
|
||||
success = True
|
||||
error = None
|
||||
upload_dir = None
|
||||
action = None
|
||||
returncode = None
|
||||
|
||||
try:
|
||||
parser = _ArgumentParser()
|
||||
action = parser.parse_known_args(params)[0].action
|
||||
args, argv = _ArgumentParser(action=action).parse_known_args(args=params)
|
||||
except Exception as err:
|
||||
success = False
|
||||
error = str(err)
|
||||
|
||||
if success:
|
||||
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
|
||||
tempdir = tempfile.mkdtemp()
|
||||
proc = Popen(
|
||||
shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
|
||||
(stdoutdata, error) = proc.communicate()
|
||||
success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
|
||||
returncode = proc.returncode
|
||||
if success:
|
||||
caller = Caller()
|
||||
caller.cmd('cp.push_dir', tempdir)
|
||||
shutil.rmtree(tempdir, ignore_errors=True)
|
||||
upload_dir = tempdir
|
||||
|
||||
return dict(
|
||||
success=success,
|
||||
upload_dir=upload_dir,
|
||||
error=error,
|
||||
returncode=returncode)
|
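
The new openscap module maps oscap exit codes onto a success flag (exit code 2 still counts as success because the evaluation itself completed) and reports the directory that was pushed to the master. An illustrative way to consume that return value (minion id and policy path are placeholders):

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    result = local.cmd(
        'minion-id',  # placeholder target
        'openscap.xccdf',
        ['eval --profile Default /usr/share/openscap/scap-yast2sec-xccdf.xml'],
    )['minion-id']

    if result['success']:
        # results.xml / report.html were pushed via cp.push_dir from the
        # temporary directory reported as upload_dir.
        print('report uploaded from', result['upload_dir'])
    else:
        print('scan failed (rc {0}): {1}'.format(result['returncode'],
                                                 result['error']))
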
@ -38,7 +38,7 @@ from salt.exceptions import (
|
||||
CommandExecutionError, MinionError, SaltInvocationError
|
||||
)
|
||||
|
||||
REPO_REGEXP = r'^#?\s*(src|src/gz)\s+[^\s<>]+\s+[^\s<>]+'
|
||||
REPO_REGEXP = r'^#?\s*(src|src/gz)\s+([^\s<>]+|"[^<>]+")\s+[^\s<>]+'
|
||||
OPKG_CONFDIR = '/etc/opkg'
|
||||
ATTR_MAP = {
|
||||
'Architecture': 'arch',
|
||||
@ -993,7 +993,7 @@ def list_repos():
|
||||
line = line[1:]
|
||||
else:
|
||||
repo['enabled'] = True
|
||||
cols = line.strip().split()
|
||||
cols = salt.utils.shlex_split(line.strip())
|
||||
if cols[0] in 'src':
|
||||
repo['compressed'] = False
|
||||
else:
|
||||
@ -1038,7 +1038,7 @@ def _del_repo_from_file(alias, filepath):
|
||||
if regex.search(line):
|
||||
if line.startswith('#'):
|
||||
line = line[1:]
|
||||
cols = line.strip().split()
|
||||
cols = salt.utils.shlex_split(line.strip())
|
||||
if alias != cols[1]:
|
||||
output.append(line)
|
||||
with open(filepath, 'w') as fhandle:
|
||||
@ -1051,7 +1051,11 @@ def _add_new_repo(alias, uri, compressed, enabled=True):
'''
repostr = '# ' if not enabled else ''
repostr += 'src/gz ' if compressed else 'src '
repostr += alias + ' ' + uri + '\n'
if ' ' in alias:
repostr += '"' + alias + '" '
else:
repostr += alias + ' '
repostr += uri + '\n'
conffile = os.path.join(OPKG_CONFDIR, alias + '.conf')

with open(conffile, 'a') as fhandle:
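Aliases containing spaces are now written quoted and read back with shlex-style splitting, so a feed name such as ``my feed`` survives the round trip. A short sketch of both sides (feed name and URI are made up):

.. code-block:: python

    import shlex

    def format_repo_line(alias, uri, compressed=False, enabled=True):
        # Quote the alias when it contains whitespace, as in the diff above.
        repostr = '# ' if not enabled else ''
        repostr += 'src/gz ' if compressed else 'src '
        repostr += '"{0}" '.format(alias) if ' ' in alias else alias + ' '
        return repostr + uri

    line = format_repo_line('my feed', 'http://example.com/packages')
    print(line)               # src "my feed" http://example.com/packages
    print(shlex.split(line))  # ['src', 'my feed', 'http://example.com/packages']
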
@ -1065,7 +1069,8 @@ def _mod_repo_in_file(alias, repostr, filepath):
|
||||
with open(filepath) as fhandle:
|
||||
output = []
|
||||
for line in fhandle:
|
||||
if alias not in line:
|
||||
cols = salt.utils.shlex_split(line.strip())
|
||||
if alias not in cols:
|
||||
output.append(line)
|
||||
else:
|
||||
output.append(repostr + '\n')
|
||||
@ -1161,7 +1166,11 @@ def mod_repo(alias, **kwargs):
|
||||
repostr += 'src/gz ' if kwargs['compressed'] else 'src'
|
||||
else:
|
||||
repostr += 'src/gz' if source['compressed'] else 'src'
|
||||
repostr += ' {0}'.format(kwargs['alias'] if 'alias' in kwargs else alias)
|
||||
repo_alias = kwargs['alias'] if 'alias' in kwargs else alias
|
||||
if ' ' in repo_alias:
|
||||
repostr += ' "{0}"'.format(repo_alias)
|
||||
else:
|
||||
repostr += ' {0}'.format(repo_alias)
|
||||
repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri'])
|
||||
_mod_repo_in_file(alias, repostr, source['file'])
|
||||
elif uri and source['uri'] == uri:
|
||||
|
@ -248,6 +248,8 @@ def hdel(key, *fields, **options):
|
||||
'''
|
||||
Delete one of more hash fields.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -266,6 +268,8 @@ def hexists(key, field, host=None, port=None, db=None, password=None):
|
||||
'''
|
||||
Determine if a hash fields exists.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -308,6 +312,8 @@ def hincrby(key, field, increment=1, host=None, port=None, db=None, password=Non
|
||||
'''
|
||||
Increment the integer value of a hash field by the given number.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -322,6 +328,8 @@ def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, passw
|
||||
'''
|
||||
Increment the float value of a hash field by the given number.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -336,6 +344,8 @@ def hlen(key, host=None, port=None, db=None, password=None):
|
||||
'''
|
||||
Returns number of fields of a hash.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -350,6 +360,8 @@ def hmget(key, *fields, **options):
|
||||
'''
|
||||
Returns the values of all the given hash fields.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -368,6 +380,8 @@ def hmset(key, **fieldsvals):
|
||||
'''
|
||||
Sets multiple hash fields to multiple values.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -386,6 +400,8 @@ def hset(key, field, value, host=None, port=None, db=None, password=None):
|
||||
'''
|
||||
Set the value of a hash field.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -400,6 +416,8 @@ def hsetnx(key, field, value, host=None, port=None, db=None, password=None):
|
||||
'''
|
||||
Set the value of a hash field only if the field does not exist.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -414,6 +432,8 @@ def hvals(key, host=None, port=None, db=None, password=None):
|
||||
'''
|
||||
Return all the values in a hash.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -428,6 +448,8 @@ def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None,
|
||||
'''
|
||||
Incrementally iterate hash fields and associated values.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -359,8 +359,9 @@ def build_schedule_item(name, **kwargs):
|
||||
else:
|
||||
schedule[name]['splay'] = kwargs['splay']
|
||||
|
||||
for item in ['range', 'when', 'once', 'once_fmt', 'cron', 'returner',
|
||||
'return_config', 'return_kwargs', 'until', 'run_on_start']:
|
||||
for item in ['range', 'when', 'once', 'once_fmt', 'cron',
|
||||
'returner', 'after', 'return_config', 'return_kwargs',
|
||||
'until', 'run_on_start']:
|
||||
if item in kwargs:
|
||||
schedule[name][item] = kwargs[item]
|
||||
|
||||
|
@ -56,3 +56,39 @@ def delete(uri):
|
||||
salt '*' sdb.delete sdb://mymemcached/foo
|
||||
'''
|
||||
return salt.utils.sdb.sdb_delete(uri, __opts__, __utils__)
|
||||
|
||||
|
||||
def get_or_set_hash(uri,
|
||||
length=8,
|
||||
chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'):
|
||||
'''
|
||||
Perform a one-time generation of a hash and write it to sdb.
|
||||
If that value has already been set return the value instead.
|
||||
|
||||
This is useful for generating passwords or keys that are specific to
|
||||
multiple minions that need to be stored somewhere centrally.
|
||||
|
||||
State Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
some_mysql_user:
|
||||
mysql_user:
|
||||
- present
|
||||
- host: localhost
|
||||
- password: '{{ salt['sdb.get_or_set_hash']('some_mysql_user_pass') }}'
|
||||
|
||||
CLI Example:

.. code-block:: bash

salt '*' sdb.get_or_set_hash 'SECRET_KEY' 50

.. warning::

This function could return strings which may contain characters which are reserved
as directives by the YAML parser, such as strings beginning with ``%``. To avoid
issues when using the output of this function in an SLS file containing YAML+Jinja,
surround the call with single quotes.
'''
return salt.utils.sdb.sdb_get_or_set_hash(uri, __opts__, length, chars, __utils__)
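get_or_set_hash reads a value from SDB and only generates and stores a random string when nothing is set yet, so every later call returns the same secret. A minimal sketch of that idea against a plain dict standing in for an SDB backend (the real helper lives in salt.utils.sdb):

.. code-block:: python

    import random
    import string

    def get_or_set_hash(store, key, length=8,
                        chars=string.ascii_lowercase + string.digits):
        # Return the existing secret if present; otherwise generate one once
        # and persist it so all consumers see the same value afterwards.
        value = store.get(key)
        if not value:
            value = ''.join(random.SystemRandom().choice(chars)
                            for _ in range(length))
            store[key] = value
        return value

    secrets = {}
    print(get_or_set_hash(secrets, 'some_mysql_user_pass', length=20))
    print(get_or_set_hash(secrets, 'some_mysql_user_pass', length=20))  # same value
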
@ -53,7 +53,7 @@ def __virtual__():
|
||||
if __grains__['kernel'] != 'Linux':
|
||||
return (False, 'Non Linux OSes are not supported')
|
||||
# SUSE >=12.0 uses systemd
|
||||
if __grains__.get('os_family', '') == 'SUSE':
|
||||
if __grains__.get('os_family', '') == 'Suse':
|
||||
try:
|
||||
# osrelease might be in decimal format (e.g. "12.1"), or for
|
||||
# SLES might include service pack (e.g. "11 SP3"), so split on
|
||||
|
@ -98,17 +98,19 @@ def _validate_core_properties(properties):
|
||||
props_string = ""
|
||||
|
||||
for prop_name, prop_value in six.iteritems(properties):
|
||||
if prop_name in STRING_PROPS_LIST:
|
||||
if not isinstance(prop_value, six.string_types):
|
||||
raise ValueError('In option "properties", core property "'+prop_name+'" value must be a string')
|
||||
|
||||
props_string = props_string+"&"+prop_name+"="+prop_value
|
||||
|
||||
elif prop_name in BOOL_PROPS_LIST:
|
||||
if prop_name in BOOL_PROPS_LIST:
|
||||
if not isinstance(prop_value, bool):
|
||||
raise ValueError('Option "'+prop_name+'" value must be an boolean')
|
||||
|
||||
props_string = props_string+"&property."+prop_name+"="+("true" if prop_value else "false")
|
||||
elif prop_name in STRING_PROPS_LIST:
|
||||
if not isinstance(prop_value, six.string_types):
|
||||
raise ValueError('In option "properties", core property "'+prop_name+'" value must be a string')
|
||||
|
||||
props_string = props_string+"&property."+prop_name+"="+prop_value
|
||||
|
||||
else:
|
||||
props_string = props_string+"&property."+str(prop_name)+"="+str(prop_value)
|
||||
|
||||
return props_string
|
||||
|
||||
@ -229,7 +231,10 @@ def alias_get_collections(alias_name, **kwargs):
|
||||
if not isinstance(alias_name, six.string_types):
|
||||
raise ValueError('Alias name must be a string')
|
||||
|
||||
collection_aliases = [(k_v[0], k_v[1]["aliases"]) for k_v in six.iteritems(cluster_status(**kwargs)["collections"])]
|
||||
collection_aliases = [
|
||||
(k_v[0], k_v[1]["aliases"])
|
||||
for k_v in six.iteritems(cluster_status(**kwargs)["collections"])
|
||||
if "aliases" in k_v[1]]
|
||||
aliases = [k_v1[0] for k_v1 in [k_v for k_v in collection_aliases if alias_name in k_v[1]]]
|
||||
|
||||
return aliases
|
||||
|
@ -4,7 +4,6 @@ Module for returning various status data about a minion.
|
||||
These data can be useful for compiling into stats later.
|
||||
'''
|
||||
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import datetime
|
||||
@ -41,6 +40,16 @@ __func_alias__ = {
|
||||
}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Not all functions supported by Windows
|
||||
'''
|
||||
if salt.utils.is_windows():
|
||||
return False, 'Windows platform is not supported by this module'
|
||||
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def _number(text):
|
||||
'''
|
||||
Convert a string to a number.
|
||||
@ -67,8 +76,6 @@ def procs():
|
||||
salt '*' status.procs
|
||||
'''
|
||||
# Get the user, pid and cmd
|
||||
if salt.utils.is_windows():
|
||||
raise CommandExecutionError('This platform is not supported')
|
||||
ret = {}
|
||||
uind = 0
|
||||
pind = 0
|
||||
@ -117,8 +124,6 @@ def custom():
|
||||
|
||||
salt '*' status.custom
|
||||
'''
|
||||
if salt.utils.is_windows():
|
||||
raise CommandExecutionError('This platform is not supported')
|
||||
ret = {}
|
||||
conf = __salt__['config.dot_vals']('status')
|
||||
for key, val in six.iteritems(conf):
|
||||
@ -587,10 +592,6 @@ def diskusage(*args):
|
||||
salt '*' status.diskusage ext? # usage for ext[234] filesystems
|
||||
salt '*' status.diskusage / ext? # usage for / and all ext filesystems
|
||||
'''
|
||||
|
||||
if salt.utils.is_windows():
|
||||
raise CommandExecutionError('This platform is not supported')
|
||||
|
||||
selected = set()
|
||||
fstypes = set()
|
||||
if not args:
|
||||
@ -929,8 +930,6 @@ def w(): # pylint: disable=C0103
|
||||
|
||||
salt '*' status.w
|
||||
'''
|
||||
if salt.utils.is_windows():
|
||||
raise CommandExecutionError('This platform is not supported')
|
||||
user_list = []
|
||||
users = __salt__['cmd.run']('w -h').splitlines()
|
||||
for row in users:
|
||||
|
@ -421,8 +421,10 @@ def reload_modules():
|
||||
|
||||
salt '*' sys.reload_modules
|
||||
'''
|
||||
# This is handled inside the minion.py file, the function is caught before
|
||||
# it ever gets here
|
||||
# This function is actually handled inside the minion.py file, the function
|
||||
# is caught before it ever gets here. Therefore, the docstring above is
|
||||
# only for the online docs, and ANY CHANGES made to it must also be made in
|
||||
# each of the gen_modules() funcs in minion.py.
|
||||
return True
|
||||
|
||||
|
||||
|
@ -11,6 +11,7 @@ import os.path
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.ext.six as six
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
|
||||
|
||||
@ -504,10 +505,14 @@ def get_computer_desc():
|
||||
match = pattern.match(line)
|
||||
if match:
|
||||
# get rid of whitespace then strip off quotes
|
||||
desc = _strip_quotes(match.group(1).strip()).replace('\\"', '"')
|
||||
desc = _strip_quotes(match.group(1).strip())
|
||||
# no break so we get the last occurrence
|
||||
except IOError:
|
||||
return False
|
||||
if six.PY3:
|
||||
desc = desc.replace('\\"', '"').decode('unicode_escape')
|
||||
else:
|
||||
desc = desc.replace('\\"', '"').decode('string_escape')
|
||||
return desc
|
||||
|
||||
|
||||
@ -526,9 +531,13 @@ def set_computer_desc(desc):
|
||||
|
||||
salt '*' system.set_computer_desc "Michael's laptop"
|
||||
'''
|
||||
if six.PY3:
|
||||
desc = desc.encode('unicode_escape').replace('"', '\\"')
|
||||
else:
|
||||
desc = desc.encode('string_escape').replace('"', '\\"')
|
||||
hostname_cmd = salt.utils.which('hostnamectl')
|
||||
if hostname_cmd:
|
||||
result = __salt__['cmd.retcode']('{0} set-hostname --pretty {1}'.format(hostname_cmd, desc))
|
||||
result = __salt__['cmd.retcode']('{0} set-hostname --pretty "{1}"'.format(hostname_cmd, desc))
|
||||
return True if result == 0 else False
|
||||
|
||||
if not os.path.isfile('/etc/machine-info'):
|
||||
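A hedged illustration of the escaping round trip that ``set_computer_desc`` and ``get_computer_desc`` aim for: newlines and quotes are stored escaped in ``/etc/machine-info`` and unescaped when read back. The sketch below uses ``codecs`` because plain ``str`` objects have no ``.decode()`` on Python 3; it is not the module's actual code path.

.. code-block:: python

    import codecs

    # Example description; any newline or double quote must survive the
    # write/read cycle through /etc/machine-info.
    pretty = 'Build box\nowned by "ops"'
    stored = codecs.encode(pretty, 'unicode_escape').decode('ascii').replace('"', '\\"')
    restored = codecs.decode(stored.replace('\\"', '"'), 'unicode_escape')
    assert restored == pretty
    print(stored)  # Build box\nowned by \"ops\"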
@ -537,7 +546,7 @@ def set_computer_desc(desc):
|
||||
|
||||
is_pretty_hostname_found = False
|
||||
pattern = re.compile(r'^\s*PRETTY_HOSTNAME=(.*)$')
|
||||
new_line = 'PRETTY_HOSTNAME="{0}"'.format(desc.replace('"', '\\"'))
|
||||
new_line = 'PRETTY_HOSTNAME="{0}"'.format(desc)
|
||||
try:
|
||||
with salt.utils.fopen('/etc/machine-info', 'r+') as mach_info:
|
||||
lines = mach_info.readlines()
|
||||
|
@ -274,7 +274,7 @@ def _runlevel():
|
||||
return ret
|
||||
|
||||
|
||||
def _systemctl_cmd(action, name=None, systemd_scope=False):
|
||||
def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False):
|
||||
'''
|
||||
Build a systemctl command line. Treat unit names without one
|
||||
of the valid suffixes as a service.
|
||||
@ -285,6 +285,8 @@ def _systemctl_cmd(action, name=None, systemd_scope=False):
|
||||
and __salt__['config.get']('systemd.scope', True):
|
||||
ret.extend(['systemd-run', '--scope'])
|
||||
ret.append('systemctl')
|
||||
if no_block:
|
||||
ret.append('--no-block')
|
||||
if isinstance(action, six.string_types):
|
||||
action = shlex.split(action)
|
||||
ret.extend(action)
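A rough sketch of the command list this helper produces when a transient scope is used and ``no_block=True``. The unit-suffix handling is simplified and ``salt-minion`` is just an example unit name; the real helper also checks systemd>=205 and the ``systemd.scope`` config value.

.. code-block:: python

    import shlex

    def sketch_systemctl_cmd(action, name, use_scope=True, no_block=False):
        # Simplified rebuild of the assembly above.
        ret = []
        if use_scope:
            ret.extend(['systemd-run', '--scope'])
        ret.append('systemctl')
        if no_block:
            ret.append('--no-block')
        if isinstance(action, str):
            action = shlex.split(action)
        ret.extend(action)
        if name is not None:
            # crude stand-in for the "append .service when no valid suffix" rule
            ret.append(name if '.' in name else name + '.service')
        return ret

    print(sketch_systemctl_cmd('start', 'salt-minion', no_block=True))
    # ['systemd-run', '--scope', 'systemctl', '--no-block', 'start', 'salt-minion.service']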
|
||||
@ -731,7 +733,7 @@ def masked(name, runtime=False):
|
||||
return False
|
||||
|
||||
|
||||
def start(name):
|
||||
def start(name, no_block=False):
|
||||
'''
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
||||
@ -746,6 +748,11 @@ def start(name):
|
||||
|
||||
Start the specified service with systemd
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to start the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -755,11 +762,11 @@ def start(name):
|
||||
_check_for_unit_changes(name)
|
||||
unmask(name)
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('start', name, systemd_scope=True),
|
||||
_systemctl_cmd('start', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False) == 0
|
||||
|
||||
|
||||
def stop(name):
|
||||
def stop(name, no_block=False):
|
||||
'''
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
||||
@ -774,6 +781,11 @@ def stop(name):
|
||||
|
||||
Stop the specified service with systemd
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to stop the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -782,11 +794,11 @@ def stop(name):
|
||||
'''
|
||||
_check_for_unit_changes(name)
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('stop', name, systemd_scope=True),
|
||||
_systemctl_cmd('stop', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False) == 0
|
||||
|
||||
|
||||
def restart(name):
|
||||
def restart(name, no_block=False):
|
||||
'''
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
||||
@ -801,6 +813,11 @@ def restart(name):
|
||||
|
||||
Restart the specified service with systemd
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to restart the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -810,11 +827,11 @@ def restart(name):
|
||||
_check_for_unit_changes(name)
|
||||
unmask(name)
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('restart', name, systemd_scope=True),
|
||||
_systemctl_cmd('restart', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False) == 0
|
||||
|
||||
|
||||
def reload_(name):
|
||||
def reload_(name, no_block=False):
|
||||
'''
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
||||
@ -829,6 +846,11 @@ def reload_(name):
|
||||
|
||||
Reload the specified service with systemd
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to reload the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -838,11 +860,11 @@ def reload_(name):
|
||||
_check_for_unit_changes(name)
|
||||
unmask(name)
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('reload', name, systemd_scope=True),
|
||||
_systemctl_cmd('reload', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False) == 0
|
||||
|
||||
|
||||
def force_reload(name):
|
||||
def force_reload(name, no_block=True):
|
||||
'''
|
||||
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
|
||||
On minions running systemd>=205, `systemd-run(1)`_ is now used to
|
||||
@ -859,6 +881,11 @@ def force_reload(name):
|
||||
|
||||
Force-reload the specified service with systemd
|
||||
|
||||
no_block : True
|
||||
Set to ``True`` to force-reload the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -868,7 +895,8 @@ def force_reload(name):
|
||||
_check_for_unit_changes(name)
|
||||
unmask(name)
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('force-reload', name, systemd_scope=True),
|
||||
_systemctl_cmd('force-reload', name,
|
||||
systemd_scope=True, no_block=no_block),
|
||||
python_shell=False) == 0
|
||||
|
||||
|
||||
@ -908,12 +936,18 @@ def enable(name, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
Enable the named service to start when the system boots
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to enable the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.enable <service name>
|
||||
'''
|
||||
no_block = kwargs.pop('no_block', False)
|
||||
_check_for_unit_changes(name)
|
||||
unmask(name)
|
||||
if name in _get_sysv_services():
|
||||
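Because ``enable()`` and ``disable()`` keep ``**kwargs`` for cross-platform API consistency, the new option is read and removed from ``kwargs`` rather than added as a named parameter. A tiny sketch of that pattern (``runlevel`` is an arbitrary, ignored extra kwarg used only for illustration):

.. code-block:: python

    def enable_sketch(name, **kwargs):
        # Pop the option we care about; anything left in kwargs is ignored
        # on this platform but tolerated for API compatibility.
        no_block = kwargs.pop('no_block', False)
        return name, no_block

    print(enable_sketch('nginx', no_block=True, runlevel=3))  # ('nginx', True)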
@ -930,7 +964,7 @@ def enable(name, **kwargs): # pylint: disable=unused-argument
|
||||
python_shell=False,
|
||||
ignore_retcode=True) == 0
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('enable', name, systemd_scope=True),
|
||||
_systemctl_cmd('enable', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False,
|
||||
ignore_retcode=True) == 0
|
||||
|
||||
@ -952,12 +986,18 @@ def disable(name, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
Disable the named service so that it does not start when the system boots
|
||||
|
||||
no_block : False
|
||||
Set to ``True`` to disable the service using ``--no-block``.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.disable <service name>
|
||||
'''
|
||||
no_block = kwargs.pop('no_block', False)
|
||||
_check_for_unit_changes(name)
|
||||
if name in _get_sysv_services():
|
||||
cmd = []
|
||||
@ -973,9 +1013,9 @@ def disable(name, **kwargs): # pylint: disable=unused-argument
|
||||
python_shell=False,
|
||||
ignore_retcode=True) == 0
|
||||
return __salt__['cmd.retcode'](
|
||||
_systemctl_cmd('disable', name, systemd_scope=True),
|
||||
_systemctl_cmd('disable', name, systemd_scope=True, no_block=no_block),
|
||||
python_shell=False,
|
||||
ignore_recode=True) == 0
|
||||
ignore_retcode=True) == 0
|
||||
|
||||
|
||||
# The unused kwargs argument is required to maintain consistency with the API
|
||||
|
@ -107,6 +107,8 @@ def _get_zone_etc_localtime():
|
||||
return get_zonecode()
|
||||
raise CommandExecutionError(tzfile + ' does not exist')
|
||||
elif exc.errno == errno.EINVAL:
|
||||
if 'FreeBSD' in __grains__['os_family']:
|
||||
return get_zonecode()
|
||||
log.warning(
|
||||
tzfile + ' is not a symbolic link, attempting to match ' +
|
||||
tzfile + ' to zoneinfo files'
|
||||
|
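The new branch above exists because on some platforms ``/etc/localtime`` is a regular file rather than a symlink, so ``os.readlink()`` fails with ``EINVAL`` instead of ``ENOENT``. A minimal sketch of that errno-based branching (paths and messages are illustrative):

.. code-block:: python

    import errno
    import os

    tzfile = '/etc/localtime'
    try:
        print(os.readlink(tzfile))
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            print(tzfile + ' does not exist')
        elif exc.errno == errno.EINVAL:
            # e.g. FreeBSD, where /etc/localtime is a plain file
            print(tzfile + ' is not a symbolic link')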
@ -45,9 +45,13 @@ def list_():
|
||||
'''
|
||||
|
||||
result = __salt__['cmd.run']('tuned-adm list').splitlines()
|
||||
# Remove "Available profiles:"
|
||||
result.pop(0)
|
||||
# Remove "Current active profile:.*"
|
||||
result.pop()
|
||||
result = [i.lstrip('- ') for i in result]
|
||||
# Output can be : " - <profile name> - <description>" (v2.7.1)
|
||||
# or " - <profile name> " (v2.4.1)
|
||||
result = [i.split('-')[1].strip() for i in result]
|
||||
return result
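To see why the new ``split('-')[1]`` form covers both output layouts, here is the same parsing applied to two invented sample lines:

.. code-block:: python

    # Hypothetical lines as emitted by different tuned-adm versions.
    old_style = ' - balanced '                            # v2.4.1: name only
    new_style = ' - balanced - General purpose profile'   # v2.7.1: name and description

    for line in (old_style, new_style):
        print(line.split('-')[1].strip())  # 'balanced' in both cases

Note that a profile name which itself contains a hyphen (for example ``virtual-guest``) would be truncated to its first segment by this approach.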
|
||||
|
||||
|
||||
|
@ -38,8 +38,25 @@ Functions to interact with Hashicorp Vault.
|
||||
Grains are also available, for example like this:
|
||||
``my-policies/{grains[os]}``
|
||||
|
||||
Optional. If policies is not configured, saltstack/minions and
|
||||
saltstack/{minion} are used as defaults.
|
||||
If a template contains a grain which evaluates to a list, it will be
|
||||
expanded into multiple policies. For example, given the template
|
||||
``saltstack/by-role/{grains[roles]}``, and a minion having these grains:
|
||||
|
||||
.. code-block: yaml
|
||||
|
||||
grains:
|
||||
roles:
|
||||
- web
|
||||
- database
|
||||
|
||||
The minion will have the policies ``saltstack/by-role/web`` and
|
||||
``saltstack/by-role/database``. Note however that list members which do
|
||||
not have simple string representations, such as dictionaries or objects,
|
||||
do not work and will throw an exception. Strings and numbers are
|
||||
examples of types which work well.
|
||||
|
||||
Optional. If policies is not configured, ``saltstack/minions`` and
|
||||
``saltstack/{minion}`` are used as defaults.
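A small sketch of the expansion described above, using ``str.format`` against a grains-like mapping. The actual expansion happens inside the master-side Vault integration; this only illustrates the resulting policy names for the example grains:

.. code-block:: python

    template = 'saltstack/by-role/{grains[roles]}'
    grains = {'roles': ['web', 'database']}

    # Expand the template once per list member when the referenced grain is a list.
    policies = [template.format(grains={'roles': role}) for role in grains['roles']]
    print(policies)  # ['saltstack/by-role/web', 'saltstack/by-role/database']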
|
||||
|
||||
|
||||
Add this segment to the master configuration file, or
|
||||
|
@ -175,7 +175,7 @@ import salt.utils.vmware
|
||||
import salt.utils.http
|
||||
from salt.utils import dictupdate
|
||||
from salt.exceptions import CommandExecutionError, VMwareSaltError
|
||||
from salt.utils.decorators import depends
|
||||
from salt.utils.decorators import depends, ignores_kwargs
|
||||
from salt.utils import clean_kwargs
|
||||
|
||||
# Import Third Party Libs
|
||||
@ -377,7 +377,7 @@ def disconnect(service_instance):
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None):
|
||||
def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Run an ESXCLI command directly on the host or list of hosts.
|
||||
|
||||
@ -408,6 +408,9 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -428,7 +431,7 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({esxi_host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
@ -436,7 +439,8 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd_str,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
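The remaining hunks in this module repeat the same change: an optional ``credstore`` argument is threaded down to the esxcli helper for every host (or every ESXi host behind a vCenter). A hedged sketch of that fan-out; the helper below is a stand-in with an invented command layout, not the real ``salt.utils.vmware.esxcli`` implementation:

.. code-block:: python

    def fake_esxcli(host, username, cmd, esxi_host=None, credstore=None):
        # Invented layout; authentication details are deliberately omitted.
        extra = ' --credstore {0}'.format(credstore) if credstore else ''
        return 'esxcli -s {0} -u {1}{2} {3}'.format(esxi_host or host, username, extra, cmd)

    def run_on_hosts(cmd_str, host, username, esxi_hosts=None, credstore=None):
        ret = {}
        # One call per ESXi host when a vCenter is the entry point,
        # otherwise a single call against the host itself.
        for target in (esxi_hosts or [None]):
            ret[target or host] = fake_esxcli(host, username, cmd_str,
                                              esxi_host=target, credstore=credstore)
        return ret

    print(run_on_hosts('system syslog reload', 'vcenter01.example.com', 'root',
                       esxi_hosts=['esxi01'],
                       credstore='~/.vmware/credstore/vicredentials.xml'))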
@ -446,7 +450,7 @@ def esxcli_cmd(cmd_str, host=None, username=None, password=None, protocol=None,
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def get_coredump_network_config(host, username, password, protocol=None, port=None, esxi_hosts=None):
|
||||
def get_coredump_network_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Retrieve information on ESXi or vCenter network dump collection and
|
||||
format it into a dictionary.
|
||||
@ -472,6 +476,9 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: A dictionary with the network configuration, or, if getting
|
||||
the network config failed, an error message retrieved from the
|
||||
standard cmd.run_all dictionary, per host.
|
||||
@ -497,7 +504,7 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({esxi_host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
@ -506,7 +513,8 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
@ -518,7 +526,7 @@ def get_coredump_network_config(host, username, password, protocol=None, port=No
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def coredump_network_enable(host, username, password, enabled, protocol=None, port=None, esxi_hosts=None):
|
||||
def coredump_network_enable(host, username, password, enabled, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Enable or disable ESXi core dump collection. Returns ``True`` if coredump is enabled
|
||||
and returns ``False`` if core dump is not enabled. If there was an error, the error
|
||||
@ -548,6 +556,9 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -573,7 +584,7 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({esxi_host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
@ -582,7 +593,8 @@ def coredump_network_enable(host, username, password, enabled, protocol=None, po
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({host: {'Error': response.get('stdout')}})
|
||||
else:
|
||||
@ -600,7 +612,8 @@ def set_coredump_network_config(host,
|
||||
port=None,
|
||||
host_vnic='vmk0',
|
||||
dump_port=6500,
|
||||
esxi_hosts=None):
|
||||
esxi_hosts=None,
|
||||
credstore=None):
|
||||
'''
|
||||
|
||||
Set the network parameters for a network coredump collection.
|
||||
@ -637,6 +650,9 @@ def set_coredump_network_config(host,
|
||||
dump_port
|
||||
TCP port to use for the dump, defaults to ``6500``.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: A standard cmd.run_all dictionary with a `success` key added, per host.
|
||||
`success` will be True if the set succeeded, False otherwise.
|
||||
|
||||
@ -662,7 +678,7 @@ def set_coredump_network_config(host,
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
response['success'] = False
|
||||
else:
|
||||
@ -673,7 +689,8 @@ def set_coredump_network_config(host,
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
response['success'] = False
|
||||
else:
|
||||
@ -684,7 +701,7 @@ def set_coredump_network_config(host,
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def get_firewall_status(host, username, password, protocol=None, port=None, esxi_hosts=None):
|
||||
def get_firewall_status(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Show status of all firewall rule sets.
|
||||
|
||||
@ -709,6 +726,9 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: Nested dictionary with two top-level keys ``rulesets`` and ``success``
|
||||
``success`` will be True or False depending on query success
|
||||
``rulesets`` will list the rulesets and their statuses if ``success``
|
||||
@ -735,7 +755,7 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({esxi_host: {'Error': response['stdout'],
|
||||
'success': False,
|
||||
@ -746,7 +766,8 @@ def get_firewall_status(host, username, password, protocol=None, port=None, esxi
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({host: {'Error': response['stdout'],
|
||||
'success': False,
|
||||
@ -766,7 +787,8 @@ def enable_firewall_ruleset(host,
|
||||
ruleset_name,
|
||||
protocol=None,
|
||||
port=None,
|
||||
esxi_hosts=None):
|
||||
esxi_hosts=None,
|
||||
credstore=None):
|
||||
'''
|
||||
Enable or disable an ESXi firewall rule set.
|
||||
|
||||
@ -797,6 +819,9 @@ def enable_firewall_ruleset(host,
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: A standard cmd.run_all dictionary, per host.
|
||||
|
||||
CLI Example:
|
||||
@ -822,19 +847,20 @@ def enable_firewall_ruleset(host,
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
ret.update({esxi_host: response})
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
ret.update({host: response})
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def syslog_service_reload(host, username, password, protocol=None, port=None, esxi_hosts=None):
|
||||
def syslog_service_reload(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Reload the syslog service so it will pick up any changes.
|
||||
|
||||
@ -859,6 +885,9 @@ def syslog_service_reload(host, username, password, protocol=None, port=None, es
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: A standard cmd.run_all dictionary. This dictionary will at least
|
||||
have a `retcode` key. If `retcode` is 0 the command was successful.
|
||||
|
||||
@ -883,12 +912,13 @@ def syslog_service_reload(host, username, password, protocol=None, port=None, es
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
ret.update({esxi_host: response})
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
ret.update({host: response})
|
||||
|
||||
return ret
|
||||
@ -904,7 +934,8 @@ def set_syslog_config(host,
|
||||
port=None,
|
||||
firewall=True,
|
||||
reset_service=True,
|
||||
esxi_hosts=None):
|
||||
esxi_hosts=None,
|
||||
credstore=None):
|
||||
'''
|
||||
Set the specified syslog configuration parameter. By default, this function will
|
||||
reset the syslog service after the configuration is set.
|
||||
@ -950,6 +981,9 @@ def set_syslog_config(host,
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: Dictionary with a top-level key of 'success' which indicates
|
||||
if all the parameters were set, and individual keys
|
||||
for each parameter indicating which succeeded or failed, per host.
|
||||
@ -980,7 +1014,7 @@ def set_syslog_config(host,
|
||||
response = enable_firewall_ruleset(host, username, password,
|
||||
ruleset_enable=True, ruleset_name='syslog',
|
||||
protocol=protocol, port=port,
|
||||
esxi_hosts=[esxi_host]).get(esxi_host)
|
||||
esxi_hosts=[esxi_host], credstore=credstore).get(esxi_host)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({esxi_host: {'enable_firewall': {'message': response['stdout'],
|
||||
'success': False}}})
|
||||
@ -990,7 +1024,8 @@ def set_syslog_config(host,
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = enable_firewall_ruleset(host, username, password,
|
||||
ruleset_enable=True, ruleset_name='syslog',
|
||||
protocol=protocol, port=port).get(host)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore).get(host)
|
||||
if response['retcode'] != 0:
|
||||
ret.update({host: {'enable_firewall': {'message': response['stdout'],
|
||||
'success': False}}})
|
||||
@ -1005,7 +1040,8 @@ def set_syslog_config(host,
|
||||
for esxi_host in esxi_hosts:
|
||||
response = _set_syslog_config_helper(host, username, password, syslog_config,
|
||||
config_value, protocol=protocol, port=port,
|
||||
reset_service=reset_service, esxi_host=esxi_host)
|
||||
reset_service=reset_service, esxi_host=esxi_host,
|
||||
credstore=credstore)
|
||||
# Ensure we don't overwrite any dictionary data already set
|
||||
# By updating the esxi_host directly.
|
||||
if ret.get(esxi_host) is None:
|
||||
@ -1016,7 +1052,7 @@ def set_syslog_config(host,
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = _set_syslog_config_helper(host, username, password, syslog_config,
|
||||
config_value, protocol=protocol, port=port,
|
||||
reset_service=reset_service)
|
||||
reset_service=reset_service, credstore=credstore)
|
||||
# Ensure we don't overwrite any dictionary data already set
|
||||
# By updating the host directly.
|
||||
if ret.get(host) is None:
|
||||
@ -1027,7 +1063,7 @@ def set_syslog_config(host,
|
||||
|
||||
|
||||
@depends(HAS_ESX_CLI)
|
||||
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None):
|
||||
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
|
||||
'''
|
||||
Retrieve the syslog configuration.
|
||||
|
||||
@ -1052,6 +1088,9 @@ def get_syslog_config(host, username, password, protocol=None, port=None, esxi_h
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: Dictionary with keys and values corresponding to the
|
||||
syslog configuration, per host.
|
||||
|
||||
@ -1076,13 +1115,14 @@ def get_syslog_config(host, username, password, protocol=None, port=None, esxi_h
|
||||
for esxi_host in esxi_hosts:
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
# format the response stdout into something useful
|
||||
ret.update({esxi_host: _format_syslog_config(response)})
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
# format the response stdout into something useful
|
||||
ret.update({host: _format_syslog_config(response)})
|
||||
|
||||
@ -1096,7 +1136,8 @@ def reset_syslog_config(host,
|
||||
protocol=None,
|
||||
port=None,
|
||||
syslog_config=None,
|
||||
esxi_hosts=None):
|
||||
esxi_hosts=None,
|
||||
credstore=None):
|
||||
'''
|
||||
Reset the syslog service to its default settings.
|
||||
|
||||
@ -1129,6 +1170,9 @@ def reset_syslog_config(host,
|
||||
If ``host`` is a vCenter host, then use esxi_hosts to execute this function
|
||||
on a list of one or more ESXi machines.
|
||||
|
||||
credstore
|
||||
Optionally set to the path of the credential store file.
|
||||
|
||||
:return: Dictionary with a top-level key of 'success' which indicates
|
||||
if all the parameters were reset, and individual keys
|
||||
for each parameter indicating which succeeded or failed, per host.
|
||||
@ -1170,18 +1214,20 @@ def reset_syslog_config(host,
|
||||
response_dict = _reset_syslog_config_params(host, username, password,
|
||||
cmd, resets, valid_resets,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
ret.update({esxi_host: response_dict})
|
||||
else:
|
||||
# Handles a single host or a vCenter connection when no esxi_hosts are provided.
|
||||
response_dict = _reset_syslog_config_params(host, username, password,
|
||||
cmd, resets, valid_resets,
|
||||
protocol=protocol, port=port)
|
||||
protocol=protocol, port=port,
|
||||
credstore=credstore)
|
||||
ret.update({host: response_dict})
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
@ignores_kwargs('credstore')
|
||||
def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None,
|
||||
protocol=None, port=None, certificate_verify=False):
|
||||
'''
|
||||
@ -1253,6 +1299,7 @@ def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None,
|
||||
return ret
|
||||
|
||||
|
||||
@ignores_kwargs('credstore')
|
||||
def get_ssh_key(host,
|
||||
username,
|
||||
password,
|
||||
@ -1310,6 +1357,7 @@ def get_ssh_key(host,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the date/time information for a given host or list of host_names.
|
||||
@ -1368,6 +1416,7 @@ def get_host_datetime(host, username, password, protocol=None, port=None, host_n
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_ntp_config(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the NTP configuration information for a given host or list of host_names.
|
||||
@ -1425,6 +1474,7 @@ def get_ntp_config(host, username, password, protocol=None, port=None, host_name
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_service_policy(host, username, password, service_name, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the service name's policy for a given host or list of hosts.
|
||||
@ -1531,6 +1581,7 @@ def get_service_policy(host, username, password, service_name, protocol=None, po
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_service_running(host, username, password, service_name, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the service name's running state for a given host or list of hosts.
|
||||
@ -1637,6 +1688,7 @@ def get_service_running(host, username, password, service_name, protocol=None, p
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_vmotion_enabled(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the VMotion enabled status for a given host or a list of host_names. Returns ``True``
|
||||
@ -1698,6 +1750,7 @@ def get_vmotion_enabled(host, username, password, protocol=None, port=None, host
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_vsan_enabled(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Get the VSAN enabled status for a given host or a list of host_names. Returns ``True``
|
||||
@ -1764,6 +1817,7 @@ def get_vsan_enabled(host, username, password, protocol=None, port=None, host_na
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Returns a list of VSAN-eligible disks for a given host or list of host_names.
|
||||
@ -1859,6 +1913,7 @@ def test_vcenter_connection(service_instance=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def system_info(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Return system information about a VMware environment.
|
||||
@ -1899,6 +1954,7 @@ def system_info(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_datacenters(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of datacenters for the specified host.
|
||||
@ -1936,6 +1992,7 @@ def list_datacenters(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_clusters(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of clusters for the specified host.
|
||||
@ -1973,6 +2030,7 @@ def list_clusters(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_datastore_clusters(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of datastore clusters for the specified host.
|
||||
@ -2009,6 +2067,7 @@ def list_datastore_clusters(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_datastores(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of datastores for the specified host.
|
||||
@ -2045,6 +2104,7 @@ def list_datastores(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_hosts(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of hosts for the specified VMware environment.
|
||||
@ -2081,6 +2141,7 @@ def list_hosts(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_resourcepools(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of resource pools for the specified host.
|
||||
@ -2117,6 +2178,7 @@ def list_resourcepools(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_networks(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of networks for the specified host.
|
||||
@ -2153,6 +2215,7 @@ def list_networks(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_vms(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of VMs for the specified host.
|
||||
@ -2189,6 +2252,7 @@ def list_vms(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_folders(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of folders for the specified host.
|
||||
@ -2225,6 +2289,7 @@ def list_folders(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_dvs(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of distributed virtual switches for the specified host.
|
||||
@ -2261,6 +2326,7 @@ def list_dvs(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_vapps(host, username, password, protocol=None, port=None):
|
||||
'''
|
||||
Returns a list of vApps for the specified host.
|
||||
@ -2298,6 +2364,7 @@ def list_vapps(host, username, password, protocol=None, port=None):
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_ssds(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Returns a list of SSDs for the given host or list of host_names.
|
||||
@ -2358,6 +2425,7 @@ def list_ssds(host, username, password, protocol=None, port=None, host_names=Non
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def list_non_ssds(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Returns a list of Non-SSD disks for the given host or list of host_names.
|
||||
@ -2425,6 +2493,7 @@ def list_non_ssds(host, username, password, protocol=None, port=None, host_names
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def set_ntp_config(host, username, password, ntp_servers, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Set NTP configuration for a given host of list of host_names.
|
||||
@ -2504,6 +2573,7 @@ def set_ntp_config(host, username, password, ntp_servers, protocol=None, port=No
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def service_start(host,
|
||||
username,
|
||||
password,
|
||||
@ -2614,6 +2684,7 @@ def service_start(host,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def service_stop(host,
|
||||
username,
|
||||
password,
|
||||
@ -2724,6 +2795,7 @@ def service_stop(host,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def service_restart(host,
|
||||
username,
|
||||
password,
|
||||
@ -2834,6 +2906,7 @@ def service_restart(host,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def set_service_policy(host,
|
||||
username,
|
||||
password,
|
||||
@ -2962,6 +3035,7 @@ def set_service_policy(host,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def update_host_datetime(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Update the date/time on the given host or list of host_names. This function should be
|
||||
@ -3028,6 +3102,7 @@ def update_host_datetime(host, username, password, protocol=None, port=None, hos
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def update_host_password(host, username, password, new_password, protocol=None, port=None):
|
||||
'''
|
||||
Update the password for a given host.
|
||||
@ -3090,6 +3165,7 @@ def update_host_password(host, username, password, new_password, protocol=None,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def vmotion_disable(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Disable vMotion for a given host or list of host_names.
|
||||
@ -3158,6 +3234,7 @@ def vmotion_disable(host, username, password, protocol=None, port=None, host_nam
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def vmotion_enable(host, username, password, protocol=None, port=None, host_names=None, device='vmk0'):
|
||||
'''
|
||||
Enable vMotion for a given host or list of host_names.
|
||||
@ -3230,6 +3307,7 @@ def vmotion_enable(host, username, password, protocol=None, port=None, host_name
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
|
||||
@ -3333,6 +3411,7 @@ def vsan_add_disks(host, username, password, protocol=None, port=None, host_name
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def vsan_disable(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Disable VSAN for a given host or list of host_names.
|
||||
@ -3417,6 +3496,7 @@ def vsan_disable(host, username, password, protocol=None, port=None, host_names=
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@ignores_kwargs('credstore')
|
||||
def vsan_enable(host, username, password, protocol=None, port=None, host_names=None):
|
||||
'''
|
||||
Enable VSAN for a given host or list of host_names.
|
||||
@ -3724,7 +3804,7 @@ def _get_vsan_eligible_disks(service_instance, host, host_names):
|
||||
|
||||
|
||||
def _reset_syslog_config_params(host, username, password, cmd, resets, valid_resets,
|
||||
protocol=None, port=None, esxi_host=None):
|
||||
protocol=None, port=None, esxi_host=None, credstore=None):
|
||||
'''
|
||||
Helper function for reset_syslog_config that resets the config and populates the return dictionary.
|
||||
'''
|
||||
@ -3738,7 +3818,7 @@ def _reset_syslog_config_params(host, username, password, cmd, resets, valid_res
|
||||
if reset_param in valid_resets:
|
||||
ret = salt.utils.vmware.esxcli(host, username, password, cmd + reset_param,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
ret_dict[reset_param] = {}
|
||||
ret_dict[reset_param]['success'] = ret['retcode'] == 0
|
||||
if ret['retcode'] != 0:
|
||||
@ -3757,7 +3837,7 @@ def _reset_syslog_config_params(host, username, password, cmd, resets, valid_res
|
||||
|
||||
|
||||
def _set_syslog_config_helper(host, username, password, syslog_config, config_value,
|
||||
protocol=None, port=None, reset_service=None, esxi_host=None):
|
||||
protocol=None, port=None, reset_service=None, esxi_host=None, credstore=None):
|
||||
'''
|
||||
Helper function for set_syslog_config that sets the config and populates the return dictionary.
|
||||
'''
|
||||
@ -3773,7 +3853,7 @@ def _set_syslog_config_helper(host, username, password, syslog_config, config_va
|
||||
|
||||
response = salt.utils.vmware.esxcli(host, username, password, cmd,
|
||||
protocol=protocol, port=port,
|
||||
esxi_host=esxi_host)
|
||||
esxi_host=esxi_host, credstore=credstore)
|
||||
|
||||
# Update the return dictionary for success or error messages.
|
||||
if response['retcode'] != 0:
|
||||
@ -3791,12 +3871,13 @@ def _set_syslog_config_helper(host, username, password, syslog_config, config_va
|
||||
host_name = host
|
||||
response = syslog_service_reload(host, username, password,
|
||||
protocol=protocol, port=port,
|
||||
esxi_hosts=esxi_host).get(host_name)
|
||||
esxi_hosts=esxi_host, credstore=credstore).get(host_name)
|
||||
ret_dict.update({'syslog_restart': {'success': response['retcode'] == 0}})
|
||||
|
||||
return ret_dict
|
||||
|
||||
|
||||
@ignores_kwargs('credstore')
|
||||
def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name,
|
||||
dvs_name, target_portgroup_name, uplink_portgroup_name,
|
||||
protocol=None, port=None, host_names=None):
|
||||
|
@ -1,11 +1,12 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage information about files on the minion, set/read user, group
|
||||
data
|
||||
data, modify the ACL of files/directories
|
||||
|
||||
:depends: - win32api
|
||||
- win32file
|
||||
- win32security
|
||||
- win32con
|
||||
- salt.utils.win_dacl
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
@ -40,17 +41,6 @@ import salt.utils.atomicfile # do not remove, used in imported file.py function
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
# pylint: enable=W0611
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
import win32api
|
||||
import win32file
|
||||
import win32security
|
||||
import win32con
|
||||
from pywintypes import error as pywinerror
|
||||
HAS_WINDOWS_MODULES = True
|
||||
except ImportError:
|
||||
HAS_WINDOWS_MODULES = False
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
from salt.modules.file import (check_hash, # pylint: disable=W0611
|
||||
@ -68,6 +58,14 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
|
||||
|
||||
from salt.utils import namespaced_function as _namespaced_function
|
||||
|
||||
HAS_WINDOWS_MODULES = False
|
||||
if salt.utils.is_windows():
|
||||
import win32api
|
||||
import win32file
|
||||
import win32con
|
||||
from pywintypes import error as pywinerror
|
||||
HAS_WINDOWS_MODULES = True
|
||||
|
||||
HAS_WIN_DACL = False
|
||||
if salt.utils.is_windows():
|
||||
import salt.utils.win_dacl
|
||||
@ -195,92 +193,6 @@ def _resolve_symlink(path, max_depth=64):
|
||||
return path
|
||||
|
||||
|
||||
def _change_privilege_state(privilege_name, enable):
|
||||
'''
|
||||
Change the state, either enable or disable, of the named privilege for this
|
||||
process.
|
||||
|
||||
If the change fails, an exception will be raised. If successful, it returns
|
||||
True.
|
||||
'''
|
||||
log.debug(
|
||||
'{0} the privilege {1} for this process.'.format(
|
||||
'Enabling' if enable else 'Disabling',
|
||||
privilege_name
|
||||
)
|
||||
)
|
||||
# this is a pseudo-handle that doesn't need to be closed
|
||||
hProc = win32api.GetCurrentProcess()
|
||||
hToken = None
|
||||
try:
|
||||
hToken = win32security.OpenProcessToken(
|
||||
hProc,
|
||||
win32security.TOKEN_QUERY | win32security.TOKEN_ADJUST_PRIVILEGES
|
||||
)
|
||||
privilege = win32security.LookupPrivilegeValue(None, privilege_name)
|
||||
if enable:
|
||||
privilege_attrs = win32security.SE_PRIVILEGE_ENABLED
|
||||
else:
|
||||
# a value of 0 disables a privilege (there's no constant for it)
|
||||
privilege_attrs = 0
|
||||
|
||||
# check that the handle has the requested privilege
|
||||
token_privileges = dict(win32security.GetTokenInformation(
|
||||
hToken, win32security.TokenPrivileges))
|
||||
if privilege not in token_privileges:
|
||||
if enable:
|
||||
raise SaltInvocationError(
|
||||
'The requested privilege {0} is not available for this '
|
||||
'process (check Salt user privileges).'.format(privilege_name))
|
||||
else: # disable a privilege this process does not have
|
||||
log.debug(
|
||||
'Cannot disable privilege {0} because this process '
|
||||
'does not have that privilege.'.format(privilege_name)
|
||||
)
|
||||
return True
|
||||
else:
|
||||
# check if the privilege is already in the requested state
|
||||
if token_privileges[privilege] == privilege_attrs:
|
||||
log.debug(
|
||||
'The requested privilege {0} is already in the '
|
||||
'requested state.'.format(privilege_name)
|
||||
)
|
||||
return True
|
||||
|
||||
changes = win32security.AdjustTokenPrivileges(
|
||||
hToken,
|
||||
False,
|
||||
[(privilege, privilege_attrs)]
|
||||
)
|
||||
finally:
|
||||
if hToken:
|
||||
win32api.CloseHandle(hToken)
|
||||
|
||||
if not bool(changes):
|
||||
raise SaltInvocationError(
|
||||
'Could not {0} the {1} privilege for this process'.format(
|
||||
'enable' if enable else 'remove',
|
||||
privilege_name
|
||||
)
|
||||
)
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def _enable_privilege(privilege_name):
|
||||
'''
|
||||
Enables the named privilege for this process.
|
||||
'''
|
||||
return _change_privilege_state(privilege_name, True)
|
||||
|
||||
|
||||
def _disable_privilege(privilege_name):
|
||||
'''
|
||||
Disables the named privilege for this process.
|
||||
'''
|
||||
return _change_privilege_state(privilege_name, False)
|
||||
|
||||
|
||||
def gid_to_group(gid):
|
||||
'''
|
||||
Convert the group id to the group name on this system
|
||||
@ -293,6 +205,12 @@ def gid_to_group(gid):
|
||||
instead; an info level log entry will be generated if this function is used
|
||||
directly.
|
||||
|
||||
Args:
|
||||
gid (str): The gid of the group
|
||||
|
||||
Returns:
|
||||
str: The name of the group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -319,6 +237,12 @@ def group_to_gid(group):
|
||||
instead; an info level log entry will be generated if this function is used
|
||||
directly.
|
||||
|
||||
Args:
|
||||
group (str): The name of the group
|
||||
|
||||
Returns:
|
||||
str: The gid of the group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -330,7 +254,10 @@ def group_to_gid(group):
|
||||
log.info('The function {0} should not be used on Windows systems; '
|
||||
'see function docs for details.'.format(func_name))
|
||||
|
||||
return _user_to_uid(group)
|
||||
if group is None:
|
||||
return ''
|
||||
|
||||
return salt.utils.win_dacl.get_sid_string(group)
|
||||
|
||||
|
||||
def get_pgid(path, follow_symlinks=True):
|
||||
@ -344,6 +271,16 @@ def get_pgid(path, follow_symlinks=True):
|
||||
|
||||
Ensure you know what you are doing before using this function.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The gid of the primary group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -351,7 +288,7 @@ def get_pgid(path, follow_symlinks=True):
|
||||
salt '*' file.get_pgid c:\\temp\\test.txt
|
||||
'''
|
||||
if not os.path.exists(path):
|
||||
return False
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
# Under Windows, if the path is a symlink, the user that owns the symlink is
|
||||
# returned, not the user that owns the file/directory the symlink is
|
||||
@ -361,25 +298,8 @@ def get_pgid(path, follow_symlinks=True):
|
||||
if follow_symlinks and sys.getwindowsversion().major >= 6:
|
||||
path = _resolve_symlink(path)
|
||||
|
||||
try:
|
||||
secdesc = win32security.GetFileSecurity(
|
||||
path, win32security.GROUP_SECURITY_INFORMATION
|
||||
)
|
||||
# Not all filesystems mountable within windows
|
||||
# have SecurityDescriptor's. For instance, some mounted
|
||||
# SAMBA shares, or VirtualBox's shared folders. If we
|
||||
# can't load a file descriptor for the file, we default
|
||||
# to "Everyone" - http://support.microsoft.com/kb/243330
|
||||
except MemoryError:
|
||||
# generic memory error (win2k3+)
|
||||
return 'S-1-1-0'
|
||||
except pywinerror as exc:
|
||||
# Incorrect function error (win2k8+)
|
||||
if exc.winerror == 1 or exc.winerror == 50:
|
||||
return 'S-1-1-0'
|
||||
raise
|
||||
group_sid = secdesc.GetSecurityDescriptorGroup()
|
||||
return win32security.ConvertSidToStringSid(group_sid)
|
||||
group_name = salt.utils.win_dacl.get_primary_group(path)
|
||||
return salt.utils.win_dacl.get_sid_string(group_name)
|
||||
|
||||
|
||||
def get_pgroup(path, follow_symlinks=True):
|
||||
@ -398,6 +318,16 @@ def get_pgroup(path, follow_symlinks=True):
|
||||
means no value was returned. To be certain, use the `get_pgid` function
|
||||
which will return the SID, including for the system 'None' group.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The name of the primary group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -426,6 +356,16 @@ def get_gid(path, follow_symlinks=True):
|
||||
If you do actually want to access the 'primary group' of a file, use
|
||||
`file.get_pgid`.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The gid of the owner
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -460,6 +400,16 @@ def get_group(path, follow_symlinks=True):
|
||||
If you do actually want to access the 'primary group' of a file, use
|
||||
`file.get_pgroup`.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The name of the owner
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -479,6 +429,12 @@ def uid_to_user(uid):
|
||||
'''
|
||||
Convert a uid to a user name
|
||||
|
||||
Args:
|
||||
uid (str): The user id to lookup
|
||||
|
||||
Returns:
|
||||
str: The name of the user
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -488,24 +444,19 @@ def uid_to_user(uid):
|
||||
if uid is None or uid == '':
|
||||
return ''
|
||||
|
||||
sid = win32security.GetBinarySid(uid)
|
||||
try:
|
||||
name, domain, account_type = win32security.LookupAccountSid(None, sid)
|
||||
return name
|
||||
except pywinerror as exc:
|
||||
# if user does not exist...
|
||||
# 1332 = No mapping between account names and security IDs was carried
|
||||
# out.
|
||||
if exc.winerror == 1332:
|
||||
return ''
|
||||
else:
|
||||
raise
|
||||
return salt.utils.win_dacl.get_name(uid)
|
||||
|
||||
|
||||
def user_to_uid(user):
|
||||
'''
|
||||
Convert user name to a uid
|
||||
|
||||
Args:
|
||||
user (str): The user to lookup
|
||||
|
||||
Returns:
|
||||
str: The user id of the user
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -514,28 +465,8 @@ def user_to_uid(user):
|
||||
'''
|
||||
if user is None:
|
||||
user = salt.utils.get_user()
|
||||
return _user_to_uid(user)
|
||||
|
||||
|
||||
def _user_to_uid(user):
|
||||
'''
|
||||
Convert user name to a uid
|
||||
'''
|
||||
if user is None or user == '':
|
||||
return ''
|
||||
|
||||
try:
|
||||
sid, domain, account_type = win32security.LookupAccountName(None, user)
|
||||
except pywinerror as exc:
|
||||
# if user does not exist...
|
||||
# 1332 = No mapping between account names and security IDs was carried
|
||||
# out.
|
||||
if exc.winerror == 1332:
|
||||
return ''
|
||||
else:
|
||||
raise
|
||||
|
||||
return win32security.ConvertSidToStringSid(sid)
|
||||
return salt.utils.win_dacl.get_sid_string(user)
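With this change the old ``win32security`` lookups collapse into the two symmetric ``win_dacl`` calls used above. A hedged sketch of the conversion in both directions; it only runs on a Windows machine with Salt and pywin32 installed, and ``Administrator`` is just an example account name:

.. code-block:: python

    import salt.utils.win_dacl

    user_name = 'Administrator'                             # example account
    sid = salt.utils.win_dacl.get_sid_string(user_name)     # name -> SID string
    name_back = salt.utils.win_dacl.get_name(sid)           # SID string -> name
    print(sid, name_back)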
|
||||
|
||||
|
||||
def get_uid(path, follow_symlinks=True):
|
||||
@ -545,6 +476,17 @@ def get_uid(path, follow_symlinks=True):
|
||||
Symlinks are followed by default to mimic Unix behavior. Specify
|
||||
`follow_symlinks=False` to turn off this behavior.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The uid of the owner
|
||||
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -553,7 +495,7 @@ def get_uid(path, follow_symlinks=True):
|
||||
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
|
||||
'''
|
||||
if not os.path.exists(path):
|
||||
return False
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
# Under Windows, if the path is a symlink, the user that owns the symlink is
|
||||
# returned, not the user that owns the file/directory the symlink is
|
||||
@ -562,20 +504,9 @@ def get_uid(path, follow_symlinks=True):
|
||||
# supported on Windows Vista or later.
|
||||
if follow_symlinks and sys.getwindowsversion().major >= 6:
|
||||
path = _resolve_symlink(path)
|
||||
try:
|
||||
secdesc = win32security.GetFileSecurity(
|
||||
path, win32security.OWNER_SECURITY_INFORMATION
|
||||
)
|
||||
except MemoryError:
|
||||
# generic memory error (win2k3+)
|
||||
return 'S-1-1-0'
|
||||
except pywinerror as exc:
|
||||
# Incorrect function error (win2k8+)
|
||||
if exc.winerror == 1 or exc.winerror == 50:
|
||||
return 'S-1-1-0'
|
||||
raise
|
||||
owner_sid = secdesc.GetSecurityDescriptorOwner()
|
||||
return win32security.ConvertSidToStringSid(owner_sid)
|
||||
|
||||
owner_sid = salt.utils.win_dacl.get_owner(path)
|
||||
return salt.utils.win_dacl.get_sid_string(owner_sid)
|
||||
|
||||
|
||||
def get_user(path, follow_symlinks=True):
|
||||
@ -585,6 +516,17 @@ def get_user(path, follow_symlinks=True):
|
||||
Symlinks are followed by default to mimic Unix behavior. Specify
|
||||
`follow_symlinks=False` to turn off this behavior.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
str: The name of the owner
|
||||
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -592,7 +534,18 @@ def get_user(path, follow_symlinks=True):
|
||||
salt '*' file.get_user c:\\temp\\test.txt
|
||||
salt '*' file.get_user c:\\temp\\test.txt follow_symlinks=False
|
||||
'''
|
||||
return uid_to_user(get_uid(path, follow_symlinks))
|
||||
if not os.path.exists(path):
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
# Under Windows, if the path is a symlink, the user that owns the symlink is
|
||||
# returned, not the user that owns the file/directory the symlink is
|
||||
# pointing to. This behavior is *different* to *nix, therefore the symlink
|
||||
# is first resolved manually if necessary. Remember symlinks are only
|
||||
# supported on Windows Vista or later.
|
||||
if follow_symlinks and sys.getwindowsversion().major >= 6:
|
||||
path = _resolve_symlink(path)
|
||||
|
||||
return salt.utils.win_dacl.get_owner(path)
|
||||
|
||||
|
||||
def get_mode(path):
|
||||
@ -602,6 +555,12 @@ def get_mode(path):
|
||||
Right now we're just returning None because Windows' doesn't have a mode
|
||||
like Linux
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -609,7 +568,7 @@ def get_mode(path):
|
||||
salt '*' file.get_mode /etc/passwd
|
||||
'''
|
||||
if not os.path.exists(path):
|
||||
return ''
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
func_name = '{0}.get_mode'.format(__virtualname__)
|
||||
if __opts__.get('fun', '') == func_name:
|
||||
@ -639,6 +598,15 @@ def lchown(path, user, group=None, pgroup=None):
|
||||
Otherwise Salt will interpret it as the Python value of None and no primary
|
||||
group changes will occur. See the example below.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
user (str): The name of the user to own the file
|
||||
group (str): The group (not used)
|
||||
pgroup (str): The primary group to assign
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise error
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -650,13 +618,11 @@ def lchown(path, user, group=None, pgroup=None):
|
||||
if group:
|
||||
func_name = '{0}.lchown'.format(__virtualname__)
|
||||
if __opts__.get('fun', '') == func_name:
|
||||
log.info('The group parameter has no effect when using {0} on Windows '
|
||||
'systems; see function docs for details.'.format(func_name))
|
||||
log.debug(
|
||||
'win_file.py {0} Ignoring the group parameter for {1}'.format(
|
||||
func_name, path
|
||||
)
|
||||
)
|
||||
log.info('The group parameter has no effect when using {0} on '
|
||||
'Windows systems; see function docs for details.'
|
||||
''.format(func_name))
|
||||
log.debug('win_file.py {0} Ignoring the group parameter for {1}'
|
||||
''.format(func_name, path))
|
||||
group = None
|
||||
|
||||
return chown(path, user, group, pgroup, follow_symlinks=False)
|
||||
@ -676,9 +642,17 @@ def chown(path, user, group=None, pgroup=None, follow_symlinks=True):
|
||||
If you do want to change the 'primary group' property and understand the
|
||||
implications, pass the Windows only parameter, pgroup, instead.
|
||||
|
||||
To set the primary group to 'None', it must be specified in quotes.
|
||||
Otherwise Salt will interpret it as the Python value of None and no primary
|
||||
group changes will occur. See the example below.
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
user (str): The name of the user to own the file
|
||||
group (str): The group (not used)
|
||||
pgroup (str): The primary group to assign
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise error
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -689,72 +663,26 @@ def chown(path, user, group=None, pgroup=None, follow_symlinks=True):
|
||||
salt '*' file.chown c:\\temp\\test.txt myusername "pgroup='None'"
|
||||
'''
|
||||
# the group parameter is not used; only provided for API compatibility
|
||||
if group:
|
||||
if group is not None:
|
||||
func_name = '{0}.chown'.format(__virtualname__)
|
||||
if __opts__.get('fun', '') == func_name:
|
||||
log.info('The group parameter has no effect when using {0} on Windows '
|
||||
'systems; see function docs for details.'.format(func_name))
|
||||
log.debug(
|
||||
'win_file.py {0} Ignoring the group parameter for {1}'.format(
|
||||
func_name, path
|
||||
)
|
||||
)
|
||||
group = None
|
||||
|
||||
err = ''
|
||||
# get SID object for user
|
||||
try:
|
||||
userSID, domainName, objectType = win32security.LookupAccountName(None, user)
|
||||
except pywinerror:
|
||||
err += 'User does not exist\n'
|
||||
|
||||
if pgroup:
|
||||
# get SID object for group
|
||||
try:
|
||||
groupSID, domainName, objectType = win32security.LookupAccountName(None, pgroup)
|
||||
except pywinerror:
|
||||
err += 'Group does not exist\n'
|
||||
else:
|
||||
groupSID = None
|
||||
|
||||
if not os.path.exists(path):
|
||||
err += 'File not found'
|
||||
if err:
|
||||
return err
|
||||
log.info('The group parameter has no effect when using {0} on '
|
||||
'Windows systems; see function docs for details.'
|
||||
''.format(func_name))
|
||||
log.debug('win_file.py {0} Ignoring the group parameter for {1}'
|
||||
''.format(func_name, path))
|
||||
|
||||
if follow_symlinks and sys.getwindowsversion().major >= 6:
|
||||
path = _resolve_symlink(path)
|
||||
|
||||
privilege_enabled = False
|
||||
try:
|
||||
privilege_enabled = _enable_privilege(win32security.SE_RESTORE_NAME)
|
||||
if pgroup:
|
||||
# set owner and group
|
||||
win32security.SetNamedSecurityInfo(
|
||||
path,
|
||||
win32security.SE_FILE_OBJECT,
|
||||
win32security.OWNER_SECURITY_INFORMATION + win32security.GROUP_SECURITY_INFORMATION,
|
||||
userSID,
|
||||
groupSID,
|
||||
None,
|
||||
None
|
||||
)
|
||||
else:
|
||||
# set owner only
|
||||
win32security.SetNamedSecurityInfo(
|
||||
path,
|
||||
win32security.SE_FILE_OBJECT,
|
||||
win32security.OWNER_SECURITY_INFORMATION,
|
||||
userSID,
|
||||
None,
|
||||
None,
|
||||
None
|
||||
)
|
||||
finally:
|
||||
if privilege_enabled:
|
||||
_disable_privilege(win32security.SE_RESTORE_NAME)
|
||||
if not os.path.exists(path):
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
return None
|
||||
salt.utils.win_dacl.set_owner(path, user)
|
||||
if pgroup:
|
||||
salt.utils.win_dacl.set_primary_group(path, pgroup)
|
||||
|
||||
return True
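Since chown now routes through salt.utils.win_dacl and raises on a missing path, a quick way to exercise it end to end is the local caller client. This is a hedged usage sketch: the path and account names are placeholders and assume a configured Windows minion.

.. code-block:: python

    # Hedged usage sketch: call the rewritten file.chown through the local
    # caller client. The path, user and primary group below are placeholders.
    import salt.client

    caller = salt.client.Caller()
    result = caller.cmd('file.chown',
                        r'C:\Temp\test.txt',      # hypothetical path
                        'jsmith',                 # hypothetical owner
                        pgroup='Administrators')  # optional primary group
    print(result)  # True on success; CommandExecutionError if the path is missing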
|
||||
|
||||
|
||||
def chpgrp(path, group):
|
||||
@ -768,9 +696,12 @@ def chpgrp(path, group):
|
||||
|
||||
Ensure you know what you are doing before using this function.
|
||||
|
||||
To set the primary group to 'None', it must be specified in quotes.
|
||||
Otherwise Salt will interpret it as the Python value of None and no primary
|
||||
group changes will occur. See the example below.
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
pgroup (str): The primary group to assign
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise error
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -779,42 +710,7 @@ def chpgrp(path, group):
|
||||
salt '*' file.chpgrp c:\\temp\\test.txt Administrators
|
||||
salt '*' file.chpgrp c:\\temp\\test.txt "'None'"
|
||||
'''
|
||||
if group is None:
|
||||
raise SaltInvocationError("The group value was specified as None and "
|
||||
"is invalid. If you mean the built-in None "
|
||||
"group, specify the group in lowercase, e.g. "
|
||||
"'none'.")
|
||||
|
||||
err = ''
|
||||
# get SID object for group
|
||||
try:
|
||||
groupSID, domainName, objectType = win32security.LookupAccountName(None, group)
|
||||
except pywinerror:
|
||||
err += 'Group does not exist\n'
|
||||
|
||||
if not os.path.exists(path):
|
||||
err += 'File not found\n'
|
||||
if err:
|
||||
return err
|
||||
|
||||
# set group
|
||||
privilege_enabled = False
|
||||
try:
|
||||
privilege_enabled = _enable_privilege(win32security.SE_RESTORE_NAME)
|
||||
win32security.SetNamedSecurityInfo(
|
||||
path,
|
||||
win32security.SE_FILE_OBJECT,
|
||||
win32security.GROUP_SECURITY_INFORMATION,
|
||||
None,
|
||||
groupSID,
|
||||
None,
|
||||
None
|
||||
)
|
||||
finally:
|
||||
if privilege_enabled:
|
||||
_disable_privilege(win32security.SE_RESTORE_NAME)
|
||||
|
||||
return None
|
||||
return salt.utils.win_dacl.set_primary_group(path, group)
|
||||
|
||||
|
||||
def chgrp(path, group):
|
||||
@ -833,8 +729,17 @@ def chgrp(path, group):
|
||||
this function is superfluous and will generate an info level log entry if
|
||||
used directly.
|
||||
|
||||
If you do actually want to set the 'primary group' of a file, use `file
|
||||
.chpgrp`.
|
||||
If you do actually want to set the 'primary group' of a file, use ``file
|
||||
.chpgrp``.
|
||||
|
||||
To set group permissions use ``file.set_perms``
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
group (str): The group (unused)
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -866,18 +771,31 @@ def stats(path, hash_type='sha256', follow_symlinks=True):
|
||||
compatibility with Unix behavior. If the 'primary group' is required, it
|
||||
can be accessed in the `pgroup` and `pgid` properties.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
hash_type (str): The type of hash to return
|
||||
follow_symlinks (bool):
|
||||
If the object specified by ``path`` is a symlink, get attributes of
|
||||
the linked file instead of the symlink itself. Default is True
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of file/directory stats
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.stats /etc/passwd
|
||||
'''
|
||||
ret = {}
|
||||
if not os.path.exists(path):
|
||||
return ret
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
if follow_symlinks and sys.getwindowsversion().major >= 6:
|
||||
path = _resolve_symlink(path)
|
||||
|
||||
pstat = os.stat(path)
|
||||
|
||||
ret = {}
|
||||
ret['inode'] = pstat.st_ino
|
||||
# don't need to resolve symlinks again because we've already done that
|
||||
ret['uid'] = get_uid(path, follow_symlinks=False)
|
||||
@ -918,17 +836,20 @@ def get_attributes(path):
|
||||
Return a dictionary object with the Windows
|
||||
file attributes for a file.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of file attributes
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.get_attributes c:\\temp\\a.txt
|
||||
'''
|
||||
err = ''
|
||||
if not os.path.exists(path):
|
||||
err += 'File not found\n'
|
||||
if err:
|
||||
return err
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
# set up dictionary for attribute values
|
||||
attributes = {}
|
||||
@ -979,6 +900,21 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
|
||||
Set file attributes for a file. Note that the normal attribute
|
||||
means that all others are false. So setting it will clear all others.
|
||||
|
||||
Args:
|
||||
path (str): The path to the file or directory
|
||||
archive (bool): Sets the archive attribute. Default is None
|
||||
hidden (bool): Sets the hidden attribute. Default is None
|
||||
normal (bool):
|
||||
Resets the file attributes. Cannot be used in conjunction with any
|
||||
other attribute. Default is None
|
||||
notIndexed (bool): Sets the indexed attribute. Default is None
|
||||
readonly (bool): Sets the readonly attribute. Default is None
|
||||
system (bool): Sets the system attribute. Default is None
|
||||
temporary (bool): Sets the temporary attribute. Default is None
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise False
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -986,18 +922,19 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
|
||||
salt '*' file.set_attributes c:\\temp\\a.txt normal=True
|
||||
salt '*' file.set_attributes c:\\temp\\a.txt readonly=True hidden=True
|
||||
'''
|
||||
err = ''
|
||||
if not os.path.exists(path):
|
||||
err += 'File not found\n'
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
if normal:
|
||||
if archive or hidden or notIndexed or readonly or system or temporary:
|
||||
err += 'Normal attribute may not be used with any other attributes\n'
|
||||
else:
|
||||
return win32file.SetFileAttributes(path, 128)
|
||||
if err:
|
||||
return err
|
||||
raise CommandExecutionError(
|
||||
'Normal attribute may not be used with any other attributes')
|
||||
ret = win32file.SetFileAttributes(path, 128)
|
||||
return True if ret is None else False
|
||||
|
||||
# Get current attributes
|
||||
intAttributes = win32file.GetFileAttributes(path)
|
||||
|
||||
# individually set or clear bits for appropriate attributes
|
||||
if archive is not None:
|
||||
if archive:
|
||||
@ -1029,7 +966,9 @@ def set_attributes(path, archive=None, hidden=None, normal=None,
|
||||
intAttributes |= 0x100
|
||||
else:
|
||||
intAttributes &= 0xFEFF
|
||||
return win32file.SetFileAttributes(path, intAttributes)
|
||||
|
||||
ret = win32file.SetFileAttributes(path, intAttributes)
|
||||
return True if ret is None else False
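The hex masks above (0x100 to set the temporary bit, 0xFEFF to clear it, and so on) all follow one set-or-clear pattern. Below is a small standalone sketch of that pattern using the named win32con constants instead of raw hex; the example path is a placeholder.

.. code-block:: python

    # Sketch of the set-or-clear attribute pattern used by set_attributes,
    # written with win32con constants rather than hard-coded hex masks.
    # Assumes pywin32 on Windows; the example path is a placeholder.
    import win32con
    import win32file

    def _toggle_attribute(path, flag, enable):
        attrs = win32file.GetFileAttributes(path)
        attrs = attrs | flag if enable else attrs & ~flag
        # SetFileAttributes returns None on success, mirroring the check above
        return win32file.SetFileAttributes(path, attrs) is None

    # _toggle_attribute(r'C:\Temp\a.txt', win32con.FILE_ATTRIBUTE_READONLY, True)
    # _toggle_attribute(r'C:\Temp\a.txt', win32con.FILE_ATTRIBUTE_TEMPORARY, False)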
|
||||
|
||||
|
||||
def set_mode(path, mode):
|
||||
@ -1039,6 +978,13 @@ def set_mode(path, mode):
|
||||
This just calls get_mode, which returns None because we don't use mode on
|
||||
Windows
|
||||
|
||||
Args:
|
||||
path: The path to the file or directory
|
||||
mode: The mode (not used)
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1058,12 +1004,12 @@ def remove(path, force=False):
|
||||
'''
|
||||
Remove the named file or directory
|
||||
|
||||
:param str path: The path to the file or directory to remove.
|
||||
Args:
|
||||
path (str): The path to the file or directory to remove.
|
||||
force (bool): Remove even if marked Read-Only. Default is False
|
||||
|
||||
:param bool force: Remove even if marked Read-Only
|
||||
|
||||
:return: True if successful, False if unsuccessful
|
||||
:rtype: bool
|
||||
Returns:
|
||||
bool: True if successful, False if unsuccessful
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -1079,7 +1025,7 @@ def remove(path, force=False):
|
||||
|
||||
# Does the file/folder exist

|
||||
if not os.path.exists(path):
|
||||
return 'File/Folder not found: {0}'.format(path)
|
||||
raise CommandExecutionError('Path not found: {0}'.format(path))
|
||||
|
||||
if not os.path.isabs(path):
|
||||
raise SaltInvocationError('File path must be absolute.')
|
||||
@ -1127,6 +1073,13 @@ def symlink(src, link):
|
||||
exception - invalid symlinks cannot be created. The source path must exist.
|
||||
If it doesn't, an error will be raised.
|
||||
|
||||
Args:
|
||||
src (str): The path to a file or directory
|
||||
link (str): The path to the link
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise False
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1191,6 +1144,12 @@ def is_link(path):
|
||||
is not a symlink, however, the error raised will be a SaltInvocationError,
|
||||
not an OSError.
|
||||
|
||||
Args:
|
||||
path (str): The path to a file or directory
|
||||
|
||||
Returns:
|
||||
bool: True if path is a symlink, otherwise False
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1281,6 +1240,12 @@ def readlink(path):
|
||||
not a symlink, however, the error raised will be a SaltInvocationError, not
|
||||
an OSError.
|
||||
|
||||
Args:
|
||||
path (str): The path to the symlink
|
||||
|
||||
Returns:
|
||||
str: The path that the symlink points to
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1954,30 +1919,37 @@ def set_perms(path, grant_perms=None, deny_perms=None, inheritance=True):
|
||||
|
||||
path (str): The full path to the directory.
|
||||
|
||||
grant_perms (dict): A dictionary containing the user/group and the basic
|
||||
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
|
||||
You can also set the ``applies_to`` setting here. The default is
|
||||
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
|
||||
like this:
|
||||
grant_perms (dict):
|
||||
A dictionary containing the user/group and the basic permissions to
|
||||
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
|
||||
set the ``applies_to`` setting here. The default is
|
||||
``this_folder_subfolders_files``. Specify another ``applies_to``
|
||||
setting like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
|
||||
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
|
||||
|
||||
To set advanced permissions use a list for the ``perms`` parameter, ie:
|
||||
To set advanced permissions use a list for the ``perms`` parameter,
|
||||
ie:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
|
||||
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
|
||||
|
||||
deny_perms (dict): A dictionary containing the user/group and
|
||||
permissions to deny along with the ``applies_to`` setting. Use the same
|
||||
format used for the ``grant_perms`` parameter. Remember, deny
|
||||
permissions supersede grant permissions.
|
||||
To see a list of available attributes and applies to settings see
|
||||
the documentation for salt.utils.win_dacl
|
||||
|
||||
inheritance (bool): If True the object will inherit permissions from the
|
||||
parent, if False, inheritance will be disabled. Inheritance setting will
|
||||
not apply to parent directories if they must be created
|
||||
deny_perms (dict):
|
||||
A dictionary containing the user/group and permissions to deny along
|
||||
with the ``applies_to`` setting. Use the same format used for the
|
||||
``grant_perms`` parameter. Remember, deny permissions supersede
|
||||
grant permissions.
|
||||
|
||||
inheritance (bool):
|
||||
If True the object will inherit permissions from the parent, if
|
||||
False, inheritance will be disabled. Inheritance setting will not
|
||||
apply to parent directories if they must be created
|
||||
|
||||
Returns:
|
||||
bool: True if successful, otherwise raise an error
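As a complement to the YAML snippets above, the same grant/deny structure can be passed from Python. The sketch below is hedged: the directory and account names are placeholders, the permissions reuse the ones documented above, and a configured Windows minion is assumed.

.. code-block:: python

    # Hedged sketch: calling file.set_perms with the documented grant/deny
    # dictionary shapes. Path and account names are placeholders.
    import salt.client

    caller = salt.client.Caller()
    result = caller.cmd(
        'file.set_perms',
        r'C:\Temp\share',
        grant_perms={'jsmith': {'perms': 'full_control',
                                'applies_to': 'this_folder_subfolders_files'}},
        deny_perms={'Guests': {'perms': ['read_attributes', 'read_ea'],
                               'applies_to': 'this_folder'}},
        inheritance=True,
    )
    print(result)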
|
||||
|
@ -15,6 +15,7 @@ from __future__ import absolute_import, unicode_literals
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import decimal
|
||||
|
||||
# Import salt libs
|
||||
from salt.ext.six.moves import range
|
||||
@ -108,6 +109,22 @@ def _list_certs(certificate_store='My'):
|
||||
return ret
|
||||
|
||||
|
||||
def _iisVersion():
|
||||
pscmd = []
|
||||
pscmd.append(r"Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\InetStp\\")
|
||||
pscmd.append(' | Select-Object MajorVersion, MinorVersion')
|
||||
|
||||
cmd_ret = _srvmgr(pscmd, return_json=True)
|
||||
|
||||
try:
|
||||
items = json.loads(cmd_ret['stdout'], strict=False)
|
||||
except ValueError:
|
||||
log.error('Unable to parse return data as Json.')
|
||||
return -1
|
||||
|
||||
return decimal.Decimal("{0}.{1}".format(items[0]['MajorVersion'], items[0]['MinorVersion']))
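To make the parsing step easy to follow in isolation, here is a hedged, standalone version of the same JSON-to-Decimal conversion, fed with a hard-coded sample of what the PowerShell query is expected to print:

.. code-block:: python

    # Standalone sketch of the version parsing in _iisVersion(); the sample
    # stdout below is an assumption about the PowerShell output shape.
    import decimal
    import json

    sample_stdout = '{"MajorVersion": 8, "MinorVersion": 5}'
    items = json.loads(sample_stdout, strict=False)
    if not isinstance(items, list):
        # PowerShell emits a bare object when a single item is returned
        items = [items]
    version = decimal.Decimal('{0}.{1}'.format(items[0]['MajorVersion'],
                                               items[0]['MinorVersion']))
    print(version < 8)   # False here; IIS 7.x would take the non-SNI branches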
|
||||
|
||||
|
||||
def _srvmgr(cmd, return_json=False):
|
||||
'''
|
||||
Execute a powershell command from the WebAdministration PS module.
|
||||
@ -765,6 +782,11 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
|
||||
'''
|
||||
name = str(name).upper()
|
||||
binding_info = _get_binding_info(hostheader, ipaddress, port)
|
||||
|
||||
if _iisVersion() < 8:
|
||||
# IIS 7.5 and earlier don't support SNI for HTTPS, therefore cert bindings don't contain the host header
|
||||
binding_info = binding_info.rpartition(':')[0] + ':'
|
||||
|
||||
binding_path = r"IIS:\SslBindings\{0}".format(binding_info.replace(':', '!'))
|
||||
|
||||
if sslflags not in _VALID_SSL_FLAGS:
|
||||
@ -801,10 +823,19 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
|
||||
log.error('Certificate not present: {0}'.format(name))
|
||||
return False
|
||||
|
||||
ps_cmd = ['New-Item',
|
||||
'-Path', "'{0}'".format(binding_path),
|
||||
'-Thumbprint', "'{0}'".format(name),
|
||||
'-SSLFlags', '{0}'.format(sslflags)]
|
||||
if _iisVersion() < 8:
|
||||
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
|
||||
# Modify IP spec to IIS 7.5 format
|
||||
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
|
||||
|
||||
ps_cmd = ['New-Item',
|
||||
'-Path', "'{0}'".format(iis7path),
|
||||
'-Thumbprint', "'{0}'".format(name)]
|
||||
else:
|
||||
ps_cmd = ['New-Item',
|
||||
'-Path', "'{0}'".format(binding_path),
|
||||
'-Thumbprint', "'{0}'".format(name),
|
||||
'-SSLFlags', '{0}'.format(sslflags)]
|
||||
|
||||
cmd_ret = _srvmgr(ps_cmd)
|
||||
|
||||
@ -824,6 +855,7 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
|
||||
return True
|
||||
|
||||
log.error('Unable to create certificate binding: {0}'.format(name))
|
||||
|
||||
return False
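A compact, hedged illustration of the two pre-SNI adjustments above (dropping the host header from the binding and rewriting the wildcard IP), using made-up values and an assumed ip:port:hostheader binding format:

.. code-block:: python

    # Hedged sketch of the IIS 7.5 adjustments in create_cert_binding. The
    # binding format (ip:port:hostheader) and sample values are assumptions.
    hostheader, ipaddress, port = 'www.example.com', '*', 443
    binding_info = '{0}:{1}:{2}'.format(ipaddress, port, hostheader)

    iis_version = 7.5   # the module gets this from _iisVersion()
    if iis_version < 8:
        # No SNI support: strip the host header from the cert binding
        binding_info = binding_info.rpartition(':')[0] + ':'

    binding_path = r'IIS:\SslBindings\{0}'.format(binding_info.replace(':', '!'))
    if iis_version < 8:
        # IIS 7.5 wants an explicit 0.0.0.0 rather than the '*' wildcard
        binding_path = binding_path.replace(r'\*!', '\\0.0.0.0!')

    print(binding_path)   # IIS:\SslBindings\0.0.0.0!443!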
|
||||
|
||||
|
||||
|
@ -481,9 +481,28 @@ def _refresh_db_conditional(saltenv, **kwargs):
|
||||
|
||||
def refresh_db(**kwargs):
|
||||
'''
|
||||
Fectches metadata files and calls :py:func:`pkg.genrepo
|
||||
Fetches metadata files and calls :py:func:`pkg.genrepo
|
||||
<salt.modules.win_pkg.genrepo>` to compile updated repository metadata.
|
||||
|
||||
Kwargs:
|
||||
saltenv (str): Salt environment. Default: ``base``
|
||||
verbose (bool):
|
||||
Return verbose data structure which includes 'success_list', a list
|
||||
of all sls files and the package names contained within. Default
|
||||
'False'
|
||||
failhard (bool):
|
||||
If ``True``, an error will be raised if any repo SLS files failed to
|
||||
process. If ``False``, no error will be raised, and a dictionary
|
||||
containing the full results will be returned.
|
||||
|
||||
Returns:
|
||||
dict: A dictionary containing the results of the database refresh.
|
||||
|
||||
.. Warning::
|
||||
When calling this command from a state using `module.run` be sure to
|
||||
pass `failhard: False`. Otherwise the state will report failure if it
|
||||
encounters a bad software definition file.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -12,47 +12,59 @@ or for problem solving if your minion is having problems.
|
||||
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import ctypes
|
||||
import sys
|
||||
import time
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils
|
||||
import salt.ext.six as six
|
||||
import salt.utils.event
|
||||
from salt._compat import subprocess
|
||||
from salt.utils.network import host_to_ips as _host_to_ips
|
||||
# pylint: disable=W0611
|
||||
from salt.modules.status import ping_master, time_
|
||||
import copy
|
||||
# pylint: enable=W0611
|
||||
from salt.utils import namespaced_function as _namespaced_function
|
||||
|
||||
import os
|
||||
import ctypes
|
||||
import sys
|
||||
import time
|
||||
import datetime
|
||||
from subprocess import list2cmdline
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
# Import 3rd Party Libs
|
||||
if salt.utils.is_windows():
|
||||
import wmi
|
||||
import salt.utils.winapi
|
||||
has_required_packages = True
|
||||
except ImportError:
|
||||
if salt.utils.is_windows():
|
||||
log.exception('pywin32 and wmi python packages are required '
|
||||
'in order to use the status module.')
|
||||
has_required_packages = False
|
||||
HAS_WMI = True
|
||||
else:
|
||||
HAS_WMI = False
|
||||
|
||||
__opts__ = {}
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'status'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only works on Windows systems
|
||||
Only works on Windows systems with WMI and WinAPI
|
||||
'''
|
||||
if salt.utils.is_windows() and has_required_packages:
|
||||
return __virtualname__
|
||||
return (False, 'Cannot load win_status module on non-windows')
|
||||
if not salt.utils.is_windows():
|
||||
return False, 'win_status.py: Requires Windows'
|
||||
|
||||
if not HAS_WMI:
|
||||
return False, 'win_status.py: Requires WMI and WinAPI'
|
||||
|
||||
# Namespace modules from `status.py`
|
||||
global ping_master, time_
|
||||
ping_master = _namespaced_function(ping_master, globals())
|
||||
time_ = _namespaced_function(time_, globals())
|
||||
|
||||
return __virtualname__
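For readers unfamiliar with the namespacing step above, this is roughly the mechanism (not Salt's actual implementation): rebuild the function object so its globals dictionary is the borrowing module's, letting dunders such as ``__salt__`` resolve here instead of in ``status.py``.

.. code-block:: python

    # Rough, hedged equivalent of what _namespaced_function does for
    # ping_master and time_ above; not Salt's actual implementation.
    import types

    def namespaced_copy(func, new_globals):
        # Same code object, but globals are taken from the borrowing module,
        # so lookups like __salt__ or __opts__ resolve in that module.
        return types.FunctionType(func.__code__, new_globals, func.__name__,
                                  func.__defaults__, func.__closure__)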
|
||||
|
||||
__func_alias__ = {
|
||||
'time_': 'time'
|
||||
}
|
||||
|
||||
|
||||
def cpuload():
|
||||
@ -69,17 +81,8 @@ def cpuload():
|
||||
'''
|
||||
|
||||
# Pull in the information from WMIC
|
||||
cmd = list2cmdline(['wmic', 'cpu'])
|
||||
info = __salt__['cmd.run'](cmd).split('\r\n')
|
||||
|
||||
# Find the location of LoadPercentage
|
||||
column = info[0].index('LoadPercentage')
|
||||
|
||||
# Get the end of the number.
|
||||
end = info[1].index(' ', column+1)
|
||||
|
||||
# Pull it out of the information and cast it to an int
|
||||
return int(info[1][column:end])
|
||||
cmd = ['wmic', 'cpu', 'get', 'loadpercentage', '/value']
|
||||
return int(__salt__['cmd.run'](cmd).split('=')[1])
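The ``/value`` form of wmic prints ``Name=value`` pairs, which is what makes the one-line split above safe. A hedged, standalone sketch of that parsing on a hard-coded sample:

.. code-block:: python

    # Standalone sketch of the '/value' parsing used by cpuload() (and, in the
    # same spirit, uptime()); the sample string is an assumed wmic output shape.
    sample = '\r\n\r\nLoadPercentage=12\r\n\r\n'
    load = int(sample.split('=')[1])   # int() tolerates the trailing CR/LFs
    print(load)                        # 12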
|
||||
|
||||
|
||||
def diskusage(human_readable=False, path=None):
|
||||
@ -203,18 +206,9 @@ def uptime(human_readable=False):
|
||||
'''
|
||||
|
||||
# Open up a subprocess to get information from WMIC
|
||||
cmd = list2cmdline(['wmic', 'os', 'get', 'lastbootuptime'])
|
||||
outs = __salt__['cmd.run'](cmd)
|
||||
cmd = ['wmic', 'os', 'get', 'lastbootuptime', '/value']
|
||||
startup_time = __salt__['cmd.run'](cmd).split('=')[1][:14]
|
||||
|
||||
# Get the line that has when the computer started in it:
|
||||
stats_line = ''
|
||||
# use second line from output
|
||||
stats_line = outs.split('\r\n')[1]
|
||||
|
||||
# Extract the time string from the line and parse
|
||||
#
|
||||
# Get string, just use the leading 14 characters
|
||||
startup_time = stats_line[:14]
|
||||
# Convert to time struct
|
||||
startup_time = time.strptime(startup_time, '%Y%m%d%H%M%S')
|
||||
# Convert to datetime object
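Continuing the conversion shown above, a hedged standalone sketch that turns a made-up WMI ``LastBootUpTime`` value into a ``datetime``:

.. code-block:: python

    # Hedged sketch: parse the leading 14 characters of a WMI LastBootUpTime
    # value exactly as uptime() does; the timestamp literal is made up.
    import datetime
    import time

    raw = '20240105083015.500000+060'
    boot_struct = time.strptime(raw[:14], '%Y%m%d%H%M%S')
    boot_dt = datetime.datetime(*boot_struct[:6])
    print(boot_dt.isoformat())   # 2024-01-05T08:30:15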
|
||||
|
@ -224,7 +224,7 @@ def apiinfo_version(**connection_args):
|
||||
salt '*' zabbix.apiinfo_version
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'apiinfo.version'
|
||||
@ -264,6 +264,7 @@ def user_create(alias, passwd, usrgrps, **connection_args):
|
||||
salt '*' zabbix.user_create james password007 '[7, 12]' firstname='James Bond'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.create'
|
||||
@ -302,6 +303,7 @@ def user_delete(users, **connection_args):
|
||||
salt '*' zabbix.user_delete 15
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.delete'
|
||||
@ -337,6 +339,7 @@ def user_exists(alias, **connection_args):
|
||||
salt '*' zabbix.user_exists james
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.get'
|
||||
@ -346,7 +349,7 @@ def user_exists(alias, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
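The many small hunks in this module share one shape, which the added ``ret = False`` lines make explicit. Below is a condensed, hedged sketch of that shape; ``_login`` and ``_query`` stand in for the module's private helpers, and the connection/result structure is an assumption.

.. code-block:: python

    # Hedged sketch of the recurring zabbix call pattern: log in, run one API
    # method, and fall back to False when the expected key is missing.
    # _login/_query are stand-ins for the module's private helpers.
    def _exists_sketch(alias, _login, _query, **connection_args):
        conn_args = _login(**connection_args)
        ret = False                     # pre-initialised, as in the hunks above
        try:
            if conn_args:
                result = _query('user.get',
                                {'filter': {'alias': alias}},
                                conn_args['url'], conn_args['auth'])
                ret = bool(result['result'])
            else:
                raise KeyError
        except KeyError:
            return False
        return ret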
|
||||
|
||||
|
||||
def user_get(alias=None, userids=None, **connection_args):
|
||||
@ -369,6 +372,7 @@ def user_get(alias=None, userids=None, **connection_args):
|
||||
salt '*' zabbix.user_get james
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.get'
|
||||
@ -385,7 +389,7 @@ def user_get(alias=None, userids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def user_update(userid, **connection_args):
|
||||
@ -409,6 +413,7 @@ def user_update(userid, **connection_args):
|
||||
salt '*' zabbix.user_update 16 visible_name='James Brown'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.update'
|
||||
@ -444,6 +449,7 @@ def user_getmedia(userids=None, **connection_args):
|
||||
salt '*' zabbix.user_getmedia
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usermedia.get'
|
||||
@ -457,7 +463,7 @@ def user_getmedia(userids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **connection_args):
|
||||
@ -486,6 +492,7 @@ def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **conn
|
||||
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.addmedia'
|
||||
@ -526,6 +533,7 @@ def user_deletemedia(mediaids, **connection_args):
|
||||
salt '*' zabbix.user_deletemedia 27
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.deletemedia'
|
||||
@ -559,6 +567,7 @@ def user_list(**connection_args):
|
||||
salt '*' zabbix.user_list
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'user.get'
|
||||
@ -592,6 +601,7 @@ def usergroup_create(name, **connection_args):
|
||||
salt '*' zabbix.usergroup_create GroupName
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usergroup.create'
|
||||
@ -623,6 +633,7 @@ def usergroup_delete(usergroupids, **connection_args):
|
||||
salt '*' zabbix.usergroup_delete 28
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usergroup.delete'
|
||||
@ -660,6 +671,7 @@ def usergroup_exists(name=None, node=None, nodeids=None, **connection_args):
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
zabbix_version = apiinfo_version(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
# usergroup.exists deprecated
|
||||
@ -688,7 +700,7 @@ def usergroup_exists(name=None, node=None, nodeids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def usergroup_get(name=None, usrgrpids=None, userids=None, **connection_args):
|
||||
@ -714,6 +726,7 @@ def usergroup_get(name=None, usrgrpids=None, userids=None, **connection_args):
|
||||
salt '*' zabbix.usergroup_get Guests
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usergroup.get'
|
||||
@ -733,7 +746,7 @@ def usergroup_get(name=None, usrgrpids=None, userids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def usergroup_update(usrgrpid, **connection_args):
|
||||
@ -757,6 +770,7 @@ def usergroup_update(usrgrpid, **connection_args):
|
||||
salt '*' zabbix.usergroup_update 8 name=guestsRenamed
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usergroup.update'
|
||||
@ -788,6 +802,7 @@ def usergroup_list(**connection_args):
|
||||
salt '*' zabbix.usergroup_list
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'usergroup.get'
|
||||
@ -797,7 +812,7 @@ def usergroup_list(**connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def host_create(host, groups, interfaces, **connection_args):
|
||||
@ -828,6 +843,7 @@ def host_create(host, groups, interfaces, **connection_args):
|
||||
visible_name='Host Visible Name'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'host.create'
|
||||
@ -871,6 +887,7 @@ def host_delete(hostids, **connection_args):
|
||||
salt '*' zabbix.host_delete 10106
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'host.delete'
|
||||
@ -910,7 +927,7 @@ def host_exists(host=None, hostid=None, name=None, node=None, nodeids=None, **co
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
zabbix_version = apiinfo_version(**connection_args)
|
||||
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
# hostgroup.exists deprecated
|
||||
@ -948,7 +965,7 @@ def host_exists(host=None, hostid=None, name=None, node=None, nodeids=None, **co
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def host_get(host=None, name=None, hostids=None, **connection_args):
|
||||
@ -975,6 +992,7 @@ def host_get(host=None, name=None, hostids=None, **connection_args):
|
||||
salt '*' zabbix.host_get 'Zabbix server'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'host.get'
|
||||
@ -993,7 +1011,7 @@ def host_get(host=None, name=None, hostids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def host_update(hostid, **connection_args):
|
||||
@ -1020,6 +1038,7 @@ def host_update(hostid, **connection_args):
|
||||
salt '*' zabbix.host_update 10084 name='Zabbix server2'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'host.update'
|
||||
@ -1051,6 +1070,7 @@ def host_list(**connection_args):
|
||||
salt '*' zabbix.host_list
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'host.get'
|
||||
@ -1060,7 +1080,7 @@ def host_list(**connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def hostgroup_create(name, **connection_args):
|
||||
@ -1084,6 +1104,7 @@ def hostgroup_create(name, **connection_args):
|
||||
salt '*' zabbix.hostgroup_create MyNewGroup
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostgroup.create'
|
||||
@ -1116,6 +1137,7 @@ def hostgroup_delete(hostgroupids, **connection_args):
|
||||
salt '*' zabbix.hostgroup_delete 23
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostgroup.delete'
|
||||
@ -1154,6 +1176,7 @@ def hostgroup_exists(name=None, groupid=None, node=None, nodeids=None, **connect
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
zabbix_version = apiinfo_version(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
# hostgroup.exists deprecated
|
||||
@ -1187,7 +1210,7 @@ def hostgroup_exists(name=None, groupid=None, node=None, nodeids=None, **connect
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def hostgroup_get(name=None, groupids=None, hostids=None, **connection_args):
|
||||
@ -1216,6 +1239,7 @@ def hostgroup_get(name=None, groupids=None, hostids=None, **connection_args):
|
||||
salt '*' zabbix.hostgroup_get MyNewGroup
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostgroup.get'
|
||||
@ -1235,7 +1259,7 @@ def hostgroup_get(name=None, groupids=None, hostids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def hostgroup_update(groupid, name=None, **connection_args):
|
||||
@ -1260,6 +1284,7 @@ def hostgroup_update(groupid, name=None, **connection_args):
|
||||
salt '*' zabbix.hostgroup_update 24 name='Renamed Name'
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostgroup.update'
|
||||
@ -1293,6 +1318,7 @@ def hostgroup_list(**connection_args):
|
||||
salt '*' zabbix.hostgroup_list
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostgroup.get'
|
||||
@ -1302,7 +1328,7 @@ def hostgroup_list(**connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def hostinterface_get(hostids, **connection_args):
|
||||
@ -1326,6 +1352,7 @@ def hostinterface_get(hostids, **connection_args):
|
||||
salt '*' zabbix.hostinterface_get 101054
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostinterface.get'
|
||||
@ -1338,7 +1365,7 @@ def hostinterface_get(hostids, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def hostinterface_create(hostid, ip, dns='', main=1, type=1, useip=1, port=None, **connection_args):
|
||||
@ -1369,6 +1396,7 @@ def hostinterface_create(hostid, ip, dns='', main=1, type=1, useip=1, port=None,
|
||||
salt '*' zabbix.hostinterface_create 10105 192.193.194.197
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
|
||||
if not port:
|
||||
port = INTERFACE_DEFAULT_PORTS[type]
|
||||
@ -1405,6 +1433,7 @@ def hostinterface_delete(interfaceids, **connection_args):
|
||||
salt '*' zabbix.hostinterface_delete 50
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostinterface.delete'
|
||||
@ -1441,6 +1470,7 @@ def hostinterface_update(interfaceid, **connection_args):
|
||||
salt '*' zabbix.hostinterface_update 6 ip=0.0.0.2
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'hostinterface.update'
|
||||
@ -1482,6 +1512,7 @@ def template_get(name=None, host=None, templateids=None, **connection_args):
|
||||
salt '*' zabbix.template_get templateids="['10050', '10001']"
|
||||
'''
|
||||
conn_args = _login(**connection_args)
|
||||
ret = False
|
||||
try:
|
||||
if conn_args:
|
||||
method = 'template.get'
|
||||
@ -1498,7 +1529,7 @@ def template_get(name=None, host=None, templateids=None, **connection_args):
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def run_query(method, params, **connection_args):
|
||||
|
@ -125,7 +125,7 @@ class NetapiClient(object):
|
||||
disable_custom_roster=True)
|
||||
return ssh_client.cmd_sync(kwargs)
|
||||
|
||||
def runner(self, fun, timeout=None, **kwargs):
|
||||
def runner(self, fun, timeout=None, full_return=False, **kwargs):
|
||||
'''
|
||||
Run `runner modules <all-salt.runners>` synchronously
|
||||
|
||||
@ -138,7 +138,7 @@ class NetapiClient(object):
|
||||
'''
|
||||
kwargs['fun'] = fun
|
||||
runner = salt.runner.RunnerClient(self.opts)
|
||||
return runner.cmd_sync(kwargs, timeout=timeout)
|
||||
return runner.cmd_sync(kwargs, timeout=timeout, full_return=full_return)
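A hedged usage sketch of the new ``full_return`` flag, which asks the runner client for its full return structure rather than just the bare data; the master config path and runner function are placeholders.

.. code-block:: python

    # Hedged usage sketch for the new full_return flag on NetapiClient.runner.
    # The config path and runner function below are placeholders.
    import salt.config
    import salt.netapi

    opts = salt.config.client_config('/etc/salt/master')
    client = salt.netapi.NetapiClient(opts)
    ret = client.runner('jobs.list_jobs', full_return=True)
    print(ret)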
|
||||
|
||||
def runner_async(self, fun, **kwargs):
|
||||
'''
|
||||
|
@ -36,26 +36,14 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def start():
|
||||
'''
|
||||
Start the saltnado!
|
||||
'''
|
||||
def get_application(opts):
|
||||
try:
|
||||
from . import saltnado
|
||||
except ImportError as err:
|
||||
logger.error('ImportError! {0}'.format(str(err)))
|
||||
return None
|
||||
|
||||
mod_opts = __opts__.get(__virtualname__, {})
|
||||
|
||||
if 'num_processes' not in mod_opts:
|
||||
mod_opts['num_processes'] = 1
|
||||
|
||||
if mod_opts['num_processes'] > 1 and mod_opts.get('debug', False) is True:
|
||||
raise Exception((
|
||||
'Tornado\'s debug implementation is not compatible with multiprocess. '
|
||||
'Either disable debug, or set num_processes to 1.'
|
||||
))
|
||||
mod_opts = opts.get(__virtualname__, {})
|
||||
|
||||
paths = [
|
||||
(r"/", saltnado.SaltAPIHandler),
|
||||
@ -73,7 +61,7 @@ def start():
|
||||
if mod_opts.get('websockets', False):
|
||||
from . import saltnado_websockets
|
||||
|
||||
token_pattern = r"([0-9A-Fa-f]{{{0}}})".format(len(getattr(hashlib, __opts__.get('hash_type', 'md5'))().hexdigest()))
|
||||
token_pattern = r"([0-9A-Fa-f]{{{0}}})".format(len(getattr(hashlib, opts.get('hash_type', 'md5'))().hexdigest()))
|
||||
all_events_pattern = r"/all_events/{0}".format(token_pattern)
|
||||
formatted_events_pattern = r"/formatted_events/{0}".format(token_pattern)
|
||||
logger.debug("All events URL pattern is {0}".format(all_events_pattern))
|
||||
@ -89,9 +77,26 @@ def start():
|
||||
|
||||
application = tornado.web.Application(paths, debug=mod_opts.get('debug', False))
|
||||
|
||||
application.opts = __opts__
|
||||
application.opts = opts
|
||||
application.mod_opts = mod_opts
|
||||
application.auth = salt.auth.LoadAuth(__opts__)
|
||||
application.auth = salt.auth.LoadAuth(opts)
|
||||
return application
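One practical consequence of this refactor is that the Tornado application can now be built directly from an opts dictionary, for example in tests or when embedding the API. A hedged sketch follows; the import location, config path and port are assumptions.

.. code-block:: python

    # Hedged sketch: build and serve the app via the new get_application(opts)
    # without going through start(). Import path, config path and port are
    # assumptions for illustration only.
    import salt.config
    import tornado.httpserver
    import tornado.ioloop
    from salt.netapi.rest_tornado import get_application

    opts = salt.config.client_config('/etc/salt/master')
    application = get_application(opts)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8000)
    tornado.ioloop.IOLoop.current().start()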
|
||||
|
||||
|
||||
def start():
|
||||
'''
|
||||
Start the saltnado!
|
||||
'''
|
||||
mod_opts = __opts__.get(__virtualname__, {})
|
||||
|
||||
if 'num_processes' not in mod_opts:
|
||||
mod_opts['num_processes'] = 1
|
||||
|
||||
if mod_opts['num_processes'] > 1 and mod_opts.get('debug', False) is True:
|
||||
raise Exception((
|
||||
'Tornado\'s debug implementation is not compatible with multiprocess. '
|
||||
'Either disable debug, or set num_processes to 1.'
|
||||
))
|
||||
|
||||
# the kwargs for the HTTPServer
|
||||
kwargs = {}
|
||||
@ -109,7 +114,7 @@ def start():
|
||||
ssl_opts.update({'keyfile': mod_opts['ssl_key']})
|
||||
kwargs['ssl_options'] = ssl_opts
|
||||
|
||||
http_server = tornado.httpserver.HTTPServer(application, **kwargs)
|
||||
http_server = tornado.httpserver.HTTPServer(get_application(__opts__), **kwargs)
|
||||
try:
|
||||
http_server.bind(mod_opts['port'],
|
||||
address=mod_opts.get('address'),
|
||||
|
Some files were not shown because too many files have changed in this diff.