mirror of
https://github.com/valitydev/salt.git
synced 2024-11-09 01:36:48 +00:00
Merge branch 'develop' into zmq_ioflo
Conflicts: salt/config.py
This commit is contained in:
commit
96c84e697b
6
.gitignore
vendored
6
.gitignore
vendored
@ -24,6 +24,8 @@ lib64
|
|||||||
pip/
|
pip/
|
||||||
share/
|
share/
|
||||||
tests/integration/tmp/
|
tests/integration/tmp/
|
||||||
|
tests/cachedir/
|
||||||
|
tests/unit/templates/roots/
|
||||||
|
|
||||||
# tox - ignore any tox-created virtualenv dirs
|
# tox - ignore any tox-created virtualenv dirs
|
||||||
.tox
|
.tox
|
||||||
@ -64,3 +66,7 @@ _version.py
|
|||||||
|
|
||||||
# Ignore grains file written out during tests
|
# Ignore grains file written out during tests
|
||||||
tests/integration/files/conf/grains
|
tests/integration/files/conf/grains
|
||||||
|
/salt/_syspaths.py
|
||||||
|
|
||||||
|
# ignore the local root
|
||||||
|
/root/**
|
||||||
|
@ -194,7 +194,7 @@ dummy-variables-rgx=_|dummy
|
|||||||
|
|
||||||
# List of additional names supposed to be defined in builtins. Remember that
|
# List of additional names supposed to be defined in builtins. Remember that
|
||||||
# you should avoid to define new builtins when possible.
|
# you should avoid to define new builtins when possible.
|
||||||
additional-builtins=__opts__,__salt__,__pillar__,__grains__,__context__,__ret__,__env__,__low__,__states__,__lowstate__,__running__,__active_provider_name__,__master_opts__,__progress__
|
additional-builtins=__opts__,__salt__,__pillar__,__grains__,__context__,__ret__,__env__,__low__,__states__,__lowstate__,__running__,__active_provider_name__,__master_opts__,__jid_event__
|
||||||
|
|
||||||
|
|
||||||
[SIMILARITIES]
|
[SIMILARITIES]
|
||||||
|
@ -154,7 +154,7 @@ indent-string=' '
|
|||||||
|
|
||||||
# List of additional names supposed to be defined in builtins. Remember that
|
# List of additional names supposed to be defined in builtins. Remember that
|
||||||
# you should avoid to define new builtins when possible.
|
# you should avoid to define new builtins when possible.
|
||||||
additional-builtins=__opts__,__salt__,__pillar__,__grains__,__context__,__ret__,__env__,__low__,__states__,__lowstate__,__running__,__active_provider_name__,__master_opts__,__progress__
|
additional-builtins=__opts__,__salt__,__pillar__,__grains__,__context__,__ret__,__env__,__low__,__states__,__lowstate__,__running__,__active_provider_name__,__master_opts__,__jid_event__
|
||||||
|
|
||||||
|
|
||||||
[IMPORTS]
|
[IMPORTS]
|
||||||
|
@ -16,8 +16,8 @@ before_install:
|
|||||||
- pip install git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting
|
- pip install git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- pip install -r zeromq-requirements.txt -r cloud-requirements.txt
|
- pip install -r requirements/zeromq.txt -r requirements/cloud.txt
|
||||||
- pip install --allow-all-external -r opt_requirements.txt
|
- pip install --allow-all-external -r requirements/opt.txt
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- "/home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/pylint --rcfile=.testing.pylintrc salt/ && echo 'Finished Pylint Check Cleanly' || echo 'Finished Pylint Check With Errors'"
|
- "/home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/pylint --rcfile=.testing.pylintrc salt/ && echo 'Finished Pylint Check Cleanly' || echo 'Finished Pylint Check With Errors'"
|
||||||
|
1
AUTHORS
1
AUTHORS
@ -91,6 +91,7 @@ Robert Fielding
|
|||||||
Sean Channel <pentabular@gmail.com>
|
Sean Channel <pentabular@gmail.com>
|
||||||
Seth House <seth@eseth.com>
|
Seth House <seth@eseth.com>
|
||||||
Seth Vidal <skvidal@fedoraproject.org>
|
Seth Vidal <skvidal@fedoraproject.org>
|
||||||
|
Stas Alekseev <stas.alekseev@gmail.com>
|
||||||
Thomas Schreiber <tom@rizumu.us>
|
Thomas Schreiber <tom@rizumu.us>
|
||||||
Thomas S Hatch <thatch45@gmail.com>
|
Thomas S Hatch <thatch45@gmail.com>
|
||||||
Tor Hveem <xt@bash.no>
|
Tor Hveem <xt@bash.no>
|
||||||
|
@ -163,7 +163,7 @@ ZeroMQ Transport:
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
pip install -r zeromq-requirements.txt
|
pip install -r requirements/zeromq.txt
|
||||||
pip install psutil
|
pip install psutil
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
@ -180,7 +180,7 @@ RAET Transport:
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
pip install -r raet-requirements.txt
|
pip install -r requirements/raet.txt
|
||||||
pip install psutil
|
pip install psutil
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
@ -279,9 +279,9 @@ If it is less than 2047, you should increase it with::
|
|||||||
Running the tests
|
Running the tests
|
||||||
~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
For running tests, you'll also need to install ``dev_requirements_python2x.txt``::
|
For running tests, you'll also need to install ``requirements/dev_python2x.txt``::
|
||||||
|
|
||||||
pip install -r dev_requirements_python2x.txt
|
pip install -r requirements/dev_python2x.txt
|
||||||
|
|
||||||
Finally you use setup.py to run the tests with the following command::
|
Finally you use setup.py to run the tests with the following command::
|
||||||
|
|
||||||
|
@ -2,10 +2,10 @@ include AUTHORS
|
|||||||
include HACKING.rst
|
include HACKING.rst
|
||||||
include LICENSE
|
include LICENSE
|
||||||
include README.rst
|
include README.rst
|
||||||
include _requirements.txt
|
include requirements/base.txt
|
||||||
include raet-requirements.txt
|
include requirements/raet.txt
|
||||||
include cloud-requirements.txt
|
include requirements/cloud.txt
|
||||||
include zeromq-requirements.txt
|
include requirements/zeromq.txt
|
||||||
include tests/*.py
|
include tests/*.py
|
||||||
recursive-include tests *
|
recursive-include tests *
|
||||||
include tests/integration/modules/files/*
|
include tests/integration/modules/files/*
|
||||||
|
@ -13,7 +13,7 @@ Documentation
|
|||||||
=============
|
=============
|
||||||
|
|
||||||
Installation instructions, getting started guides, and in-depth API
|
Installation instructions, getting started guides, and in-depth API
|
||||||
documention.
|
documentation.
|
||||||
|
|
||||||
http://docs.saltstack.com
|
http://docs.saltstack.com
|
||||||
|
|
||||||
|
@ -645,6 +645,9 @@
|
|||||||
|
|
||||||
# The format of the console logging messages. Allowed formatting options can
|
# The format of the console logging messages. Allowed formatting options can
|
||||||
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
|
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
|
||||||
|
#
|
||||||
|
# Console log colors are specified by these special log formatters:
|
||||||
|
# %(colorlevel), %(colorname), %(colorprocess), %(colormsg).
|
||||||
#log_fmt_console: '[%(levelname)-8s] %(message)s'
|
#log_fmt_console: '[%(levelname)-8s] %(message)s'
|
||||||
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
|
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
|
||||||
|
|
||||||
|
@ -460,6 +460,10 @@
|
|||||||
# states is cluttering the logs. Set it to True to ignore them.
|
# states is cluttering the logs. Set it to True to ignore them.
|
||||||
#state_output_diff: False
|
#state_output_diff: False
|
||||||
|
|
||||||
|
# The state_output_profile setting changes whether profile information
|
||||||
|
# will be shown for each state run.
|
||||||
|
#state_output_profile: True
|
||||||
|
|
||||||
# Fingerprint of the master public key to double verify the master is valid,
|
# Fingerprint of the master public key to double verify the master is valid,
|
||||||
# the master fingerprint can be found by running "salt-key -F master" on the
|
# the master fingerprint can be found by running "salt-key -F master" on the
|
||||||
# salt master.
|
# salt master.
|
||||||
@ -505,6 +509,9 @@
|
|||||||
|
|
||||||
# The format of the console logging messages. Allowed formatting options can
|
# The format of the console logging messages. Allowed formatting options can
|
||||||
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
|
# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
|
||||||
|
#
|
||||||
|
# Console log colors are specified by these special log formatters:
|
||||||
|
# %(colorlevel), %(colorname), %(colorprocess), %(colormsg).
|
||||||
#log_fmt_console: '[%(levelname)-8s] %(message)s'
|
#log_fmt_console: '[%(levelname)-8s] %(message)s'
|
||||||
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
|
#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
|
||||||
|
|
||||||
|
@ -1,3 +0,0 @@
|
|||||||
-r dev_requirements_python27.txt
|
|
||||||
|
|
||||||
unittest2
|
|
@ -26,6 +26,17 @@
|
|||||||
|
|
||||||
The \texttt{top.sls} file is used to map what SLS modules get loaded onto what minions via the state system.\\
|
The \texttt{top.sls} file is used to map what SLS modules get loaded onto what minions via the state system.\\
|
||||||
|
|
||||||
|
It is located in the file defined in the \texttt{file_roots} variable of the
|
||||||
|
salt master configuration file which is found in
|
||||||
|
\texttt{CONFIG_DIR}/master.
|
||||||
|
|
||||||
|
The file roots is defined like this by default.
|
||||||
|
\begin{verbatim}
|
||||||
|
file_roots:
|
||||||
|
base:
|
||||||
|
- /srv/salt
|
||||||
|
\end{verbatim}
|
||||||
|
|
||||||
Here is an example \texttt{top.sls} file which uses \texttt{pkg}, \texttt{file} and \texttt{service} states:
|
Here is an example \texttt{top.sls} file which uses \texttt{pkg}, \texttt{file} and \texttt{service} states:
|
||||||
|
|
||||||
\begin{verbatim}
|
\begin{verbatim}
|
||||||
|
@ -155,6 +155,8 @@ version = salt.version.__version__
|
|||||||
#release = '.'.join(map(str, salt.version.__version_info__))
|
#release = '.'.join(map(str, salt.version.__version_info__))
|
||||||
release = '2014.7.0'
|
release = '2014.7.0'
|
||||||
|
|
||||||
|
needs_sphinx = '1.3'
|
||||||
|
|
||||||
spelling_lang = 'en_US'
|
spelling_lang = 'en_US'
|
||||||
language = 'en'
|
language = 'en'
|
||||||
locale_dirs = [
|
locale_dirs = [
|
||||||
@ -201,7 +203,7 @@ extlinks = {
|
|||||||
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
|
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
|
||||||
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
|
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
|
||||||
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
|
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
|
||||||
'formula': ('https://github.com/saltstack-formulas/%s', ''),
|
'formula_url': ('https://github.com/saltstack-formulas/%s', ''),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -214,7 +216,7 @@ gettext_compact = False
|
|||||||
### HTML options
|
### HTML options
|
||||||
html_theme = 'saltstack'
|
html_theme = 'saltstack'
|
||||||
html_theme_path = ['_themes']
|
html_theme_path = ['_themes']
|
||||||
html_title = None
|
html_title = u''
|
||||||
html_short_title = 'Salt'
|
html_short_title = 'Salt'
|
||||||
|
|
||||||
html_static_path = ['_static']
|
html_static_path = ['_static']
|
||||||
|
@ -67,6 +67,9 @@ States - Configuration Management with Salt:
|
|||||||
Masterless Quickstart:
|
Masterless Quickstart:
|
||||||
:doc:`Salt Quickstart </topics/tutorials/quickstart>`
|
:doc:`Salt Quickstart </topics/tutorials/quickstart>`
|
||||||
|
|
||||||
|
Running Salt without root access in userland:
|
||||||
|
- :doc:`Salt Usermode <topics/tutorials/rooted.rst>`
|
||||||
|
|
||||||
A list of all tutorials can be found here:
|
A list of all tutorials can be found here:
|
||||||
:doc:`All Salt tutorials <topics/tutorials/index>`
|
:doc:`All Salt tutorials <topics/tutorials/index>`
|
||||||
|
|
||||||
|
@ -19970,8 +19970,8 @@ the lines below, depending on the relevant Python version:
|
|||||||
.sp
|
.sp
|
||||||
.nf
|
.nf
|
||||||
.ft C
|
.ft C
|
||||||
pip install \-r dev_requirements_python26.txt
|
pip install \-r requirements/dev_python26.txt
|
||||||
pip install \-r dev_requirements_python27.txt
|
pip install \-r requirements/dev_python27.txt
|
||||||
.ft P
|
.ft P
|
||||||
.fi
|
.fi
|
||||||
.UNINDENT
|
.UNINDENT
|
||||||
|
@ -31,6 +31,81 @@ invoked with the following command:
|
|||||||
|
|
||||||
# salt-run test.foo
|
# salt-run test.foo
|
||||||
|
|
||||||
|
Runners have several options for controlling output.
|
||||||
|
|
||||||
|
Any ``print`` statement in a runner is automatically also
|
||||||
|
fired onto the master event bus where. For example:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
def a_runner(outputter=None, display_progress=False):
|
||||||
|
print('Hello world')
|
||||||
|
...
|
||||||
|
|
||||||
|
The above would result in an event fired as follows:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
Event fired at Tue Jan 13 15:26:45 2015
|
||||||
|
*************************
|
||||||
|
Tag: salt/run/20150113152644070246/print
|
||||||
|
Data:
|
||||||
|
{'_stamp': '2015-01-13T15:26:45.078707',
|
||||||
|
'data': 'hello',
|
||||||
|
'outputter': 'pprint'}
|
||||||
|
|
||||||
|
|
||||||
|
A runner may also send a progress event, which is displayed to the user during
|
||||||
|
runner execution and is also passed across the event bus if the ``display_progress``
|
||||||
|
argument to a runner is set to True.
|
||||||
|
|
||||||
|
A custom runner may send its own progress event by using the
|
||||||
|
``__jid_event_.fire_event()`` method as shown here:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
if display_progress:
|
||||||
|
__jid_event__.fire_event({'message': 'A progress message', 'progress')
|
||||||
|
|
||||||
|
The above would produce output on the console reading: ``A progress message``
|
||||||
|
as well as an event on the event similar to:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
Event fired at Tue Jan 13 15:21:20 2015
|
||||||
|
*************************
|
||||||
|
Tag: salt/run/20150113152118341421/progress
|
||||||
|
Data:
|
||||||
|
{'_stamp': '2015-01-13T15:21:20.390053',
|
||||||
|
'message': "A progress message"}
|
||||||
|
|
||||||
|
A runner could use the same approach to send an event with a customized tag
|
||||||
|
onto the event bus by replacing the second argument (``progress``) with
|
||||||
|
whatever tag is desired. However, this will not be shown on the command-line
|
||||||
|
and will only be fired onto the event bus.
|
||||||
|
|
||||||
|
Synchronous vs. Asynchronous
|
||||||
|
----------------------------
|
||||||
|
|
||||||
|
A runner may be fired asychronously which will immediately return control. In
|
||||||
|
this case, no output will be display to the user if ``salt-run`` is being used
|
||||||
|
from the command-line. If used programatically, no results will be returned.
|
||||||
|
If results are desired, they must be gathered either by firing events on the
|
||||||
|
bus from the runner and then watching for them or by some other means.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
When running a runner in asyncronous mode, the ``--progress`` flag will
|
||||||
|
not deliver output to the salt-run CLI. However, progress events will
|
||||||
|
still be fired on the bus.
|
||||||
|
|
||||||
|
In synchronous mode, which is the default, control will not be returned until
|
||||||
|
the runner has finished executing.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
To add custom runners, put them in a directory and add it to
|
To add custom runners, put them in a directory and add it to
|
||||||
:conf_master:`runner_dirs` in the master configuration file.
|
:conf_master:`runner_dirs` in the master configuration file.
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@ directory structure. A proper directory structure clearly defines the
|
|||||||
functionality of each state to the user via visual inspection of the state's
|
functionality of each state to the user via visual inspection of the state's
|
||||||
name.
|
name.
|
||||||
|
|
||||||
Reviewing the :formula:`MySQL Salt Formula <mysql-formula>`
|
Reviewing the :formula_url:`MySQL Salt Formula <mysql-formula>`
|
||||||
it is clear to see the benefits to the end-user when reviewing a sample of the
|
it is clear to see the benefits to the end-user when reviewing a sample of the
|
||||||
available states:
|
available states:
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ file in the following way:
|
|||||||
This clear definition ensures that the user is properly informed of what each
|
This clear definition ensures that the user is properly informed of what each
|
||||||
state will do.
|
state will do.
|
||||||
|
|
||||||
Another example comes from the :formula:`vim-formula`:
|
Another example comes from the :formula_url:`vim-formula`:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
@ -108,3 +108,24 @@ command:
|
|||||||
public:
|
public:
|
||||||
True
|
True
|
||||||
...SNIP...
|
...SNIP...
|
||||||
|
|
||||||
|
|
||||||
|
SmartDataCenter
|
||||||
|
===============
|
||||||
|
This driver can also be used with the Joyent SmartDataCenter project. More
|
||||||
|
details can be found at:
|
||||||
|
|
||||||
|
.. _`SmartDataCenter`: https://github.com/joyent/sdc
|
||||||
|
|
||||||
|
This requires that an api_host_suffix is set. The default value for this is
|
||||||
|
`.api.joyentcloud.com`. All characters, including the leading `.`, should be
|
||||||
|
included:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
my-joyent-config:
|
||||||
|
provider: joyent
|
||||||
|
user: fred
|
||||||
|
password: saltybacon
|
||||||
|
private_key: /root/joyent.pem
|
||||||
|
api_host_suffix: .api.myhostname.com
|
||||||
|
@ -170,3 +170,33 @@ configuration please add:
|
|||||||
size: 512 MB Standard
|
size: 512 MB Standard
|
||||||
image: FreeBSD 9.0
|
image: FreeBSD 9.0
|
||||||
force_first_gen: True
|
force_first_gen: True
|
||||||
|
|
||||||
|
Private Subnets
|
||||||
|
--------------------------------
|
||||||
|
By default salt-cloud will not add Rackspace private networks to new servers. To enable
|
||||||
|
a private network to a server instantiated by salt cloud, add the following section
|
||||||
|
to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
networks:
|
||||||
|
- fixed:
|
||||||
|
# This is the private network
|
||||||
|
- private-network-id
|
||||||
|
# This is Rackspace's "PublicNet"
|
||||||
|
- 00000000-0000-0000-0000-000000000000
|
||||||
|
# This is Rackspace's "ServiceNet"
|
||||||
|
- 11111111-1111-1111-1111-111111111111
|
||||||
|
|
||||||
|
To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.
|
||||||
|
|
||||||
|
The order of the networks in the above code block does not map to the order of the
|
||||||
|
ethernet devices on newly created servers. Public IP will always be first ( eth0 )
|
||||||
|
followed by servicenet ( eth1 ) and then private networks.
|
||||||
|
|
||||||
|
Enabling the private network per above gives the option of using the private subnet for
|
||||||
|
all master-minion communication, including the bootstrap install of salt-minion. To
|
||||||
|
enable the minion to use the private subnet, update the master: line in the minion:
|
||||||
|
section of the providers file. To configure the master to only listen on the private
|
||||||
|
subnet IP, update the interface: line in the /etc/salt/master file to be the private
|
||||||
|
subnet IP of the salt master.
|
||||||
|
@ -16,7 +16,7 @@ https://github.com/saltstack-formulas
|
|||||||
|
|
||||||
As a simple example, to install the popular Apache web server (using the normal
|
As a simple example, to install the popular Apache web server (using the normal
|
||||||
defaults for the underlying distro) simply include the
|
defaults for the underlying distro) simply include the
|
||||||
:formula:`apache-formula` from a top file:
|
:formula_url:`apache-formula` from a top file:
|
||||||
|
|
||||||
.. code-block:: yaml
|
.. code-block:: yaml
|
||||||
|
|
||||||
@ -107,7 +107,7 @@ Formula may be included in an existing ``sls`` file. This is often useful when
|
|||||||
a state you are writing needs to ``require`` or ``extend`` a state defined in
|
a state you are writing needs to ``require`` or ``extend`` a state defined in
|
||||||
the formula.
|
the formula.
|
||||||
|
|
||||||
Here is an example of a state that uses the :formula:`epel-formula` in a
|
Here is an example of a state that uses the :formula_url:`epel-formula` in a
|
||||||
``require`` declaration which directs Salt to not install the ``python26``
|
``require`` declaration which directs Salt to not install the ``python26``
|
||||||
package until after the EPEL repository has also been installed:
|
package until after the EPEL repository has also been installed:
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ referenced from other state files. It is usually cleanest to include these
|
|||||||
Formula directly from a Top File.
|
Formula directly from a Top File.
|
||||||
|
|
||||||
For example the easiest way to set up an OpenStack deployment on a single
|
For example the easiest way to set up an OpenStack deployment on a single
|
||||||
machine is to include the :formula:`openstack-standalone-formula` directly from
|
machine is to include the :formula_url:`openstack-standalone-formula` directly from
|
||||||
a :file:`top.sls` file:
|
a :file:`top.sls` file:
|
||||||
|
|
||||||
.. code-block:: yaml
|
.. code-block:: yaml
|
||||||
@ -172,7 +172,7 @@ normal state mechanisms. Formula can be required from other States with
|
|||||||
:ref:`requisites-require` declarations, they can be modified using ``extend``,
|
:ref:`requisites-require` declarations, they can be modified using ``extend``,
|
||||||
they can made to watch other states with :ref:`requisites-watch-in`.
|
they can made to watch other states with :ref:`requisites-watch-in`.
|
||||||
|
|
||||||
The following example uses the stock :formula:`apache-formula` alongside a
|
The following example uses the stock :formula_url:`apache-formula` alongside a
|
||||||
custom state to create a vhost on a Debian/Ubuntu system and to reload the
|
custom state to create a vhost on a Debian/Ubuntu system and to reload the
|
||||||
Apache service whenever the vhost is changed.
|
Apache service whenever the vhost is changed.
|
||||||
|
|
||||||
@ -1034,7 +1034,7 @@ without also including undesirable or unintended side-effects.
|
|||||||
|
|
||||||
The following is a best-practice example for a reusable Apache formula. (This
|
The following is a best-practice example for a reusable Apache formula. (This
|
||||||
skips platform-specific options for brevity. See the full
|
skips platform-specific options for brevity. See the full
|
||||||
:formula:`apache-formula` for more.)
|
:formula_url:`apache-formula` for more.)
|
||||||
|
|
||||||
.. code-block:: yaml
|
.. code-block:: yaml
|
||||||
|
|
||||||
@ -1199,9 +1199,9 @@ A basic Formula repository should have the following layout:
|
|||||||
|-- README.rst
|
|-- README.rst
|
||||||
`-- VERSION
|
`-- VERSION
|
||||||
|
|
||||||
.. seealso:: :formula:`template-formula`
|
.. seealso:: :formula_url:`template-formula`
|
||||||
|
|
||||||
The :formula:`template-formula` repository has a pre-built layout that
|
The :formula_url:`template-formula` repository has a pre-built layout that
|
||||||
serves as the basic structure for a new formula repository. Just copy the
|
serves as the basic structure for a new formula repository. Just copy the
|
||||||
files from there and edit them.
|
files from there and edit them.
|
||||||
|
|
||||||
|
@ -34,8 +34,9 @@ Create a new `virtualenv`_:
|
|||||||
|
|
||||||
.. _`virtualenv`: https://pypi.python.org/pypi/virtualenv
|
.. _`virtualenv`: https://pypi.python.org/pypi/virtualenv
|
||||||
|
|
||||||
On Arch Linux, where Python 3 is the default installation of Python, use the
|
Avoid making your :ref:`virtualenv path too long <too_long_socket_path>`.
|
||||||
``virtualenv2`` command instead of ``virtualenv``.
|
On Arch Linux, where Python 3 is the default installation of Python, use
|
||||||
|
the ``virtualenv2`` command instead of ``virtualenv``.
|
||||||
|
|
||||||
.. note:: Using system Python modules in the virtualenv
|
.. note:: Using system Python modules in the virtualenv
|
||||||
|
|
||||||
@ -121,8 +122,7 @@ Copy the master and minion config files into your virtualenv:
|
|||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
mkdir -p /path/to/your/virtualenv/etc/salt
|
mkdir -p /path/to/your/virtualenv/etc/salt
|
||||||
cp ./salt/conf/master /path/to/your/virtualenv/etc/salt/master
|
cp ./salt/conf/master ./salt/conf/minion /path/to/your/virtualenv/etc/salt/
|
||||||
cp ./salt/conf/minion /path/to/your/virtualenv/etc/salt/minion
|
|
||||||
|
|
||||||
Edit the master config file:
|
Edit the master config file:
|
||||||
|
|
||||||
@ -175,25 +175,28 @@ do this, add ``-l debug`` to the calls to ``salt-master`` and ``salt-minion``.
|
|||||||
If you would like to log to the console instead of to the log file, remove the
|
If you would like to log to the console instead of to the log file, remove the
|
||||||
``-d``.
|
``-d``.
|
||||||
|
|
||||||
Once the minion starts, you may see an error like the following:
|
.. _too_long_socket_path:
|
||||||
|
.. note:: Too long socket path?
|
||||||
|
|
||||||
.. code-block:: bash
|
Once the minion starts, you may see an error like the following:
|
||||||
|
|
||||||
zmq.core.error.ZMQError: ipc path "/path/to/your/virtualenv/
|
.. code-block:: bash
|
||||||
var/run/salt/minion/minion_event_7824dcbcfd7a8f6755939af70b96249f_pub.ipc"
|
|
||||||
is longer than 107 characters (sizeof(sockaddr_un.sun_path)).
|
|
||||||
|
|
||||||
This means that the path to the socket the minion is using is too long. This is
|
zmq.core.error.ZMQError: ipc path "/path/to/your/virtualenv/
|
||||||
a system limitation, so the only workaround is to reduce the length of this
|
var/run/salt/minion/minion_event_7824dcbcfd7a8f6755939af70b96249f_pub.ipc"
|
||||||
path. This can be done in a couple different ways:
|
is longer than 107 characters (sizeof(sockaddr_un.sun_path)).
|
||||||
|
|
||||||
1. Create your virtualenv in a path that is short enough.
|
This means that the path to the socket the minion is using is too long. This is
|
||||||
2. Edit the :conf_minion:`sock_dir` minion config variable and reduce its
|
a system limitation, so the only workaround is to reduce the length of this
|
||||||
length. Remember that this path is relative to the value you set in
|
path. This can be done in a couple different ways:
|
||||||
:conf_minion:`root_dir`.
|
|
||||||
|
|
||||||
``NOTE:`` The socket path is limited to 107 characters on Solaris and Linux,
|
1. Create your virtualenv in a path that is short enough.
|
||||||
and 103 characters on BSD-based systems.
|
2. Edit the :conf_minion:`sock_dir` minion config variable and reduce its
|
||||||
|
length. Remember that this path is relative to the value you set in
|
||||||
|
:conf_minion:`root_dir`.
|
||||||
|
|
||||||
|
``NOTE:`` The socket path is limited to 107 characters on Solaris and Linux,
|
||||||
|
and 103 characters on BSD-based systems.
|
||||||
|
|
||||||
.. note:: File descriptor limits
|
.. note:: File descriptor limits
|
||||||
|
|
||||||
@ -233,7 +236,7 @@ to a virtualenv using pip:
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
pip install Sphinx
|
pip install Sphinx==1.3b2
|
||||||
|
|
||||||
Change to salt documentation directory, then:
|
Change to salt documentation directory, then:
|
||||||
|
|
||||||
|
@ -8,8 +8,8 @@ the lines below, depending on the relevant Python version:
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
pip install -r dev_requirements_python26.txt
|
pip install -r requirements/dev_python26.txt
|
||||||
pip install -r dev_requirements_python27.txt
|
pip install -r requirements/dev_python27.txt
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
|
|
||||||
|
@ -151,11 +151,14 @@ dictionary should be sent instead.
|
|||||||
|
|
||||||
# Import the proper library
|
# Import the proper library
|
||||||
import salt.utils.event
|
import salt.utils.event
|
||||||
|
# bring in the system configuration
|
||||||
|
import salt.config
|
||||||
|
cfg = salt.config.minion_config("/etc/salt/minion")
|
||||||
# Fire deploy action
|
# Fire deploy action
|
||||||
sock_dir = '/var/run/salt/minion'
|
sock_dir = '/var/run/salt/minion'
|
||||||
payload = {'sample-msg': 'this is a test',
|
payload = {'sample-msg': 'this is a test',
|
||||||
'example': 'this is the same test'}
|
'example': 'this is the same test'}
|
||||||
event = salt.utils.event.SaltEvent('master', sock_dir)
|
event = salt.utils.event.SaltEvent('master', sock_dir, opts=cfg)
|
||||||
event.fire_event(payload, 'tag')
|
event.fire_event(payload, 'tag')
|
||||||
|
|
||||||
It should be noted that this code can be used in 3rd party applications as well.
|
It should be noted that this code can be used in 3rd party applications as well.
|
||||||
@ -177,3 +180,19 @@ programmatically, without having to make other calls to Salt.
|
|||||||
A 3rd party process can listen to the event bus on the master and another 3rd party
|
A 3rd party process can listen to the event bus on the master and another 3rd party
|
||||||
process can fire events to the process on the master, which Salt will happily
|
process can fire events to the process on the master, which Salt will happily
|
||||||
pass along.
|
pass along.
|
||||||
|
|
||||||
|
To fire an event to be sent to the master, from the minion, from code:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
import salt.utils.event
|
||||||
|
import salt.config
|
||||||
|
cfg = salt.config.minion_config("/etc/salt/minion")
|
||||||
|
sock_dir = '/var/run/salt/minion'
|
||||||
|
tag = "tag"
|
||||||
|
payload = {'sample-msg": "this is a test'}
|
||||||
|
# The message wrapper
|
||||||
|
# Create an event interface
|
||||||
|
event = salt.utils.event.SaltEvent("minion", sock_dir, opts=cfg)
|
||||||
|
# Fire the event across
|
||||||
|
event.fire_master(payload, tag)
|
||||||
|
@ -18,6 +18,10 @@ Install Salt stable releases from the Arch Linux Official repositories as follow
|
|||||||
|
|
||||||
pacman -S salt
|
pacman -S salt
|
||||||
|
|
||||||
|
.. note:: transports
|
||||||
|
|
||||||
|
Unlike other linux distributions, please be aware that Arch Linux's package manager pacman defaults to RAET as the Salt transport. If you want to use ZeroMQ instead, make sure to enter the associated number for the salt-zmq repository when prompted.
|
||||||
|
|
||||||
Tracking develop
|
Tracking develop
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
|
@ -96,7 +96,7 @@ Optional Dependencies
|
|||||||
.. _`Python 2.6`: http://python.org/download/
|
.. _`Python 2.6`: http://python.org/download/
|
||||||
.. _`ZeroMQ`: http://zeromq.org/
|
.. _`ZeroMQ`: http://zeromq.org/
|
||||||
.. _`pyzmq`: https://github.com/zeromq/pyzmq
|
.. _`pyzmq`: https://github.com/zeromq/pyzmq
|
||||||
.. _`msgpack-python`: https://pypi.python.org/pypi/msgpack-python/0.1.12
|
.. _`msgpack-python`: https://pypi.python.org/pypi/msgpack-python/
|
||||||
.. _`PyCrypto`: https://www.dlitz.net/software/pycrypto/
|
.. _`PyCrypto`: https://www.dlitz.net/software/pycrypto/
|
||||||
.. _`M2Crypto`: http://chandlerproject.org/Projects/MeTooCrypto
|
.. _`M2Crypto`: http://chandlerproject.org/Projects/MeTooCrypto
|
||||||
.. _`YAML`: http://pyyaml.org/
|
.. _`YAML`: http://pyyaml.org/
|
||||||
|
@ -67,6 +67,20 @@ may be given at a time:
|
|||||||
|
|
||||||
sudo apt-get install salt-syndic
|
sudo apt-get install salt-syndic
|
||||||
|
|
||||||
|
Some core components are packaged separately in the Ubuntu repositories. These should be installed as well: salt-cloud, salt-ssh, salt-api
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
sudo apt-get install salt-cloud
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
sudo apt-get install salt-ssh
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
sudo apt-get install salt-api
|
||||||
|
|
||||||
.. _ubuntu-config:
|
.. _ubuntu-config:
|
||||||
|
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ minion exe>` should match the contents of the corresponding md5 file.
|
|||||||
.. admonition:: Download here
|
.. admonition:: Download here
|
||||||
|
|
||||||
* 2014.7.0
|
* 2014.7.0
|
||||||
* `Salt-Minion-2014.7.0-x86-Setup.exe <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-x86-Setup.exe>`__ | `md5 <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-x86-Setup.exe.md5>`__
|
* `Salt-Minion-2014.7.0-1-win32-Setup.exe <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-1-win32-Setup.exe>`__ | `md5 <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-1-win32-Setup.exe.md5>`__
|
||||||
* `Salt-Minion-2014.7.0-AMD64-Setup.exe <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-AMD64-Setup.exe>`__ | `md5 <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-AMD64-Setup.exe.md5>`__
|
* `Salt-Minion-2014.7.0-AMD64-Setup.exe <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-AMD64-Setup.exe>`__ | `md5 <http://docs.saltstack.com/downloads/Salt-Minion-2014.7.0-AMD64-Setup.exe.md5>`__
|
||||||
|
|
||||||
* 2014.1.13
|
* 2014.1.13
|
||||||
|
@ -62,6 +62,43 @@ be adjusted for the minion via the `mine_interval` option:
|
|||||||
|
|
||||||
mine_interval: 60
|
mine_interval: 60
|
||||||
|
|
||||||
|
Mine in Salt-SSH
|
||||||
|
================
|
||||||
|
|
||||||
|
As of the 2015.2 release of salt, salt-ssh supports ``mine.get``.
|
||||||
|
|
||||||
|
Because the minions cannot provide their own ``mine_functions`` configuration,
|
||||||
|
we retrieve the args for specified mine functions in one of three places,
|
||||||
|
searched in the following order:
|
||||||
|
|
||||||
|
1. Roster data
|
||||||
|
2. Pillar
|
||||||
|
3. Master config
|
||||||
|
|
||||||
|
The ``mine_functions`` are formatted exactly the same as in normal salt, just
|
||||||
|
stored in a different location. Here is an example of a flat roster containing
|
||||||
|
``mine_functions``:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
test:
|
||||||
|
host: 104.237.131.248
|
||||||
|
user: root
|
||||||
|
mine_functions:
|
||||||
|
cmd.run: ['echo "hello!"']
|
||||||
|
network.ip_addrs:
|
||||||
|
interface: eth0
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
Because of the differences in the architecture of salt-ssh, ``mine.get``
|
||||||
|
calls are somewhat inefficient. Salt must make a new salt-ssh call to each
|
||||||
|
of the minions in question to retrieve the requested data, much like a
|
||||||
|
publish call. However, unlike publish, it must run the requested function
|
||||||
|
as a wrapper function, so we can retrieve the function args from the pillar
|
||||||
|
of the minion in question. This results in a non-trivial delay in
|
||||||
|
retrieving the requested data.
|
||||||
|
|
||||||
Example
|
Example
|
||||||
=======
|
=======
|
||||||
|
|
||||||
|
@ -548,6 +548,12 @@ New Salt-Cloud Providers
|
|||||||
- :mod:`LXC Containers <salt.cloud.clouds.lxc>`
|
- :mod:`LXC Containers <salt.cloud.clouds.lxc>`
|
||||||
- :mod:`Proxmox (OpenVZ containers & KVM) <salt.cloud.clouds.proxmox>`
|
- :mod:`Proxmox (OpenVZ containers & KVM) <salt.cloud.clouds.proxmox>`
|
||||||
|
|
||||||
|
Salt Call Change
|
||||||
|
================
|
||||||
|
|
||||||
|
When used with a returner, salt-call now contacts a master if ``--local``
|
||||||
|
is not specified.
|
||||||
|
|
||||||
|
|
||||||
Deprecations
|
Deprecations
|
||||||
============
|
============
|
||||||
|
@ -13,3 +13,7 @@ Version 2014.7.1 is a bugfix release for :doc:`2014.7.0
|
|||||||
(:issue:`18468`)
|
(:issue:`18468`)
|
||||||
- Fixed use of Jinja templates in masterless mode with non-roots fileserver
|
- Fixed use of Jinja templates in masterless mode with non-roots fileserver
|
||||||
backend (:issue:`17963`)
|
backend (:issue:`17963`)
|
||||||
|
- Re-enabled pillar and compound matching for mine and publish calls. Note that
|
||||||
|
pillar globbing is still disabled for those modes, for security reasons.
|
||||||
|
(:issue:`17194`)
|
||||||
|
- Fix for ``tty: True`` in salt-ssh (:issue:`16847`)
|
||||||
|
@ -10,3 +10,4 @@ Salt SSH
|
|||||||
- Added support for ``state.single`` in ``salt-ssh``
|
- Added support for ``state.single`` in ``salt-ssh``
|
||||||
- Added support for ``publish.publish``, ``publish.full_data``, and
|
- Added support for ``publish.publish``, ``publish.full_data``, and
|
||||||
``publish.runner`` in ``salt-ssh``
|
``publish.runner`` in ``salt-ssh``
|
||||||
|
- Added support for ``mine.get`` in ``salt-ssh``
|
8
doc/topics/releases/beryllium.rst
Normal file
8
doc/topics/releases/beryllium.rst
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
=======================================
|
||||||
|
Salt Release Notes - Codename Beryllium
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
JBoss 7 State
|
||||||
|
=============
|
||||||
|
|
||||||
|
- Remove unused argument ``timeout`` in jboss7.status
|
@ -118,6 +118,13 @@ Due to the fact that the targeting approach differs in salt-ssh, only glob
|
|||||||
and regex targets are supported as of this writing, the remaining target
|
and regex targets are supported as of this writing, the remaining target
|
||||||
systems still need to be implemented.
|
systems still need to be implemented.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
By default, Grains are settable through ``salt-ssh``. By
|
||||||
|
default, these grains will *not* be persisted across reboots.
|
||||||
|
|
||||||
|
See the "thin_dir" setting in :doc:`Roster documentation </topics/ssh/roster>`
|
||||||
|
for more details.
|
||||||
|
|
||||||
Configuring Salt SSH
|
Configuring Salt SSH
|
||||||
====================
|
====================
|
||||||
|
|
||||||
@ -144,15 +151,28 @@ If you are commonly passing in CLI options to ``salt-ssh``, you can create
|
|||||||
a ``Saltfile`` to automatically use these options. This is common if you're
|
a ``Saltfile`` to automatically use these options. This is common if you're
|
||||||
managing several different salt projects on the same server.
|
managing several different salt projects on the same server.
|
||||||
|
|
||||||
So if you ``cd`` into a directory with a Saltfile with the following
|
So if you ``cd`` into a directory with a ``Saltfile`` with the following
|
||||||
contents:
|
YAML contents:
|
||||||
|
|
||||||
.. code-block:: yaml
|
.. code-block:: yaml
|
||||||
|
|
||||||
salt-ssh:
|
salt-ssh:
|
||||||
config_dir: path/to/config/dir
|
config_dir: path/to/config/dir
|
||||||
max_prox: 30
|
max_prox: 30
|
||||||
|
wipe_ssh: true
|
||||||
|
|
||||||
Instead of having to call
|
Instead of having to call
|
||||||
``salt-ssh --config-dir=path/to/config/dir --max-procs=30 \* test.ping`` you
|
``salt-ssh --config-dir=path/to/config/dir --max-procs=30 --wipe \* test.ping`` you
|
||||||
can call ``salt-ssh \* test.ping``.
|
can call ``salt-ssh \* test.ping``.
|
||||||
|
|
||||||
|
Boolean-style options should be specified in their YAML representation.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
The option keys specified must match the destination attributes for the
|
||||||
|
options specified in the parser
|
||||||
|
:py:class:`salt.utils.parsers.SaltSSHOptionParser`. For example, in the
|
||||||
|
case of the ``--wipe`` command line option, its ``dest`` is configured to
|
||||||
|
be ``wipe_ssh`` and thus this is what should be configured in the
|
||||||
|
``Saltfile``. Using the names of flags for this option, being ``wipe:
|
||||||
|
true`` or ``w: true``, will not work.
|
||||||
|
@ -34,14 +34,26 @@ The information which can be stored in a roster `target` is the following:
|
|||||||
|
|
||||||
.. code-block:: yaml
|
.. code-block:: yaml
|
||||||
|
|
||||||
<Salt ID>: # The id to reference the target system with
|
<Salt ID>: # The id to reference the target system with
|
||||||
host: # The IP address or DNS name of the remote host
|
host: # The IP address or DNS name of the remote host
|
||||||
user: # The user to log in as
|
user: # The user to log in as
|
||||||
passwd: # The password to log in with
|
passwd: # The password to log in with
|
||||||
|
|
||||||
# Optional parameters
|
# Optional parameters
|
||||||
port: # The target system's ssh port number
|
port: # The target system's ssh port number
|
||||||
sudo: # Boolean to run command via sudo
|
sudo: # Boolean to run command via sudo
|
||||||
priv: # File path to ssh private key, defaults to salt-ssh.rsa
|
priv: # File path to ssh private key, defaults to salt-ssh.rsa
|
||||||
timeout: # Number of seconds to wait for response when establishing a
|
timeout: # Number of seconds to wait for response when establishing a
|
||||||
SSH connection
|
SSH connection
|
||||||
|
thin_dir: # The target system's storage directory for Salt components.
|
||||||
|
Defaults to /tmp/salt-<hash>.
|
||||||
|
|
||||||
|
thin_dir
|
||||||
|
--------
|
||||||
|
|
||||||
|
Salt needs to upload a standalone environment to the target system, and this
|
||||||
|
defaults to /tmp/salt-<hash>. This directory will be cleaned up per normal
|
||||||
|
systems operation.
|
||||||
|
|
||||||
|
If you need a persistent Salt environment, for instance to set persistent grains,
|
||||||
|
this value will need to be changed.
|
||||||
|
@ -247,3 +247,30 @@ service.
|
|||||||
auth_timeout:
|
auth_timeout:
|
||||||
The total time to wait for the authentication process to complete, regardless
|
The total time to wait for the authentication process to complete, regardless
|
||||||
of the number of attempts.
|
of the number of attempts.
|
||||||
|
|
||||||
|
|
||||||
|
=====================
|
||||||
|
Running state locally
|
||||||
|
=====================
|
||||||
|
|
||||||
|
To debug the states, you can use salt-call locally.
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt-call -l trace --local state.highstate
|
||||||
|
|
||||||
|
|
||||||
|
The top.sls file is used to map what SLS modules get loaded onto what minions via the state system.
|
||||||
|
|
||||||
|
It is located in the file defined in the ``file_roots`` variable of the salt master
|
||||||
|
configuration file, which is found in ``CONFIG_DIR/master``, normally ``/etc/salt/master``
|
||||||
|
|
||||||
|
The default configuration for the ``file_roots`` is:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
file_roots:
|
||||||
|
base:
|
||||||
|
- /srv/salt
|
||||||
|
|
||||||
|
So the top file is defaulted to the location ``/srv/salt/top.sls``
|
||||||
|
@ -157,6 +157,8 @@ Virtual Machine generation applications are available for many platforms:
|
|||||||
vm-builder:
|
vm-builder:
|
||||||
https://wiki.debian.org/VMBuilder
|
https://wiki.debian.org/VMBuilder
|
||||||
|
|
||||||
|
.. seealso:: :formula_url:`vmbuilder-formula`
|
||||||
|
|
||||||
Once virtual machine images are available, the easiest way to make them
|
Once virtual machine images are available, the easiest way to make them
|
||||||
available to Salt Virt is to place them in the Salt file server. Just copy an
|
available to Salt Virt is to place them in the Salt file server. Just copy an
|
||||||
image into ``/srv/salt`` and it can now be used by Salt Virt.
|
image into ``/srv/salt`` and it can now be used by Salt Virt.
|
||||||
|
@ -297,7 +297,6 @@ To avoid that, the master can use a pre-created signature of its public-key.
|
|||||||
The signature is saved as a base64 encoded string which the master reads
|
The signature is saved as a base64 encoded string which the master reads
|
||||||
once when starting and attaches only that string to auth-replies.
|
once when starting and attaches only that string to auth-replies.
|
||||||
|
|
||||||
|
|
||||||
Enabling this also gives paranoid users the possibility, to have the signing
|
Enabling this also gives paranoid users the possibility, to have the signing
|
||||||
key-pair on a different system than the actual salt-master and create the public
|
key-pair on a different system than the actual salt-master and create the public
|
||||||
keys signature there. Probably on a system with more restrictive firewall rules,
|
keys signature there. Probably on a system with more restrictive firewall rules,
|
||||||
|
80
doc/topics/tutorials/rooted.rst
Normal file
80
doc/topics/tutorials/rooted.rst
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
====================================
|
||||||
|
running salt as normal user tutorial
|
||||||
|
====================================
|
||||||
|
|
||||||
|
.. include:: /_incl/requisite_incl.rst
|
||||||
|
|
||||||
|
Running Salt functions as non root user
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
If you don't want to run salt-cloud as root or even install it, you can
|
||||||
|
configure it to have a virtual root in your working directory.
|
||||||
|
|
||||||
|
The salt system uses the ``salt._syspaths`` module to find the variables
|
||||||
|
|
||||||
|
If you run the salt build, it will be generated in:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
./build/lib.linux-x86_64-2.7/salt/_syspaths.py
|
||||||
|
|
||||||
|
To generate it, run the command:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
python setup.py build
|
||||||
|
|
||||||
|
Copy the generated module into your salt directory
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
cp ./build/lib.linux-x86_64-2.7/salt/_syspaths.py salt/_syspaths.py
|
||||||
|
|
||||||
|
Edit it to include needed variables and your new paths
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
# you need to edit this
|
||||||
|
ROOT_DIR = *your current dir* + '/salt/root'
|
||||||
|
|
||||||
|
# you need to edit this
|
||||||
|
INSTALL_DIR = *location of source code*
|
||||||
|
|
||||||
|
CONFIG_DIR = ROOT_DIR + '/etc/salt'
|
||||||
|
CACHE_DIR = ROOT_DIR + '/var/cache/salt'
|
||||||
|
SOCK_DIR = ROOT_DIR + '/var/run/salt'
|
||||||
|
SRV_ROOT_DIR= ROOT_DIR + '/srv'
|
||||||
|
BASE_FILE_ROOTS_DIR = ROOT_DIR + '/srv/salt'
|
||||||
|
BASE_PILLAR_ROOTS_DIR = ROOT_DIR + '/srv/pillar'
|
||||||
|
BASE_MASTER_ROOTS_DIR = ROOT_DIR + '/srv/salt-master'
|
||||||
|
LOGS_DIR = ROOT_DIR + '/var/log/salt'
|
||||||
|
PIDFILE_DIR = ROOT_DIR + '/var/run'
|
||||||
|
CLOUD_DIR = INSTALL_DIR + '/cloud'
|
||||||
|
BOOTSTRAP = CLOUD_DIR + '/deploy/bootstrap-salt.sh'
|
||||||
|
|
||||||
|
|
||||||
|
Create the directory structure
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
mkdir -p root/etc/salt root/var/cache/salt root/var/run/salt root/srv
|
||||||
|
root/srv/salt root/srv/pillar root/srv/salt-master root/var/log/salt root/var/run
|
||||||
|
|
||||||
|
|
||||||
|
Populate the configuration files:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
cp -r conf/* root/etc/salt/
|
||||||
|
|
||||||
|
Edit your ``root/etc/salt/master`` configuration that is used by salt-cloud:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
user: *your user name*
|
||||||
|
|
||||||
|
Run like this:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
PYTHONPATH=`pwd` scripts/salt-cloud
|
@ -57,25 +57,25 @@ installed and running. Include the following at the bottom of your
|
|||||||
- require: # requisite declaration
|
- require: # requisite declaration
|
||||||
- pkg: apache # requisite reference
|
- pkg: apache # requisite reference
|
||||||
|
|
||||||
**line 9** is the :ref:`id-declaration`. In this example it is the location we
|
**line 7** is the :ref:`id-declaration`. In this example it is the location we
|
||||||
want to install our custom HTML file. (**Note:** the default location that
|
want to install our custom HTML file. (**Note:** the default location that
|
||||||
Apache serves may differ from the above on your OS or distro. ``/srv/www``
|
Apache serves may differ from the above on your OS or distro. ``/srv/www``
|
||||||
could also be a likely place to look.)
|
could also be a likely place to look.)
|
||||||
|
|
||||||
**Line 10** the :ref:`state-declaration`. This example uses the Salt :mod:`file
|
**Line 8** the :ref:`state-declaration`. This example uses the Salt :mod:`file
|
||||||
state <salt.states.file>`.
|
state <salt.states.file>`.
|
||||||
|
|
||||||
**Line 11** is the :ref:`function-declaration`. The :func:`managed function
|
**Line 9** is the :ref:`function-declaration`. The :func:`managed function
|
||||||
<salt.states.file.managed>` will download a file from the master and install it
|
<salt.states.file.managed>` will download a file from the master and install it
|
||||||
in the location specified.
|
in the location specified.
|
||||||
|
|
||||||
**Line 12** is a :ref:`function-arg-declaration` which, in this example, passes
|
**Line 10** is a :ref:`function-arg-declaration` which, in this example, passes
|
||||||
the ``source`` argument to the :func:`managed function
|
the ``source`` argument to the :func:`managed function
|
||||||
<salt.states.file.managed>`.
|
<salt.states.file.managed>`.
|
||||||
|
|
||||||
**Line 13** is a :ref:`requisite-declaration`.
|
**Line 11** is a :ref:`requisite-declaration`.
|
||||||
|
|
||||||
**Line 14** is a :ref:`requisite-reference` which refers to a state and an ID.
|
**Line 12** is a :ref:`requisite-reference` which refers to a state and an ID.
|
||||||
In this example, it is referring to the ``ID declaration`` from our example in
|
In this example, it is referring to the ``ID declaration`` from our example in
|
||||||
:doc:`part 1 <states_pt1>`. This declaration tells Salt not to install the HTML
|
:doc:`part 1 <states_pt1>`. This declaration tells Salt not to install the HTML
|
||||||
file until Apache is installed.
|
file until Apache is installed.
|
||||||
@ -85,6 +85,7 @@ directory:
|
|||||||
|
|
||||||
.. code-block:: html
|
.. code-block:: html
|
||||||
|
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head><title>Salt rocks</title></head>
|
<head><title>Salt rocks</title></head>
|
||||||
<body>
|
<body>
|
||||||
|
@ -18,14 +18,16 @@
|
|||||||
# Generated from the help of salt programs on commit ad89a752f807d5ea00d3a9b3257d283ef6b69c10
|
# Generated from the help of salt programs on commit ad89a752f807d5ea00d3a9b3257d283ef6b69c10
|
||||||
#
|
#
|
||||||
# ISSUES:
|
# ISSUES:
|
||||||
# TODO: #1 add: salt-api salt-cloud salt-ssh salt-syndic
|
# TODO: #1 add: salt-api salt-cloud salt-ssh
|
||||||
# TODO: #2 write tests (use https://github.com/terlar/fish-tank)
|
# TODO: #2 write tests (use https://github.com/terlar/fish-tank)
|
||||||
# TODO: #3 add completion for builtin states
|
# TODO: #3 add completion for builtin states
|
||||||
# TODO: #4 use caching (see https://github.com/saltstack/salt/issues/15321)
|
# TODO: #4 use caching (see https://github.com/saltstack/salt/issues/15321)
|
||||||
# TODO: #5 add help to the positional arguments (like '(Minion)', '(Grain)')
|
# TODO: #5 add help to the positional arguments (like '(Minion)', '(Grain)')
|
||||||
|
# using __fish_salt_list function everythere)
|
||||||
# TODO: #6 add minion selection by grains (call "salt '*' grains.ls", use #4)
|
# TODO: #6 add minion selection by grains (call "salt '*' grains.ls", use #4)
|
||||||
# BUG: #7 salt-call autocompletion and salt packages not works; it hangs. Ask
|
# BUG: #7 salt-call autocompletion and salt packages not works; it hangs. Ask
|
||||||
# fish devs?
|
# fish devs?
|
||||||
|
# TODO: #8 sort with `sort` or leave as is?
|
||||||
|
|
||||||
# common general options (from --help)
|
# common general options (from --help)
|
||||||
set -l salt_programs \
|
set -l salt_programs \
|
||||||
@ -156,35 +158,40 @@ set -g __fish_salt_default_program 'salt'
|
|||||||
# salt --out raw server test.ping
|
# salt --out raw server test.ping
|
||||||
# Consider rewriting using __fish_complete_subcommand
|
# Consider rewriting using __fish_complete_subcommand
|
||||||
function __fish_salt_program
|
function __fish_salt_program
|
||||||
set result (commandline -pco)
|
if status --is-interactive
|
||||||
if test -n "$result"
|
set result (commandline -pco)
|
||||||
if [ $result[1] = 'salt-call' ]; and contains -- '--local' $result
|
if test -n "$result"
|
||||||
set options '--local'
|
if [ $result[1] = 'salt-call' ]; and contains -- '--local' $result
|
||||||
|
set options '--local'
|
||||||
|
end
|
||||||
|
set result $result[1] $options
|
||||||
end
|
end
|
||||||
set result $result[1] $options
|
|
||||||
else
|
|
||||||
set result $__fish_salt_default_program
|
|
||||||
end
|
end
|
||||||
|
set result $__fish_salt_default_program
|
||||||
echo $result
|
echo $result
|
||||||
end
|
end
|
||||||
|
|
||||||
function __fish_salt_save_first_commandline_token_not_matching_args_to
|
function __fish_salt_save_first_commandline_token_not_matching_args_to
|
||||||
set -l cli (commandline -pco)
|
if status --is-interactive
|
||||||
for i in $cli
|
set -l cli (commandline -pco)
|
||||||
if echo "$i" | grep -Ev (__fish_salt_join '|' $argv)
|
for i in $cli
|
||||||
set -g $argv[1] $i
|
if echo "$i" | grep -Ev (__fish_salt_join '|' $argv)
|
||||||
return 0
|
set -g $argv[1] $i
|
||||||
|
return 0
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
return 1
|
return 1
|
||||||
end
|
end
|
||||||
|
|
||||||
function __fish_salt_commandline_tokens_not_matching_args
|
function __fish_salt_commandline_tokens_not_matching_args
|
||||||
set tokens (commandline -pco)
|
if status --is-interactive
|
||||||
set result 1
|
set tokens (commandline -pco)
|
||||||
for token in $tokens
|
set result 1
|
||||||
if echo "$token" | grep -Ev (__fish_salt_join '|' $argv)
|
for token in $tokens
|
||||||
set result 0
|
if echo "$token" | grep -Ev (__fish_salt_join '|' $argv)
|
||||||
|
set result 0
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
return $result
|
return $result
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
-r ../../../raet-requirements.txt
|
-r ../../../requirements/raet.txt
|
||||||
-r requirements.txt
|
-r requirements.txt
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
GitPython==0.3.2.RC1
|
GitPython==0.3.2.RC1
|
||||||
halite
|
halite
|
||||||
-r ../../../opt_requirements.txt
|
-r ../../../requirements/opt.txt
|
||||||
-r ../../../cloud-requirements.txt
|
-r ../../../requirements/cloud.txt
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Need to set a specific version of pyzmq, so can't use the main project's requirements file... have to copy it in and modify...
|
# Need to set a specific version of pyzmq, so can't use the main project's requirements file... have to copy it in and modify...
|
||||||
#-r ../../../zeromq-requirements.txt
|
#-r ../../../requirements/zeromq.txt
|
||||||
-r ../../../_requirements.txt
|
-r ../../../requirements/base.txt
|
||||||
M2Crypto
|
M2Crypto
|
||||||
pycrypto
|
pycrypto
|
||||||
pyzmq == 13.1.0
|
pyzmq == 13.1.0
|
||||||
|
@ -138,14 +138,14 @@ InstallDirRegKey HKLM "${PRODUCT_DIR_REGKEY}" ""
|
|||||||
ShowInstDetails show
|
ShowInstDetails show
|
||||||
ShowUnInstDetails show
|
ShowUnInstDetails show
|
||||||
|
|
||||||
; Check and install Visual C++ 2008 SP1 redist packages
|
; Check and install Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||||
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||||
Section -Prerequisites
|
Section -Prerequisites
|
||||||
|
|
||||||
!define VC_REDIST_X64_GUID "{8220EEFE-38CD-377E-8595-13398D740ACE}"
|
!define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
|
||||||
!define VC_REDIST_X86_GUID "{9A25302D-30C0-39D9-BD6F-21E6EC160475}"
|
!define VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
|
||||||
!define VC_REDIST_X64_URI "http://download.microsoft.com/download/2/d/6/2d61c766-107b-409d-8fba-c39e61ca08e8/vcredist_x64.exe"
|
!define VC_REDIST_X64_URI "http://download.microsoft.com/download/5/D/8/5D8C65CB-C849-4025-8E95-C3966CAFD8AE/vcredist_x64.exe"
|
||||||
!define VC_REDIST_X86_URI "http://download.microsoft.com/download/d/d/9/dd9a82d0-52ef-40db-8dab-795376989c03/vcredist_x86.exe"
|
!define VC_REDIST_X86_URI "http://download.microsoft.com/download/5/D/8/5D8C65CB-C849-4025-8E95-C3966CAFD8AE/vcredist_x86.exe"
|
||||||
|
|
||||||
Var /GLOBAL VcRedistGuid
|
Var /GLOBAL VcRedistGuid
|
||||||
Var /GLOBAL VcRedistUri
|
Var /GLOBAL VcRedistUri
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
-r _requirements.txt
|
|
||||||
|
|
||||||
libnacl
|
|
||||||
ioflo
|
|
||||||
raet
|
|
3
requirements/dev_python26.txt
Normal file
3
requirements/dev_python26.txt
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
-r dev_python27.txt
|
||||||
|
|
||||||
|
unittest2
|
@ -1,4 +1,4 @@
|
|||||||
-r _requirements.txt
|
-r base.txt
|
||||||
-e git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting
|
-e git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting
|
||||||
|
|
||||||
mock
|
mock
|
5
requirements/raet.txt
Normal file
5
requirements/raet.txt
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
-r base.txt
|
||||||
|
|
||||||
|
libnacl
|
||||||
|
ioflo
|
||||||
|
raet
|
@ -1,4 +1,4 @@
|
|||||||
-r _requirements.txt
|
-r base.txt
|
||||||
|
|
||||||
M2Crypto
|
M2Crypto
|
||||||
pycrypto
|
pycrypto
|
@ -188,7 +188,17 @@ class Authorize(object):
|
|||||||
'''
|
'''
|
||||||
Gather and create the authorization data sets
|
Gather and create the authorization data sets
|
||||||
'''
|
'''
|
||||||
return self.opts['external_auth']
|
auth_data = self.opts['external_auth']
|
||||||
|
|
||||||
|
if 'django' in auth_data and '^model' in auth_data['django']:
|
||||||
|
auth_from_django = salt.auth.django.retrieve_auth_entries()
|
||||||
|
auth_data = salt.utils.dictupdate.merge(auth_data, auth_from_django)
|
||||||
|
|
||||||
|
#for auth_back in self.opts.get('external_auth_sources', []):
|
||||||
|
# fstr = '{0}.perms'.format(auth_back)
|
||||||
|
# if fstr in self.loadauth.auth:
|
||||||
|
# auth_data.append(getattr(self.loadauth.auth)())
|
||||||
|
return auth_data
|
||||||
|
|
||||||
def token(self, adata, load):
|
def token(self, adata, load):
|
||||||
'''
|
'''
|
||||||
@ -265,6 +275,9 @@ class Authorize(object):
|
|||||||
Note: this will check that the user has at least one right that will let
|
Note: this will check that the user has at least one right that will let
|
||||||
him execute "load", this does not deal with conflicting rules
|
him execute "load", this does not deal with conflicting rules
|
||||||
'''
|
'''
|
||||||
|
|
||||||
|
adata = self.auth_data
|
||||||
|
good = False
|
||||||
if load.get('token', False):
|
if load.get('token', False):
|
||||||
for sub_auth in self.token(self.auth_data, load):
|
for sub_auth in self.token(self.auth_data, load):
|
||||||
if sub_auth:
|
if sub_auth:
|
||||||
|
170
salt/auth/django.py
Normal file
170
salt/auth/django.py
Normal file
@ -0,0 +1,170 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
'''
|
||||||
|
Provide authentication using Django Web Framework
|
||||||
|
|
||||||
|
Django authentication depends on the presence of the django
|
||||||
|
framework in the PYTHONPATH, the django project's settings.py file being in
|
||||||
|
the PYTHONPATH and accessible via the DJANGO_SETTINGS_MODULE environment
|
||||||
|
variable. This can be hard to debug.
|
||||||
|
|
||||||
|
django auth can be defined like any other eauth module:
|
||||||
|
|
||||||
|
external_auth:
|
||||||
|
django:
|
||||||
|
fred:
|
||||||
|
- .*
|
||||||
|
- '@runner'
|
||||||
|
|
||||||
|
This will authenticate Fred via django and allow him to run any
|
||||||
|
execution module and all runners.
|
||||||
|
|
||||||
|
The details of the django auth can also be located inside the django database. The
|
||||||
|
relevant entry in the models.py file would look like this:
|
||||||
|
|
||||||
|
class SaltExternalAuthModel(models.Model):
|
||||||
|
|
||||||
|
user_fk = models.ForeignKey(auth.User)
|
||||||
|
minion_matcher = models.CharField()
|
||||||
|
minion_fn = models.CharField()
|
||||||
|
|
||||||
|
Then, in the master's config file the external_auth clause should look like
|
||||||
|
|
||||||
|
external_auth:
|
||||||
|
django:
|
||||||
|
^model: <fully-qualified reference to model class>
|
||||||
|
|
||||||
|
When a user attempts to authenticate via Django, Salt will import the package
|
||||||
|
indicated via the keyword '^model'. That model must have the fields
|
||||||
|
indicated above, though the model DOES NOT have to be named 'SaltExternalAuthModel'.
|
||||||
|
|
||||||
|
:depends: - Django Web Framework
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Import python libs
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# Import salt libs
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
try:
|
||||||
|
import django
|
||||||
|
import django.conf
|
||||||
|
import django.contrib.auth
|
||||||
|
HAS_DJANGO = True
|
||||||
|
except Exception as exc:
|
||||||
|
# If Django is installed and is not detected, uncomment
|
||||||
|
# the following line to display additional information
|
||||||
|
#log.warning('Could not load Django auth module. Found exception: {0}'.format(exc))
|
||||||
|
HAS_DJANGO = False
|
||||||
|
|
||||||
|
django_auth_class = None
|
||||||
|
|
||||||
|
|
||||||
|
def django_auth_setup():
|
||||||
|
'''
|
||||||
|
Prepare the connection to the Django authentication framework
|
||||||
|
'''
|
||||||
|
global django_auth_class
|
||||||
|
|
||||||
|
if django_auth_class is not None:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Versions 1.7 and later of Django don't pull models until
|
||||||
|
# they are needed. When using framework facilities outside the
|
||||||
|
# web application container we need to run django.setup() to
|
||||||
|
# get the model definitions cached.
|
||||||
|
if '^model' in __opts__['external_auth']['django']:
|
||||||
|
django_model_fullname = __opts__['external_auth']['django']['^model']
|
||||||
|
django_model_name = django_model_fullname.split('.')[-1]
|
||||||
|
django_module_name = '.'.join(django_model_fullname.split('.')[0:-1])
|
||||||
|
|
||||||
|
__import__(django_module_name, globals(), locals(), 'SaltExternalAuthModel')
|
||||||
|
django_auth_class_str = 'django_auth_module.{0}'.format(django_model_name)
|
||||||
|
django_auth_class = eval(django_auth_class_str) # pylint: disable=W0123
|
||||||
|
|
||||||
|
if django.VERSION >= (1, 7):
|
||||||
|
django.setup()
|
||||||
|
|
||||||
|
|
||||||
|
def auth(username, password):
|
||||||
|
'''
|
||||||
|
Simple Django auth
|
||||||
|
'''
|
||||||
|
|
||||||
|
django_auth_setup()
|
||||||
|
user = django.contrib.auth.authenticate(username=username, password=password)
|
||||||
|
if user is not None:
|
||||||
|
if user.is_active:
|
||||||
|
log.debug('Django authentication successful')
|
||||||
|
|
||||||
|
auth_dict_from_db = retrieve_auth_entries(username)[username]
|
||||||
|
if auth_dict_from_db is not None:
|
||||||
|
__opts__['external_auth']['django'][username] = auth_dict_from_db
|
||||||
|
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
log.debug('Django authentication: the password is valid but the account is disabled.')
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def retrieve_auth_entries(u=None):
|
||||||
|
'''
|
||||||
|
|
||||||
|
:param u: Username to filter for
|
||||||
|
:return: Dictionary that can be slotted into the __opts__ structure for eauth that designates the
|
||||||
|
user and his or her ACL
|
||||||
|
|
||||||
|
username minion_or_fn_matcher minion_fn
|
||||||
|
fred test.ping
|
||||||
|
fred server1 network.interfaces
|
||||||
|
fred server1 raid.list
|
||||||
|
fred server2 .*
|
||||||
|
guru .*
|
||||||
|
smartadmin server1 .*
|
||||||
|
|
||||||
|
Should result in
|
||||||
|
fred:
|
||||||
|
- test.ping
|
||||||
|
- server1:
|
||||||
|
- network.interfaces
|
||||||
|
- raid.list
|
||||||
|
- server2:
|
||||||
|
- .*
|
||||||
|
guru:
|
||||||
|
- .*
|
||||||
|
smartadmin:
|
||||||
|
- server1:
|
||||||
|
- .*
|
||||||
|
|
||||||
|
'''
|
||||||
|
django_auth_setup()
|
||||||
|
|
||||||
|
if u is None:
|
||||||
|
db_records = django_auth_class.objects.all()
|
||||||
|
else:
|
||||||
|
db_records = django_auth_class.objects.filter(user_fk__username=u)
|
||||||
|
auth_dict = {}
|
||||||
|
|
||||||
|
for a in db_records:
|
||||||
|
if a.user_fk.username not in auth_dict:
|
||||||
|
auth_dict[a.user_fk.username] = []
|
||||||
|
|
||||||
|
if not a.minion_or_fn_matcher and a.minion_fn:
|
||||||
|
auth_dict[a.user_fk.username].append(a.minion_fn)
|
||||||
|
elif a.minion_or_fn_matcher and not a.minion_fn:
|
||||||
|
auth_dict[a.user_fk.username].append(a.minion_or_fn_matcher)
|
||||||
|
else:
|
||||||
|
found = False
|
||||||
|
for d in auth_dict[a.user_fk.username]:
|
||||||
|
if isinstance(d, dict):
|
||||||
|
if a.minion_or_fn_matcher in d.keys():
|
||||||
|
auth_dict[a.user_fk.username][a.minion_or_fn_matcher].append(a.minion_fn)
|
||||||
|
found = True
|
||||||
|
if not found:
|
||||||
|
auth_dict[a.user_fk.username].append({a.minion_or_fn_matcher: [a.minion_fn]})
|
||||||
|
|
||||||
|
log.debug('django auth_dict is {0}'.format(repr(auth_dict)))
|
||||||
|
return auth_dict
|
@ -1,13 +1,25 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
from __future__ import print_function
|
'''
|
||||||
from __future__ import absolute_import
|
salt.cli.api
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
import six
|
Salt's api cli parser.
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Import Python libs
|
||||||
|
from __future__ import absolute_import, print_function
|
||||||
import sys
|
import sys
|
||||||
|
import os.path
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
# Import Salt libs
|
||||||
import salt.utils.parsers as parsers
|
import salt.utils.parsers as parsers
|
||||||
import salt.version
|
import salt.version
|
||||||
|
import salt.syspaths as syspaths
|
||||||
|
|
||||||
|
# Import 3rd-party libs
|
||||||
|
import salt.ext.six as six
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -25,7 +37,7 @@ class SaltAPI(six.with_metaclass(parsers.OptionParserMeta, # pylint: disable=W0
|
|||||||
# ConfigDirMixIn config filename attribute
|
# ConfigDirMixIn config filename attribute
|
||||||
_config_filename_ = 'master'
|
_config_filename_ = 'master'
|
||||||
# LogLevelMixIn attributes
|
# LogLevelMixIn attributes
|
||||||
_default_logging_logfile_ = '/var/log/salt/api'
|
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'api')
|
||||||
|
|
||||||
def setup_config(self):
|
def setup_config(self):
|
||||||
return salt.config.api_config(self.get_config_file_path())
|
return salt.config.api_config(self.get_config_file_path())
|
||||||
|
@ -13,6 +13,7 @@ import copy
|
|||||||
# Import salt libs
|
# Import salt libs
|
||||||
import salt.client
|
import salt.client
|
||||||
import salt.output
|
import salt.output
|
||||||
|
import salt.utils.minions
|
||||||
from salt.utils import print_cli
|
from salt.utils import print_cli
|
||||||
from salt.ext.six.moves import range
|
from salt.ext.six.moves import range
|
||||||
|
|
||||||
@ -32,25 +33,13 @@ class Batch(object):
|
|||||||
'''
|
'''
|
||||||
Return a list of minions to use for the batch run
|
Return a list of minions to use for the batch run
|
||||||
'''
|
'''
|
||||||
args = [self.opts['tgt'],
|
ckminions = salt.utils.minions.CkMinions(self.opts)
|
||||||
'test.ping',
|
|
||||||
[],
|
|
||||||
self.opts['timeout'],
|
|
||||||
]
|
|
||||||
|
|
||||||
selected_target_option = self.opts.get('selected_target_option', None)
|
selected_target_option = self.opts.get('selected_target_option', None)
|
||||||
if selected_target_option is not None:
|
if selected_target_option is not None:
|
||||||
args.append(selected_target_option)
|
expr_form = selected_target_option
|
||||||
else:
|
else:
|
||||||
args.append(self.opts.get('expr_form', 'glob'))
|
expr_form = self.opts.get('expr_form', 'glob')
|
||||||
|
return ckminions.check_minions(self.opts['tgt'], expr_form=expr_form)
|
||||||
fret = []
|
|
||||||
for ret in self.local.cmd_iter(*args, **self.eauth):
|
|
||||||
for minion in ret:
|
|
||||||
if not self.quiet:
|
|
||||||
print_cli('{0} Detected for this batch run'.format(minion))
|
|
||||||
fret.append(minion)
|
|
||||||
return sorted(fret)
|
|
||||||
|
|
||||||
def get_bnum(self):
|
def get_bnum(self):
|
||||||
'''
|
'''
|
||||||
|
@ -10,7 +10,6 @@ from __future__ import absolute_import
|
|||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import logging
|
import logging
|
||||||
import datetime
|
|
||||||
import traceback
|
import traceback
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
import time
|
import time
|
||||||
@ -23,6 +22,7 @@ import salt.output
|
|||||||
import salt.payload
|
import salt.payload
|
||||||
import salt.transport
|
import salt.transport
|
||||||
import salt.utils.args
|
import salt.utils.args
|
||||||
|
import salt.utils.jid
|
||||||
import salt.defaults.exitcodes
|
import salt.defaults.exitcodes
|
||||||
from salt.ext.six import string_types
|
from salt.ext.six import string_types
|
||||||
from salt.log import LOG_LEVELS
|
from salt.log import LOG_LEVELS
|
||||||
@ -99,7 +99,7 @@ class ZeroMQCaller(object):
|
|||||||
'''
|
'''
|
||||||
ret = {}
|
ret = {}
|
||||||
fun = self.opts['fun']
|
fun = self.opts['fun']
|
||||||
ret['jid'] = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
|
ret['jid'] = salt.utils.jid.gen_jid()
|
||||||
proc_fn = os.path.join(
|
proc_fn = os.path.join(
|
||||||
salt.minion.get_proc_dir(self.opts['cachedir']),
|
salt.minion.get_proc_dir(self.opts['cachedir']),
|
||||||
ret['jid']
|
ret['jid']
|
||||||
|
@ -30,6 +30,7 @@ from salt.ext.six import string_types
|
|||||||
|
|
||||||
# Import salt libs
|
# Import salt libs
|
||||||
import salt.config
|
import salt.config
|
||||||
|
import salt.minion
|
||||||
import salt.payload
|
import salt.payload
|
||||||
import salt.transport
|
import salt.transport
|
||||||
import salt.loader
|
import salt.loader
|
||||||
@ -808,7 +809,9 @@ class LocalClient(object):
|
|||||||
try:
|
try:
|
||||||
raw = event.get_event_noblock()
|
raw = event.get_event_noblock()
|
||||||
if gather_errors:
|
if gather_errors:
|
||||||
if raw and raw.get('tag', '').startswith('_salt_error') or raw.get('tag', '').startswith(jid_tag):
|
if (raw and
|
||||||
|
(raw.get('tag', '').startswith('_salt_error') or
|
||||||
|
raw.get('tag', '').startswith(jid_tag))):
|
||||||
yield raw
|
yield raw
|
||||||
else:
|
else:
|
||||||
if raw and raw.get('tag', '').startswith(jid_tag):
|
if raw and raw.get('tag', '').startswith(jid_tag):
|
||||||
@ -1448,7 +1451,7 @@ class LocalClient(object):
|
|||||||
# When running tests, if self.events is not destroyed, we leak 2
|
# When running tests, if self.events is not destroyed, we leak 2
|
||||||
# threads per test case which uses self.client
|
# threads per test case which uses self.client
|
||||||
if hasattr(self, 'event'):
|
if hasattr(self, 'event'):
|
||||||
# The call bellow will take care of calling 'self.event.destroy()'
|
# The call below will take care of calling 'self.event.destroy()'
|
||||||
del self.event
|
del self.event
|
||||||
|
|
||||||
|
|
||||||
|
@ -4,12 +4,18 @@ A collection of mixins useful for the various *Client interfaces
|
|||||||
'''
|
'''
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import datetime
|
import __builtin__
|
||||||
|
import collections
|
||||||
import logging
|
import logging
|
||||||
|
import time
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
|
|
||||||
|
import salt.exceptions
|
||||||
import salt.utils
|
import salt.utils
|
||||||
import salt.utils.event
|
import salt.utils.event
|
||||||
|
import salt.utils.jid
|
||||||
|
import salt.transport
|
||||||
|
from salt.utils.error import raise_error
|
||||||
from salt.utils.event import tagify
|
from salt.utils.event import tagify
|
||||||
from salt.utils.doc import strip_rst as _strip_rst
|
from salt.utils.doc import strip_rst as _strip_rst
|
||||||
|
|
||||||
@ -33,15 +39,209 @@ class SyncClientMixin(object):
|
|||||||
err = 'Function {0!r} is unavailable'.format(fun)
|
err = 'Function {0!r} is unavailable'.format(fun)
|
||||||
raise salt.exceptions.CommandExecutionError(err)
|
raise salt.exceptions.CommandExecutionError(err)
|
||||||
|
|
||||||
|
def master_call(self, **kwargs):
|
||||||
|
'''
|
||||||
|
Execute a function through the master network interface.
|
||||||
|
'''
|
||||||
|
load = kwargs
|
||||||
|
load['cmd'] = self.client
|
||||||
|
channel = salt.transport.Channel.factory(self.opts,
|
||||||
|
crypt='clear',
|
||||||
|
usage='master_call')
|
||||||
|
ret = channel.send(load)
|
||||||
|
if isinstance(ret, collections.Mapping):
|
||||||
|
if 'error' in ret:
|
||||||
|
raise_error(**ret['error'])
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def cmd_sync(self, low, timeout=None):
|
||||||
|
'''
|
||||||
|
Execute a runner function synchronously; eauth is respected
|
||||||
|
|
||||||
|
This function requires that :conf_master:`external_auth` is configured
|
||||||
|
and the user is authorized to execute runner functions: (``@runner``).
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
runner.eauth_sync({
|
||||||
|
'fun': 'jobs.list_jobs',
|
||||||
|
'username': 'saltdev',
|
||||||
|
'password': 'saltdev',
|
||||||
|
'eauth': 'pam',
|
||||||
|
})
|
||||||
|
'''
|
||||||
|
event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'])
|
||||||
|
job = self.master_call(**low)
|
||||||
|
ret_tag = salt.utils.event.tagify('ret', base=job['tag'])
|
||||||
|
|
||||||
|
if timeout is None:
|
||||||
|
timeout = 300
|
||||||
|
ret = event.get_event(tag=ret_tag, full=True, wait=timeout)
|
||||||
|
if ret is None:
|
||||||
|
raise salt.exceptions.SaltClientTimeout(
|
||||||
|
"RunnerClient job '{0}' timed out".format(job['jid']),
|
||||||
|
jid=job['jid'])
|
||||||
|
|
||||||
|
return ret['data']['return']
|
||||||
|
|
||||||
|
def cmd(self, fun, arg=None, pub_data=None, kwarg=None):
|
||||||
|
'''
|
||||||
|
Execute a function
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
>>> opts = salt.config.master_config('/etc/salt/master')
|
||||||
|
>>> runner = salt.runner.RunnerClient(opts)
|
||||||
|
>>> runner.cmd('jobs.list_jobs', [])
|
||||||
|
{
|
||||||
|
'20131219215650131543': {
|
||||||
|
'Arguments': [300],
|
||||||
|
'Function': 'test.sleep',
|
||||||
|
'StartTime': '2013, Dec 19 21:56:50.131543',
|
||||||
|
'Target': '*',
|
||||||
|
'Target-type': 'glob',
|
||||||
|
'User': 'saltdev'
|
||||||
|
},
|
||||||
|
'20131219215921857715': {
|
||||||
|
'Arguments': [300],
|
||||||
|
'Function': 'test.sleep',
|
||||||
|
'StartTime': '2013, Dec 19 21:59:21.857715',
|
||||||
|
'Target': '*',
|
||||||
|
'Target-type': 'glob',
|
||||||
|
'User': 'saltdev'
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
'''
|
||||||
|
if arg is None:
|
||||||
|
arg = tuple()
|
||||||
|
if not isinstance(arg, list) and not isinstance(arg, tuple):
|
||||||
|
raise salt.exceptions.SaltInvocationError(
|
||||||
|
'arg must be formatted as a list/tuple'
|
||||||
|
)
|
||||||
|
if pub_data is None:
|
||||||
|
pub_data = {}
|
||||||
|
if not isinstance(pub_data, dict):
|
||||||
|
raise salt.exceptions.SaltInvocationError(
|
||||||
|
'pub_data must be formatted as a dictionary'
|
||||||
|
)
|
||||||
|
if kwarg is None:
|
||||||
|
kwarg = {}
|
||||||
|
if not isinstance(kwarg, dict):
|
||||||
|
raise salt.exceptions.SaltInvocationError(
|
||||||
|
'kwarg must be formatted as a dictionary'
|
||||||
|
)
|
||||||
|
arglist = salt.utils.args.parse_input(arg)
|
||||||
|
|
||||||
|
# if you were passed kwarg, add it to arglist
|
||||||
|
if kwarg:
|
||||||
|
kwarg['__kwarg__'] = True
|
||||||
|
arglist.append(kwarg)
|
||||||
|
|
||||||
|
args, kwargs = salt.minion.load_args_and_kwargs(
|
||||||
|
self.functions[fun], arglist, pub_data
|
||||||
|
)
|
||||||
|
low = {'fun': fun,
|
||||||
|
'args': args,
|
||||||
|
'kwargs': kwargs}
|
||||||
|
return self.low(fun, low)
|
||||||
|
|
||||||
def low(self, fun, low):
|
def low(self, fun, low):
|
||||||
'''
|
'''
|
||||||
Execute a function from low data
|
Execute a function from low data
|
||||||
|
Low data includes:
|
||||||
|
required:
|
||||||
|
- fun: the name of the function to run
|
||||||
|
optional:
|
||||||
|
- args: a list of args to pass to fun
|
||||||
|
- kwargs: kwargs for fun
|
||||||
|
- __user__: user who is running the command
|
||||||
|
- __jid__: jid to run under
|
||||||
|
- __tag__: tag to run under
|
||||||
'''
|
'''
|
||||||
self._verify_fun(fun)
|
jid = low.get('__jid__', salt.utils.jid.gen_jid())
|
||||||
l_fun = self.functions[fun]
|
tag = low.get('__tag__', tagify(jid, prefix=self.tag_prefix))
|
||||||
f_call = salt.utils.format_call(l_fun, low)
|
data = {'fun': '{0}.{1}'.format(self.client, fun),
|
||||||
ret = l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
|
'jid': jid,
|
||||||
return ret
|
'user': low.get('__user__', 'UNKNOWN'),
|
||||||
|
}
|
||||||
|
event = salt.utils.event.get_event(
|
||||||
|
'master',
|
||||||
|
self.opts['sock_dir'],
|
||||||
|
self.opts['transport'],
|
||||||
|
opts=self.opts,
|
||||||
|
listen=False)
|
||||||
|
event.fire_event(data, tagify('new', base=tag))
|
||||||
|
|
||||||
|
# TODO: document these, and test that they exist
|
||||||
|
# TODO: Other things to inject??
|
||||||
|
func_globals = {'__jid__': jid,
|
||||||
|
'__user__': data['user'],
|
||||||
|
'__tag__': tag,
|
||||||
|
'__jid_event__': salt.utils.event.NamespacedEvent(event, tag),
|
||||||
|
}
|
||||||
|
|
||||||
|
def over_print(output):
|
||||||
|
'''
|
||||||
|
Print and duplicate the print to an event
|
||||||
|
'''
|
||||||
|
print_event = {'data': output,
|
||||||
|
'outputter': 'pprint'}
|
||||||
|
func_globals['__jid_event__'].fire_event(print_event, 'print')
|
||||||
|
__builtin__.print(output) # and do the old style printout
|
||||||
|
func_globals['print'] = over_print
|
||||||
|
|
||||||
|
# Inject some useful globals to the funciton's global namespace
|
||||||
|
for global_key, value in func_globals.iteritems():
|
||||||
|
self.functions[fun].func_globals[global_key] = value
|
||||||
|
try:
|
||||||
|
self._verify_fun(fun)
|
||||||
|
|
||||||
|
# There are some descrepencies of what a "low" structure is
|
||||||
|
# in the publisher world it is a dict including stuff such as jid,
|
||||||
|
# fun, arg (a list of args, with kwargs packed in). Historically
|
||||||
|
# this particular one has had no "arg" and just has had all the
|
||||||
|
# kwargs packed into the top level object. The plan is to move away
|
||||||
|
# from that since the caller knows what is an arg vs a kwarg, but
|
||||||
|
# while we make the transition we will load "kwargs" using format_call
|
||||||
|
# if there are no kwargs in the low object passed in
|
||||||
|
f_call = None
|
||||||
|
if 'args' not in low:
|
||||||
|
f_call = salt.utils.format_call(self.functions[fun], low)
|
||||||
|
args = f_call.get('args', ())
|
||||||
|
else:
|
||||||
|
args = low['args']
|
||||||
|
if 'kwargs' not in low:
|
||||||
|
if f_call is None:
|
||||||
|
f_call = salt.utils.format_call(self.functions[fun], low)
|
||||||
|
kwargs = f_call.get('kwargs', {})
|
||||||
|
|
||||||
|
# throw a warning for the badly formed low data if we found
|
||||||
|
# kwargs using the old mechanism
|
||||||
|
if kwargs:
|
||||||
|
salt.utils.warn_until(
|
||||||
|
'Boron',
|
||||||
|
'kwargs must be passed inside the low under "kwargs"'
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
kwargs = low['kwargs']
|
||||||
|
|
||||||
|
data['return'] = self.functions[fun](*args, **kwargs)
|
||||||
|
data['success'] = True
|
||||||
|
except Exception as exc:
|
||||||
|
data['return'] = 'Exception occurred in {0} {1}: {2}: {3}'.format(
|
||||||
|
self.client,
|
||||||
|
fun,
|
||||||
|
exc.__class__.__name__,
|
||||||
|
exc,
|
||||||
|
)
|
||||||
|
data['success'] = False
|
||||||
|
|
||||||
|
event.fire_event(data, tagify('ret', base=tag))
|
||||||
|
# if we fired an event, make sure to delete the event object.
|
||||||
|
# This will ensure that we call destroy, which will do the 0MQ linger
|
||||||
|
del event
|
||||||
|
return data['return']
|
||||||
|
|
||||||
def get_docs(self, arg=None):
|
def get_docs(self, arg=None):
|
||||||
'''
|
'''
|
||||||
@ -72,46 +272,103 @@ class AsyncClientMixin(object):
|
|||||||
multiprocess and fire the return data on the event bus
|
multiprocess and fire the return data on the event bus
|
||||||
'''
|
'''
|
||||||
salt.utils.daemonize()
|
salt.utils.daemonize()
|
||||||
data = {'fun': '{0}.{1}'.format(self.client, fun),
|
|
||||||
'jid': jid,
|
|
||||||
'user': user,
|
|
||||||
}
|
|
||||||
event = salt.utils.event.get_event(
|
|
||||||
'master',
|
|
||||||
self.opts['sock_dir'],
|
|
||||||
self.opts['transport'],
|
|
||||||
opts=self.opts,
|
|
||||||
listen=False)
|
|
||||||
event.fire_event(data, tagify('new', base=tag))
|
|
||||||
|
|
||||||
try:
|
# pack a few things into low
|
||||||
data['return'] = self.low(fun, low)
|
low['__jid__'] = jid
|
||||||
data['success'] = True
|
low['__user__'] = user
|
||||||
except Exception as exc:
|
low['__tag__'] = tag
|
||||||
data['return'] = 'Exception occurred in {0} {1}: {2}: {3}'.format(
|
|
||||||
self.client,
|
|
||||||
fun,
|
|
||||||
exc.__class__.__name__,
|
|
||||||
exc,
|
|
||||||
)
|
|
||||||
data['success'] = False
|
|
||||||
data['user'] = user
|
|
||||||
|
|
||||||
event.fire_event(data, tagify('ret', base=tag))
|
self.low(fun, low)
|
||||||
# if we fired an event, make sure to delete the event object.
|
|
||||||
# This will ensure that we call destroy, which will do the 0MQ linger
|
def cmd_async(self, low):
|
||||||
del event
|
'''
|
||||||
|
Execute a function asynchronously; eauth is respected
|
||||||
|
|
||||||
|
This function requires that :conf_master:`external_auth` is configured
|
||||||
|
and the user is authorized
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
>>> wheel.cmd_async({
|
||||||
|
'fun': 'key.finger',
|
||||||
|
'match': 'jerry',
|
||||||
|
'eauth': 'auto',
|
||||||
|
'username': 'saltdev',
|
||||||
|
'password': 'saltdev',
|
||||||
|
})
|
||||||
|
{'jid': '20131219224744416681', 'tag': 'salt/wheel/20131219224744416681'}
|
||||||
|
'''
|
||||||
|
return self.master_call(**low)
|
||||||
|
|
||||||
def async(self, fun, low, user='UNKNOWN'):
|
def async(self, fun, low, user='UNKNOWN'):
|
||||||
'''
|
'''
|
||||||
Execute the function in a multiprocess and return the event tag to use
|
Execute the function in a multiprocess and return the event tag to use
|
||||||
to watch for the return
|
to watch for the return
|
||||||
'''
|
'''
|
||||||
jid = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
|
jid = salt.utils.jid.gen_jid()
|
||||||
tag = tagify(jid, prefix=self.tag_prefix)
|
tag = tagify(jid, prefix=self.tag_prefix)
|
||||||
|
|
||||||
proc = multiprocessing.Process(
|
proc = multiprocessing.Process(
|
||||||
target=self._proc_function,
|
target=self._proc_function,
|
||||||
args=(fun, low, user, tag, jid))
|
args=(fun, low, user, tag, jid))
|
||||||
proc.start()
|
proc.start()
|
||||||
|
proc.join() # MUST join, otherwise we leave zombies all over
|
||||||
return {'tag': tag, 'jid': jid}
|
return {'tag': tag, 'jid': jid}
|
||||||
|
|
||||||
|
def print_async_event(self, suffix, event):
|
||||||
|
'''
|
||||||
|
Print all of the events with the prefix 'tag'
|
||||||
|
'''
|
||||||
|
# some suffixes we don't want to print
|
||||||
|
if suffix in ('new', ):
|
||||||
|
return
|
||||||
|
|
||||||
|
# TODO: clean up this event print out. We probably want something
|
||||||
|
# more general, since this will get *really* messy as
|
||||||
|
# people use more events that don't quite fit into this mold
|
||||||
|
if suffix == 'ret': # for "ret" just print out return
|
||||||
|
salt.output.display_output(event['return'], '', self.opts)
|
||||||
|
elif isinstance(event, dict) and 'outputter' in event and event['outputter'] is not None:
|
||||||
|
print(self.outputters[event['outputter']](event['data']))
|
||||||
|
# otherwise fall back on basic printing
|
||||||
|
else:
|
||||||
|
event.pop('_stamp') # remove the timestamp before printing
|
||||||
|
print('{tag}: {event}'.format(tag=suffix,
|
||||||
|
event=event))
|
||||||
|
|
||||||
|
def get_async_returns(self, tag, timeout=None, event=None):
|
||||||
|
'''
|
||||||
|
Yield all events from a given tag until "ret" is recieved or timeout is
|
||||||
|
reached.
|
||||||
|
'''
|
||||||
|
if timeout is None:
|
||||||
|
timeout = self.opts['timeout'] * 2
|
||||||
|
|
||||||
|
if event is None:
|
||||||
|
event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'])
|
||||||
|
timeout_at = time.time() + timeout
|
||||||
|
last_progress_timestamp = time.time()
|
||||||
|
basetag_depth = tag.count('/') + 1
|
||||||
|
|
||||||
|
# no need to have a sleep, get_event has one inside
|
||||||
|
while True:
|
||||||
|
raw = event.get_event(timeout, tag=tag, full=True)
|
||||||
|
# If we saw no events in the event bus timeout
|
||||||
|
# OR
|
||||||
|
# we have reached the total timeout
|
||||||
|
# AND
|
||||||
|
# have not seen any progress events for the length of the timeout.
|
||||||
|
now = time.time()
|
||||||
|
if raw is None and (now > timeout_at and
|
||||||
|
now - last_progress_timestamp > timeout):
|
||||||
|
# Timeout reached
|
||||||
|
break
|
||||||
|
try:
|
||||||
|
tag_parts = raw['tag'].split('/')
|
||||||
|
suffix = '/'.join(tag_parts[basetag_depth:])
|
||||||
|
last_progress_timestamp = now
|
||||||
|
yield suffix, raw['data']
|
||||||
|
if tag_parts[3] == 'ret':
|
||||||
|
raise StopIteration() # we are done, we got return
|
||||||
|
except (IndexError, KeyError):
|
||||||
|
continue
|
||||||
|
@ -18,6 +18,8 @@ import re
|
|||||||
import time
|
import time
|
||||||
import yaml
|
import yaml
|
||||||
import uuid
|
import uuid
|
||||||
|
import tempfile
|
||||||
|
import binascii
|
||||||
from salt.ext.six.moves import input
|
from salt.ext.six.moves import input
|
||||||
|
|
||||||
# Import salt libs
|
# Import salt libs
|
||||||
@ -312,11 +314,11 @@ class SSH(object):
|
|||||||
if stderr:
|
if stderr:
|
||||||
return {host: stderr}
|
return {host: stderr}
|
||||||
return {host: 'Bad Return'}
|
return {host: 'Bad Return'}
|
||||||
if salt.exitcodes.EX_OK != retcode:
|
if salt.defaults.exitcodes.EX_OK != retcode:
|
||||||
return {host: stderr}
|
return {host: stderr}
|
||||||
return {host: stdout}
|
return {host: stdout}
|
||||||
|
|
||||||
def handle_routine(self, que, opts, host, target):
|
def handle_routine(self, que, opts, host, target, mine=False):
|
||||||
'''
|
'''
|
||||||
Run the routine in a "Thread", put a dict on the queue
|
Run the routine in a "Thread", put a dict on the queue
|
||||||
'''
|
'''
|
||||||
@ -328,6 +330,7 @@ class SSH(object):
|
|||||||
mods=self.mods,
|
mods=self.mods,
|
||||||
fsclient=self.fsclient,
|
fsclient=self.fsclient,
|
||||||
thin=self.thin,
|
thin=self.thin,
|
||||||
|
mine=mine,
|
||||||
**target)
|
**target)
|
||||||
ret = {'id': single.id}
|
ret = {'id': single.id}
|
||||||
stdout, stderr, retcode = single.run()
|
stdout, stderr, retcode = single.run()
|
||||||
@ -350,7 +353,7 @@ class SSH(object):
|
|||||||
}
|
}
|
||||||
que.put(ret)
|
que.put(ret)
|
||||||
|
|
||||||
def handle_ssh(self):
|
def handle_ssh(self, mine=False):
|
||||||
'''
|
'''
|
||||||
Spin up the needed threads or processes and execute the subsequent
|
Spin up the needed threads or processes and execute the subsequent
|
||||||
routines
|
routines
|
||||||
@ -378,6 +381,7 @@ class SSH(object):
|
|||||||
self.opts,
|
self.opts,
|
||||||
host,
|
host,
|
||||||
self.targets[host],
|
self.targets[host],
|
||||||
|
mine,
|
||||||
)
|
)
|
||||||
routine = multiprocessing.Process(
|
routine = multiprocessing.Process(
|
||||||
target=self.handle_routine,
|
target=self.handle_routine,
|
||||||
@ -407,9 +411,14 @@ class SSH(object):
|
|||||||
if len(rets) >= len(self.targets):
|
if len(rets) >= len(self.targets):
|
||||||
break
|
break
|
||||||
|
|
||||||
def run_iter(self):
|
def run_iter(self, mine=False):
|
||||||
'''
|
'''
|
||||||
Execute and yield returns as they come in, do not print to the display
|
Execute and yield returns as they come in, do not print to the display
|
||||||
|
|
||||||
|
mine
|
||||||
|
The Single objects will use mine_functions defined in the roster,
|
||||||
|
pillar, or master config (they will be checked in that order) and
|
||||||
|
will modify the argv with the arguments from mine_functions
|
||||||
'''
|
'''
|
||||||
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
|
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
|
||||||
jid = self.returners[fstr]()
|
jid = self.returners[fstr]()
|
||||||
@ -436,7 +445,7 @@ class SSH(object):
|
|||||||
# save load to the master job cache
|
# save load to the master job cache
|
||||||
self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
|
self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load)
|
||||||
|
|
||||||
for ret in self.handle_ssh():
|
for ret in self.handle_ssh(mine=mine):
|
||||||
host = next(ret.iterkeys())
|
host = next(ret.iterkeys())
|
||||||
self.cache_job(jid, host, ret[host])
|
self.cache_job(jid, host, ret[host])
|
||||||
if self.event:
|
if self.event:
|
||||||
@ -546,7 +555,12 @@ class Single(object):
|
|||||||
mods=None,
|
mods=None,
|
||||||
fsclient=None,
|
fsclient=None,
|
||||||
thin=None,
|
thin=None,
|
||||||
|
mine=False,
|
||||||
**kwargs):
|
**kwargs):
|
||||||
|
# Get mine setting and mine_functions if defined in kwargs (from roster)
|
||||||
|
self.mine = mine
|
||||||
|
self.mine_functions = kwargs.get('mine_functions')
|
||||||
|
|
||||||
self.opts = opts
|
self.opts = opts
|
||||||
self.tty = tty
|
self.tty = tty
|
||||||
if kwargs.get('wipe'):
|
if kwargs.get('wipe'):
|
||||||
@ -675,7 +689,7 @@ class Single(object):
|
|||||||
cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
|
cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
|
||||||
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
||||||
|
|
||||||
elif self.fun in self.wfuncs:
|
elif self.fun in self.wfuncs or self.mine:
|
||||||
stdout = self.run_wfunc()
|
stdout = self.run_wfunc()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@ -780,8 +794,31 @@ class Single(object):
|
|||||||
**self.target)
|
**self.target)
|
||||||
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
|
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
|
||||||
wrapper.wfuncs = self.wfuncs
|
wrapper.wfuncs = self.wfuncs
|
||||||
|
|
||||||
|
# We're running in the mind, need to fetch the arguments from the
|
||||||
|
# roster, pillar, master config (in that order)
|
||||||
|
if self.mine:
|
||||||
|
mine_args = None
|
||||||
|
if self.mine_functions and self.fun in self.mine_functions:
|
||||||
|
mine_args = self.mine_functions[self.fun]
|
||||||
|
elif opts['pillar'] and self.fun in opts['pillar'].get('mine_functions', {}):
|
||||||
|
mine_args = opts['pillar']['mine_functions'][self.fun]
|
||||||
|
elif self.fun in self.context['master_opts'].get('mine_functions', {}):
|
||||||
|
mine_args = self.context['master_opts']['mine_functions'][self.fun]
|
||||||
|
|
||||||
|
# If we found mine_args, replace our command's args
|
||||||
|
if isinstance(mine_args, dict):
|
||||||
|
self.args = []
|
||||||
|
self.kwargs = mine_args
|
||||||
|
elif isinstance(mine_args, list):
|
||||||
|
self.args = mine_args
|
||||||
|
self.kwargs = {}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
|
if self.mine:
|
||||||
|
result = wrapper[self.fun](*self.args, **self.kwargs)
|
||||||
|
else:
|
||||||
|
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
|
||||||
except TypeError as exc:
|
except TypeError as exc:
|
||||||
result = 'TypeError encountered executing {0}: {1}'.format(self.fun, exc)
|
result = 'TypeError encountered executing {0}: {1}'.format(self.fun, exc)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
@ -858,6 +895,44 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||||||
for stdout, stderr, retcode in self.shell.exec_nb_cmd(cmd_str):
|
for stdout, stderr, retcode in self.shell.exec_nb_cmd(cmd_str):
|
||||||
yield stdout, stderr, retcode
|
yield stdout, stderr, retcode
|
||||||
|
|
||||||
|
def shim_cmd(self, cmd_str):
|
||||||
|
'''
|
||||||
|
Run a shim command.
|
||||||
|
|
||||||
|
If tty is enabled, we must scp the shim to the target system and
|
||||||
|
execute it there
|
||||||
|
'''
|
||||||
|
if not self.tty:
|
||||||
|
return self.shell.exec_cmd(cmd_str)
|
||||||
|
|
||||||
|
# Write the shim to a file
|
||||||
|
shim_dir = os.path.join(self.opts['cachedir'], 'ssh_shim')
|
||||||
|
if not os.path.exists(shim_dir):
|
||||||
|
os.makedirs(shim_dir)
|
||||||
|
with tempfile.NamedTemporaryFile(mode='w',
|
||||||
|
prefix='shim_',
|
||||||
|
dir=shim_dir,
|
||||||
|
delete=False) as shim_tmp_file:
|
||||||
|
shim_tmp_file.write(cmd_str)
|
||||||
|
|
||||||
|
# Copy shim to target system, under $HOME/.<randomized name>
|
||||||
|
target_shim_file = '.{0}'.format(binascii.hexlify(os.urandom(6)))
|
||||||
|
self.shell.send(shim_tmp_file.name, target_shim_file)
|
||||||
|
|
||||||
|
# Remove our shim file
|
||||||
|
try:
|
||||||
|
os.remove(shim_tmp_file.name)
|
||||||
|
except IOError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Execute shim
|
||||||
|
ret = self.shell.exec_cmd('/bin/sh $HOME/{0}'.format(target_shim_file))
|
||||||
|
|
||||||
|
# Remove shim from target system
|
||||||
|
self.shell.exec_cmd('rm $HOME/{0}'.format(target_shim_file))
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
def cmd_block(self, is_retry=False):
|
def cmd_block(self, is_retry=False):
|
||||||
'''
|
'''
|
||||||
Prepare the pre-check command to send to the subsystem
|
Prepare the pre-check command to send to the subsystem
|
||||||
@ -871,7 +946,7 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||||||
|
|
||||||
log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv)))
|
log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv)))
|
||||||
cmd_str = self._cmd_str()
|
cmd_str = self._cmd_str()
|
||||||
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
stdout, stderr, retcode = self.shim_cmd(cmd_str)
|
||||||
|
|
||||||
log.debug('STDOUT {1}\n{0}'.format(stdout, self.target['host']))
|
log.debug('STDOUT {1}\n{0}'.format(stdout, self.target['host']))
|
||||||
log.debug('STDERR {1}\n{0}'.format(stderr, self.target['host']))
|
log.debug('STDERR {1}\n{0}'.format(stderr, self.target['host']))
|
||||||
@ -881,7 +956,7 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||||||
if error:
|
if error:
|
||||||
if error == 'Undefined SHIM state':
|
if error == 'Undefined SHIM state':
|
||||||
self.deploy()
|
self.deploy()
|
||||||
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
stdout, stderr, retcode = self.shim_cmd(cmd_str)
|
||||||
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
||||||
# If RSTR is not seen in both stdout and stderr then there
|
# If RSTR is not seen in both stdout and stderr then there
|
||||||
# was a thin deployment problem.
|
# was a thin deployment problem.
|
||||||
@ -910,7 +985,7 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||||||
shim_command = re.split(r'\r?\n', stdout, 1)[0].strip()
|
shim_command = re.split(r'\r?\n', stdout, 1)[0].strip()
|
||||||
if 'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY:
|
if 'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY:
|
||||||
self.deploy()
|
self.deploy()
|
||||||
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
stdout, stderr, retcode = self.shim_cmd(cmd_str)
|
||||||
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
||||||
if not self.tty:
|
if not self.tty:
|
||||||
# If RSTR is not seen in both stdout and stderr then there
|
# If RSTR is not seen in both stdout and stderr then there
|
||||||
@ -927,7 +1002,7 @@ ARGS = {9}\n'''.format(self.minion_config,
|
|||||||
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
|
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
|
||||||
elif 'ext_mods' == shim_command:
|
elif 'ext_mods' == shim_command:
|
||||||
self.deploy_ext()
|
self.deploy_ext()
|
||||||
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
|
stdout, stderr, retcode = self.shim_cmd(cmd_str)
|
||||||
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
|
||||||
# If RSTR is not seen in both stdout and stderr then there
|
# If RSTR is not seen in both stdout and stderr then there
|
||||||
# was a thin deployment problem.
|
# was a thin deployment problem.
|
||||||
|
60
salt/client/ssh/wrapper/mine.py
Normal file
60
salt/client/ssh/wrapper/mine.py
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
'''
|
||||||
|
Wrapper function for mine operations for salt-ssh
|
||||||
|
|
||||||
|
.. versionadded:: Lithium
|
||||||
|
'''
|
||||||
|
|
||||||
|
# Import python libs
|
||||||
|
import copy
|
||||||
|
|
||||||
|
# Import salt libs
|
||||||
|
import salt.client.ssh
|
||||||
|
|
||||||
|
|
||||||
|
def get(tgt, fun, expr_form='glob', roster='flat'):
|
||||||
|
'''
|
||||||
|
Get data from the mine based on the target, function and expr_form
|
||||||
|
|
||||||
|
This will actually run the function on all targeted minions (like
|
||||||
|
publish.publish), as salt-ssh clients can't update the mine themselves.
|
||||||
|
|
||||||
|
We will look for mine_functions in the roster, pillar, and master config,
|
||||||
|
in that order, looking for a match for the defined function
|
||||||
|
|
||||||
|
Targets can be matched based on any standard matching system that can be
|
||||||
|
matched on the defined roster (in salt-ssh) via these keywords::
|
||||||
|
|
||||||
|
CLI Example:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt-ssh '*' mine.get '*' network.interfaces
|
||||||
|
salt-ssh '*' mine.get 'myminion' network.interfaces roster=flat
|
||||||
|
salt-ssh '*' mine.get '192.168.5.0' network.ipaddrs roster=scan
|
||||||
|
'''
|
||||||
|
# Set up opts for the SSH object
|
||||||
|
opts = copy.deepcopy(__opts__)
|
||||||
|
if roster:
|
||||||
|
opts['roster'] = roster
|
||||||
|
opts['argv'] = [fun]
|
||||||
|
opts['selected_target_option'] = expr_form
|
||||||
|
opts['tgt'] = tgt
|
||||||
|
opts['arg'] = []
|
||||||
|
|
||||||
|
# Create the SSH object to handle the actual call
|
||||||
|
ssh = salt.client.ssh.SSH(opts)
|
||||||
|
|
||||||
|
# Run salt-ssh to get the minion returns
|
||||||
|
rets = {}
|
||||||
|
for ret in ssh.run_iter(mine=True):
|
||||||
|
rets.update(ret)
|
||||||
|
|
||||||
|
cret = {}
|
||||||
|
for host in rets:
|
||||||
|
if 'return' in rets[host]:
|
||||||
|
cret[host] = rets[host]['return']
|
||||||
|
else:
|
||||||
|
cret[host] = rets[host]
|
||||||
|
return cret
|
@ -702,7 +702,7 @@ class Cloud(object):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
for vm_name, details in vms.items():
|
for vm_name, details in vms.items():
|
||||||
# XXX: The logic bellow can be removed once the aws driver
|
# XXX: The logic below can be removed once the aws driver
|
||||||
# is removed
|
# is removed
|
||||||
if vm_name not in names:
|
if vm_name not in names:
|
||||||
continue
|
continue
|
||||||
|
@ -4,9 +4,9 @@ Primary interfaces for the salt-cloud system
|
|||||||
'''
|
'''
|
||||||
# Need to get data from 4 sources!
|
# Need to get data from 4 sources!
|
||||||
# CLI options
|
# CLI options
|
||||||
# salt cloud config - /etc/salt/cloud
|
# salt cloud config - CONFIG_DIR + '/cloud'
|
||||||
# salt master config (for master integration)
|
# salt master config (for master integration)
|
||||||
# salt VM config, where VMs are defined - /etc/salt/cloud.profiles
|
# salt VM config, where VMs are defined - CONFIG_DIR + '/cloud.profiles'
|
||||||
#
|
#
|
||||||
# The cli, master and cloud configs will merge for opts
|
# The cli, master and cloud configs will merge for opts
|
||||||
# the VM data will be in opts['profiles']
|
# the VM data will be in opts['profiles']
|
||||||
@ -31,7 +31,7 @@ from salt.utils.verify import check_user, verify_env, verify_files
|
|||||||
import salt.cloud
|
import salt.cloud
|
||||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||||
import salt.ext.six as six
|
import salt.ext.six as six
|
||||||
|
import salt.syspaths as syspaths
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
@ -50,9 +50,10 @@ class SaltCloud(parsers.SaltCloudParser):
|
|||||||
'If salt-cloud is running on a master machine, salt-cloud '
|
'If salt-cloud is running on a master machine, salt-cloud '
|
||||||
'needs to run as the same user as the salt-master, {0!r}. If '
|
'needs to run as the same user as the salt-master, {0!r}. If '
|
||||||
'salt-cloud is not running on a salt-master, the appropriate '
|
'salt-cloud is not running on a salt-master, the appropriate '
|
||||||
'write permissions must be granted to /etc/salt/. Please run '
|
'write permissions must be granted to {1!r}. Please run '
|
||||||
'salt-cloud as root, {0!r}, or change permissions for '
|
'salt-cloud as root, {0!r}, or change permissions for '
|
||||||
'/etc/salt/.'.format(salt_master_user)
|
'{1!r}.'.format(salt_master_user,
|
||||||
|
syspaths.CONFIG_DIR)
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -146,17 +146,22 @@ def list_nodes(call=None):
|
|||||||
'The list_nodes function must be called with -f or --function.'
|
'The list_nodes function must be called with -f or --function.'
|
||||||
)
|
)
|
||||||
|
|
||||||
items = query(method='droplets')
|
fetch = True
|
||||||
|
page = 1
|
||||||
ret = {}
|
ret = {}
|
||||||
for node in items['droplets']:
|
|
||||||
ret[node['name']] = {
|
while fetch:
|
||||||
'id': node['id'],
|
items = query(method='droplets', command='?page=' + str(page))
|
||||||
'image': node['image']['name'],
|
for node in items['droplets']:
|
||||||
'networks': str(node['networks']),
|
ret[node['name']] = {
|
||||||
'size': node['size_slug'],
|
'id': node['id'],
|
||||||
'state': str(node['status']),
|
'image': node['image']['name'],
|
||||||
}
|
'networks': str(node['networks']),
|
||||||
|
'size': node['size_slug'],
|
||||||
|
'state': str(node['status']),
|
||||||
|
}
|
||||||
|
page += 1
|
||||||
|
fetch = 'next' in items['links']['pages']
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
@ -169,16 +174,21 @@ def list_nodes_full(call=None, forOutput=True):
|
|||||||
'The list_nodes_full function must be called with -f or --function.'
|
'The list_nodes_full function must be called with -f or --function.'
|
||||||
)
|
)
|
||||||
|
|
||||||
items = query(method='droplets')
|
fetch = True
|
||||||
|
page = 1
|
||||||
ret = {}
|
ret = {}
|
||||||
for node in items['droplets']:
|
|
||||||
ret[node['name']] = {}
|
while fetch:
|
||||||
for item in node.keys():
|
items = query(method='droplets', command='?page=' + str(page))
|
||||||
value = node[item]
|
for node in items['droplets']:
|
||||||
if value is not None and forOutput:
|
ret[node['name']] = {}
|
||||||
value = str(value)
|
for item in node.keys():
|
||||||
ret[node['name']][item] = value
|
value = node[item]
|
||||||
|
if value is not None and forOutput:
|
||||||
|
value = str(value)
|
||||||
|
ret[node['name']][item] = value
|
||||||
|
page += 1
|
||||||
|
fetch = 'next' in items['links']['pages']
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
|
@ -314,8 +314,11 @@ def query(params=None, setname=None, requesturl=None, location=None,
|
|||||||
)
|
)
|
||||||
|
|
||||||
requesturl = 'https://{0}/'.format(endpoint)
|
requesturl = 'https://{0}/'.format(endpoint)
|
||||||
|
endpoint = _urlparse(requesturl).netloc
|
||||||
|
endpoint_path = _urlparse(requesturl).path
|
||||||
else:
|
else:
|
||||||
endpoint = _urlparse(requesturl).netloc
|
endpoint = _urlparse(requesturl).netloc
|
||||||
|
endpoint_path = _urlparse(requesturl).path
|
||||||
if endpoint == '':
|
if endpoint == '':
|
||||||
endpoint_err = (
|
endpoint_err = (
|
||||||
'Could not find a valid endpoint in the '
|
'Could not find a valid endpoint in the '
|
||||||
@ -347,9 +350,10 @@ def query(params=None, setname=None, requesturl=None, location=None,
|
|||||||
# %20, however urlencode uses '+'. So replace pluses with %20.
|
# %20, however urlencode uses '+'. So replace pluses with %20.
|
||||||
querystring = querystring.replace('+', '%20')
|
querystring = querystring.replace('+', '%20')
|
||||||
|
|
||||||
uri = '{0}\n{1}\n/\n{2}'.format(method.encode('utf-8'),
|
uri = '{0}\n{1}\n{2}\n{3}'.format(method.encode('utf-8'),
|
||||||
endpoint.encode('utf-8'),
|
endpoint.encode('utf-8'),
|
||||||
querystring.encode('utf-8'))
|
endpoint_path.encode('utf-8'),
|
||||||
|
querystring.encode('utf-8'))
|
||||||
|
|
||||||
hashed = hmac.new(provider['key'], uri, hashlib.sha256)
|
hashed = hmac.new(provider['key'], uri, hashlib.sha256)
|
||||||
sig = binascii.b2a_base64(hashed.digest())
|
sig = binascii.b2a_base64(hashed.digest())
|
||||||
@ -1325,7 +1329,7 @@ def _param_from_config(key, data):
|
|||||||
|
|
||||||
else:
|
else:
|
||||||
if isinstance(data, bool):
|
if isinstance(data, bool):
|
||||||
# convert boolean Trur/False to 'true'/'false'
|
# convert boolean True/False to 'true'/'false'
|
||||||
param.update({key: str(data).lower()})
|
param.update({key: str(data).lower()})
|
||||||
else:
|
else:
|
||||||
param.update({key: data})
|
param.update({key: data})
|
||||||
@ -1883,7 +1887,7 @@ def wait_for_instance(
|
|||||||
data = {}
|
data = {}
|
||||||
|
|
||||||
ssh_gateway_config = vm_.get(
|
ssh_gateway_config = vm_.get(
|
||||||
'ssh_gateway_config', get_ssh_gateway_config(vm_)
|
'gateway', get_ssh_gateway_config(vm_)
|
||||||
)
|
)
|
||||||
|
|
||||||
salt.utils.cloud.fire_event(
|
salt.utils.cloud.fire_event(
|
||||||
@ -2055,8 +2059,7 @@ def create(vm_=None, call=None):
|
|||||||
# Get SSH Gateway config early to verify the private_key,
|
# Get SSH Gateway config early to verify the private_key,
|
||||||
# if used, exists or not. We don't want to deploy an instance
|
# if used, exists or not. We don't want to deploy an instance
|
||||||
# and not be able to access it via the gateway.
|
# and not be able to access it via the gateway.
|
||||||
ssh_gateway_config = get_ssh_gateway_config(vm_)
|
vm_['gateway'] = get_ssh_gateway_config(vm_)
|
||||||
vm_['ssh_gateway_config'] = ssh_gateway_config
|
|
||||||
|
|
||||||
location = get_location(vm_)
|
location = get_location(vm_)
|
||||||
vm_['location'] = location
|
vm_['location'] = location
|
||||||
@ -2309,9 +2312,8 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
|
|||||||
if 'volume_id' not in volume_dict:
|
if 'volume_id' not in volume_dict:
|
||||||
created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
|
created_volume = create_volume(volume_dict, call='function', wait_to_finish=wait_to_finish)
|
||||||
created = True
|
created = True
|
||||||
for item in created_volume:
|
if 'volumeId' in created_volume:
|
||||||
if 'volumeId' in item:
|
volume_dict['volume_id'] = created_volume['volumeId']
|
||||||
volume_dict['volume_id'] = item['volumeId']
|
|
||||||
|
|
||||||
attach = attach_volume(
|
attach = attach_volume(
|
||||||
name,
|
name,
|
||||||
@ -2876,7 +2878,7 @@ def _vm_provider_driver(vm_):
|
|||||||
|
|
||||||
|
|
||||||
def _extract_name_tag(item):
|
def _extract_name_tag(item):
|
||||||
if 'tagSet' in item:
|
if 'tagSet' in item and item['tagSet'] is not None:
|
||||||
tagset = item['tagSet']
|
tagset = item['tagSet']
|
||||||
if isinstance(tagset['item'], list):
|
if isinstance(tagset['item'], list):
|
||||||
for tag in tagset['item']:
|
for tag in tagset['item']:
|
||||||
|
@ -31,6 +31,19 @@ associated with that vm. An example profile might look like:
|
|||||||
image: centos-6
|
image: centos-6
|
||||||
location: us-east-1
|
location: us-east-1
|
||||||
|
|
||||||
|
This driver can also be used with the Joyent SmartDataCenter project. More
|
||||||
|
details can be found at:
|
||||||
|
|
||||||
|
.. _`SmartDataCenter`: https://github.com/joyent/sdc
|
||||||
|
|
||||||
|
This requires that an api_host_suffix is set. The default value for this is
|
||||||
|
`.api.joyentcloud.com`. All characters, including the leading `.`, should be
|
||||||
|
included:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
api_host_suffix: .api.myhostname.com
|
||||||
|
|
||||||
:depends: requests
|
:depends: requests
|
||||||
'''
|
'''
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
@ -996,13 +1009,13 @@ def delete_key(kwargs=None, call=None):
|
|||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def get_location_path(location=DEFAULT_LOCATION):
|
def get_location_path(location=DEFAULT_LOCATION, api_host_suffix=JOYENT_API_HOST_SUFFIX):
|
||||||
'''
|
'''
|
||||||
create url from location variable
|
create url from location variable
|
||||||
:param location: joyent data center location
|
:param location: joyent data center location
|
||||||
:return: url
|
:return: url
|
||||||
'''
|
'''
|
||||||
return 'https://{0}{1}'.format(location, JOYENT_API_HOST_SUFFIX)
|
return 'https://{0}{1}'.format(location, api_host_suffix)
|
||||||
|
|
||||||
|
|
||||||
def query(action=None, command=None, args=None, method='GET', location=None,
|
def query(action=None, command=None, args=None, method='GET', location=None,
|
||||||
@ -1022,7 +1035,12 @@ def query(action=None, command=None, args=None, method='GET', location=None,
|
|||||||
if not location:
|
if not location:
|
||||||
location = get_location()
|
location = get_location()
|
||||||
|
|
||||||
path = get_location_path(location=location)
|
api_host_suffix = config.get_cloud_config_value(
|
||||||
|
'api_host_suffix', get_configured_provider(), __opts__,
|
||||||
|
search_global=False, default=JOYENT_API_HOST_SUFFIX
|
||||||
|
)
|
||||||
|
|
||||||
|
path = get_location_path(location=location, api_host_suffix=api_host_suffix)
|
||||||
|
|
||||||
if action:
|
if action:
|
||||||
path += action
|
path += action
|
||||||
|
@ -26,22 +26,26 @@ http://www.windowsazure.com/en-us/develop/python/how-to-guides/service-managemen
|
|||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
# Import python libs
|
|
||||||
import time
|
|
||||||
import copy
|
import copy
|
||||||
import pprint
|
|
||||||
import logging
|
import logging
|
||||||
|
import pprint
|
||||||
|
import time
|
||||||
|
|
||||||
# Import salt cloud libs
|
|
||||||
import salt.config as config
|
import salt.config as config
|
||||||
import salt.utils.cloud
|
|
||||||
from salt.exceptions import SaltCloudSystemExit
|
from salt.exceptions import SaltCloudSystemExit
|
||||||
|
import salt.utils.cloud
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
# Import python libs
|
||||||
|
# Import salt cloud libs
|
||||||
# Import azure libs
|
# Import azure libs
|
||||||
HAS_LIBS = False
|
HAS_LIBS = False
|
||||||
try:
|
try:
|
||||||
import azure
|
import azure
|
||||||
import azure.servicemanagement
|
import azure.servicemanagement
|
||||||
|
from azure import (WindowsAzureConflictError,
|
||||||
|
WindowsAzureMissingResourceError)
|
||||||
HAS_LIBS = True
|
HAS_LIBS = True
|
||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
@ -428,17 +432,21 @@ def create(vm_):
|
|||||||
conn = get_conn()
|
conn = get_conn()
|
||||||
|
|
||||||
label = vm_.get('label', vm_['name'])
|
label = vm_.get('label', vm_['name'])
|
||||||
|
service_name = vm_.get('service_name', vm_['name'])
|
||||||
service_kwargs = {
|
service_kwargs = {
|
||||||
'service_name': vm_['name'],
|
'service_name': service_name,
|
||||||
'label': label,
|
'label': label,
|
||||||
'description': vm_.get('desc', vm_['name']),
|
'description': vm_.get('desc', vm_['name']),
|
||||||
'location': vm_['location'],
|
'location': vm_['location'],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ssh_port = config.get_cloud_config_value('port', vm_, __opts__,
|
||||||
|
default='22', search_global=True)
|
||||||
|
|
||||||
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
|
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
|
||||||
name='SSH',
|
name='SSH',
|
||||||
protocol='TCP',
|
protocol='TCP',
|
||||||
port='22',
|
port=ssh_port,
|
||||||
local_port='22',
|
local_port='22',
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -460,8 +468,8 @@ def create(vm_):
|
|||||||
os_hd = azure.servicemanagement.OSVirtualHardDisk(vm_['image'], media_link)
|
os_hd = azure.servicemanagement.OSVirtualHardDisk(vm_['image'], media_link)
|
||||||
|
|
||||||
vm_kwargs = {
|
vm_kwargs = {
|
||||||
'service_name': vm_['name'],
|
'service_name': service_name,
|
||||||
'deployment_name': vm_['name'],
|
'deployment_name': service_name,
|
||||||
'deployment_slot': vm_['slot'],
|
'deployment_slot': vm_['slot'],
|
||||||
'label': label,
|
'label': label,
|
||||||
'role_name': vm_['name'],
|
'role_name': vm_['name'],
|
||||||
@ -491,7 +499,8 @@ def create(vm_):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
conn.create_hosted_service(**service_kwargs)
|
conn.create_hosted_service(**service_kwargs)
|
||||||
conn.create_virtual_machine_deployment(**vm_kwargs)
|
except WindowsAzureConflictError:
|
||||||
|
log.debug("Cloud service already exists")
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
error = 'The hosted service name is invalid.'
|
error = 'The hosted service name is invalid.'
|
||||||
if error in str(exc):
|
if error in str(exc):
|
||||||
@ -516,17 +525,54 @@ def create(vm_):
|
|||||||
exc_info_on_loglevel=logging.DEBUG
|
exc_info_on_loglevel=logging.DEBUG
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
try:
|
||||||
|
conn.create_virtual_machine_deployment(**vm_kwargs)
|
||||||
|
except WindowsAzureConflictError:
|
||||||
|
log.debug("Conflict error. The deployment may already exist, trying add_role")
|
||||||
|
# Deleting two useless keywords
|
||||||
|
del vm_kwargs['deployment_slot']
|
||||||
|
del vm_kwargs['label']
|
||||||
|
conn.add_role(**vm_kwargs)
|
||||||
|
except Exception as exc:
|
||||||
|
error = 'The hosted service name is invalid.'
|
||||||
|
if error in str(exc):
|
||||||
|
log.error(
|
||||||
|
'Error creating {0} on Azure.\n\n'
|
||||||
|
'The VM name is invalid. The name can contain '
|
||||||
|
'only letters, numbers, and hyphens. The name must start with '
|
||||||
|
'a letter and must end with a letter or a number.'.format(
|
||||||
|
vm_['name']
|
||||||
|
),
|
||||||
|
# Show the traceback if the debug logging level is enabled
|
||||||
|
exc_info_on_loglevel=logging.DEBUG
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
log.error(
|
||||||
|
'Error creating {0} on Azure.\n\n'
|
||||||
|
'The Virtual Machine could not be created. If you '
|
||||||
|
'are using an already existing Cloud Service, '
|
||||||
|
'make sure you set up the `port` variable corresponding '
|
||||||
|
'to the SSH port exists and that the port number is not '
|
||||||
|
'already in use.\nThe following exception was thrown when trying to '
|
||||||
|
'run the initial deployment: \n{1}'.format(
|
||||||
|
vm_['name'], str(exc)
|
||||||
|
),
|
||||||
|
# Show the traceback if the debug logging level is enabled
|
||||||
|
exc_info_on_loglevel=logging.DEBUG
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
def wait_for_hostname():
|
def wait_for_hostname():
|
||||||
'''
|
'''
|
||||||
Wait for the IP address to become available
|
Wait for the IP address to become available
|
||||||
'''
|
'''
|
||||||
try:
|
try:
|
||||||
data = show_instance(vm_['name'], call='action')
|
conn.get_role(service_name, service_name, vm_["name"])
|
||||||
except Exception:
|
data = show_instance(service_name, call='action')
|
||||||
|
if 'url' in data and data['url'] != str(''):
|
||||||
|
return data['url']
|
||||||
|
except WindowsAzureMissingResourceError:
|
||||||
pass
|
pass
|
||||||
if 'url' in data and data['url'] != str(''):
|
|
||||||
return data['url']
|
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -555,6 +601,7 @@ def create(vm_):
|
|||||||
deploy_kwargs = {
|
deploy_kwargs = {
|
||||||
'opts': __opts__,
|
'opts': __opts__,
|
||||||
'host': hostname,
|
'host': hostname,
|
||||||
|
'port': ssh_port,
|
||||||
'username': ssh_username,
|
'username': ssh_username,
|
||||||
'password': ssh_password,
|
'password': ssh_password,
|
||||||
'script': deploy_script,
|
'script': deploy_script,
|
||||||
@ -656,6 +703,37 @@ def create(vm_):
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Attaching volumes
|
||||||
|
volumes = config.get_cloud_config_value(
|
||||||
|
'volumes', vm_, __opts__, search_global=True
|
||||||
|
)
|
||||||
|
if volumes:
|
||||||
|
salt.utils.cloud.fire_event(
|
||||||
|
'event',
|
||||||
|
'attaching volumes',
|
||||||
|
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
|
||||||
|
{'volumes': volumes},
|
||||||
|
transport=__opts__['transport']
|
||||||
|
)
|
||||||
|
|
||||||
|
log.info('Create and attach volumes to node {0}'.format(vm_['name']))
|
||||||
|
created = create_attach_volumes(
|
||||||
|
vm_['name'],
|
||||||
|
{
|
||||||
|
'volumes': volumes,
|
||||||
|
'service_name': service_name,
|
||||||
|
'deployment_name': vm_['name'],
|
||||||
|
'media_link': media_link,
|
||||||
|
'role_name': vm_['name'],
|
||||||
|
'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
|
||||||
|
},
|
||||||
|
call='action'
|
||||||
|
)
|
||||||
|
ret['Attached Volumes'] = created
|
||||||
|
|
||||||
|
for key, value in salt.utils.cloud.bootstrap(vm_, __opts__).items():
|
||||||
|
ret.setdefault(key, value)
|
||||||
|
|
||||||
data = show_instance(vm_['name'], call='action')
|
data = show_instance(vm_['name'], call='action')
|
||||||
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
|
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
|
||||||
log.debug(
|
log.debug(
|
||||||
@ -681,9 +759,105 @@ def create(vm_):
|
|||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
def destroy(name, conn=None, call=None):
|
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
|
||||||
|
'''
|
||||||
|
Create and attach volumes to created node
|
||||||
|
'''
|
||||||
|
if call != 'action':
|
||||||
|
raise SaltCloudSystemExit(
|
||||||
|
'The create_attach_volumes action must be called with '
|
||||||
|
'-a or --action.'
|
||||||
|
)
|
||||||
|
|
||||||
|
if isinstance(kwargs['volumes'], str):
|
||||||
|
volumes = yaml.safe_load(kwargs['volumes'])
|
||||||
|
else:
|
||||||
|
volumes = kwargs['volumes']
|
||||||
|
|
||||||
|
# From the Azure .NET SDK doc
|
||||||
|
#
|
||||||
|
# The Create Data Disk operation adds a data disk to a virtual
|
||||||
|
# machine. There are three ways to create the data disk using the
|
||||||
|
# Add Data Disk operation.
|
||||||
|
# Option 1 - Attach an empty data disk to
|
||||||
|
# the role by specifying the disk label and location of the disk
|
||||||
|
# image. Do not include the DiskName and SourceMediaLink elements in
|
||||||
|
# the request body. Include the MediaLink element and reference a
|
||||||
|
# blob that is in the same geographical region as the role. You can
|
||||||
|
# also omit the MediaLink element. In this usage, Azure will create
|
||||||
|
# the data disk in the storage account configured as default for the
|
||||||
|
# role.
|
||||||
|
# Option 2 - Attach an existing data disk that is in the image
|
||||||
|
# repository. Do not include the DiskName and SourceMediaLink
|
||||||
|
# elements in the request body. Specify the data disk to use by
|
||||||
|
# including the DiskName element. Note: If included the in the
|
||||||
|
# response body, the MediaLink and LogicalDiskSizeInGB elements are
|
||||||
|
# ignored.
|
||||||
|
# Option 3 - Specify the location of a blob in your storage
|
||||||
|
# account that contain a disk image to use. Include the
|
||||||
|
# SourceMediaLink element. Note: If the MediaLink element
|
||||||
|
# isincluded, it is ignored. (see
|
||||||
|
# http://msdn.microsoft.com/en-us/library/windowsazure/jj157199.aspx
|
||||||
|
# for more information)
|
||||||
|
#
|
||||||
|
# Here only option 1 is implemented
|
||||||
|
conn = get_conn()
|
||||||
|
ret = []
|
||||||
|
for volume in volumes:
|
||||||
|
if "disk_name" in volume:
|
||||||
|
log.error("You cannot specify a disk_name. Only new volumes are allowed")
|
||||||
|
return False
|
||||||
|
# Use the size keyword to set a size, but you can use the
|
||||||
|
# azure name too. If neither is set, the disk has size 100GB
|
||||||
|
volume.setdefault("logical_disk_size_in_gb", volume.get("size", 100))
|
||||||
|
volume.setdefault("host_caching", "ReadOnly")
|
||||||
|
volume.setdefault("lun", 0)
|
||||||
|
# The media link is vm_name-disk-[0-15].vhd
|
||||||
|
volume.setdefault("media_link",
|
||||||
|
kwargs["media_link"][:-4] + "-disk-{0}.vhd".format(volume["lun"]))
|
||||||
|
volume.setdefault("disk_label",
|
||||||
|
kwargs["role_name"] + "-disk-{0}".format(volume["lun"]))
|
||||||
|
volume_dict = {
|
||||||
|
'volume_name': volume["lun"],
|
||||||
|
'disk_label': volume["disk_label"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Preparing the volume dict to be passed with **
|
||||||
|
kwargs_add_data_disk = ["lun", "host_caching", "media_link",
|
||||||
|
"disk_label", "disk_name",
|
||||||
|
"logical_disk_size_in_gb",
|
||||||
|
"source_media_link"]
|
||||||
|
for key in set(volume.keys()) - set(kwargs_add_data_disk):
|
||||||
|
del volume[key]
|
||||||
|
|
||||||
|
attach = conn.add_data_disk(kwargs["service_name"], kwargs["deployment_name"], kwargs["role_name"],
|
||||||
|
**volume)
|
||||||
|
log.debug(attach)
|
||||||
|
|
||||||
|
# If attach is None then everything is fine
|
||||||
|
if attach:
|
||||||
|
msg = (
|
||||||
|
'{0} attached to {1} (aka {2})'.format(
|
||||||
|
volume_dict['volume_name'],
|
||||||
|
kwargs['role_name'],
|
||||||
|
name,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
log.info(msg)
|
||||||
|
ret.append(msg)
|
||||||
|
else:
|
||||||
|
log.error('Error attaching {0} on Azure'.format(volume_dict))
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
|
def destroy(name, conn=None, call=None, kwargs=None):
|
||||||
'''
|
'''
|
||||||
Destroy a VM
|
Destroy a VM
|
||||||
|
|
||||||
|
CLI Examples::
|
||||||
|
|
||||||
|
salt-cloud -d myminion
|
||||||
|
salt-cloud -a destroy myminion service_name=myservice
|
||||||
'''
|
'''
|
||||||
if call == 'function':
|
if call == 'function':
|
||||||
raise SaltCloudSystemExit(
|
raise SaltCloudSystemExit(
|
||||||
@ -694,10 +868,15 @@ def destroy(name, conn=None, call=None):
|
|||||||
if not conn:
|
if not conn:
|
||||||
conn = get_conn()
|
conn = get_conn()
|
||||||
|
|
||||||
|
if kwargs is None:
|
||||||
|
kwargs = {}
|
||||||
|
|
||||||
|
service_name = kwargs.get('service_name', name)
|
||||||
|
|
||||||
ret = {}
|
ret = {}
|
||||||
# TODO: Add the ability to delete or not delete a hosted service when
|
# TODO: Add the ability to delete or not delete a hosted service when
|
||||||
# deleting a VM
|
# deleting a VM
|
||||||
del_vm = conn.delete_deployment(service_name=name, deployment_name=name)
|
del_vm = conn.delete_deployment(service_name=service_name, deployment_name=name)
|
||||||
del_service = conn.delete_hosted_service
|
del_service = conn.delete_hosted_service
|
||||||
ret[name] = {
|
ret[name] = {
|
||||||
'request_id': del_vm.request_id,
|
'request_id': del_vm.request_id,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
# CREATED: 10/15/2012 09:49:37 PM WEST
|
# CREATED: 10/15/2012 09:49:37 PM WEST
|
||||||
#======================================================================================================================
|
#======================================================================================================================
|
||||||
set -o nounset # Treat unset variables as an error
|
set -o nounset # Treat unset variables as an error
|
||||||
__ScriptVersion="2014.12.11"
|
__ScriptVersion="2015.01.12"
|
||||||
__ScriptName="bootstrap-salt.sh"
|
__ScriptName="bootstrap-salt.sh"
|
||||||
|
|
||||||
#======================================================================================================================
|
#======================================================================================================================
|
||||||
@ -1256,7 +1256,7 @@ __check_end_of_life_versions() {
|
|||||||
|
|
||||||
case "${DISTRO_NAME_L}" in
|
case "${DISTRO_NAME_L}" in
|
||||||
debian)
|
debian)
|
||||||
# Debian versions bellow 6 are not supported
|
# Debian versions below 6 are not supported
|
||||||
if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
|
if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
|
||||||
echoerror "End of life distributions are not supported."
|
echoerror "End of life distributions are not supported."
|
||||||
echoerror "Please consider upgrading to the next stable. See:"
|
echoerror "Please consider upgrading to the next stable. See:"
|
||||||
@ -1993,6 +1993,9 @@ install_debian_6_deps() {
|
|||||||
|
|
||||||
apt-get update
|
apt-get update
|
||||||
|
|
||||||
|
# Make sure wget is available
|
||||||
|
__apt_get_install_noinput wget
|
||||||
|
|
||||||
# Install Keys
|
# Install Keys
|
||||||
__apt_get_install_noinput debian-archive-keyring && apt-get update
|
__apt_get_install_noinput debian-archive-keyring && apt-get update
|
||||||
|
|
||||||
@ -2090,6 +2093,10 @@ install_debian_7_deps() {
|
|||||||
export DEBIAN_FRONTEND=noninteractive
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
apt-get update
|
apt-get update
|
||||||
|
|
||||||
|
# Make sure wget is available
|
||||||
|
__apt_get_install_noinput wget
|
||||||
|
|
||||||
# Install Keys
|
# Install Keys
|
||||||
__apt_get_install_noinput debian-archive-keyring && apt-get update
|
__apt_get_install_noinput debian-archive-keyring && apt-get update
|
||||||
|
|
||||||
@ -2577,6 +2584,7 @@ install_centos_stable_deps() {
|
|||||||
__install_epel_repository || return 1
|
__install_epel_repository || return 1
|
||||||
|
|
||||||
if [ "$_ENABLE_EXTERNAL_ZMQ_REPOS" -eq $BS_TRUE ]; then
|
if [ "$_ENABLE_EXTERNAL_ZMQ_REPOS" -eq $BS_TRUE ]; then
|
||||||
|
yum -y install python-hashlib || return 1
|
||||||
__install_saltstack_copr_zeromq_repository || return 1
|
__install_saltstack_copr_zeromq_repository || return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@ -2729,15 +2737,18 @@ install_centos_git_post() {
|
|||||||
[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
|
[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
|
||||||
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
|
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
|
||||||
|
|
||||||
if [ ! -f /usr/lib/systemd/system/salt-${fname}.service ] || ([ -f /usr/lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then
|
if [ -f /bin/systemctl ]; then
|
||||||
copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system/
|
if [ ! -f /usr/lib/systemd/system/salt-${fname}.service ] || ([ -f /usr/lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then
|
||||||
|
copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system/
|
||||||
|
fi
|
||||||
|
|
||||||
# Skip salt-api since the service should be opt-in and not necessarily started on boot
|
# Skip salt-api since the service should be opt-in and not necessarily started on boot
|
||||||
[ $fname = "api" ] && continue
|
[ $fname = "api" ] && continue
|
||||||
|
|
||||||
/bin/systemctl enable salt-${fname}.service
|
/bin/systemctl enable salt-${fname}.service
|
||||||
SYSTEMD_RELOAD=$BS_TRUE
|
SYSTEMD_RELOAD=$BS_TRUE
|
||||||
elif [ ! -f /usr/lib/systemd/system/salt-${fname}.service ] && [ ! -f /etc/init.d/salt-$fname ] || ([ -f /etc/init.d/salt-$fname ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then
|
|
||||||
|
elif [ ! -f /etc/init.d/salt-$fname ] || ([ -f /etc/init.d/salt-$fname ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then
|
||||||
copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d/
|
copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d/
|
||||||
chmod +x /etc/init.d/salt-${fname}
|
chmod +x /etc/init.d/salt-${fname}
|
||||||
|
|
||||||
@ -3369,10 +3380,19 @@ install_amazon_linux_ami_testing_post() {
|
|||||||
#
|
#
|
||||||
install_arch_linux_stable_deps() {
|
install_arch_linux_stable_deps() {
|
||||||
|
|
||||||
|
echoinfo "Running pacman db upgrade"
|
||||||
|
pacman-db-upgrade || return 1
|
||||||
|
|
||||||
if [ ! -f /etc/pacman.d/gnupg ]; then
|
if [ ! -f /etc/pacman.d/gnupg ]; then
|
||||||
pacman-key --init && pacman-key --populate archlinux || return 1
|
pacman-key --init && pacman-key --populate archlinux || return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
pacman -Sy --noconfirm --needed pacman || return 1
|
||||||
|
|
||||||
|
if [ "$(which pacman-db-upgrade)" != "" ]; then
|
||||||
|
pacman-db-upgrade || return 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
||||||
pacman -Syyu --noconfirm --needed || return 1
|
pacman -Syyu --noconfirm --needed || return 1
|
||||||
fi
|
fi
|
||||||
@ -3392,9 +3412,8 @@ install_arch_linux_stable_deps() {
|
|||||||
install_arch_linux_git_deps() {
|
install_arch_linux_git_deps() {
|
||||||
install_arch_linux_stable_deps
|
install_arch_linux_stable_deps
|
||||||
|
|
||||||
pacman -Sy --noconfirm --needed pacman || return 1
|
|
||||||
# Don't fail if un-installing python2-distribute threw an error
|
# Don't fail if un-installing python2-distribute threw an error
|
||||||
pacman -R --noconfirm --needed python2-distribute
|
pacman -R --noconfirm python2-distribute
|
||||||
pacman -Sy --noconfirm --needed git python2-crypto python2-setuptools python2-jinja \
|
pacman -Sy --noconfirm --needed git python2-crypto python2-setuptools python2-jinja \
|
||||||
python2-m2crypto python2-markupsafe python2-msgpack python2-psutil python2-yaml \
|
python2-m2crypto python2-markupsafe python2-msgpack python2-psutil python2-yaml \
|
||||||
python2-pyzmq zeromq python2-requests python2-systemd || return 1
|
python2-pyzmq zeromq python2-requests python2-systemd || return 1
|
||||||
@ -3623,6 +3642,10 @@ install_freebsd_9_stable_deps() {
|
|||||||
/usr/local/sbin/pkg install ${SALT_PKG_FLAGS} -y ${_EXTRA_PACKAGES} || return 1
|
/usr/local/sbin/pkg install ${SALT_PKG_FLAGS} -y ${_EXTRA_PACKAGES} || return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
|
||||||
|
pkg upgrade -y || return 1
|
||||||
|
fi
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2035,7 +2035,7 @@ def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None):
|
|||||||
# out or not present.
|
# out or not present.
|
||||||
if opts.get('nodegroups') is None:
|
if opts.get('nodegroups') is None:
|
||||||
opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
|
opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
|
||||||
if opts.get('transport') == 'raet' and not opts.get('zmq_behavior'):
|
if opts.get('transport') == 'raet' and not opts.get('zmq_behavior') and 'aes' in opts:
|
||||||
opts.pop('aes')
|
opts.pop('aes')
|
||||||
return opts
|
return opts
|
||||||
|
|
||||||
@ -2056,8 +2056,6 @@ def apply_master_config(overrides=None, defaults=None):
|
|||||||
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
|
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
|
||||||
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
|
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
|
||||||
|
|
||||||
opts['aes'] = salt.crypt.Crypticle.generate_key_string()
|
|
||||||
|
|
||||||
opts['extension_modules'] = (
|
opts['extension_modules'] = (
|
||||||
opts.get('extension_modules') or
|
opts.get('extension_modules') or
|
||||||
os.path.join(opts['cachedir'], 'extmods')
|
os.path.join(opts['cachedir'], 'extmods')
|
||||||
@ -2128,7 +2126,7 @@ def apply_master_config(overrides=None, defaults=None):
|
|||||||
if isinstance(opts['file_ignore_glob'], str):
|
if isinstance(opts['file_ignore_glob'], str):
|
||||||
opts['file_ignore_glob'] = [opts['file_ignore_glob']]
|
opts['file_ignore_glob'] = [opts['file_ignore_glob']]
|
||||||
|
|
||||||
# Let's make sure `worker_threads` does not drop bellow 3 which has proven
|
# Let's make sure `worker_threads` does not drop below 3 which has proven
|
||||||
# to make `salt.modules.publish` not work under the test-suite.
|
# to make `salt.modules.publish` not work under the test-suite.
|
||||||
if opts['worker_threads'] < 3 and opts.get('peer', None):
|
if opts['worker_threads'] < 3 and opts.get('peer', None):
|
||||||
log.warning(
|
log.warning(
|
||||||
|
182
salt/crypt.py
182
salt/crypt.py
@ -12,7 +12,6 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
import hmac
|
import hmac
|
||||||
import shutil
|
|
||||||
import hashlib
|
import hashlib
|
||||||
import logging
|
import logging
|
||||||
import traceback
|
import traceback
|
||||||
@ -40,61 +39,27 @@ from salt.exceptions import (
|
|||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def dropfile(cachedir, user=None, sock_dir=None):
|
def dropfile(cachedir, user=None):
|
||||||
'''
|
'''
|
||||||
Set an AES dropfile to update the publish session key
|
Set an AES dropfile to request the master update the publish session key
|
||||||
|
|
||||||
A dropfile is checked periodically by master workers to determine
|
|
||||||
if AES key rotation has occurred.
|
|
||||||
'''
|
'''
|
||||||
dfnt = os.path.join(cachedir, '.dfnt')
|
|
||||||
dfn = os.path.join(cachedir, '.dfn')
|
dfn = os.path.join(cachedir, '.dfn')
|
||||||
|
# set a mask (to avoid a race condition on file creation) and store original.
|
||||||
def ready():
|
|
||||||
'''
|
|
||||||
Because MWorker._update_aes uses second-precision mtime
|
|
||||||
to detect changes to the file, we must avoid writing two
|
|
||||||
versions with the same mtime.
|
|
||||||
|
|
||||||
Note that this only makes rapid updates in serial safe: concurrent
|
|
||||||
updates could still both pass this check and then write two different
|
|
||||||
keys with the same mtime.
|
|
||||||
'''
|
|
||||||
try:
|
|
||||||
stats = os.stat(dfn)
|
|
||||||
except os.error:
|
|
||||||
# Not there, go ahead and write it
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
if stats.st_mtime == time.time():
|
|
||||||
# The mtime is the current time, we must
|
|
||||||
# wait until time has moved on.
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
return True
|
|
||||||
|
|
||||||
while not ready():
|
|
||||||
log.warning('Waiting before writing {0}'.format(dfn))
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
log.info('Rotating AES key')
|
|
||||||
aes = Crypticle.generate_key_string()
|
|
||||||
mask = os.umask(191)
|
mask = os.umask(191)
|
||||||
with salt.utils.fopen(dfnt, 'w+') as fp_:
|
try:
|
||||||
fp_.write(aes)
|
log.info('Rotating AES key')
|
||||||
if user:
|
|
||||||
try:
|
|
||||||
import pwd
|
|
||||||
uid = pwd.getpwnam(user).pw_uid
|
|
||||||
os.chown(dfnt, uid, -1)
|
|
||||||
except (KeyError, ImportError, OSError, IOError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
shutil.move(dfnt, dfn)
|
with salt.utils.fopen(dfn, 'w+') as fp_:
|
||||||
os.umask(mask)
|
fp_.write('')
|
||||||
if sock_dir:
|
if user:
|
||||||
event = salt.utils.event.SaltEvent('master', sock_dir)
|
try:
|
||||||
event.fire_event({'rotate_aes_key': True}, tag='key')
|
import pwd
|
||||||
|
uid = pwd.getpwnam(user).pw_uid
|
||||||
|
os.chown(dfn, uid, -1)
|
||||||
|
except (KeyError, ImportError, OSError, IOError):
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
os.umask(mask) # restore original umask
|
||||||
|
|
||||||
|
|
||||||
def gen_keys(keydir, keyname, keysize, user=None):
|
def gen_keys(keydir, keyname, keysize, user=None):
|
||||||
@ -288,12 +253,39 @@ class MasterKeys(dict):
|
|||||||
return self.pub_signature
|
return self.pub_signature
|
||||||
|
|
||||||
|
|
||||||
class Auth(object):
|
class SAuth(object):
|
||||||
'''
|
'''
|
||||||
The Auth class provides the sequence for setting up communication with
|
Set up an object to maintain authentication with the salt master
|
||||||
the master server from a minion.
|
|
||||||
'''
|
'''
|
||||||
|
# This class is only a singleton per minion/master pair
|
||||||
|
instances = {}
|
||||||
|
|
||||||
|
def __new__(cls, opts):
|
||||||
|
'''
|
||||||
|
Only create one instance of SAuth per __key()
|
||||||
|
'''
|
||||||
|
key = cls.__key(opts)
|
||||||
|
if key not in SAuth.instances:
|
||||||
|
log.debug('Initializing new SAuth for {0}'.format(key))
|
||||||
|
SAuth.instances[key] = object.__new__(cls)
|
||||||
|
SAuth.instances[key].__singleton_init__(opts)
|
||||||
|
else:
|
||||||
|
log.debug('Re-using SAuth for {0}'.format(key))
|
||||||
|
return SAuth.instances[key]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def __key(cls, opts):
|
||||||
|
return (opts['pki_dir'], # where the keys are stored
|
||||||
|
opts['id'], # minion ID
|
||||||
|
opts['master_uri'], # master ID
|
||||||
|
)
|
||||||
|
|
||||||
|
# has to remain empty for singletons, since __init__ will *always* be called
|
||||||
def __init__(self, opts):
|
def __init__(self, opts):
|
||||||
|
pass
|
||||||
|
|
||||||
|
# an init for the singleton instance to call
|
||||||
|
def __singleton_init__(self, opts):
|
||||||
'''
|
'''
|
||||||
Init an Auth instance
|
Init an Auth instance
|
||||||
|
|
||||||
@ -315,6 +307,41 @@ class Auth(object):
|
|||||||
if not os.path.isfile(self.pub_path):
|
if not os.path.isfile(self.pub_path):
|
||||||
self.get_keys()
|
self.get_keys()
|
||||||
|
|
||||||
|
self.authenticate()
|
||||||
|
|
||||||
|
def authenticate(self):
|
||||||
|
'''
|
||||||
|
Authenticate with the master, this method breaks the functional
|
||||||
|
paradigm, it will update the master information from a fresh sign
|
||||||
|
in, signing in can occur as often as needed to keep up with the
|
||||||
|
revolving master AES key.
|
||||||
|
|
||||||
|
:rtype: Crypticle
|
||||||
|
:returns: A crypticle used for encryption operations
|
||||||
|
'''
|
||||||
|
acceptance_wait_time = self.opts['acceptance_wait_time']
|
||||||
|
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
|
||||||
|
if not acceptance_wait_time_max:
|
||||||
|
acceptance_wait_time_max = acceptance_wait_time
|
||||||
|
|
||||||
|
while True:
|
||||||
|
creds = self.sign_in()
|
||||||
|
if creds == 'retry':
|
||||||
|
if self.opts.get('caller'):
|
||||||
|
print('Minion failed to authenticate with the master, '
|
||||||
|
'has the minion key been accepted?')
|
||||||
|
sys.exit(2)
|
||||||
|
if acceptance_wait_time:
|
||||||
|
log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
|
||||||
|
time.sleep(acceptance_wait_time)
|
||||||
|
if acceptance_wait_time < acceptance_wait_time_max:
|
||||||
|
acceptance_wait_time += acceptance_wait_time
|
||||||
|
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
|
||||||
|
continue
|
||||||
|
break
|
||||||
|
self.creds = creds
|
||||||
|
self.crypticle = Crypticle(self.opts, creds['aes'])
|
||||||
|
|
||||||
def get_keys(self):
|
def get_keys(self):
|
||||||
'''
|
'''
|
||||||
Return keypair object for the minion.
|
Return keypair object for the minion.
|
||||||
@ -645,6 +672,8 @@ class Auth(object):
|
|||||||
|
|
||||||
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
|
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
|
||||||
|
|
||||||
|
auth['master_uri'] = self.opts['master_uri']
|
||||||
|
|
||||||
sreq = salt.payload.SREQ(
|
sreq = salt.payload.SREQ(
|
||||||
self.opts['master_uri'],
|
self.opts['master_uri'],
|
||||||
)
|
)
|
||||||
@ -745,7 +774,8 @@ class Crypticle(object):
|
|||||||
SIG_SIZE = hashlib.sha256().digest_size
|
SIG_SIZE = hashlib.sha256().digest_size
|
||||||
|
|
||||||
def __init__(self, opts, key_string, key_size=192):
|
def __init__(self, opts, key_string, key_size=192):
|
||||||
self.keys = self.extract_keys(key_string, key_size)
|
self.key_string = key_string
|
||||||
|
self.keys = self.extract_keys(self.key_string, key_size)
|
||||||
self.key_size = key_size
|
self.key_size = key_size
|
||||||
self.serial = salt.payload.Serial(opts)
|
self.serial = salt.payload.Serial(opts)
|
||||||
|
|
||||||
@ -811,45 +841,3 @@ class Crypticle(object):
|
|||||||
if not data.startswith(self.PICKLE_PAD):
|
if not data.startswith(self.PICKLE_PAD):
|
||||||
return {}
|
return {}
|
||||||
return self.serial.loads(data[len(self.PICKLE_PAD):])
|
return self.serial.loads(data[len(self.PICKLE_PAD):])
|
||||||
|
|
||||||
|
|
||||||
class SAuth(Auth):
|
|
||||||
'''
|
|
||||||
Set up an object to maintain the standalone authentication session
|
|
||||||
with the salt master
|
|
||||||
'''
|
|
||||||
def __init__(self, opts):
|
|
||||||
super(SAuth, self).__init__(opts)
|
|
||||||
self.crypticle = self.__authenticate()
|
|
||||||
|
|
||||||
def __authenticate(self):
|
|
||||||
'''
|
|
||||||
Authenticate with the master, this method breaks the functional
|
|
||||||
paradigm, it will update the master information from a fresh sign
|
|
||||||
in, signing in can occur as often as needed to keep up with the
|
|
||||||
revolving master AES key.
|
|
||||||
|
|
||||||
:rtype: Crypticle
|
|
||||||
:returns: A crypticle used for encryption operations
|
|
||||||
'''
|
|
||||||
acceptance_wait_time = self.opts['acceptance_wait_time']
|
|
||||||
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
|
|
||||||
if not acceptance_wait_time_max:
|
|
||||||
acceptance_wait_time_max = acceptance_wait_time
|
|
||||||
|
|
||||||
while True:
|
|
||||||
creds = self.sign_in()
|
|
||||||
if creds == 'retry':
|
|
||||||
if self.opts.get('caller'):
|
|
||||||
print('Minion failed to authenticate with the master, '
|
|
||||||
'has the minion key been accepted?')
|
|
||||||
sys.exit(2)
|
|
||||||
if acceptance_wait_time:
|
|
||||||
log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
|
|
||||||
time.sleep(acceptance_wait_time)
|
|
||||||
if acceptance_wait_time < acceptance_wait_time_max:
|
|
||||||
acceptance_wait_time += acceptance_wait_time
|
|
||||||
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
|
|
||||||
continue
|
|
||||||
break
|
|
||||||
return Crypticle(self.opts, creds['aes'])
|
|
||||||
|
@ -846,8 +846,9 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
|
|||||||
'publish': '.salt.var.publish',
|
'publish': '.salt.var.publish',
|
||||||
'fun': '.salt.var.fun',
|
'fun': '.salt.var.fun',
|
||||||
'event': '.salt.event.events',
|
'event': '.salt.event.events',
|
||||||
'event_req': '.salt.event.event_req',
|
'event_req': '.salt.event.event_req', # deque
|
||||||
'presence_req': '.salt.presence.event_req',
|
'presence_req': '.salt.presence.event_req', # deque
|
||||||
|
'availables': '.salt.var.presence.availables', # set()
|
||||||
'workers': '.salt.track.workers',
|
'workers': '.salt.track.workers',
|
||||||
'worker_verify': '.salt.var.worker_verify',
|
'worker_verify': '.salt.var.worker_verify',
|
||||||
'lane_stack': '.salt.lane.manor.stack',
|
'lane_stack': '.salt.lane.manor.stack',
|
||||||
@ -879,9 +880,7 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
|
|||||||
self.lane_stack.value.local.name,
|
self.lane_stack.value.local.name,
|
||||||
msg))
|
msg))
|
||||||
|
|
||||||
if d_estate is None:
|
if d_estate is not None and d_estate != self.road_stack.value.local.name:
|
||||||
pass
|
|
||||||
elif d_estate != self.road_stack.value.local.name:
|
|
||||||
log.error(
|
log.error(
|
||||||
'Road Router Received message for wrong estate: {0}'.format(d_estate))
|
'Road Router Received message for wrong estate: {0}'.format(d_estate))
|
||||||
return
|
return
|
||||||
@ -951,6 +950,8 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
|
|||||||
return
|
return
|
||||||
|
|
||||||
if d_share == 'pub_ret':
|
if d_share == 'pub_ret':
|
||||||
|
# only publish to available minions
|
||||||
|
msg['return']['ret']['minions'] = self._availablize(msg['return']['ret']['minions'])
|
||||||
if msg.get('__worker_verify') == self.worker_verify.value:
|
if msg.get('__worker_verify') == self.worker_verify.value:
|
||||||
self.publish.value.append(msg)
|
self.publish.value.append(msg)
|
||||||
|
|
||||||
@ -1022,6 +1023,15 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
|
|||||||
|
|
||||||
return self.master_estate_name.value
|
return self.master_estate_name.value
|
||||||
|
|
||||||
|
def _availablize(self, minions):
|
||||||
|
'''
|
||||||
|
Return set that is intersection of associated minion estates for
|
||||||
|
roles in minions and the set of available minion estates.
|
||||||
|
'''
|
||||||
|
suffix = '_{0}'.format(kinds.APPL_KIND_NAMES[kinds.applKinds.minion])
|
||||||
|
return list(set(minions) &
|
||||||
|
set((name.rstrip(suffix) for name in self.availables.value)))
|
||||||
|
|
||||||
def action(self):
|
def action(self):
|
||||||
'''
|
'''
|
||||||
Process the messages!
|
Process the messages!
|
||||||
@ -1106,10 +1116,11 @@ class SaltRaetPresenter(ioflo.base.deeding.Deed):
|
|||||||
Ioinits = {'opts': '.salt.opts',
|
Ioinits = {'opts': '.salt.opts',
|
||||||
'presence_req': '.salt.presence.event_req',
|
'presence_req': '.salt.presence.event_req',
|
||||||
'lane_stack': '.salt.lane.manor.stack',
|
'lane_stack': '.salt.lane.manor.stack',
|
||||||
'aliveds': {'ipath': '.salt.var.presence.aliveds',
|
'alloweds': '.salt.var.presence.alloweds', # odict
|
||||||
'ival': odict()},
|
'aliveds': '.salt.var.presence.aliveds', # odict
|
||||||
'availables': {'ipath': '.salt.var.presence.availables',
|
'reapeds': '.salt.var.presence.reapeds', # odict
|
||||||
'ival': set()}, }
|
'availables': '.salt.var.presence.availables', # set
|
||||||
|
}
|
||||||
|
|
||||||
def _send_presence(self, msg):
|
def _send_presence(self, msg):
|
||||||
'''
|
'''
|
||||||
@ -1120,15 +1131,39 @@ class SaltRaetPresenter(ioflo.base.deeding.Deed):
|
|||||||
if y_name not in self.lane_stack.value.nameRemotes: # subscriber not a remote
|
if y_name not in self.lane_stack.value.nameRemotes: # subscriber not a remote
|
||||||
pass # drop msg don't answer
|
pass # drop msg don't answer
|
||||||
else:
|
else:
|
||||||
|
if 'data' in msg and 'state' in msg['data']:
|
||||||
|
state = msg['data']['state']
|
||||||
|
else:
|
||||||
|
state = None
|
||||||
|
|
||||||
# create answer message
|
# create answer message
|
||||||
present = odict()
|
if state in [None, 'available', 'present']:
|
||||||
for name in self.availables.value:
|
present = odict()
|
||||||
minion = self.aliveds.value[name]
|
for name in self.availables.value:
|
||||||
present[name] = minion.ha[0] if minion else None
|
minion = self.aliveds.value[name]
|
||||||
data = {'present': present}
|
present[name] = minion.ha[0] if minion else None
|
||||||
|
data = {'present': present}
|
||||||
|
else:
|
||||||
|
# TODO: update to really return joineds
|
||||||
|
states = {'joined': self.alloweds,
|
||||||
|
'allowed': self.alloweds,
|
||||||
|
'alived': self.aliveds,
|
||||||
|
'reaped': self.reapeds}
|
||||||
|
try:
|
||||||
|
minions = states[state].value
|
||||||
|
except KeyError:
|
||||||
|
# error: wrong/unknown state requested
|
||||||
|
log.error('Lane Router Received invalid message: {0}'.format(msg))
|
||||||
|
return
|
||||||
|
|
||||||
|
result = odict()
|
||||||
|
for name in minions:
|
||||||
|
result[name] = minions[name].ha[0]
|
||||||
|
data = {state: result}
|
||||||
|
|
||||||
tag = tagify('present', 'presence')
|
tag = tagify('present', 'presence')
|
||||||
route = {'dst': (None, None, 'event_fire'),
|
route = {'dst': (None, None, 'event_fire'),
|
||||||
'src': (None, self.lane_stack.value.local.name, None)}
|
'src': (None, self.lane_stack.value.local.name, None)}
|
||||||
msg = {'route': route, 'tag': tag, 'data': data}
|
msg = {'route': route, 'tag': tag, 'data': data}
|
||||||
self.lane_stack.value.transmit(msg,
|
self.lane_stack.value.transmit(msg,
|
||||||
self.lane_stack.value.fetchUidByName(y_name))
|
self.lane_stack.value.fetchUidByName(y_name))
|
||||||
@ -1156,8 +1191,8 @@ class SaltRaetPublisher(ioflo.base.deeding.Deed):
|
|||||||
Ioinits = {'opts': '.salt.opts',
|
Ioinits = {'opts': '.salt.opts',
|
||||||
'publish': '.salt.var.publish',
|
'publish': '.salt.var.publish',
|
||||||
'stack': '.salt.road.manor.stack',
|
'stack': '.salt.road.manor.stack',
|
||||||
'availables': {'ipath': '.salt.var.presence.availables',
|
'availables': '.salt.var.presence.availables',
|
||||||
'ival': set()}, }
|
}
|
||||||
|
|
||||||
def _publish(self, pub_msg):
|
def _publish(self, pub_msg):
|
||||||
'''
|
'''
|
||||||
|
@ -18,6 +18,7 @@ except ImportError:
|
|||||||
# In case a non-master needs to import this module
|
# In case a non-master needs to import this module
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
import tempfile
|
||||||
|
|
||||||
# Import salt libs
|
# Import salt libs
|
||||||
import salt.crypt
|
import salt.crypt
|
||||||
@ -687,12 +688,15 @@ class RemoteFuncs(object):
|
|||||||
if not os.path.isdir(cdir):
|
if not os.path.isdir(cdir):
|
||||||
os.makedirs(cdir)
|
os.makedirs(cdir)
|
||||||
datap = os.path.join(cdir, 'data.p')
|
datap = os.path.join(cdir, 'data.p')
|
||||||
with salt.utils.fopen(datap, 'w+b') as fp_:
|
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
|
||||||
|
os.close(tmpfh)
|
||||||
|
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
|
||||||
fp_.write(
|
fp_.write(
|
||||||
self.serial.dumps(
|
self.serial.dumps(
|
||||||
{'grains': load['grains'],
|
{'grains': load['grains'],
|
||||||
'pillar': data})
|
'pillar': data})
|
||||||
)
|
)
|
||||||
|
os.rename(tmpfname, datap)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
def _minion_event(self, load):
|
def _minion_event(self, load):
|
||||||
|
@ -348,7 +348,7 @@ def _sunos_cpudata():
|
|||||||
|
|
||||||
grains['cpuarch'] = __salt__['cmd.run']('uname -p')
|
grains['cpuarch'] = __salt__['cmd.run']('uname -p')
|
||||||
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
|
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
|
||||||
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo).splitlines())
|
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo, python_shell=True).splitlines())
|
||||||
kstat_info = 'kstat -p cpu_info:0:*:brand'
|
kstat_info = 'kstat -p cpu_info:0:*:brand'
|
||||||
for line in __salt__['cmd.run'](kstat_info).splitlines():
|
for line in __salt__['cmd.run'](kstat_info).splitlines():
|
||||||
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
|
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
|
||||||
@ -394,7 +394,7 @@ def _memdata(osdata):
|
|||||||
grains['mem_total'] = int(mem) / 1024 / 1024
|
grains['mem_total'] = int(mem) / 1024 / 1024
|
||||||
elif osdata['kernel'] == 'SunOS':
|
elif osdata['kernel'] == 'SunOS':
|
||||||
prtconf = '/usr/sbin/prtconf 2>/dev/null'
|
prtconf = '/usr/sbin/prtconf 2>/dev/null'
|
||||||
for line in __salt__['cmd.run'](prtconf).splitlines():
|
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
|
||||||
comps = line.split(' ')
|
comps = line.split(' ')
|
||||||
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
|
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
|
||||||
grains['mem_total'] = int(comps[2].strip())
|
grains['mem_total'] = int(comps[2].strip())
|
||||||
@ -1691,7 +1691,7 @@ def _smartos_zone_data():
|
|||||||
grains['pkgsrcpath'] = 'Unknown'
|
grains['pkgsrcpath'] = 'Unknown'
|
||||||
|
|
||||||
grains['zonename'] = __salt__['cmd.run']('zonename')
|
grains['zonename'] = __salt__['cmd.run']('zonename')
|
||||||
grains['zoneid'] = __salt__['cmd.run']('zoneadm list -p').split()[1]
|
grains['zoneid'] = __salt__['cmd.run']('zoneadm list -p | awk -F: \'{ print $1 }\'', python_shell=True)
|
||||||
grains['hypervisor_uuid'] = __salt__['cmd.run']('mdata-get sdc:server_uuid')
|
grains['hypervisor_uuid'] = __salt__['cmd.run']('mdata-get sdc:server_uuid')
|
||||||
grains['datacenter'] = __salt__['cmd.run']('mdata-get sdc:datacenter_name')
|
grains['datacenter'] = __salt__['cmd.run']('mdata-get sdc:datacenter_name')
|
||||||
if "FAILURE" in grains['datacenter'] or "No metadata" in grains['datacenter']:
|
if "FAILURE" in grains['datacenter'] or "No metadata" in grains['datacenter']:
|
||||||
|
10
salt/key.py
10
salt/key.py
@ -752,7 +752,7 @@ class Key(object):
|
|||||||
pass
|
pass
|
||||||
self.check_minion_cache(preserve_minions=matches.get('minions', []))
|
self.check_minion_cache(preserve_minions=matches.get('minions', []))
|
||||||
if self.opts.get('rotate_aes_key'):
|
if self.opts.get('rotate_aes_key'):
|
||||||
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'], self.opts['sock_dir'])
|
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
|
||||||
return (
|
return (
|
||||||
self.name_match(match) if match is not None
|
self.name_match(match) if match is not None
|
||||||
else self.dict_match(matches)
|
else self.dict_match(matches)
|
||||||
@ -774,7 +774,7 @@ class Key(object):
|
|||||||
pass
|
pass
|
||||||
self.check_minion_cache()
|
self.check_minion_cache()
|
||||||
if self.opts.get('rotate_aes_key'):
|
if self.opts.get('rotate_aes_key'):
|
||||||
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'], self.opts['sock_dir'])
|
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
|
||||||
return self.list_keys()
|
return self.list_keys()
|
||||||
|
|
||||||
def reject(self, match=None, match_dict=None, include_accepted=False):
|
def reject(self, match=None, match_dict=None, include_accepted=False):
|
||||||
@ -812,7 +812,7 @@ class Key(object):
|
|||||||
pass
|
pass
|
||||||
self.check_minion_cache()
|
self.check_minion_cache()
|
||||||
if self.opts.get('rotate_aes_key'):
|
if self.opts.get('rotate_aes_key'):
|
||||||
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'], self.opts['sock_dir'])
|
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
|
||||||
return (
|
return (
|
||||||
self.name_match(match) if match is not None
|
self.name_match(match) if match is not None
|
||||||
else self.dict_match(matches)
|
else self.dict_match(matches)
|
||||||
@ -843,7 +843,7 @@ class Key(object):
|
|||||||
pass
|
pass
|
||||||
self.check_minion_cache()
|
self.check_minion_cache()
|
||||||
if self.opts.get('rotate_aes_key'):
|
if self.opts.get('rotate_aes_key'):
|
||||||
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'], self.opts['sock_dir'])
|
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
|
||||||
return self.list_keys()
|
return self.list_keys()
|
||||||
|
|
||||||
def finger(self, match):
|
def finger(self, match):
|
||||||
@ -901,7 +901,7 @@ class RaetKey(Key):
|
|||||||
rejected = os.path.join(self.opts['pki_dir'], self.REJ)
|
rejected = os.path.join(self.opts['pki_dir'], self.REJ)
|
||||||
return accepted, pre, rejected
|
return accepted, pre, rejected
|
||||||
|
|
||||||
def check_minion_cache(self, preserve_minion_cache=False):
|
def check_minion_cache(self, preserve_minions=False):
|
||||||
'''
|
'''
|
||||||
Check the minion cache to make sure that old minion data is cleared
|
Check the minion cache to make sure that old minion data is cleared
|
||||||
'''
|
'''
|
||||||
|
@ -991,25 +991,25 @@ class Loader(object):
|
|||||||
reload(submodule)
|
reload(submodule)
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
continue
|
continue
|
||||||
except ImportError:
|
except ImportError as error:
|
||||||
if failhard:
|
if failhard:
|
||||||
log.debug(
|
log.debug(
|
||||||
'Failed to import {0} {1}, this is most likely NOT a '
|
'Failed to import {0} {1}, this is most likely NOT a '
|
||||||
'problem:\n'.format(
|
'problem, but the exception was {2}\n'.format(
|
||||||
self.tag, name
|
self.tag, name, error
|
||||||
),
|
),
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
if not failhard:
|
if not failhard:
|
||||||
log.debug('Failed to import {0} {1}. Another attempt will be made to try to resolve dependencies.'.format(
|
log.debug('Failed to import {0} {1}. The exeception was {2}. Another attempt will be made to try to resolve dependencies.'.format(
|
||||||
self.tag, name))
|
self.tag, name, error))
|
||||||
failed_loads[name] = path
|
failed_loads[name] = path
|
||||||
continue
|
continue
|
||||||
except Exception:
|
except Exception as error:
|
||||||
log.warning(
|
log.warning(
|
||||||
'Failed to import {0} {1}, this is due most likely to a '
|
'Failed to import {0} {1}, this is due most likely to a '
|
||||||
'syntax error. Traceback raised:\n'.format(
|
'syntax error. The exception is {2}. Traceback raised:\n'.format(
|
||||||
self.tag, name
|
self.tag, name, error
|
||||||
),
|
),
|
||||||
exc_info=True
|
exc_info=True
|
||||||
)
|
)
|
||||||
|
391
salt/log/handlers/fluent_mod.py
Normal file
391
salt/log/handlers/fluent_mod.py
Normal file
@ -0,0 +1,391 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
'''
|
||||||
|
Fluent Logging Handler
|
||||||
|
========================
|
||||||
|
|
||||||
|
.. versionadded:: 2015.2
|
||||||
|
|
||||||
|
This module provides some `Fluent`_ logging handlers.
|
||||||
|
|
||||||
|
|
||||||
|
Fluent Logging Handler
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
In the salt configuration file:
|
||||||
|
|
||||||
|
.. code-block:: yaml
|
||||||
|
|
||||||
|
fluent_handler:
|
||||||
|
host: localhost
|
||||||
|
port: 24224
|
||||||
|
|
||||||
|
In the `fluent`_ configuration file:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
<source>
|
||||||
|
type forward
|
||||||
|
port 24224
|
||||||
|
</source>
|
||||||
|
|
||||||
|
Log Level
|
||||||
|
.........
|
||||||
|
|
||||||
|
The ``fluent_handler``
|
||||||
|
configuration section accepts an additional setting ``log_level``. If not
|
||||||
|
set, the logging level used will be the one defined for ``log_level`` in
|
||||||
|
the global configuration file section.
|
||||||
|
|
||||||
|
.. admonition:: Inspiration
|
||||||
|
|
||||||
|
This work was inspired in `fluent-logger-python`_
|
||||||
|
|
||||||
|
.. _`fluentd`: http://www.fluentd.org
|
||||||
|
.. _`fluent-logger-python`: https://github.com/fluent/fluent-logger-python
|
||||||
|
|
||||||
|
'''
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
# Import python libs
|
||||||
|
import logging
|
||||||
|
import logging.handlers
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
import socket
|
||||||
|
import threading
|
||||||
|
|
||||||
|
|
||||||
|
# Import salt libs
|
||||||
|
from salt._compat import string_types
|
||||||
|
from salt.log.setup import LOG_LEVELS
|
||||||
|
from salt.log.mixins import NewStyleClassMixIn
|
||||||
|
import salt.utils.network
|
||||||
|
|
||||||
|
# Import Third party libs
|
||||||
|
try:
|
||||||
|
import simplejson as json
|
||||||
|
except ImportError:
|
||||||
|
import json
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Attempt to import msgpack
|
||||||
|
import msgpack
|
||||||
|
# There is a serialization issue on ARM and potentially other platforms
|
||||||
|
# for some msgpack bindings, check for it
|
||||||
|
if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
|
||||||
|
raise ImportError
|
||||||
|
except ImportError:
|
||||||
|
# Fall back to msgpack_pure
|
||||||
|
try:
|
||||||
|
import msgpack_pure as msgpack
|
||||||
|
except ImportError:
|
||||||
|
# TODO: Come up with a sane way to get a configured logfile
|
||||||
|
# and write to the logfile when this error is hit also
|
||||||
|
LOG_FORMAT = '[%(levelname)-8s] %(message)s'
|
||||||
|
salt.log.setup_console_logger(log_format=LOG_FORMAT)
|
||||||
|
log.fatal('Unable to import msgpack or msgpack_pure python modules')
|
||||||
|
# Don't exit if msgpack is not available, this is to make local mode
|
||||||
|
# work without msgpack
|
||||||
|
#sys.exit(salt.exitcodes.EX_GENERIC)
|
||||||
|
|
||||||
|
# Define the module's virtual name
|
||||||
|
__virtualname__ = 'fluent'
|
||||||
|
|
||||||
|
_global_sender = None
|
||||||
|
|
||||||
|
|
||||||
|
def setup(tag, **kwargs):
|
||||||
|
host = kwargs.get('host', 'localhost')
|
||||||
|
port = kwargs.get('port', 24224)
|
||||||
|
|
||||||
|
global _global_sender
|
||||||
|
_global_sender = FluentSender(tag, host=host, port=port)
|
||||||
|
|
||||||
|
|
||||||
|
def get_global_sender():
|
||||||
|
return _global_sender
|
||||||
|
|
||||||
|
|
||||||
|
def __virtual__():
|
||||||
|
if not any(['fluent_handler' in __opts__]):
|
||||||
|
log.trace(
|
||||||
|
'The required configuration section, \'fluent_handler\', '
|
||||||
|
'was not found the in the configuration. Not loading the fluent '
|
||||||
|
'logging handlers module.'
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
return __virtualname__
|
||||||
|
|
||||||
|
|
||||||
|
def setup_handlers():
|
||||||
|
host = port = address = None
|
||||||
|
|
||||||
|
if 'fluent_handler' in __opts__:
|
||||||
|
host = __opts__['fluent_handler'].get('host', None)
|
||||||
|
port = __opts__['fluent_handler'].get('port', None)
|
||||||
|
version = __opts__['fluent_handler'].get('version', 1)
|
||||||
|
|
||||||
|
if host is None and port is None:
|
||||||
|
log.debug(
|
||||||
|
'The required \'fluent_handler\' configuration keys, '
|
||||||
|
'\'host\' and/or \'port\', are not properly configured. Not '
|
||||||
|
'configuring the fluent logging handler.'
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logstash_formatter = LogstashFormatter(version=version)
|
||||||
|
fluent_handler = FluentHandler('salt', host=host, port=port)
|
||||||
|
fluent_handler.setFormatter(logstash_formatter)
|
||||||
|
fluent_handler.setLevel(
|
||||||
|
LOG_LEVELS[
|
||||||
|
__opts__['fluent_handler'].get(
|
||||||
|
'log_level',
|
||||||
|
# Not set? Get the main salt log_level setting on the
|
||||||
|
# configuration file
|
||||||
|
__opts__.get(
|
||||||
|
'log_level',
|
||||||
|
# Also not set?! Default to 'error'
|
||||||
|
'error'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
yield fluent_handler
|
||||||
|
|
||||||
|
if host is None and port is None and address is None:
|
||||||
|
yield False
|
||||||
|
|
||||||
|
|
||||||
|
class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
|
||||||
|
def __init__(self, msg_type='logstash', msg_path='logstash', version=1):
|
||||||
|
self.msg_path = msg_path
|
||||||
|
self.msg_type = msg_type
|
||||||
|
self.version = version
|
||||||
|
self.format = getattr(self, 'format_v{0}'.format(version))
|
||||||
|
super(LogstashFormatter, self).__init__(fmt=None, datefmt=None)
|
||||||
|
|
||||||
|
def formatTime(self, record, datefmt=None):
|
||||||
|
return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'
|
||||||
|
|
||||||
|
def format_v0(self, record):
|
||||||
|
host = salt.utils.network.get_fqhostname()
|
||||||
|
message_dict = {
|
||||||
|
'@timestamp': self.formatTime(record),
|
||||||
|
'@fields': {
|
||||||
|
'levelname': record.levelname,
|
||||||
|
'logger': record.name,
|
||||||
|
'lineno': record.lineno,
|
||||||
|
'pathname': record.pathname,
|
||||||
|
'process': record.process,
|
||||||
|
'threadName': record.threadName,
|
||||||
|
'funcName': record.funcName,
|
||||||
|
'processName': record.processName
|
||||||
|
},
|
||||||
|
'@message': record.getMessage(),
|
||||||
|
'@source': '{0}://{1}/{2}'.format(
|
||||||
|
self.msg_type,
|
||||||
|
host,
|
||||||
|
self.msg_path
|
||||||
|
),
|
||||||
|
'@source_host': host,
|
||||||
|
'@source_path': self.msg_path,
|
||||||
|
'@tags': ['salt'],
|
||||||
|
'@type': self.msg_type,
|
||||||
|
}
|
||||||
|
|
||||||
|
if record.exc_info:
|
||||||
|
message_dict['@fields']['exc_info'] = self.formatException(
|
||||||
|
record.exc_info
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add any extra attributes to the message field
|
||||||
|
for key, value in record.__dict__.items():
|
||||||
|
if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
|
||||||
|
'filename', 'funcName', 'id', 'levelname', 'levelno',
|
||||||
|
'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
|
||||||
|
'name', 'pathname', 'process', 'processName',
|
||||||
|
'relativeCreated', 'thread', 'threadName'):
|
||||||
|
# These are already handled above or not handled at all
|
||||||
|
continue
|
||||||
|
|
||||||
|
if value is None:
|
||||||
|
message_dict['@fields'][key] = value
|
||||||
|
continue
|
||||||
|
|
||||||
|
if isinstance(value, (string_types, bool, dict, float, int, list)):
|
||||||
|
message_dict['@fields'][key] = value
|
||||||
|
continue
|
||||||
|
|
||||||
|
message_dict['@fields'][key] = repr(value)
|
||||||
|
return json.dumps(message_dict)
|
||||||
|
|
||||||
|
def format_v1(self, record):
|
||||||
|
message_dict = {
|
||||||
|
'@version': 1,
|
||||||
|
'@timestamp': self.formatTime(record),
|
||||||
|
'host': salt.utils.network.get_fqhostname(),
|
||||||
|
'levelname': record.levelname,
|
||||||
|
'logger': record.name,
|
||||||
|
'lineno': record.lineno,
|
||||||
|
'pathname': record.pathname,
|
||||||
|
'process': record.process,
|
||||||
|
'threadName': record.threadName,
|
||||||
|
'funcName': record.funcName,
|
||||||
|
'processName': record.processName,
|
||||||
|
'message': record.getMessage(),
|
||||||
|
'tags': ['salt'],
|
||||||
|
'type': self.msg_type
|
||||||
|
}
|
||||||
|
|
||||||
|
if record.exc_info:
|
||||||
|
message_dict['exc_info'] = self.formatException(
|
||||||
|
record.exc_info
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add any extra attributes to the message field
|
||||||
|
for key, value in record.__dict__.items():
|
||||||
|
if key in ('args', 'asctime', 'created', 'exc_info', 'exc_text',
|
||||||
|
'filename', 'funcName', 'id', 'levelname', 'levelno',
|
||||||
|
'lineno', 'module', 'msecs', 'msecs', 'message', 'msg',
|
||||||
|
'name', 'pathname', 'process', 'processName',
|
||||||
|
'relativeCreated', 'thread', 'threadName'):
|
||||||
|
# These are already handled above or not handled at all
|
||||||
|
continue
|
||||||
|
|
||||||
|
if value is None:
|
||||||
|
message_dict[key] = value
|
||||||
|
continue
|
||||||
|
|
||||||
|
if isinstance(value, (string_types, bool, dict, float, int, list)):
|
||||||
|
message_dict[key] = value
|
||||||
|
continue
|
||||||
|
|
||||||
|
message_dict[key] = repr(value)
|
||||||
|
return json.dumps(message_dict)
|
||||||
|
|
||||||
|
|
||||||
|
class FluentHandler(logging.Handler):
|
||||||
|
'''
|
||||||
|
Logging Handler for fluent.
|
||||||
|
'''
|
||||||
|
def __init__(self,
|
||||||
|
tag,
|
||||||
|
host='localhost',
|
||||||
|
port=24224,
|
||||||
|
timeout=3.0,
|
||||||
|
verbose=False):
|
||||||
|
|
||||||
|
self.tag = tag
|
||||||
|
self.sender = FluentSender(tag,
|
||||||
|
host=host, port=port,
|
||||||
|
timeout=timeout, verbose=verbose)
|
||||||
|
logging.Handler.__init__(self)
|
||||||
|
|
||||||
|
def emit(self, record):
|
||||||
|
data = self.format(record)
|
||||||
|
self.sender.emit(None, data)
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.acquire()
|
||||||
|
try:
|
||||||
|
self.sender._close()
|
||||||
|
logging.Handler.close(self)
|
||||||
|
finally:
|
||||||
|
self.release()
|
||||||
|
|
||||||
|
|
||||||
|
class FluentSender(object):
|
||||||
|
def __init__(self,
|
||||||
|
tag,
|
||||||
|
host='localhost',
|
||||||
|
port=24224,
|
||||||
|
bufmax=1 * 1024 * 1024,
|
||||||
|
timeout=3.0,
|
||||||
|
verbose=False):
|
||||||
|
|
||||||
|
self.tag = tag
|
||||||
|
self.host = host
|
||||||
|
self.port = port
|
||||||
|
self.bufmax = bufmax
|
||||||
|
self.timeout = timeout
|
||||||
|
self.verbose = verbose
|
||||||
|
|
||||||
|
self.socket = None
|
||||||
|
self.pendings = None
|
||||||
|
self.packer = msgpack.Packer()
|
||||||
|
self.lock = threading.Lock()
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._reconnect()
|
||||||
|
except Exception:
|
||||||
|
# will be retried in emit()
|
||||||
|
self._close()
|
||||||
|
|
||||||
|
def emit(self, label, data):
|
||||||
|
cur_time = int(time.time())
|
||||||
|
self.emit_with_time(label, cur_time, data)
|
||||||
|
|
||||||
|
def emit_with_time(self, label, timestamp, data):
|
||||||
|
bytes_ = self._make_packet(label, timestamp, data)
|
||||||
|
self._send(bytes_)
|
||||||
|
|
||||||
|
def _make_packet(self, label, timestamp, data):
|
||||||
|
if label:
|
||||||
|
tag = '.'.join((self.tag, label))
|
||||||
|
else:
|
||||||
|
tag = self.tag
|
||||||
|
packet = (tag, timestamp, data)
|
||||||
|
if self.verbose:
|
||||||
|
print(packet)
|
||||||
|
return self.packer.pack(packet)
|
||||||
|
|
||||||
|
def _send(self, bytes_):
|
||||||
|
self.lock.acquire()
|
||||||
|
try:
|
||||||
|
self._send_internal(bytes_)
|
||||||
|
finally:
|
||||||
|
self.lock.release()
|
||||||
|
|
||||||
|
def _send_internal(self, bytes_):
|
||||||
|
# buffering
|
||||||
|
if self.pendings:
|
||||||
|
self.pendings += bytes_
|
||||||
|
bytes_ = self.pendings
|
||||||
|
|
||||||
|
try:
|
||||||
|
# reconnect if possible
|
||||||
|
self._reconnect()
|
||||||
|
|
||||||
|
# send message
|
||||||
|
self.socket.sendall(bytes_)
|
||||||
|
|
||||||
|
# send finished
|
||||||
|
self.pendings = None
|
||||||
|
except Exception:
|
||||||
|
# close socket
|
||||||
|
self._close()
|
||||||
|
# clear buffer if it exceeds max bufer size
|
||||||
|
if self.pendings and (len(self.pendings) > self.bufmax):
|
||||||
|
# TODO: add callback handler here
|
||||||
|
self.pendings = None
|
||||||
|
else:
|
||||||
|
self.pendings = bytes_
|
||||||
|
|
||||||
|
def _reconnect(self):
|
||||||
|
if not self.socket:
|
||||||
|
if self.host.startswith('unix://'):
|
||||||
|
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||||
|
sock.settimeout(self.timeout)
|
||||||
|
sock.connect(self.host[len('unix://'):])
|
||||||
|
else:
|
||||||
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
sock.settimeout(self.timeout)
|
||||||
|
sock.connect((self.host, self.port))
|
||||||
|
self.socket = sock
|
||||||
|
|
||||||
|
def _close(self):
|
||||||
|
if self.socket:
|
||||||
|
self.socket.close()
|
||||||
|
self.socket = None
|
@ -24,6 +24,8 @@ import socket
|
|||||||
import logging
|
import logging
|
||||||
import logging.handlers
|
import logging.handlers
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
|
from salt.textformat import TextFormat
|
||||||
from salt.ext.six import PY3
|
from salt.ext.six import PY3
|
||||||
from salt.ext.six import string_types, text_type, with_metaclass
|
from salt.ext.six import string_types, text_type, with_metaclass
|
||||||
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=import-error,no-name-in-module
|
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=import-error,no-name-in-module
|
||||||
@ -38,6 +40,7 @@ QUIET = logging.QUIET = 1000
|
|||||||
from salt.log.handlers import TemporaryLoggingHandler, StreamHandler, SysLogHandler, WatchedFileHandler
|
from salt.log.handlers import TemporaryLoggingHandler, StreamHandler, SysLogHandler, WatchedFileHandler
|
||||||
from salt.log.mixins import LoggingMixInMeta, NewStyleClassMixIn
|
from salt.log.mixins import LoggingMixInMeta, NewStyleClassMixIn
|
||||||
|
|
||||||
|
|
||||||
LOG_LEVELS = {
|
LOG_LEVELS = {
|
||||||
'all': logging.NOTSET,
|
'all': logging.NOTSET,
|
||||||
'debug': logging.DEBUG,
|
'debug': logging.DEBUG,
|
||||||
@ -50,6 +53,35 @@ LOG_LEVELS = {
|
|||||||
'warning': logging.WARNING,
|
'warning': logging.WARNING,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
LOG_COLORS = {
|
||||||
|
'levels': {
|
||||||
|
'QUIET': TextFormat('reset'),
|
||||||
|
'CRITICAL': TextFormat('bold', 'red'),
|
||||||
|
'ERROR': TextFormat('bold', 'red'),
|
||||||
|
'WARNING': TextFormat('bold', 'yellow'),
|
||||||
|
'INFO': TextFormat('bold', 'green'),
|
||||||
|
'DEBUG': TextFormat('bold', 'cyan'),
|
||||||
|
'TRACE': TextFormat('bold', 'magenta'),
|
||||||
|
'GARBAGE': TextFormat('bold', 'blue'),
|
||||||
|
'NOTSET': TextFormat('reset'),
|
||||||
|
},
|
||||||
|
'msgs': {
|
||||||
|
'QUIET': TextFormat('reset'),
|
||||||
|
'CRITICAL': TextFormat('bold', 'red'),
|
||||||
|
'ERROR': TextFormat('red'),
|
||||||
|
'WARNING': TextFormat('yellow'),
|
||||||
|
'INFO': TextFormat('green'),
|
||||||
|
'DEBUG': TextFormat('cyan'),
|
||||||
|
'TRACE': TextFormat('magenta'),
|
||||||
|
'GARBAGE': TextFormat('blue'),
|
||||||
|
'NOTSET': TextFormat('reset'),
|
||||||
|
},
|
||||||
|
'name': TextFormat('bold', 'green'),
|
||||||
|
'process': TextFormat('bold', 'blue'),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
# Make a list of log level names sorted by log level
|
# Make a list of log level names sorted by log level
|
||||||
SORTED_LEVEL_NAMES = [
|
SORTED_LEVEL_NAMES = [
|
||||||
l[0] for l in sorted(LOG_LEVELS.items(), key=lambda x: x[1])
|
l[0] for l in sorted(LOG_LEVELS.items(), key=lambda x: x[1])
|
||||||
@ -96,6 +128,65 @@ LOGGING_TEMP_HANDLER = StreamHandler(sys.stderr)
|
|||||||
LOGGING_STORE_HANDLER = TemporaryLoggingHandler()
|
LOGGING_STORE_HANDLER = TemporaryLoggingHandler()
|
||||||
|
|
||||||
|
|
||||||
|
class SaltLogRecord(logging.LogRecord):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
logging.LogRecord.__init__(self, *args, **kwargs)
|
||||||
|
self.bracketname = '[{name:17}]'.format(name=self.name)
|
||||||
|
self.bracketlevel = '[{levelname:8}]'.format(levelname=self.levelname)
|
||||||
|
self.bracketprocess = '[{process}]'.format(process=self.process)
|
||||||
|
|
||||||
|
|
||||||
|
class SaltColorLogRecord(logging.LogRecord):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
logging.LogRecord.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
self.colorname = '{tf}[{name:17}]{end}'.format(
|
||||||
|
tf=LOG_COLORS['name'],
|
||||||
|
name=self.name,
|
||||||
|
end=TextFormat('reset')
|
||||||
|
)
|
||||||
|
self.colorlevel = '{tf}[{levelname:8}]{end}'.format(
|
||||||
|
tf=LOG_COLORS['levels'][self.levelname],
|
||||||
|
levelname=self.levelname,
|
||||||
|
end=TextFormat('reset')
|
||||||
|
)
|
||||||
|
self.colorprocess = '{tf}[{process}]{end}'.format(
|
||||||
|
tf=LOG_COLORS['process'],
|
||||||
|
process=self.process,
|
||||||
|
end=TextFormat('reset')
|
||||||
|
)
|
||||||
|
self.colormsg = u'{tf}{msg}{end}'.format(
|
||||||
|
tf=LOG_COLORS['msgs'][self.levelname],
|
||||||
|
msg=self.msg,
|
||||||
|
end=TextFormat('reset')
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
_log_record_factory = SaltLogRecord
|
||||||
|
|
||||||
|
|
||||||
|
def set_log_record_factory(factory):
|
||||||
|
'''
|
||||||
|
Set the factory to be used when instantiating a log record.
|
||||||
|
|
||||||
|
:param factory: A callable which will be called to instantiate
|
||||||
|
a log record.
|
||||||
|
'''
|
||||||
|
global _log_record_factory
|
||||||
|
_log_record_factory = factory
|
||||||
|
|
||||||
|
|
||||||
|
def get_log_record_factory():
|
||||||
|
'''
|
||||||
|
Return the factory to be used when instantiating a log record.
|
||||||
|
'''
|
||||||
|
|
||||||
|
return _log_record_factory
|
||||||
|
|
||||||
|
|
||||||
|
set_log_record_factory(SaltLogRecord)
|
||||||
|
|
||||||
|
|
||||||
class SaltLoggingClass(with_metaclass(LoggingMixInMeta, LOGGING_LOGGER_CLASS, NewStyleClassMixIn)): # pylint: disable=W0232
|
class SaltLoggingClass(with_metaclass(LoggingMixInMeta, LOGGING_LOGGER_CLASS, NewStyleClassMixIn)): # pylint: disable=W0232
|
||||||
def __new__(cls, *args): # pylint: disable=W0613, E1002
|
def __new__(cls, *args): # pylint: disable=W0613, E1002
|
||||||
'''
|
'''
|
||||||
@ -198,46 +289,35 @@ class SaltLoggingClass(with_metaclass(LoggingMixInMeta, LOGGING_LOGGER_CLASS, Ne
|
|||||||
|
|
||||||
# Let's try to make every logging message unicode
|
# Let's try to make every logging message unicode
|
||||||
if isinstance(msg, string_types) and not isinstance(msg, text_type):
|
if isinstance(msg, string_types) and not isinstance(msg, text_type):
|
||||||
if PY3:
|
try:
|
||||||
try:
|
_msg = msg.decode('utf-8', 'replace')
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
except UnicodeDecodeError:
|
||||||
self, name, level, fn, lno,
|
_msg = msg.decode('utf-8', 'ignore'),
|
||||||
msg.decode('utf-8', 'replace'),
|
|
||||||
args, exc_info, func, extra, sinfo
|
|
||||||
)
|
|
||||||
except UnicodeDecodeError:
|
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
|
||||||
self, name, level, fn, lno,
|
|
||||||
msg.decode('utf-8', 'ignore'),
|
|
||||||
args, exc_info, func, extra, sinfo
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
|
||||||
self, name, level, fn, lno,
|
|
||||||
msg.decode('utf-8', 'replace'),
|
|
||||||
args, exc_info, func, extra
|
|
||||||
)
|
|
||||||
except UnicodeDecodeError:
|
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
|
||||||
self, name, level, fn, lno,
|
|
||||||
msg.decode('utf-8', 'ignore'),
|
|
||||||
args, exc_info, func, extra
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
if PY3:
|
_msg = msg
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
|
||||||
self, name, level, fn, lno, msg, args, exc_info, func, extra, sinfo
|
if PY3:
|
||||||
)
|
logrecord = _log_record_factory(name, level, fn, lno, _msg, args,
|
||||||
else:
|
exc_info, func, sinfo)
|
||||||
logrecord = LOGGING_LOGGER_CLASS.makeRecord(
|
else:
|
||||||
self, name, level, fn, lno, msg, args, exc_info, func, extra
|
logrecord = _log_record_factory(name, level, fn, lno, _msg, args,
|
||||||
)
|
exc_info, func)
|
||||||
|
|
||||||
|
if extra is not None:
|
||||||
|
for key in extra:
|
||||||
|
if (key in ['message', 'asctime']) \
|
||||||
|
or (key in logrecord.__dict__):
|
||||||
|
raise KeyError(
|
||||||
|
'Attempt to overwrite {0!r} in LogRecord'.format(key)
|
||||||
|
)
|
||||||
|
logrecord.__dict__[key] = extra[key]
|
||||||
|
|
||||||
if exc_info_on_loglevel is not None:
|
if exc_info_on_loglevel is not None:
|
||||||
# Let's add some custom attributes to the LogRecord class in order to include the exc_info on a per
|
# Let's add some custom attributes to the LogRecord class in order
|
||||||
# handler basis. This will allow showing tracebacks on logfiles but not on console if the logfile
|
# to include the exc_info on a per handler basis. This will allow
|
||||||
# handler is enabled for the log level "exc_info_on_loglevel" and console handler is not.
|
# showing tracebacks on logfiles but not on console if the logfile
|
||||||
|
# handler is enabled for the log level "exc_info_on_loglevel" and
|
||||||
|
# console handler is not.
|
||||||
logrecord.exc_info_on_loglevel_instance = sys.exc_info()
|
logrecord.exc_info_on_loglevel_instance = sys.exc_info()
|
||||||
logrecord.exc_info_on_loglevel_formatted = None
|
logrecord.exc_info_on_loglevel_formatted = None
|
||||||
|
|
||||||
@ -351,6 +431,8 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
|
|||||||
|
|
||||||
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
|
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
|
||||||
|
|
||||||
|
set_log_record_factory(SaltColorLogRecord)
|
||||||
|
|
||||||
handler = None
|
handler = None
|
||||||
for handler in logging.root.handlers:
|
for handler in logging.root.handlers:
|
||||||
if handler is LOGGING_STORE_HANDLER:
|
if handler is LOGGING_STORE_HANDLER:
|
||||||
|
104
salt/master.py
104
salt/master.py
@ -7,6 +7,7 @@ involves preparing the three listeners and the workers needed by the master.
|
|||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
# Import python libs
|
# Import python libs
|
||||||
|
import ctypes
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
@ -17,6 +18,7 @@ import hashlib
|
|||||||
import resource
|
import resource
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
import sys
|
import sys
|
||||||
|
import tempfile
|
||||||
|
|
||||||
# Import third party libs
|
# Import third party libs
|
||||||
import zmq
|
import zmq
|
||||||
@ -77,6 +79,7 @@ class SMaster(object):
|
|||||||
:param dict opts: The salt options dictionary
|
:param dict opts: The salt options dictionary
|
||||||
'''
|
'''
|
||||||
self.opts = opts
|
self.opts = opts
|
||||||
|
self.opts['aes'] = multiprocessing.Array(ctypes.c_char, salt.crypt.Crypticle.generate_key_string())
|
||||||
self.master_key = salt.crypt.MasterKeys(self.opts)
|
self.master_key = salt.crypt.MasterKeys(self.opts)
|
||||||
self.key = self.__prep_key()
|
self.key = self.__prep_key()
|
||||||
self.crypticle = self.__prep_crypticle()
|
self.crypticle = self.__prep_crypticle()
|
||||||
@ -85,7 +88,7 @@ class SMaster(object):
|
|||||||
'''
|
'''
|
||||||
Return the crypticle used for AES
|
Return the crypticle used for AES
|
||||||
'''
|
'''
|
||||||
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
|
return salt.crypt.Crypticle(self.opts, self.opts['aes'].value)
|
||||||
|
|
||||||
def __prep_key(self):
|
def __prep_key(self):
|
||||||
'''
|
'''
|
||||||
@ -225,20 +228,36 @@ class Maintenance(multiprocessing.Process):
|
|||||||
|
|
||||||
def handle_key_rotate(self, now):
|
def handle_key_rotate(self, now):
|
||||||
'''
|
'''
|
||||||
Rotate the AES key on a schedule
|
Rotate the AES key rotation
|
||||||
'''
|
'''
|
||||||
|
to_rotate = False
|
||||||
|
dfn = os.path.join(self.opts['cachedir'], '.dfn')
|
||||||
|
try:
|
||||||
|
stats = os.stat(dfn)
|
||||||
|
if stats.st_mode == 0o100400:
|
||||||
|
to_rotate = True
|
||||||
|
else:
|
||||||
|
log.error('Found dropfile with incorrect permissions, ignoring...')
|
||||||
|
os.remove(dfn)
|
||||||
|
except os.error:
|
||||||
|
pass
|
||||||
|
|
||||||
if self.opts.get('publish_session'):
|
if self.opts.get('publish_session'):
|
||||||
if now - self.rotate >= self.opts['publish_session']:
|
if now - self.rotate >= self.opts['publish_session']:
|
||||||
salt.crypt.dropfile(
|
to_rotate = True
|
||||||
self.opts['cachedir'],
|
|
||||||
self.opts['user'],
|
if to_rotate:
|
||||||
self.opts['sock_dir'])
|
log.info('Rotating master AES key')
|
||||||
self.rotate = now
|
# should be unecessary-- since no one else should be modifying
|
||||||
if self.opts.get('ping_on_rotate'):
|
with self.opts['aes'].get_lock():
|
||||||
# Ping all minions to get them to pick up the new key
|
self.opts['aes'].value = salt.crypt.Crypticle.generate_key_string()
|
||||||
log.debug('Pinging all connected minions '
|
self.event.fire_event({'rotate_aes_key': True}, tag='key')
|
||||||
'due to AES key rotation')
|
self.rotate = now
|
||||||
salt.utils.master.ping_all_connected_minions(self.opts)
|
if self.opts.get('ping_on_rotate'):
|
||||||
|
# Ping all minions to get them to pick up the new key
|
||||||
|
log.debug('Pinging all connected minions '
|
||||||
|
'due to AES key rotation')
|
||||||
|
salt.utils.master.ping_all_connected_minions(self.opts)
|
||||||
|
|
||||||
def handle_pillargit(self):
|
def handle_pillargit(self):
|
||||||
'''
|
'''
|
||||||
@ -794,7 +813,9 @@ class MWorker(multiprocessing.Process):
|
|||||||
try:
|
try:
|
||||||
data = self.crypticle.loads(load)
|
data = self.crypticle.loads(load)
|
||||||
except Exception:
|
except Exception:
|
||||||
return ''
|
# return something not encrypted so the minions know that they aren't
|
||||||
|
# encrypting correctly.
|
||||||
|
return 'bad load'
|
||||||
if 'cmd' not in data:
|
if 'cmd' not in data:
|
||||||
log.error('Received malformed command {0}'.format(data))
|
log.error('Received malformed command {0}'.format(data))
|
||||||
return {}
|
return {}
|
||||||
@ -808,27 +829,10 @@ class MWorker(multiprocessing.Process):
|
|||||||
Check to see if a fresh AES key is available and update the components
|
Check to see if a fresh AES key is available and update the components
|
||||||
of the worker
|
of the worker
|
||||||
'''
|
'''
|
||||||
dfn = os.path.join(self.opts['cachedir'], '.dfn')
|
if self.opts['aes'].value != self.crypticle.key_string:
|
||||||
try:
|
self.crypticle = salt.crypt.Crypticle(self.opts, self.opts['aes'].value)
|
||||||
stats = os.stat(dfn)
|
|
||||||
except os.error:
|
|
||||||
return
|
|
||||||
if stats.st_mode != 0o100400:
|
|
||||||
# Invalid dfn, return
|
|
||||||
return
|
|
||||||
if stats.st_mtime > self.k_mtime:
|
|
||||||
# new key, refresh crypticle
|
|
||||||
with salt.utils.fopen(dfn) as fp_:
|
|
||||||
aes = fp_.read()
|
|
||||||
if len(aes) != 76:
|
|
||||||
return
|
|
||||||
log.debug('New master AES key found by pid {0}'.format(os.getpid()))
|
|
||||||
self.crypticle = salt.crypt.Crypticle(self.opts, aes)
|
|
||||||
self.clear_funcs.crypticle = self.crypticle
|
self.clear_funcs.crypticle = self.crypticle
|
||||||
self.clear_funcs.opts['aes'] = aes
|
|
||||||
self.aes_funcs.crypticle = self.crypticle
|
self.aes_funcs.crypticle = self.crypticle
|
||||||
self.aes_funcs.opts['aes'] = aes
|
|
||||||
self.k_mtime = stats.st_mtime
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
'''
|
'''
|
||||||
@ -1220,12 +1224,15 @@ class AESFuncs(object):
|
|||||||
if not os.path.isdir(cdir):
|
if not os.path.isdir(cdir):
|
||||||
os.makedirs(cdir)
|
os.makedirs(cdir)
|
||||||
datap = os.path.join(cdir, 'data.p')
|
datap = os.path.join(cdir, 'data.p')
|
||||||
with salt.utils.fopen(datap, 'w+b') as fp_:
|
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
|
||||||
|
os.close(tmpfh)
|
||||||
|
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
|
||||||
fp_.write(
|
fp_.write(
|
||||||
self.serial.dumps(
|
self.serial.dumps(
|
||||||
{'grains': load['grains'],
|
{'grains': load['grains'],
|
||||||
'pillar': data})
|
'pillar': data})
|
||||||
)
|
)
|
||||||
|
os.rename(tmpfname, datap)
|
||||||
for mod in mods:
|
for mod in mods:
|
||||||
sys.modules[mod].__grains__ = self.opts['grains']
|
sys.modules[mod].__grains__ = self.opts['grains']
|
||||||
return data
|
return data
|
||||||
@ -1788,11 +1795,20 @@ class ClearFuncs(object):
|
|||||||
|
|
||||||
log.info('Authentication accepted from {id}'.format(**load))
|
log.info('Authentication accepted from {id}'.format(**load))
|
||||||
# only write to disk if you are adding the file, and in open mode,
|
# only write to disk if you are adding the file, and in open mode,
|
||||||
# which implies we accept any key from a minion (key needs to be
|
# which implies we accept any key from a minion.
|
||||||
# written every time because what's on disk is used for encrypting)
|
if not os.path.isfile(pubfn) and not self.opts['open_mode']:
|
||||||
if not os.path.isfile(pubfn) or self.opts['open_mode']:
|
|
||||||
with salt.utils.fopen(pubfn, 'w+') as fp_:
|
with salt.utils.fopen(pubfn, 'w+') as fp_:
|
||||||
fp_.write(load['pub'])
|
fp_.write(load['pub'])
|
||||||
|
elif self.opts['open_mode']:
|
||||||
|
disk_key = ''
|
||||||
|
if os.path.isfile(pubfn):
|
||||||
|
with salt.utils.fopen(pubfn, 'r') as fp_:
|
||||||
|
disk_key = fp_.read()
|
||||||
|
if load['pub'] and load['pub'] != disk_key:
|
||||||
|
log.debug('Host key change detected in open mode.')
|
||||||
|
with salt.utils.fopen(pubfn, 'w+') as fp_:
|
||||||
|
fp_.write(load['pub'])
|
||||||
|
|
||||||
pub = None
|
pub = None
|
||||||
|
|
||||||
# the con_cache is enabled, send the minion id to the cache
|
# the con_cache is enabled, send the minion id to the cache
|
||||||
@ -1832,13 +1848,13 @@ class ClearFuncs(object):
|
|||||||
if 'token' in load:
|
if 'token' in load:
|
||||||
try:
|
try:
|
||||||
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
|
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
|
||||||
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
|
aes = '{0}_|-{1}'.format(self.opts['aes'].value, mtoken)
|
||||||
except Exception:
|
except Exception:
|
||||||
# Token failed to decrypt, send back the salty bacon to
|
# Token failed to decrypt, send back the salty bacon to
|
||||||
# support older minions
|
# support older minions
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
aes = self.opts['aes']
|
aes = self.opts['aes'].value
|
||||||
|
|
||||||
ret['aes'] = pub.public_encrypt(aes, 4)
|
ret['aes'] = pub.public_encrypt(aes, 4)
|
||||||
else:
|
else:
|
||||||
@ -1853,8 +1869,8 @@ class ClearFuncs(object):
|
|||||||
# support older minions
|
# support older minions
|
||||||
pass
|
pass
|
||||||
|
|
||||||
aes = self.opts['aes']
|
aes = self.opts['aes'].value
|
||||||
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
|
ret['aes'] = pub.public_encrypt(self.opts['aes'].value, 4)
|
||||||
# Be aggressive about the signature
|
# Be aggressive about the signature
|
||||||
digest = hashlib.sha256(aes).hexdigest()
|
digest = hashlib.sha256(aes).hexdigest()
|
||||||
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
|
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
|
||||||
@ -2222,11 +2238,17 @@ class ClearFuncs(object):
|
|||||||
# If a group_auth_match is set it means only that we have a user which matches at least one or more
|
# If a group_auth_match is set it means only that we have a user which matches at least one or more
|
||||||
# of the groups defined in the configuration file.
|
# of the groups defined in the configuration file.
|
||||||
|
|
||||||
|
external_auth_in_db = False
|
||||||
|
for d in self.opts['external_auth'][extra['eauth']]:
|
||||||
|
if d.startswith('^'):
|
||||||
|
external_auth_in_db = True
|
||||||
|
|
||||||
# If neither a catchall, a named membership or a group membership is found, there is no need
|
# If neither a catchall, a named membership or a group membership is found, there is no need
|
||||||
# to continue. Simply deny the user access.
|
# to continue. Simply deny the user access.
|
||||||
if not ((name in self.opts['external_auth'][extra['eauth']]) |
|
if not ((name in self.opts['external_auth'][extra['eauth']]) |
|
||||||
('*' in self.opts['external_auth'][extra['eauth']]) |
|
('*' in self.opts['external_auth'][extra['eauth']]) |
|
||||||
group_auth_match):
|
group_auth_match | external_auth_in_db):
|
||||||
|
|
||||||
# A group def is defined and the user is a member
|
# A group def is defined and the user is a member
|
||||||
#[group for groups in ['external_auth'][extra['eauth']]]):
|
#[group for groups in ['external_auth'][extra['eauth']]]):
|
||||||
# Auth successful, but no matching user found in config
|
# Auth successful, but no matching user found in config
|
||||||
|
114
salt/minion.py
114
salt/minion.py
@ -281,11 +281,10 @@ class SMinion(object):
|
|||||||
self.opts,
|
self.opts,
|
||||||
self.opts['grains'],
|
self.opts['grains'],
|
||||||
self.opts['id'],
|
self.opts['id'],
|
||||||
self.opts['environment'],
|
self.opts['environment']
|
||||||
).compile_pillar()
|
).compile_pillar()
|
||||||
self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
|
self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
|
||||||
self.function_errors = self.functions['_errors']
|
self.function_errors = self.functions.pop('_errors') # Keep the funcs clean
|
||||||
self.functions.pop('_errors') # Keep the funcs clean
|
|
||||||
self.returners = salt.loader.returners(self.opts, self.functions)
|
self.returners = salt.loader.returners(self.opts, self.functions)
|
||||||
self.states = salt.loader.states(self.opts, self.functions)
|
self.states = salt.loader.states(self.opts, self.functions)
|
||||||
self.rend = salt.loader.render(self.opts, self.functions)
|
self.rend = salt.loader.render(self.opts, self.functions)
|
||||||
@ -592,6 +591,7 @@ class Minion(MinionBase):
|
|||||||
Pass in the options dict
|
Pass in the options dict
|
||||||
'''
|
'''
|
||||||
self._running = None
|
self._running = None
|
||||||
|
self.win_proc = []
|
||||||
|
|
||||||
# Warn if ZMQ < 3.2
|
# Warn if ZMQ < 3.2
|
||||||
if HAS_ZMQ:
|
if HAS_ZMQ:
|
||||||
@ -619,14 +619,13 @@ class Minion(MinionBase):
|
|||||||
timeout,
|
timeout,
|
||||||
safe)
|
safe)
|
||||||
|
|
||||||
self.functions, self.returners, self.function_errors = self._load_modules()
|
|
||||||
self.opts['pillar'] = salt.pillar.get_pillar(
|
self.opts['pillar'] = salt.pillar.get_pillar(
|
||||||
opts,
|
opts,
|
||||||
opts['grains'],
|
opts['grains'],
|
||||||
opts['id'],
|
opts['id'],
|
||||||
opts['environment'],
|
opts['environment']
|
||||||
funcs=self.functions
|
|
||||||
).compile_pillar()
|
).compile_pillar()
|
||||||
|
self.functions, self.returners, self.function_errors = self._load_modules()
|
||||||
self.serial = salt.payload.Serial(self.opts)
|
self.serial = salt.payload.Serial(self.opts)
|
||||||
self.mod_opts = self._prep_mod_opts()
|
self.mod_opts = self._prep_mod_opts()
|
||||||
self.matcher = Matcher(self.opts, self.functions)
|
self.matcher = Matcher(self.opts, self.functions)
|
||||||
@ -658,7 +657,7 @@ class Minion(MinionBase):
|
|||||||
'seconds': opts['master_alive_interval'],
|
'seconds': opts['master_alive_interval'],
|
||||||
'jid_include': True,
|
'jid_include': True,
|
||||||
'maxrunning': 1,
|
'maxrunning': 1,
|
||||||
'kwargs': {'master_ip': self.opts['master'],
|
'kwargs': {'master': self.opts['master'],
|
||||||
'connected': True}
|
'connected': True}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -678,7 +677,7 @@ class Minion(MinionBase):
|
|||||||
if pid > 0:
|
if pid > 0:
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
proxyminion = salt.ProxyMinion()
|
proxyminion = ProxyMinion(self.opts)
|
||||||
proxyminion.start(self.opts['pillar']['proxy'][p])
|
proxyminion.start(self.opts['pillar']['proxy'][p])
|
||||||
self.clean_die(signal.SIGTERM, None)
|
self.clean_die(signal.SIGTERM, None)
|
||||||
else:
|
else:
|
||||||
@ -908,19 +907,7 @@ class Minion(MinionBase):
|
|||||||
try:
|
try:
|
||||||
data = self.crypticle.loads(load)
|
data = self.crypticle.loads(load)
|
||||||
except AuthenticationError:
|
except AuthenticationError:
|
||||||
# decryption of the payload failed, try to re-auth but wait
|
# decryption of the payload failed, try to re-auth
|
||||||
# random seconds if set in config with random_reauth_delay
|
|
||||||
if 'random_reauth_delay' in self.opts:
|
|
||||||
reauth_delay = randint(0, float(self.opts['random_reauth_delay']))
|
|
||||||
# This mitigates the issue wherein a long-running job might not return
|
|
||||||
# on a master key rotation. However, new commands issued during the re-auth
|
|
||||||
# splay period will still fail to return.
|
|
||||||
if not salt.utils.minion.running(self.opts):
|
|
||||||
log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
|
|
||||||
time.sleep(reauth_delay)
|
|
||||||
else:
|
|
||||||
log.warning('Ignoring re-auth delay because jobs are running')
|
|
||||||
|
|
||||||
self.authenticate()
|
self.authenticate()
|
||||||
data = self.crypticle.loads(load)
|
data = self.crypticle.loads(load)
|
||||||
|
|
||||||
@ -1012,12 +999,15 @@ class Minion(MinionBase):
|
|||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
process = threading.Thread(
|
process = threading.Thread(
|
||||||
target=target, args=(instance, self.opts, data),
|
target=target,
|
||||||
|
args=(instance, self.opts, data),
|
||||||
name=data['jid']
|
name=data['jid']
|
||||||
)
|
)
|
||||||
process.start()
|
process.start()
|
||||||
if not sys.platform.startswith('win'):
|
if not sys.platform.startswith('win'):
|
||||||
process.join()
|
process.join()
|
||||||
|
else:
|
||||||
|
self.win_proc.append(process)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _thread_return(cls, minion_instance, opts, data):
|
def _thread_return(cls, minion_instance, opts, data):
|
||||||
@ -1412,39 +1402,21 @@ class Minion(MinionBase):
|
|||||||
self.opts['master_ip']
|
self.opts['master_ip']
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
auth = salt.crypt.Auth(self.opts)
|
auth = salt.crypt.SAuth(self.opts)
|
||||||
|
auth.authenticate()
|
||||||
|
# TODO: remove these and just use a local reference to auth??
|
||||||
self.tok = auth.gen_token('salt')
|
self.tok = auth.gen_token('salt')
|
||||||
acceptance_wait_time = self.opts['acceptance_wait_time']
|
self.crypticle = auth.crypticle
|
||||||
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
|
|
||||||
if not acceptance_wait_time_max:
|
|
||||||
acceptance_wait_time_max = acceptance_wait_time
|
|
||||||
|
|
||||||
while True:
|
|
||||||
creds = auth.sign_in(timeout, safe)
|
|
||||||
if creds == 'full':
|
|
||||||
return creds
|
|
||||||
elif creds != 'retry':
|
|
||||||
log.info('Authentication with master at {0} successful!'.format(self.opts['master_ip']))
|
|
||||||
break
|
|
||||||
log.info('Waiting for minion key to be accepted by the master.')
|
|
||||||
if acceptance_wait_time:
|
|
||||||
log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
|
|
||||||
time.sleep(acceptance_wait_time)
|
|
||||||
if acceptance_wait_time < acceptance_wait_time_max:
|
|
||||||
acceptance_wait_time += acceptance_wait_time
|
|
||||||
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
|
|
||||||
|
|
||||||
self.aes = creds['aes']
|
|
||||||
if self.opts.get('syndic_master_publish_port'):
|
if self.opts.get('syndic_master_publish_port'):
|
||||||
self.publish_port = self.opts.get('syndic_master_publish_port')
|
self.publish_port = self.opts.get('syndic_master_publish_port')
|
||||||
else:
|
else:
|
||||||
self.publish_port = creds['publish_port']
|
self.publish_port = auth.creds['publish_port']
|
||||||
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
|
|
||||||
|
|
||||||
def module_refresh(self, force_refresh=False):
|
def module_refresh(self, force_refresh=False):
|
||||||
'''
|
'''
|
||||||
Refresh the functions and returners.
|
Refresh the functions and returners.
|
||||||
'''
|
'''
|
||||||
|
log.debug('Refreshing modules')
|
||||||
self.functions, self.returners, _ = self._load_modules(force_refresh)
|
self.functions, self.returners, _ = self._load_modules(force_refresh)
|
||||||
self.schedule.functions = self.functions
|
self.schedule.functions = self.functions
|
||||||
self.schedule.returners = self.returners
|
self.schedule.returners = self.returners
|
||||||
@ -1453,6 +1425,7 @@ class Minion(MinionBase):
|
|||||||
'''
|
'''
|
||||||
Refresh the pillar
|
Refresh the pillar
|
||||||
'''
|
'''
|
||||||
|
log.debug('Refreshing pillar')
|
||||||
self.opts['pillar'] = salt.pillar.get_pillar(
|
self.opts['pillar'] = salt.pillar.get_pillar(
|
||||||
self.opts,
|
self.opts,
|
||||||
self.opts['grains'],
|
self.opts['grains'],
|
||||||
@ -1596,7 +1569,7 @@ class Minion(MinionBase):
|
|||||||
'seconds': self.opts['master_alive_interval'],
|
'seconds': self.opts['master_alive_interval'],
|
||||||
'jid_include': True,
|
'jid_include': True,
|
||||||
'maxrunning': 2,
|
'maxrunning': 2,
|
||||||
'kwargs': {'master_ip': self.opts['master'],
|
'kwargs': {'master': self.opts['master'],
|
||||||
'connected': False}
|
'connected': False}
|
||||||
}
|
}
|
||||||
self.schedule.modify_job(name='__master_alive',
|
self.schedule.modify_job(name='__master_alive',
|
||||||
@ -1634,7 +1607,7 @@ class Minion(MinionBase):
|
|||||||
'seconds': self.opts['master_alive_interval'],
|
'seconds': self.opts['master_alive_interval'],
|
||||||
'jid_include': True,
|
'jid_include': True,
|
||||||
'maxrunning': 2,
|
'maxrunning': 2,
|
||||||
'kwargs': {'master_ip': self.opts['master'],
|
'kwargs': {'master': self.opts['master'],
|
||||||
'connected': True}
|
'connected': True}
|
||||||
}
|
}
|
||||||
self.schedule.modify_job(name='__master_alive',
|
self.schedule.modify_job(name='__master_alive',
|
||||||
@ -1652,7 +1625,7 @@ class Minion(MinionBase):
|
|||||||
'seconds': self.opts['master_alive_interval'],
|
'seconds': self.opts['master_alive_interval'],
|
||||||
'jid_include': True,
|
'jid_include': True,
|
||||||
'maxrunning': 2,
|
'maxrunning': 2,
|
||||||
'kwargs': {'master_ip': self.opts['master'],
|
'kwargs': {'master': self.opts['master'],
|
||||||
'connected': True}
|
'connected': True}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1663,6 +1636,21 @@ class Minion(MinionBase):
|
|||||||
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
|
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
|
||||||
self._fire_master(data, tag)
|
self._fire_master(data, tag)
|
||||||
|
|
||||||
|
def _windows_thread_cleanup(self):
|
||||||
|
'''
|
||||||
|
Cleanup Windows threads
|
||||||
|
'''
|
||||||
|
if not salt.utils.is_windows():
|
||||||
|
return
|
||||||
|
for thread in self.win_proc:
|
||||||
|
if not thread.is_alive():
|
||||||
|
thread.join()
|
||||||
|
try:
|
||||||
|
self.win_proc.remove(thread)
|
||||||
|
del thread
|
||||||
|
except (ValueError, NameError):
|
||||||
|
pass
|
||||||
|
|
||||||
# Main Minion Tune In
|
# Main Minion Tune In
|
||||||
def tune_in(self):
|
def tune_in(self):
|
||||||
'''
|
'''
|
||||||
@ -1728,6 +1716,7 @@ class Minion(MinionBase):
|
|||||||
|
|
||||||
while self._running is True:
|
while self._running is True:
|
||||||
loop_interval = self.process_schedule(self, loop_interval)
|
loop_interval = self.process_schedule(self, loop_interval)
|
||||||
|
self._windows_thread_cleanup()
|
||||||
try:
|
try:
|
||||||
socks = self._do_poll(loop_interval)
|
socks = self._do_poll(loop_interval)
|
||||||
|
|
||||||
@ -1940,7 +1929,6 @@ class Syndic(Minion):
|
|||||||
data['ret'],
|
data['ret'],
|
||||||
data['jid'],
|
data['jid'],
|
||||||
data['to'],
|
data['to'],
|
||||||
user=data.get('user', ''),
|
|
||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
def _setsockopts(self):
|
def _setsockopts(self):
|
||||||
@ -2115,7 +2103,10 @@ class Syndic(Minion):
|
|||||||
self.event_forward_timeout = (
|
self.event_forward_timeout = (
|
||||||
time.time() + self.opts['syndic_event_forward_timeout']
|
time.time() + self.opts['syndic_event_forward_timeout']
|
||||||
)
|
)
|
||||||
if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
|
tag_parts = event['tag'].split('/')
|
||||||
|
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
|
||||||
|
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
|
||||||
|
'return' in event['data']:
|
||||||
if 'jid' not in event['data']:
|
if 'jid' not in event['data']:
|
||||||
# Not a job return
|
# Not a job return
|
||||||
continue
|
continue
|
||||||
@ -2129,7 +2120,8 @@ class Syndic(Minion):
|
|||||||
self.mminion.returners[fstr](event['data']['jid'])
|
self.mminion.returners[fstr](event['data']['jid'])
|
||||||
)
|
)
|
||||||
if 'master_id' in event['data']:
|
if 'master_id' in event['data']:
|
||||||
jdict['master_id'] = event['data']['master_id']
|
# __'s to make sure it doesn't print out on the master cli
|
||||||
|
jdict['__master_id__'] = event['data']['master_id']
|
||||||
jdict[event['data']['id']] = event['data']['return']
|
jdict[event['data']['id']] = event['data']['return']
|
||||||
else:
|
else:
|
||||||
# Add generic event aggregation here
|
# Add generic event aggregation here
|
||||||
@ -2347,12 +2339,16 @@ class MultiSyndic(MinionBase):
|
|||||||
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
|
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
|
||||||
break
|
break
|
||||||
raise
|
raise
|
||||||
|
|
||||||
log.trace('Got event {0}'.format(event['tag']))
|
log.trace('Got event {0}'.format(event['tag']))
|
||||||
if self.event_forward_timeout is None:
|
if self.event_forward_timeout is None:
|
||||||
self.event_forward_timeout = (
|
self.event_forward_timeout = (
|
||||||
time.time() + self.opts['syndic_event_forward_timeout']
|
time.time() + self.opts['syndic_event_forward_timeout']
|
||||||
)
|
)
|
||||||
if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
|
tag_parts = event['tag'].split('/')
|
||||||
|
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
|
||||||
|
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
|
||||||
|
'return' in event['data']:
|
||||||
if 'jid' not in event['data']:
|
if 'jid' not in event['data']:
|
||||||
# Not a job return
|
# Not a job return
|
||||||
continue
|
continue
|
||||||
@ -2600,11 +2596,8 @@ class Matcher(object):
|
|||||||
# subexpression
|
# subexpression
|
||||||
if results or match in ['(', ')']:
|
if results or match in ['(', ')']:
|
||||||
if match == 'not':
|
if match == 'not':
|
||||||
if results[-1] == 'and':
|
match_suffix = results[-1]
|
||||||
pass
|
if not (match_suffix == 'and' or match_suffix == 'or'):
|
||||||
elif results[-1] == 'or':
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
results.append('and')
|
results.append('and')
|
||||||
results.append(match)
|
results.append(match)
|
||||||
else:
|
else:
|
||||||
@ -2673,14 +2666,13 @@ class ProxyMinion(Minion):
|
|||||||
opts.update(resolve_dns(opts))
|
opts.update(resolve_dns(opts))
|
||||||
self.opts = opts
|
self.opts = opts
|
||||||
self.authenticate(timeout, safe)
|
self.authenticate(timeout, safe)
|
||||||
self.functions, self.returners, self.function_errors = self._load_modules()
|
|
||||||
self.opts['pillar'] = salt.pillar.get_pillar(
|
self.opts['pillar'] = salt.pillar.get_pillar(
|
||||||
opts,
|
opts,
|
||||||
opts['grains'],
|
opts['grains'],
|
||||||
opts['id'],
|
opts['id'],
|
||||||
opts['environment'],
|
opts['environment']
|
||||||
funcs=self.functions
|
|
||||||
).compile_pillar()
|
).compile_pillar()
|
||||||
|
self.functions, self.returners, self.function_errors = self._load_modules()
|
||||||
self.serial = salt.payload.Serial(self.opts)
|
self.serial = salt.payload.Serial(self.opts)
|
||||||
self.mod_opts = self._prep_mod_opts()
|
self.mod_opts = self._prep_mod_opts()
|
||||||
self.matcher = Matcher(self.opts, self.functions)
|
self.matcher = Matcher(self.opts, self.functions)
|
||||||
|
@ -6,11 +6,11 @@ A module to wrap (non-Windows) archive calls
|
|||||||
'''
|
'''
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import os
|
import os
|
||||||
from salt.exceptions import CommandExecutionError
|
|
||||||
|
|
||||||
from salt.ext.six import string_types
|
|
||||||
|
|
||||||
# Import salt libs
|
# Import salt libs
|
||||||
|
from salt.exceptions import SaltInvocationError, CommandExecutionError
|
||||||
|
from salt.ext.six import string_types
|
||||||
from salt.utils import \
|
from salt.utils import \
|
||||||
which as _which, which_bin as _which_bin, is_windows as _is_windows
|
which as _which, which_bin as _which_bin, is_windows as _is_windows
|
||||||
import salt.utils.decorators as decorators
|
import salt.utils.decorators as decorators
|
||||||
@ -53,60 +53,65 @@ def tar(options, tarfile, sources=None, dest=None, cwd=None, template=None, runa
|
|||||||
command. Beginning with 0.17.0, ``sources`` must be a comma-separated
|
command. Beginning with 0.17.0, ``sources`` must be a comma-separated
|
||||||
list, and the ``cwd`` and ``template`` arguments are optional.
|
list, and the ``cwd`` and ``template`` arguments are optional.
|
||||||
|
|
||||||
Uses the tar command to pack, unpack, etc tar files
|
Uses the tar command to pack, unpack, etc. tar files
|
||||||
|
|
||||||
|
|
||||||
options:
|
options
|
||||||
Options to pass to the ``tar`` binary.
|
Options to pass to the tar command
|
||||||
|
|
||||||
tarfile:
|
tarfile
|
||||||
The tar filename to pack/unpack.
|
The filename of the tar archive to pack/unpack
|
||||||
|
|
||||||
sources:
|
sources
|
||||||
Comma delimited list of files to **pack** into the tarfile.
|
Comma delimited list of files to **pack** into the tarfile. Can also be
|
||||||
|
passed as a python list.
|
||||||
|
|
||||||
dest:
|
dest
|
||||||
The destination directory to **unpack** the tarfile to.
|
The destination directory into which to **unpack** the tarfile
|
||||||
|
|
||||||
cwd:
|
cwd : None
|
||||||
The directory in which the tar command should be executed.
|
The directory in which the tar command should be executed. If not
|
||||||
|
specified, will default to the home directory of the user under which
|
||||||
|
the salt minion process is running.
|
||||||
|
|
||||||
template:
|
template : None
|
||||||
Template engine name to render the command arguments before execution.
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
CLI Example:
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' archive.tar cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
|
||||||
|
|
||||||
|
CLI Examples:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
# Create a tarfile
|
||||||
salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
|
salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
|
||||||
|
# Unpack a tarfile
|
||||||
|
|
||||||
The template arg can be set to ``jinja`` or another supported template
|
|
||||||
engine to render the command arguments before execution. For example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.tar cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
|
|
||||||
|
|
||||||
|
|
||||||
To unpack a tarfile, for example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.tar xf foo.tar dest=/target/directory
|
salt '*' archive.tar xf foo.tar dest=/target/directory
|
||||||
|
|
||||||
'''
|
'''
|
||||||
|
if not options:
|
||||||
|
# Catch instances were people pass an empty string for the "options"
|
||||||
|
# argument. Someone would have to be really silly to do this, but we
|
||||||
|
# should at least let them know of their silliness.
|
||||||
|
raise SaltInvocationError('Tar options can not be empty')
|
||||||
|
|
||||||
if isinstance(sources, string_types):
|
if isinstance(sources, string_types):
|
||||||
sources = [s.strip() for s in sources.split(',')]
|
sources = [s.strip() for s in sources.split(',')]
|
||||||
|
|
||||||
|
cmd = ['tar']
|
||||||
if dest:
|
if dest:
|
||||||
options = 'C {0} -{1}'.format(dest, options)
|
cmd.extend(['-C', '{0}'.format(dest)])
|
||||||
|
|
||||||
cmd = 'tar -{0} {1}'.format(options, tarfile)
|
cmd.extend(['-{0}'.format(options), '{0}'.format(tarfile)])
|
||||||
if sources:
|
cmd.extend(sources)
|
||||||
cmd += ' {0}'.format(' '.join(sources))
|
|
||||||
|
|
||||||
return __salt__['cmd.run'](cmd, cwd=cwd, template=template, runas=runas).splitlines()
|
return __salt__['cmd.run'](cmd,
|
||||||
|
cwd=cwd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.which('gzip')
|
@decorators.which('gzip')
|
||||||
@ -114,24 +119,26 @@ def gzip(sourcefile, template=None, runas=None):
|
|||||||
'''
|
'''
|
||||||
Uses the gzip command to create gzip files
|
Uses the gzip command to create gzip files
|
||||||
|
|
||||||
CLI Example to create ``/tmp/sourcefile.txt.gz``:
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.gzip /tmp/sourcefile.txt
|
salt '*' archive.gzip template=jinja /tmp/{{grains.id}}.txt
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.gzip template=jinja /tmp/{{grains.id}}.txt
|
# Create /tmp/sourcefile.txt.gz
|
||||||
|
salt '*' archive.gzip /tmp/sourcefile.txt
|
||||||
'''
|
'''
|
||||||
cmd = 'gzip {0}'.format(sourcefile)
|
cmd = ['gzip', '{0}'.format(sourcefile)]
|
||||||
return __salt__['cmd.run'](cmd, template=template, runas=runas).splitlines()
|
return __salt__['cmd.run'](cmd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.which('gunzip')
|
@decorators.which('gunzip')
|
||||||
@ -139,51 +146,83 @@ def gunzip(gzipfile, template=None, runas=None):
|
|||||||
'''
|
'''
|
||||||
Uses the gunzip command to unpack gzip files
|
Uses the gunzip command to unpack gzip files
|
||||||
|
|
||||||
CLI Example to create ``/tmp/sourcefile.txt``:
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
|
salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz
|
# Create /tmp/sourcefile.txt
|
||||||
|
salt '*' archive.gunzip /tmp/sourcefile.txt.gz
|
||||||
'''
|
'''
|
||||||
cmd = 'gunzip {0}'.format(gzipfile)
|
cmd = ['gunzip', '{0}'.format(gzipfile)]
|
||||||
return __salt__['cmd.run'](cmd, template=template, runas=runas).splitlines()
|
return __salt__['cmd.run'](cmd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.which('zip')
|
@decorators.which('zip')
|
||||||
def cmd_zip_(zip_file, sources, template=None, runas=None):
|
def cmd_zip_(zip_file, sources, template=None,
|
||||||
|
cwd=None, recurse=False, runas=None):
|
||||||
'''
|
'''
|
||||||
Uses the zip command to create zip files
|
Uses the zip command to create zip files
|
||||||
|
|
||||||
|
zip_file
|
||||||
|
Path of zip file to be created
|
||||||
|
|
||||||
|
sources
|
||||||
|
Comma-separated list of sources to include in the zip file. Sources can
|
||||||
|
also be passed in a python list.
|
||||||
|
|
||||||
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' archive.zip template=jinja /tmp/zipfile.zip /tmp/sourcefile1,/tmp/{{grains.id}}.txt
|
||||||
|
|
||||||
|
cwd : None
|
||||||
|
Run the zip command from the specified directory. Use this argument
|
||||||
|
along with relative file paths to create zip files which do not
|
||||||
|
contain the leading directories. If not specified, this will default
|
||||||
|
to the home directory of the user under which the salt minion process
|
||||||
|
is running.
|
||||||
|
|
||||||
|
.. versionadded:: 2014.7.1
|
||||||
|
|
||||||
|
recurse : False
|
||||||
|
Recursively include contents of sources which are directories. Combine
|
||||||
|
this with the ``cwd`` argument and use relative paths for the sources
|
||||||
|
to create a zip file which does not contain the leading directories.
|
||||||
|
|
||||||
|
.. versionadded:: 2014.7.1
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
|
salt '*' archive.zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.zip template=jinja /tmp/zipfile.zip /tmp/sourcefile1,/tmp/{{grains.id}}.txt
|
|
||||||
|
|
||||||
'''
|
'''
|
||||||
if isinstance(sources, string_types):
|
if isinstance(sources, string_types):
|
||||||
sources = [s.strip() for s in sources.split(',')]
|
sources = [s.strip() for s in sources.split(',')]
|
||||||
cmd = 'zip {0} {1}'.format(zip_file, ' '.join(sources))
|
cmd = ['zip']
|
||||||
return __salt__['cmd.run'](cmd, template=template, runas=runas).splitlines()
|
if recurse:
|
||||||
|
cmd.append('-r')
|
||||||
|
cmd.append('{0}'.format(zip_file))
|
||||||
|
cmd.extend(sources)
|
||||||
|
return __salt__['cmd.run'](cmd,
|
||||||
|
cwd=cwd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.depends('zipfile', fallback_function=cmd_zip_)
|
@decorators.depends('zipfile', fallback_function=cmd_zip_)
|
||||||
@ -236,36 +275,50 @@ def cmd_unzip_(zip_file, dest, excludes=None, template=None, options=None, runas
|
|||||||
'''
|
'''
|
||||||
Uses the unzip command to unpack zip files
|
Uses the unzip command to unpack zip files
|
||||||
|
|
||||||
options:
|
zip_file
|
||||||
Options to pass to the ``unzip`` binary.
|
Path of zip file to be unpacked
|
||||||
|
|
||||||
|
dest
|
||||||
|
The destination directory into which the file should be unpacked
|
||||||
|
|
||||||
|
options : None
|
||||||
|
Options to pass to the ``unzip`` binary
|
||||||
|
|
||||||
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
|
salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2
|
|
||||||
|
|
||||||
'''
|
'''
|
||||||
if isinstance(excludes, string_types):
|
if isinstance(excludes, string_types):
|
||||||
excludes = [entry.strip() for entry in excludes.split(',')]
|
excludes = [entry.strip() for entry in excludes.split(',')]
|
||||||
|
|
||||||
|
cmd = ['unzip']
|
||||||
if options:
|
if options:
|
||||||
cmd = 'unzip -{0} {1} -d {2}'.format(options, zip_file, dest)
|
try:
|
||||||
else:
|
if not options.startswith('-'):
|
||||||
cmd = 'unzip {0} -d {1}'.format(zip_file, dest)
|
options = '-{0}'.format(options)
|
||||||
|
except AttributeError:
|
||||||
|
raise SaltInvocationError(
|
||||||
|
'Invalid option(s): {0}'.format(options)
|
||||||
|
)
|
||||||
|
cmd.append(options)
|
||||||
|
cmd.extend(['{0}'.format(zip_file), '-d', '{0}'.format(dest)])
|
||||||
|
|
||||||
if excludes is not None:
|
if excludes is not None:
|
||||||
cmd += ' -x {0}'.format(' '.join(excludes))
|
cmd.append('-x')
|
||||||
return __salt__['cmd.run'](cmd, template=template, runas=runas).splitlines()
|
cmd.extend(excludes)
|
||||||
|
return __salt__['cmd.run'](cmd,
|
||||||
|
template=template,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.depends('zipfile', fallback_function=cmd_unzip_)
|
@decorators.depends('zipfile', fallback_function=cmd_unzip_)
|
||||||
@ -309,39 +362,73 @@ def unzip(archive, dest, excludes=None, template=None, options=None, runas=None)
|
|||||||
|
|
||||||
|
|
||||||
@decorators.which('rar')
|
@decorators.which('rar')
|
||||||
def rar(rarfile, sources, template=None, runas=None):
|
def rar(rarfile, sources, template=None, cwd=None, runas=None):
|
||||||
'''
|
'''
|
||||||
Uses the rar command to create rar files
|
Uses `rar for Linux`_ to create rar files
|
||||||
Uses rar for Linux from http://www.rarlab.com/
|
|
||||||
|
.. _`rar for Linux`: http://www.rarlab.com/
|
||||||
|
|
||||||
|
rarfile
|
||||||
|
Path of rar file to be created
|
||||||
|
|
||||||
|
sources
|
||||||
|
Comma-separated list of sources to include in the rar file. Sources can
|
||||||
|
also be passed in a python list.
|
||||||
|
|
||||||
|
cwd : None
|
||||||
|
Run the rar command from the specified directory. Use this argument
|
||||||
|
along with relative file paths to create rar files which do not
|
||||||
|
contain the leading directories. If not specified, this will default
|
||||||
|
to the home directory of the user under which the salt minion process
|
||||||
|
is running.
|
||||||
|
|
||||||
|
.. versionadded:: 2014.7.1
|
||||||
|
|
||||||
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' archive.rar template=jinja /tmp/rarfile.rar '/tmp/sourcefile1,/tmp/{{grains.id}}.txt'
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
salt '*' archive.rar /tmp/rarfile.rar /tmp/sourcefile1,/tmp/sourcefile2
|
salt '*' archive.rar /tmp/rarfile.rar /tmp/sourcefile1,/tmp/sourcefile2
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.rar template=jinja /tmp/rarfile.rar /tmp/sourcefile1,/tmp/{{grains.id}}.txt
|
|
||||||
|
|
||||||
|
|
||||||
'''
|
'''
|
||||||
if isinstance(sources, string_types):
|
if isinstance(sources, string_types):
|
||||||
sources = [s.strip() for s in sources.split(',')]
|
sources = [s.strip() for s in sources.split(',')]
|
||||||
cmd = 'rar a -idp {0} {1}'.format(rarfile, ' '.join(sources))
|
cmd = ['rar', 'a', '-idp', '{0}'.format(rarfile)]
|
||||||
return __salt__['cmd.run'](cmd, template=template, runas=runas).splitlines()
|
cmd.extend(sources)
|
||||||
|
return __salt__['cmd.run'](cmd,
|
||||||
|
cwd=cwd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
@decorators.which_bin(('unrar', 'rar'))
|
@decorators.which_bin(('unrar', 'rar'))
|
||||||
def unrar(rarfile, dest, excludes=None, template=None, runas=None):
|
def unrar(rarfile, dest, excludes=None, template=None, runas=None):
|
||||||
'''
|
'''
|
||||||
Uses the unrar command to unpack rar files
|
Uses `rar for Linux`_ to unpack rar files
|
||||||
Uses rar for Linux from http://www.rarlab.com/
|
|
||||||
|
.. _`rar for Linux`: http://www.rarlab.com/
|
||||||
|
|
||||||
|
rarfile
|
||||||
|
Name of rar file to be unpacked
|
||||||
|
|
||||||
|
dest
|
||||||
|
The destination directory into which to **unpack** the rar file
|
||||||
|
|
||||||
|
template : None
|
||||||
|
Can be set to 'jinja' or another supported template engine to render
|
||||||
|
the command arguments before execution:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' archive.unrar template=jinja /tmp/rarfile.rar /tmp/{{grains.id}}/ excludes=file_1,file_2
|
||||||
|
|
||||||
CLI Example:
|
CLI Example:
|
||||||
|
|
||||||
@ -349,25 +436,19 @@ def unrar(rarfile, dest, excludes=None, template=None, runas=None):
|
|||||||
|
|
||||||
salt '*' archive.unrar /tmp/rarfile.rar /home/strongbad/ excludes=file_1,file_2
|
salt '*' archive.unrar /tmp/rarfile.rar /home/strongbad/ excludes=file_1,file_2
|
||||||
|
|
||||||
The template arg can be set to 'jinja' or another supported template
|
|
||||||
engine to render the command arguments before execution.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
salt '*' archive.unrar template=jinja /tmp/rarfile.rar /tmp/{{grains.id}}/ excludes=file_1,file_2
|
|
||||||
|
|
||||||
'''
|
'''
|
||||||
if isinstance(excludes, string_types):
|
if isinstance(excludes, string_types):
|
||||||
excludes = [entry.strip() for entry in excludes.split(',')]
|
excludes = [entry.strip() for entry in excludes.split(',')]
|
||||||
|
|
||||||
cmd = [_which_bin(('unrar', 'rar')), 'x', '-idp', rarfile]
|
cmd = [_which_bin(('unrar', 'rar')), 'x', '-idp', '{0}'.format(rarfile)]
|
||||||
if excludes is not None:
|
if excludes is not None:
|
||||||
for exclude in excludes:
|
for exclude in excludes:
|
||||||
cmd.extend(['-x', exclude])
|
cmd.extend(['-x', '{0}'.format(exclude)])
|
||||||
cmd.append(dest)
|
cmd.append('{0}'.format(dest))
|
||||||
return __salt__['cmd.run'](' '.join(cmd), template=template, runas=runas).splitlines()
|
return __salt__['cmd.run'](cmd,
|
||||||
|
template=template,
|
||||||
|
runas=runas,
|
||||||
|
python_shell=False).splitlines()
|
||||||
|
|
||||||
|
|
||||||
def _render_filenames(filenames, zip_file, saltenv, template):
|
def _render_filenames(filenames, zip_file, saltenv, template):
|
||||||
|
@ -61,7 +61,7 @@ def _run_aws(cmd, region, opts, user, **kwargs):
|
|||||||
region=_region(region),
|
region=_region(region),
|
||||||
out=_OUTPUT)
|
out=_OUTPUT)
|
||||||
|
|
||||||
rtn = __salt__['cmd.run'](cmd, runas=user)
|
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
|
||||||
|
|
||||||
return json.loads(rtn) if rtn else ''
|
return json.loads(rtn) if rtn else ''
|
||||||
|
|
||||||
|
@ -58,7 +58,7 @@ def tune(device, **kwargs):
|
|||||||
else:
|
else:
|
||||||
opts += '--{0} {1} '.format(switch, kwargs[key])
|
opts += '--{0} {1} '.format(switch, kwargs[key])
|
||||||
cmd = 'blockdev {0}{1}'.format(opts, device)
|
cmd = 'blockdev {0}{1}'.format(opts, device)
|
||||||
out = __salt__['cmd.run'](cmd).splitlines()
|
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||||
return dump(device, args)
|
return dump(device, args)
|
||||||
|
|
||||||
|
|
||||||
@ -75,7 +75,7 @@ def wipe(device):
|
|||||||
|
|
||||||
cmd = 'wipefs {0}'.format(device)
|
cmd = 'wipefs {0}'.format(device)
|
||||||
try:
|
try:
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
except subprocess.CalledProcessError as err:
|
except subprocess.CalledProcessError as err:
|
||||||
return False
|
return False
|
||||||
if out['retcode'] == 0:
|
if out['retcode'] == 0:
|
||||||
@ -94,7 +94,7 @@ def dump(device, args=None):
|
|||||||
cmd = 'blockdev --getro --getsz --getss --getpbsz --getiomin --getioopt --getalignoff --getmaxsect --getsize --getsize64 --getra --getfra {0}'.format(device)
|
cmd = 'blockdev --getro --getsz --getss --getpbsz --getiomin --getioopt --getalignoff --getmaxsect --getsize --getsize64 --getra --getfra {0}'.format(device)
|
||||||
ret = {}
|
ret = {}
|
||||||
opts = [c[2:] for c in cmd.split() if c.startswith('--')]
|
opts = [c[2:] for c in cmd.split() if c.startswith('--')]
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if out['retcode'] == 0:
|
if out['retcode'] == 0:
|
||||||
lines = [line for line in out['stdout'].splitlines() if line]
|
lines = [line for line in out['stdout'].splitlines() if line]
|
||||||
count = 0
|
count = 0
|
||||||
@ -124,7 +124,7 @@ def resize2fs(device):
|
|||||||
ret = {}
|
ret = {}
|
||||||
cmd = 'resize2fs {0}'.format(device)
|
cmd = 'resize2fs {0}'.format(device)
|
||||||
try:
|
try:
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
except subprocess.CalledProcessError as err:
|
except subprocess.CalledProcessError as err:
|
||||||
return False
|
return False
|
||||||
if out['retcode'] == 0:
|
if out['retcode'] == 0:
|
||||||
|
@ -416,6 +416,25 @@ def delete_role_policy(role_name, policy_name, region=None, key=None,
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_account_id(region=None, key=None, keyid=None, profile=None):
|
||||||
|
'''
|
||||||
|
Get a the AWS account id associated with the used credentials.
|
||||||
|
|
||||||
|
CLI example::
|
||||||
|
|
||||||
|
salt myminion boto_iam.get_account_id
|
||||||
|
'''
|
||||||
|
cache_key = 'boto_iam.account_id'
|
||||||
|
if cache_key not in __context__:
|
||||||
|
conn = _get_conn(region, key, keyid, profile)
|
||||||
|
ret = conn.get_user()
|
||||||
|
# the get_user call returns an user ARN:
|
||||||
|
# arn:aws:iam::027050522557:user/salt-test
|
||||||
|
arn = ret['get_user_response']['get_user_result']['user']['arn']
|
||||||
|
__context__[cache_key] = arn.split(':')[4]
|
||||||
|
return __context__[cache_key]
|
||||||
|
|
||||||
|
|
||||||
def _get_conn(region, key, keyid, profile):
|
def _get_conn(region, key, keyid, profile):
|
||||||
'''
|
'''
|
||||||
Get a boto connection to IAM.
|
Get a boto connection to IAM.
|
||||||
|
@ -36,7 +36,7 @@ def _list_taps():
|
|||||||
return _call_brew(cmd)['stdout'].splitlines()
|
return _call_brew(cmd)['stdout'].splitlines()
|
||||||
|
|
||||||
|
|
||||||
def _tap(tap):
|
def _tap(tap, runas=None):
|
||||||
'''
|
'''
|
||||||
Add unofficial Github repos to the list of formulas that brew tracks,
|
Add unofficial Github repos to the list of formulas that brew tracks,
|
||||||
updates, and installs from.
|
updates, and installs from.
|
||||||
@ -45,7 +45,7 @@ def _tap(tap):
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
cmd = 'brew tap {0}'.format(tap)
|
cmd = 'brew tap {0}'.format(tap)
|
||||||
if _call_brew(cmd)['retcode']:
|
if __salt__['cmd.retcode'](cmd, python_shell=False, runas=runas):
|
||||||
log.error('Failed to tap "{0}"'.format(tap))
|
log.error('Failed to tap "{0}"'.format(tap))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -66,7 +66,10 @@ def _call_brew(cmd):
|
|||||||
Calls the brew command with the user user account of brew
|
Calls the brew command with the user user account of brew
|
||||||
'''
|
'''
|
||||||
user = __salt__['file.get_user'](_homebrew_bin())
|
user = __salt__['file.get_user'](_homebrew_bin())
|
||||||
return __salt__['cmd.run_all'](cmd, runas=user, output_loglevel='trace')
|
return __salt__['cmd.run_all'](cmd,
|
||||||
|
runas=user,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def list_pkgs(versions_as_list=False, **kwargs):
|
def list_pkgs(versions_as_list=False, **kwargs):
|
||||||
|
@ -56,7 +56,7 @@ def _linux_brshow(br=None):
|
|||||||
|
|
||||||
brs = {}
|
brs = {}
|
||||||
|
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
# get rid of first line
|
# get rid of first line
|
||||||
if line.startswith('bridge name'):
|
if line.startswith('bridge name'):
|
||||||
continue
|
continue
|
||||||
@ -95,7 +95,8 @@ def _linux_bradd(br):
|
|||||||
Internal, creates the bridge
|
Internal, creates the bridge
|
||||||
'''
|
'''
|
||||||
brctl = _tool_path('brctl')
|
brctl = _tool_path('brctl')
|
||||||
return __salt__['cmd.run']('{0} addbr {1}'.format(brctl, br))
|
return __salt__['cmd.run']('{0} addbr {1}'.format(brctl, br),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _linux_brdel(br):
|
def _linux_brdel(br):
|
||||||
@ -103,7 +104,8 @@ def _linux_brdel(br):
|
|||||||
Internal, deletes the bridge
|
Internal, deletes the bridge
|
||||||
'''
|
'''
|
||||||
brctl = _tool_path('brctl')
|
brctl = _tool_path('brctl')
|
||||||
return __salt__['cmd.run']('{0} delbr {1}'.format(brctl, br))
|
return __salt__['cmd.run']('{0} delbr {1}'.format(brctl, br),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _linux_addif(br, iface):
|
def _linux_addif(br, iface):
|
||||||
@ -111,7 +113,8 @@ def _linux_addif(br, iface):
|
|||||||
Internal, adds an interface to a bridge
|
Internal, adds an interface to a bridge
|
||||||
'''
|
'''
|
||||||
brctl = _tool_path('brctl')
|
brctl = _tool_path('brctl')
|
||||||
return __salt__['cmd.run']('{0} addif {1} {2}'.format(brctl, br, iface))
|
return __salt__['cmd.run']('{0} addif {1} {2}'.format(brctl, br, iface),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _linux_delif(br, iface):
|
def _linux_delif(br, iface):
|
||||||
@ -119,7 +122,8 @@ def _linux_delif(br, iface):
|
|||||||
Internal, removes an interface from a bridge
|
Internal, removes an interface from a bridge
|
||||||
'''
|
'''
|
||||||
brctl = _tool_path('brctl')
|
brctl = _tool_path('brctl')
|
||||||
return __salt__['cmd.run']('{0} delif {1} {2}'.format(brctl, br, iface))
|
return __salt__['cmd.run']('{0} delif {1} {2}'.format(brctl, br, iface),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _linux_stp(br, state):
|
def _linux_stp(br, state):
|
||||||
@ -127,7 +131,8 @@ def _linux_stp(br, state):
|
|||||||
Internal, sets STP state
|
Internal, sets STP state
|
||||||
'''
|
'''
|
||||||
brctl = _tool_path('brctl')
|
brctl = _tool_path('brctl')
|
||||||
return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state))
|
return __salt__['cmd.run']('{0} stp {1} {2}'.format(brctl, br, state),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _bsd_brshow(br=None):
|
def _bsd_brshow(br=None):
|
||||||
@ -145,14 +150,14 @@ def _bsd_brshow(br=None):
|
|||||||
ifaces[br] = br
|
ifaces[br] = br
|
||||||
else:
|
else:
|
||||||
cmd = '{0} -g bridge'.format(ifconfig)
|
cmd = '{0} -g bridge'.format(ifconfig)
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
ifaces[line] = line
|
ifaces[line] = line
|
||||||
|
|
||||||
brs = {}
|
brs = {}
|
||||||
|
|
||||||
for iface in ifaces:
|
for iface in ifaces:
|
||||||
cmd = '{0} {1}'.format(ifconfig, iface)
|
cmd = '{0} {1}'.format(ifconfig, iface)
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
brs[iface] = {
|
brs[iface] = {
|
||||||
'interfaces': [],
|
'interfaces': [],
|
||||||
'stp': 'no'
|
'stp': 'no'
|
||||||
@ -182,7 +187,7 @@ def _netbsd_brshow(br=None):
|
|||||||
brs = {}
|
brs = {}
|
||||||
start_int = False
|
start_int = False
|
||||||
|
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
if line.startswith('bridge'):
|
if line.startswith('bridge'):
|
||||||
start_int = False
|
start_int = False
|
||||||
brname = line.split(':')[0] # on NetBSD, always ^bridge([0-9]+):
|
brname = line.split(':')[0] # on NetBSD, always ^bridge([0-9]+):
|
||||||
@ -218,13 +223,15 @@ def _bsd_bradd(br):
|
|||||||
if not br:
|
if not br:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if __salt__['cmd.retcode']('{0} {1} create up'.format(ifconfig, br)) != 0:
|
if __salt__['cmd.retcode']('{0} {1} create up'.format(ifconfig, br),
|
||||||
|
python_shell=False) != 0:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# NetBSD is two cmds
|
# NetBSD is two cmds
|
||||||
if kernel == 'NetBSD':
|
if kernel == 'NetBSD':
|
||||||
brconfig = _tool_path('brconfig')
|
brconfig = _tool_path('brconfig')
|
||||||
if __salt__['cmd.retcode']('{0} {1} up'.format(brconfig, br)) != 0:
|
if __salt__['cmd.retcode']('{0} {1} up'.format(brconfig, br),
|
||||||
|
python_shell=False) != 0:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return True
|
return True
|
||||||
@ -237,7 +244,8 @@ def _bsd_brdel(br):
|
|||||||
ifconfig = _tool_path('ifconfig')
|
ifconfig = _tool_path('ifconfig')
|
||||||
if not br:
|
if not br:
|
||||||
return False
|
return False
|
||||||
return __salt__['cmd.run']('{0} {1} destroy'.format(ifconfig, br))
|
return __salt__['cmd.run']('{0} {1} destroy'.format(ifconfig, br),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _bsd_addif(br, iface):
|
def _bsd_addif(br, iface):
|
||||||
@ -255,8 +263,8 @@ def _bsd_addif(br, iface):
|
|||||||
if not br or not iface:
|
if not br or not iface:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return __salt__['cmd.run']('{0} {1} {2} {3}'.
|
return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, brcmd, iface),
|
||||||
format(cmd, br, brcmd, iface))
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _bsd_delif(br, iface):
|
def _bsd_delif(br, iface):
|
||||||
@ -274,8 +282,8 @@ def _bsd_delif(br, iface):
|
|||||||
if not br or not iface:
|
if not br or not iface:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return __salt__['cmd.run']('{0} {1} {2} {3}'.
|
return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, brcmd, iface),
|
||||||
format(cmd, br, brcmd, iface))
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _bsd_stp(br, state, iface):
|
def _bsd_stp(br, state, iface):
|
||||||
@ -292,8 +300,8 @@ def _bsd_stp(br, state, iface):
|
|||||||
if not br or not iface:
|
if not br or not iface:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return __salt__['cmd.run']('{0} {1} {2} {3}'.
|
return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, state, iface),
|
||||||
format(cmd, br, state, iface))
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _os_dispatch(func, *args, **kwargs):
|
def _os_dispatch(func, *args, **kwargs):
|
||||||
|
@ -6,7 +6,6 @@ from __future__ import absolute_import
|
|||||||
|
|
||||||
# Import Python libs
|
# Import Python libs
|
||||||
import logging
|
import logging
|
||||||
import tempfile
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
# Import Salt libs
|
# Import Salt libs
|
||||||
@ -22,13 +21,30 @@ def __virtual__():
|
|||||||
'''
|
'''
|
||||||
if not salt.utils.which('chef-client'):
|
if not salt.utils.which('chef-client'):
|
||||||
return False
|
return False
|
||||||
if not salt.utils.which('script'):
|
|
||||||
return False
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _default_logfile(exe_name):
|
||||||
|
|
||||||
|
if salt.utils.is_windows():
|
||||||
|
logfile = salt.utils.path_join(
|
||||||
|
os.environ['TMP'],
|
||||||
|
'{0}.log'.format(exe_name)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logfile = salt.utils.path_join(
|
||||||
|
'/var/log',
|
||||||
|
'{0}.log'.format(exe_name)
|
||||||
|
)
|
||||||
|
|
||||||
|
return logfile
|
||||||
|
|
||||||
|
|
||||||
@decorators.which('chef-client')
|
@decorators.which('chef-client')
|
||||||
def client(whyrun=False, localmode=False, logfile='/var/log/chef-client.log', **kwargs):
|
def client(whyrun=False,
|
||||||
|
localmode=False,
|
||||||
|
logfile=_default_logfile('chef-client'),
|
||||||
|
**kwargs):
|
||||||
'''
|
'''
|
||||||
Execute a chef client run and return a dict with the stderr, stdout,
|
Execute a chef client run and return a dict with the stderr, stdout,
|
||||||
return code, and pid.
|
return code, and pid.
|
||||||
@ -96,7 +112,11 @@ def client(whyrun=False, localmode=False, logfile='/var/log/chef-client.log', **
|
|||||||
Enable whyrun mode when set to True
|
Enable whyrun mode when set to True
|
||||||
|
|
||||||
'''
|
'''
|
||||||
args = ['chef-client', '--no-color', '--once', '--logfile {0}'.format(logfile)]
|
args = ['chef-client',
|
||||||
|
'--no-color',
|
||||||
|
'--once',
|
||||||
|
'--logfile "{0}"'.format(logfile),
|
||||||
|
'--format doc']
|
||||||
|
|
||||||
if whyrun:
|
if whyrun:
|
||||||
args.append('--why-run')
|
args.append('--why-run')
|
||||||
@ -108,7 +128,9 @@ def client(whyrun=False, localmode=False, logfile='/var/log/chef-client.log', **
|
|||||||
|
|
||||||
|
|
||||||
@decorators.which('chef-solo')
|
@decorators.which('chef-solo')
|
||||||
def solo(whyrun=False, logfile='/var/log/chef-solo.log', **kwargs):
|
def solo(whyrun=False,
|
||||||
|
logfile=_default_logfile('chef-solo'),
|
||||||
|
**kwargs):
|
||||||
'''
|
'''
|
||||||
Execute a chef solo run and return a dict with the stderr, stdout,
|
Execute a chef solo run and return a dict with the stderr, stdout,
|
||||||
return code, and pid.
|
return code, and pid.
|
||||||
@ -157,6 +179,10 @@ def solo(whyrun=False, logfile='/var/log/chef-solo.log', **kwargs):
|
|||||||
whyrun
|
whyrun
|
||||||
Enable whyrun mode when set to True
|
Enable whyrun mode when set to True
|
||||||
'''
|
'''
|
||||||
|
args = ['chef-solo',
|
||||||
|
'--no-color',
|
||||||
|
'--logfile "{0}"'.format(logfile),
|
||||||
|
'--format doc']
|
||||||
|
|
||||||
args = ['chef-solo', '--no-color', '--logfile {0}'.format(logfile)]
|
args = ['chef-solo', '--no-color', '--logfile {0}'.format(logfile)]
|
||||||
|
|
||||||
@ -171,20 +197,10 @@ def _exec_cmd(*args, **kwargs):
|
|||||||
# Compile the command arguments
|
# Compile the command arguments
|
||||||
cmd_args = ' '.join(args)
|
cmd_args = ' '.join(args)
|
||||||
cmd_kwargs = ''.join([
|
cmd_kwargs = ''.join([
|
||||||
' --{0} {1}'.format(k, v) for k, v in kwargs.items() if not k.startswith('__')]
|
' --{0} {1}'.format(k, v)
|
||||||
|
for k, v in kwargs.items() if not k.startswith('__')]
|
||||||
)
|
)
|
||||||
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
|
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
|
||||||
log.debug('Chef command: {0}'.format(cmd_exec))
|
log.debug('Chef command: {0}'.format(cmd_exec))
|
||||||
|
|
||||||
# The only way to capture all the command output, including the
|
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
|
||||||
# summary line, is to use the script command to write out to a file
|
|
||||||
(filedesc, filename) = tempfile.mkstemp()
|
|
||||||
result = __salt__['cmd.run_all']('script -q -c "{0}" {1}'.format(cmd_exec, filename))
|
|
||||||
|
|
||||||
# Read the output from the script command, stripping the first line
|
|
||||||
with salt.utils.fopen(filename, 'r') as outfile:
|
|
||||||
stdout = outfile.readlines()
|
|
||||||
result['stdout'] = ''.join(stdout[1:])
|
|
||||||
os.remove(filename)
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
@ -81,7 +81,8 @@ def chocolatey_version():
|
|||||||
try:
|
try:
|
||||||
return __context__['chocolatey._version']
|
return __context__['chocolatey._version']
|
||||||
except KeyError:
|
except KeyError:
|
||||||
out = __salt__['cmd.run']('{0} help'.format(_find_chocolatey()))
|
cmd = [_find_chocolatey(), 'help']
|
||||||
|
out = __salt__['cmd.run'](cmd, python_shell=False)
|
||||||
for line in out.splitlines():
|
for line in out.splitlines():
|
||||||
if line.lower().startswith('version: '):
|
if line.lower().startswith('version: '):
|
||||||
try:
|
try:
|
||||||
@ -150,7 +151,8 @@ def bootstrap(force=False):
|
|||||||
url = ps_downloads[(__grains__['osrelease'], __grains__['cpuarch'])]
|
url = ps_downloads[(__grains__['osrelease'], __grains__['cpuarch'])]
|
||||||
dest = os.path.join(temp_dir, 'powershell.exe')
|
dest = os.path.join(temp_dir, 'powershell.exe')
|
||||||
__salt__['cp.get_url'](url, dest)
|
__salt__['cp.get_url'](url, dest)
|
||||||
result = __salt__['cmd.run_all'](dest + ' /quiet /norestart')
|
cmd = [dest, '/quiet', '/norestart']
|
||||||
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = ('Installing Windows PowerShell failed. Please run the '
|
err = ('Installing Windows PowerShell failed. Please run the '
|
||||||
'installer GUI on the host to get a more specific '
|
'installer GUI on the host to get a more specific '
|
||||||
@ -165,7 +167,8 @@ def bootstrap(force=False):
|
|||||||
# Run the .NET Framework 4 web installer
|
# Run the .NET Framework 4 web installer
|
||||||
dest = os.path.join(temp_dir, 'dotnet4.exe')
|
dest = os.path.join(temp_dir, 'dotnet4.exe')
|
||||||
__salt__['cp.get_url'](net4_url, dest)
|
__salt__['cp.get_url'](net4_url, dest)
|
||||||
result = __salt__['cmd.run_all'](dest + ' /q /norestart')
|
cmd = [dest, '/q', '/norestart']
|
||||||
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = ('Installing .NET v4.0 failed. Please run the installer GUI on '
|
err = ('Installing .NET v4.0 failed. Please run the installer GUI on '
|
||||||
'the host to get a more specific reason.')
|
'the host to get a more specific reason.')
|
||||||
@ -173,13 +176,14 @@ def bootstrap(force=False):
|
|||||||
raise CommandExecutionError(err)
|
raise CommandExecutionError(err)
|
||||||
|
|
||||||
# Run the Chocolatey bootstrap.
|
# Run the Chocolatey bootstrap.
|
||||||
result = __salt__['cmd.run_all'](
|
cmd = (
|
||||||
'{0} -NoProfile -ExecutionPolicy unrestricted '
|
'{0} -NoProfile -ExecutionPolicy unrestricted '
|
||||||
'-Command "iex ((new-object net.webclient).'
|
'-Command "iex ((new-object net.webclient).'
|
||||||
'DownloadString(\'https://chocolatey.org/install.ps1\'))" '
|
'DownloadString(\'https://chocolatey.org/install.ps1\'))" '
|
||||||
'&& SET PATH=%PATH%;%systemdrive%\\chocolatey\\bin'
|
'&& SET PATH=%PATH%;%systemdrive%\\chocolatey\\bin'
|
||||||
.format(ps_path)
|
.format(ps_path)
|
||||||
)
|
)
|
||||||
|
result = __salt__['cmd.run_all'](cmd, python_shell=True)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Bootstrapping Chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Bootstrapping Chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -214,15 +218,15 @@ def list_(filter, all_versions=False, pre_versions=False, source=None):
|
|||||||
salt '*' chocolatey.list <filter> all_versions=True
|
salt '*' chocolatey.list <filter> all_versions=True
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' list ' + filter
|
cmd = [choc_path, 'list', filter]
|
||||||
if salt.utils.is_true(all_versions):
|
if salt.utils.is_true(all_versions):
|
||||||
cmd += ' -AllVersions'
|
cmd.append('-AllVersions')
|
||||||
if salt.utils.is_true(pre_versions):
|
if salt.utils.is_true(pre_versions):
|
||||||
cmd += ' -Prerelease'
|
cmd.append('-Prerelease')
|
||||||
if source:
|
if source:
|
||||||
cmd += ' -Source ' + source
|
cmd.extend(['-Source', source])
|
||||||
|
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -239,7 +243,6 @@ def list_(filter, all_versions=False, pre_versions=False, source=None):
|
|||||||
ret[name] = []
|
ret[name] = []
|
||||||
ret[name].append(ver)
|
ret[name].append(ver)
|
||||||
|
|
||||||
#log.info(repr(ret))
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
|
||||||
@ -254,8 +257,8 @@ def list_webpi():
|
|||||||
salt '*' chocolatey.list_webpi
|
salt '*' chocolatey.list_webpi
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' list -Source webpi'
|
cmd = [choc_path, 'list', '-Source', 'webpi']
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -277,8 +280,8 @@ def list_windowsfeatures():
|
|||||||
salt '*' chocolatey.list_windowsfeatures
|
salt '*' chocolatey.list_windowsfeatures
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' list -Source windowsfeatures'
|
cmd = [choc_path, 'list', '-Source', 'windowsfeatures']
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -314,14 +317,14 @@ def install(name, version=None, source=None, force=False):
|
|||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
# chocolatey helpfully only supports a single package argument
|
# chocolatey helpfully only supports a single package argument
|
||||||
cmd = choc_path + ' install ' + name
|
cmd = [choc_path, 'install', name]
|
||||||
if version:
|
if version:
|
||||||
cmd += ' -Version ' + version
|
cmd.extend(['-Version', version])
|
||||||
if source:
|
if source:
|
||||||
cmd += ' -Source ' + source
|
cmd.extend(['-Source', source])
|
||||||
if salt.utils.is_true(force):
|
if salt.utils.is_true(force):
|
||||||
cmd += ' -Force'
|
cmd.append('-Force')
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -345,8 +348,8 @@ def install_cygwin(name):
|
|||||||
salt '*' chocolatey.install_cygwin <package name>
|
salt '*' chocolatey.install_cygwin <package name>
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' cygwin ' + name
|
cmd = [choc_path, 'cygwin', name]
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -375,10 +378,10 @@ def install_gem(name, version=None):
|
|||||||
salt '*' chocolatey.install_gem <package name> version=<package version>
|
salt '*' chocolatey.install_gem <package name> version=<package version>
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' gem ' + name
|
cmd = [choc_path, 'gem', name]
|
||||||
if version:
|
if version:
|
||||||
cmd += ' -Version ' + version
|
cmd.extend(['-Version', version])
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -422,12 +425,12 @@ def install_missing(name, version=None, source=None):
|
|||||||
return install(name, version=version)
|
return install(name, version=version)
|
||||||
|
|
||||||
# chocolatey helpfully only supports a single package argument
|
# chocolatey helpfully only supports a single package argument
|
||||||
cmd = choc_path + ' installmissing ' + name
|
cmd = [choc_path, 'installmissing', name]
|
||||||
if version:
|
if version:
|
||||||
cmd += ' -Version ' + version
|
cmd.extend(['-Version', version])
|
||||||
if source:
|
if source:
|
||||||
cmd += ' -Source ' + source
|
cmd.extend(['-Source', source])
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -456,10 +459,10 @@ def install_python(name, version=None):
|
|||||||
salt '*' chocolatey.install_python <package name> version=<package version>
|
salt '*' chocolatey.install_python <package name> version=<package version>
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' python ' + name
|
cmd = [choc_path, 'python', name]
|
||||||
if version:
|
if version:
|
||||||
cmd += ' -Version ' + version
|
cmd.extend(['-Version', version])
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -484,8 +487,8 @@ def install_windowsfeatures(name):
|
|||||||
salt '*' chocolatey.install_windowsfeatures <package name>
|
salt '*' chocolatey.install_windowsfeatures <package name>
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' windowsfeatures ' + name
|
cmd = [choc_path, 'windowsfeatures', name]
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -509,8 +512,8 @@ def install_webpi(name):
|
|||||||
salt '*' chocolatey.install_webpi <package name>
|
salt '*' chocolatey.install_webpi <package name>
|
||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' webpi ' + name
|
cmd = [choc_path, 'webpi', name]
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -540,10 +543,10 @@ def uninstall(name, version=None):
|
|||||||
'''
|
'''
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
# chocolatey helpfully only supports a single package argument
|
# chocolatey helpfully only supports a single package argument
|
||||||
cmd = choc_path + ' uninstall ' + name
|
cmd = [choc_path, 'uninstall', name]
|
||||||
if version:
|
if version:
|
||||||
cmd += ' -Version ' + version
|
cmd.extend(['-Version', version])
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -577,12 +580,12 @@ def update(name, source=None, pre_versions=False):
|
|||||||
'''
|
'''
|
||||||
# chocolatey helpfully only supports a single package argument
|
# chocolatey helpfully only supports a single package argument
|
||||||
choc_path = _find_chocolatey()
|
choc_path = _find_chocolatey()
|
||||||
cmd = choc_path + ' update ' + name
|
cmd = [choc_path, 'update', name]
|
||||||
if source:
|
if source:
|
||||||
cmd += ' -Source ' + source
|
cmd.extend(['-Source', source])
|
||||||
if salt.utils.is_true(pre_versions):
|
if salt.utils.is_true(pre_versions):
|
||||||
cmd += ' -PreRelease'
|
cmd.append('-PreRelease')
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
@ -624,15 +627,15 @@ def version(name, check_remote=False, source=None, pre_versions=False):
|
|||||||
log.error(err)
|
log.error(err)
|
||||||
raise CommandExecutionError(err)
|
raise CommandExecutionError(err)
|
||||||
|
|
||||||
cmd = choc_path + ' version ' + name
|
cmd = [choc_path, 'version', name]
|
||||||
if not salt.utils.is_true(check_remote):
|
if not salt.utils.is_true(check_remote):
|
||||||
cmd += ' -LocalOnly'
|
cmd.append('-LocalOnly')
|
||||||
if salt.utils.is_true(pre_versions):
|
if salt.utils.is_true(pre_versions):
|
||||||
cmd += ' -Prerelease'
|
cmd.append('-Prerelease')
|
||||||
if source:
|
if source:
|
||||||
cmd += ' -Source ' + source
|
cmd.extend(['-Source', source])
|
||||||
|
|
||||||
result = __salt__['cmd.run_all'](cmd)
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
err = 'Running chocolatey failed: {0}'.format(result['stderr'])
|
||||||
|
@ -17,6 +17,7 @@ import shutil
|
|||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
import shlex
|
||||||
from salt.utils import vt
|
from salt.utils import vt
|
||||||
|
|
||||||
# Import salt libs
|
# Import salt libs
|
||||||
@ -131,6 +132,9 @@ def _check_loglevel(level='info', quiet=False):
|
|||||||
)
|
)
|
||||||
return LOG_LEVELS['info']
|
return LOG_LEVELS['info']
|
||||||
|
|
||||||
|
if salt.utils.is_true(quiet) or str(level).lower() == 'quiet':
|
||||||
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
level = level.lower()
|
level = level.lower()
|
||||||
if level not in LOG_LEVELS:
|
if level not in LOG_LEVELS:
|
||||||
@ -138,8 +142,6 @@ def _check_loglevel(level='info', quiet=False):
|
|||||||
except AttributeError:
|
except AttributeError:
|
||||||
return _bad_level(level)
|
return _bad_level(level)
|
||||||
|
|
||||||
if salt.utils.is_true(quiet) or level == 'quiet':
|
|
||||||
return None
|
|
||||||
return LOG_LEVELS[level]
|
return LOG_LEVELS[level]
|
||||||
|
|
||||||
|
|
||||||
@ -308,7 +310,8 @@ def _run(cmd,
|
|||||||
env.setdefault('LC_ALL', 'C')
|
env.setdefault('LC_ALL', 'C')
|
||||||
else:
|
else:
|
||||||
# On Windows set the codepage to US English.
|
# On Windows set the codepage to US English.
|
||||||
cmd = 'chcp 437 > nul & ' + cmd
|
if python_shell:
|
||||||
|
cmd = 'chcp 437 > nul & ' + cmd
|
||||||
|
|
||||||
if clean_env:
|
if clean_env:
|
||||||
run_env = env
|
run_env = env
|
||||||
@ -359,6 +362,8 @@ def _run(cmd,
|
|||||||
.format(cwd)
|
.format(cwd)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if python_shell is not True and not isinstance(cmd, list):
|
||||||
|
cmd = shlex.split(cmd)
|
||||||
if not use_vt:
|
if not use_vt:
|
||||||
# This is where the magic happens
|
# This is where the magic happens
|
||||||
try:
|
try:
|
||||||
@ -395,7 +400,8 @@ def _run(cmd,
|
|||||||
to = ''
|
to = ''
|
||||||
if timeout:
|
if timeout:
|
||||||
to = ' (timeout: {0}s)'.format(timeout)
|
to = ' (timeout: {0}s)'.format(timeout)
|
||||||
log.debug('Running {0} in VT{1}'.format(cmd, to))
|
if _check_loglevel(output_loglevel, quiet) is not None:
|
||||||
|
log.debug('Running {0} in VT{1}'.format(cmd, to))
|
||||||
stdout, stderr = '', ''
|
stdout, stderr = '', ''
|
||||||
now = time.time()
|
now = time.time()
|
||||||
if timeout:
|
if timeout:
|
||||||
|
@ -147,7 +147,10 @@ def install(dir,
|
|||||||
if optimize is True:
|
if optimize is True:
|
||||||
cmd += ' --optimize-autoloader'
|
cmd += ' --optimize-autoloader'
|
||||||
|
|
||||||
result = __salt__['cmd.run_all'](cmd, runas=runas, env={'COMPOSER_HOME': composer_home})
|
result = __salt__['cmd.run_all'](cmd,
|
||||||
|
runas=runas,
|
||||||
|
env={'COMPOSER_HOME': composer_home},
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
raise CommandExecutionError(result['stderr'])
|
raise CommandExecutionError(result['stderr'])
|
||||||
|
@ -160,7 +160,8 @@ def write_cron_file(user, path):
|
|||||||
|
|
||||||
salt '*' cron.write_cron_file root /tmp/new_cron
|
salt '*' cron.write_cron_file root /tmp/new_cron
|
||||||
'''
|
'''
|
||||||
return __salt__['cmd.retcode'](_get_cron_cmdstr(user, path)) == 0
|
return __salt__['cmd.retcode'](_get_cron_cmdstr(user, path),
|
||||||
|
python_shell=False) == 0
|
||||||
|
|
||||||
|
|
||||||
def write_cron_file_verbose(user, path):
|
def write_cron_file_verbose(user, path):
|
||||||
@ -173,7 +174,8 @@ def write_cron_file_verbose(user, path):
|
|||||||
|
|
||||||
salt '*' cron.write_cron_file_verbose root /tmp/new_cron
|
salt '*' cron.write_cron_file_verbose root /tmp/new_cron
|
||||||
'''
|
'''
|
||||||
return __salt__['cmd.run_all'](_get_cron_cmdstr(user, path))
|
return __salt__['cmd.run_all'](_get_cron_cmdstr(user, path),
|
||||||
|
python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def _write_cron_lines(user, lines):
|
def _write_cron_lines(user, lines):
|
||||||
@ -184,8 +186,10 @@ def _write_cron_lines(user, lines):
|
|||||||
with salt.utils.fopen(path, 'w+') as fp_:
|
with salt.utils.fopen(path, 'w+') as fp_:
|
||||||
fp_.writelines(lines)
|
fp_.writelines(lines)
|
||||||
if __grains__.get('os_family') in ('Solaris', 'AIX') and user != "root":
|
if __grains__.get('os_family') in ('Solaris', 'AIX') and user != "root":
|
||||||
__salt__['cmd.run']('chown {0} {1}'.format(user, path))
|
__salt__['cmd.run']('chown {0} {1}'.format(user, path),
|
||||||
ret = __salt__['cmd.run_all'](_get_cron_cmdstr(user, path))
|
python_shell=False)
|
||||||
|
ret = __salt__['cmd.run_all'](_get_cron_cmdstr(user, path),
|
||||||
|
python_shell=False)
|
||||||
os.remove(path)
|
os.remove(path)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@ -214,7 +218,10 @@ def raw_cron(user):
|
|||||||
cmd = 'crontab -l {0}'.format(user)
|
cmd = 'crontab -l {0}'.format(user)
|
||||||
else:
|
else:
|
||||||
cmd = 'crontab -l -u {0}'.format(user)
|
cmd = 'crontab -l -u {0}'.format(user)
|
||||||
lines = __salt__['cmd.run_stdout'](cmd, runas=user, rstrip=False).splitlines()
|
lines = __salt__['cmd.run_stdout'](cmd,
|
||||||
|
runas=user,
|
||||||
|
rstrip=False,
|
||||||
|
python_shell=False).splitlines()
|
||||||
if len(lines) != 0 and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
|
if len(lines) != 0 and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
|
||||||
del lines[0:3]
|
del lines[0:3]
|
||||||
return '\n'.join(lines)
|
return '\n'.join(lines)
|
||||||
|
@ -60,7 +60,7 @@ def start(name):
|
|||||||
'''
|
'''
|
||||||
__salt__['file.remove']('{0}/down'.format(_service_path(name)))
|
__salt__['file.remove']('{0}/down'.format(_service_path(name)))
|
||||||
cmd = 'svc -u {0}'.format(_service_path(name))
|
cmd = 'svc -u {0}'.format(_service_path(name))
|
||||||
return not __salt__['cmd.retcode'](cmd)
|
return not __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
#-- states.service compatible args
|
#-- states.service compatible args
|
||||||
@ -76,7 +76,7 @@ def stop(name):
|
|||||||
'''
|
'''
|
||||||
__salt__['file.touch']('{0}/down'.format(_service_path(name)))
|
__salt__['file.touch']('{0}/down'.format(_service_path(name)))
|
||||||
cmd = 'svc -d {0}'.format(_service_path(name))
|
cmd = 'svc -d {0}'.format(_service_path(name))
|
||||||
return not __salt__['cmd.retcode'](cmd)
|
return not __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def term(name):
|
def term(name):
|
||||||
@ -90,7 +90,7 @@ def term(name):
|
|||||||
salt '*' daemontools.term <service name>
|
salt '*' daemontools.term <service name>
|
||||||
'''
|
'''
|
||||||
cmd = 'svc -t {0}'.format(_service_path(name))
|
cmd = 'svc -t {0}'.format(_service_path(name))
|
||||||
return not __salt__['cmd.retcode'](cmd)
|
return not __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
#-- states.service compatible
|
#-- states.service compatible
|
||||||
@ -150,7 +150,7 @@ def status(name, sig=None):
|
|||||||
salt '*' daemontools.status <service name>
|
salt '*' daemontools.status <service name>
|
||||||
'''
|
'''
|
||||||
cmd = 'svstat {0}'.format(_service_path(name))
|
cmd = 'svstat {0}'.format(_service_path(name))
|
||||||
out = __salt__['cmd.run_stdout'](cmd)
|
out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
|
||||||
try:
|
try:
|
||||||
pid = re.search(r'\(pid (\d+)\)', out).group(1)
|
pid = re.search(r'\(pid (\d+)\)', out).group(1)
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
|
84
salt/modules/darwin_pkgutil.py
Normal file
84
salt/modules/darwin_pkgutil.py
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
'''
|
||||||
|
Installer support for OS X.
|
||||||
|
|
||||||
|
Installer is the native .pkg/.mpkg package manager for OS X.
|
||||||
|
'''
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
from salt.ext.six.moves import urllib
|
||||||
|
|
||||||
|
|
||||||
|
PKGUTIL = "/usr/sbin/pkgutil"
|
||||||
|
|
||||||
|
|
||||||
|
def __virtual__():
|
||||||
|
if __grains__['os'] == 'MacOS':
|
||||||
|
return 'darwin_pkgutil'
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def list():
|
||||||
|
'''
|
||||||
|
List the installed packages.
|
||||||
|
|
||||||
|
CLI Example:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' darwin_pkgutil.list
|
||||||
|
'''
|
||||||
|
cmd = PKGUTIL + ' --pkgs'
|
||||||
|
return __salt__['cmd.run_stdout'](cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def is_installed(package_id):
|
||||||
|
'''
|
||||||
|
Returns whether a given package id is installed.
|
||||||
|
|
||||||
|
CLI Example:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' darwin_pkgutil.is_installed com.apple.pkg.gcc4.2Leo
|
||||||
|
'''
|
||||||
|
def has_package_id(lines):
|
||||||
|
for line in lines:
|
||||||
|
if line == package_id:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
cmd = PKGUTIL + ' --pkgs'
|
||||||
|
out = __salt__['cmd.run_stdout'](cmd)
|
||||||
|
return has_package_id(out.splitlines())
|
||||||
|
|
||||||
|
|
||||||
|
def _install_from_path(path):
|
||||||
|
if not os.path.exists(path):
|
||||||
|
msg = "Path {0!r} does not exist, cannot install".format(path)
|
||||||
|
raise ValueError(msg)
|
||||||
|
else:
|
||||||
|
cmd = 'installer -pkg "{0}" -target /'.format(path)
|
||||||
|
return __salt__['cmd.retcode'](cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def install(source, package_id=None):
|
||||||
|
'''
|
||||||
|
Install a .pkg from an URI or an absolute path.
|
||||||
|
|
||||||
|
CLI Example:
|
||||||
|
|
||||||
|
.. code-block:: bash
|
||||||
|
|
||||||
|
salt '*' darwin_pkgutil.install source=/vagrant/build_essentials.pkg \
|
||||||
|
package_id=com.apple.pkg.gcc4.2Leo
|
||||||
|
'''
|
||||||
|
if package_id is not None and is_installed(package_id):
|
||||||
|
return ''
|
||||||
|
|
||||||
|
uri = urllib.parse.urlparse(source)
|
||||||
|
if uri.scheme == "":
|
||||||
|
return _install_from_path(source)
|
||||||
|
else:
|
||||||
|
msg = "Unsupported scheme for source uri: {0!r}".format(uri.scheme)
|
||||||
|
raise ValueError(msg)
|
@ -48,7 +48,7 @@ def show(config_file=False):
|
|||||||
)
|
)
|
||||||
cmd = 'sysctl -a'
|
cmd = 'sysctl -a'
|
||||||
ret = {}
|
ret = {}
|
||||||
out = __salt__['cmd.run'](cmd, output_loglevel='trace')
|
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
|
||||||
comps = ['']
|
comps = ['']
|
||||||
for line in out.splitlines():
|
for line in out.splitlines():
|
||||||
# This might need to be converted to a regex, and more, as sysctl output
|
# This might need to be converted to a regex, and more, as sysctl output
|
||||||
@ -88,7 +88,7 @@ def get(name):
|
|||||||
salt '*' sysctl.get hw.physmem
|
salt '*' sysctl.get hw.physmem
|
||||||
'''
|
'''
|
||||||
cmd = 'sysctl -n {0}'.format(name)
|
cmd = 'sysctl -n {0}'.format(name)
|
||||||
out = __salt__['cmd.run'](cmd)
|
out = __salt__['cmd.run'](cmd, python_shell=False)
|
||||||
return out
|
return out
|
||||||
|
|
||||||
|
|
||||||
@ -110,7 +110,7 @@ def assign(name, value):
|
|||||||
'''
|
'''
|
||||||
ret = {}
|
ret = {}
|
||||||
cmd = 'sysctl -w {0}="{1}"'.format(name, value)
|
cmd = 'sysctl -w {0}="{1}"'.format(name, value)
|
||||||
data = __salt__['cmd.run_all'](cmd)
|
data = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
|
|
||||||
if data['retcode'] != 0:
|
if data['retcode'] != 0:
|
||||||
raise CommandExecutionError('sysctl failed: {0}'.format(
|
raise CommandExecutionError('sysctl failed: {0}'.format(
|
||||||
|
@ -104,7 +104,7 @@ def _set_file(path):
|
|||||||
'''
|
'''
|
||||||
cmd = 'debconf-set-selections {0}'.format(path)
|
cmd = 'debconf-set-selections {0}'.format(path)
|
||||||
|
|
||||||
__salt__['cmd.run_stdout'](cmd)
|
__salt__['cmd.run_stdout'](cmd, python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def set_(package, question, type, value, *extra):
|
def set_(package, question, type, value, *extra):
|
||||||
|
@ -1485,10 +1485,12 @@ def build_bond(iface, **settings):
|
|||||||
path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
|
path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
|
||||||
if deb_major == '5':
|
if deb_major == '5':
|
||||||
__salt__['cmd.run'](
|
__salt__['cmd.run'](
|
||||||
'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface)
|
'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
|
||||||
|
python_shell=False
|
||||||
)
|
)
|
||||||
__salt__['cmd.run'](
|
__salt__['cmd.run'](
|
||||||
'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface)
|
'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
|
||||||
|
python_shell=False
|
||||||
)
|
)
|
||||||
__salt__['file.append']('/etc/modprobe.conf', path)
|
__salt__['file.append']('/etc/modprobe.conf', path)
|
||||||
|
|
||||||
|
@ -95,7 +95,7 @@ def A(host, nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
dig.append('@{0}'.format(nameserver))
|
dig.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
cmd = __salt__['cmd.run_all'](' '.join(dig))
|
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
|
||||||
# In this case, 0 is not the same as False
|
# In this case, 0 is not the same as False
|
||||||
if cmd['retcode'] != 0:
|
if cmd['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
@ -127,7 +127,7 @@ def AAAA(host, nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
dig.append('@{0}'.format(nameserver))
|
dig.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
cmd = __salt__['cmd.run_all'](' '.join(dig))
|
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
|
||||||
# In this case, 0 is not the same as False
|
# In this case, 0 is not the same as False
|
||||||
if cmd['retcode'] != 0:
|
if cmd['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
@ -159,7 +159,7 @@ def NS(domain, resolve=True, nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
dig.append('@{0}'.format(nameserver))
|
dig.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
cmd = __salt__['cmd.run_all'](' '.join(dig))
|
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
|
||||||
# In this case, 0 is not the same as False
|
# In this case, 0 is not the same as False
|
||||||
if cmd['retcode'] != 0:
|
if cmd['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
@ -200,7 +200,7 @@ def SPF(domain, record='SPF', nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
cmd.append('@{0}'.format(nameserver))
|
cmd.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
result = __salt__['cmd.run_all'](' '.join(cmd))
|
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
# In this case, 0 is not the same as False
|
# In this case, 0 is not the same as False
|
||||||
if result['retcode'] != 0:
|
if result['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
@ -257,7 +257,7 @@ def MX(domain, resolve=False, nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
dig.append('@{0}'.format(nameserver))
|
dig.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
cmd = __salt__['cmd.run_all'](' '.join(dig))
|
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
|
||||||
# In this case, 0 is not the same as False
|
# In this case, 0 is not the same as False
|
||||||
if cmd['retcode'] != 0:
|
if cmd['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
@ -295,7 +295,7 @@ def TXT(host, nameserver=None):
|
|||||||
if nameserver is not None:
|
if nameserver is not None:
|
||||||
dig.append('@{0}'.format(nameserver))
|
dig.append('@{0}'.format(nameserver))
|
||||||
|
|
||||||
cmd = __salt__['cmd.run_all'](' '.join(dig))
|
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
|
||||||
|
|
||||||
if cmd['retcode'] != 0:
|
if cmd['retcode'] != 0:
|
||||||
log.warn(
|
log.warn(
|
||||||
|
@ -72,7 +72,7 @@ def usage(args=None):
|
|||||||
if flags:
|
if flags:
|
||||||
cmd += ' -{0}'.format(flags)
|
cmd += ' -{0}'.format(flags)
|
||||||
ret = {}
|
ret = {}
|
||||||
out = __salt__['cmd.run'](cmd).splitlines()
|
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||||
for line in out:
|
for line in out:
|
||||||
if not line:
|
if not line:
|
||||||
continue
|
continue
|
||||||
@ -123,7 +123,7 @@ def inodeusage(args=None):
|
|||||||
if flags:
|
if flags:
|
||||||
cmd += ' -{0}'.format(flags)
|
cmd += ' -{0}'.format(flags)
|
||||||
ret = {}
|
ret = {}
|
||||||
out = __salt__['cmd.run'](cmd).splitlines()
|
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||||
for line in out:
|
for line in out:
|
||||||
if line.startswith('Filesystem'):
|
if line.startswith('Filesystem'):
|
||||||
continue
|
continue
|
||||||
@ -172,7 +172,7 @@ def percent(args=None):
|
|||||||
else:
|
else:
|
||||||
cmd = 'df'
|
cmd = 'df'
|
||||||
ret = {}
|
ret = {}
|
||||||
out = __salt__['cmd.run'](cmd).splitlines()
|
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||||
for line in out:
|
for line in out:
|
||||||
if not line:
|
if not line:
|
||||||
continue
|
continue
|
||||||
@ -212,7 +212,7 @@ def blkid(device=None):
|
|||||||
args = " " + device
|
args = " " + device
|
||||||
|
|
||||||
ret = {}
|
ret = {}
|
||||||
for line in __salt__['cmd.run_stdout']('blkid' + args).split('\n'):
|
for line in __salt__['cmd.run_stdout']('blkid' + args, python_shell=False).split('\n'):
|
||||||
comps = line.split()
|
comps = line.split()
|
||||||
device = comps[0][:-1]
|
device = comps[0][:-1]
|
||||||
info = {}
|
info = {}
|
||||||
|
@ -65,7 +65,7 @@ def command(settings_module,
|
|||||||
for key, value in kwargs.items():
|
for key, value in kwargs.items():
|
||||||
if not key.startswith('__'):
|
if not key.startswith('__'):
|
||||||
cmd = '{0} --{1}={2}'.format(cmd, key, value)
|
cmd = '{0} --{1}={2}'.format(cmd, key, value)
|
||||||
return __salt__['cmd.run'](cmd, env=env)
|
return __salt__['cmd.run'](cmd, env=env, python_shell=False)
|
||||||
|
|
||||||
|
|
||||||
def syncdb(settings_module,
|
def syncdb(settings_module,
|
||||||
|
@ -68,7 +68,7 @@ def list_pkgs(*packages):
|
|||||||
'''
|
'''
|
||||||
pkgs = {}
|
pkgs = {}
|
||||||
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if out['retcode'] != 0:
|
if out['retcode'] != 0:
|
||||||
msg = 'Error: ' + out['stderr']
|
msg = 'Error: ' + out['stderr']
|
||||||
log.error(msg)
|
log.error(msg)
|
||||||
@ -100,7 +100,7 @@ def file_list(*packages):
|
|||||||
ret = set([])
|
ret = set([])
|
||||||
pkgs = {}
|
pkgs = {}
|
||||||
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if out['retcode'] != 0:
|
if out['retcode'] != 0:
|
||||||
msg = 'Error: ' + out['stderr']
|
msg = 'Error: ' + out['stderr']
|
||||||
log.error(msg)
|
log.error(msg)
|
||||||
@ -117,7 +117,7 @@ def file_list(*packages):
|
|||||||
for pkg in pkgs:
|
for pkg in pkgs:
|
||||||
files = []
|
files = []
|
||||||
cmd = 'dpkg -L {0}'.format(pkg)
|
cmd = 'dpkg -L {0}'.format(pkg)
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
files.append(line)
|
files.append(line)
|
||||||
fileset = set(files)
|
fileset = set(files)
|
||||||
ret = ret.union(fileset)
|
ret = ret.union(fileset)
|
||||||
@ -142,7 +142,7 @@ def file_dict(*packages):
|
|||||||
ret = {}
|
ret = {}
|
||||||
pkgs = {}
|
pkgs = {}
|
||||||
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
cmd = 'dpkg -l {0}'.format(' '.join(packages))
|
||||||
out = __salt__['cmd.run_all'](cmd)
|
out = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||||
if out['retcode'] != 0:
|
if out['retcode'] != 0:
|
||||||
msg = 'Error: ' + out['stderr']
|
msg = 'Error: ' + out['stderr']
|
||||||
log.error(msg)
|
log.error(msg)
|
||||||
@ -159,7 +159,7 @@ def file_dict(*packages):
|
|||||||
for pkg in pkgs:
|
for pkg in pkgs:
|
||||||
files = []
|
files = []
|
||||||
cmd = 'dpkg -L {0}'.format(pkg)
|
cmd = 'dpkg -L {0}'.format(pkg)
|
||||||
for line in __salt__['cmd.run'](cmd).splitlines():
|
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
|
||||||
files.append(line)
|
files.append(line)
|
||||||
ret[pkg] = files
|
ret[pkg] = files
|
||||||
return {'errors': errors, 'packages': ret}
|
return {'errors': errors, 'packages': ret}
|
||||||
|
@ -402,16 +402,17 @@ def refresh_db():
|
|||||||
# We prefer 'delta-webrsync' to 'webrsync'
|
# We prefer 'delta-webrsync' to 'webrsync'
|
||||||
if salt.utils.which('emerge-delta-webrsync'):
|
if salt.utils.which('emerge-delta-webrsync'):
|
||||||
cmd = 'emerge-delta-webrsync -q'
|
cmd = 'emerge-delta-webrsync -q'
|
||||||
return __salt__['cmd.retcode'](cmd) == 0
|
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
|
||||||
else:
|
else:
|
||||||
if __salt__['cmd.retcode']('emerge --sync --ask n --quiet') == 0:
|
if __salt__['cmd.retcode']('emerge --sync --ask n --quiet',
|
||||||
|
python_shell=False) == 0:
|
||||||
return True
|
return True
|
||||||
# We fall back to "webrsync" if "rsync" fails for some reason
|
# We fall back to "webrsync" if "rsync" fails for some reason
|
||||||
cmd = 'emerge-webrsync -q'
|
cmd = 'emerge-webrsync -q'
|
||||||
# We prefer 'delta-webrsync' to 'webrsync'
|
# We prefer 'delta-webrsync' to 'webrsync'
|
||||||
if salt.utils.which('emerge-delta-webrsync'):
|
if salt.utils.which('emerge-delta-webrsync'):
|
||||||
cmd = 'emerge-delta-webrsync -q'
|
cmd = 'emerge-delta-webrsync -q'
|
||||||
return __salt__['cmd.retcode'](cmd) == 0
|
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
|
||||||
|
|
||||||
|
|
||||||
def install(name=None,
|
def install(name=None,
|
||||||
@ -611,7 +612,9 @@ def install(name=None,
|
|||||||
cmd = 'emerge --quiet {0} --ask n {1} {2}'.format(bin_opts, emerge_opts, ' '.join(targets))
|
cmd = 'emerge --quiet {0} --ask n {1} {2}'.format(bin_opts, emerge_opts, ' '.join(targets))
|
||||||
|
|
||||||
old = list_pkgs()
|
old = list_pkgs()
|
||||||
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
call = __salt__['cmd.run_all'](cmd,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
__context__.pop('pkg.list_pkgs', None)
|
__context__.pop('pkg.list_pkgs', None)
|
||||||
if call['retcode'] != 0:
|
if call['retcode'] != 0:
|
||||||
return _process_emerge_err(call['stdout'], call['stderr'])
|
return _process_emerge_err(call['stdout'], call['stderr'])
|
||||||
@ -667,7 +670,9 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
|
|||||||
|
|
||||||
old = list_pkgs()
|
old = list_pkgs()
|
||||||
cmd = 'emerge --update --newuse --oneshot --ask n --quiet {0} {1}'.format(bin_opts, full_atom)
|
cmd = 'emerge --update --newuse --oneshot --ask n --quiet {0} {1}'.format(bin_opts, full_atom)
|
||||||
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
call = __salt__['cmd.run_all'](cmd,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
__context__.pop('pkg.list_pkgs', None)
|
__context__.pop('pkg.list_pkgs', None)
|
||||||
if call['retcode'] != 0:
|
if call['retcode'] != 0:
|
||||||
return _process_emerge_err(call['stdout'], call['stderr'])
|
return _process_emerge_err(call['stdout'], call['stderr'])
|
||||||
@ -712,7 +717,9 @@ def upgrade(refresh=True, binhost=None):
|
|||||||
|
|
||||||
old = list_pkgs()
|
old = list_pkgs()
|
||||||
cmd = 'emerge --update --newuse --deep --ask n --quiet {0} world'.format(bin_opts)
|
cmd = 'emerge --update --newuse --deep --ask n --quiet {0} world'.format(bin_opts)
|
||||||
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
call = __salt__['cmd.run_all'](cmd,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
if call['retcode'] != 0:
|
if call['retcode'] != 0:
|
||||||
ret['result'] = False
|
ret['result'] = False
|
||||||
if 'stderr' in call:
|
if 'stderr' in call:
|
||||||
@ -778,7 +785,9 @@ def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
|
|||||||
return {}
|
return {}
|
||||||
cmd = 'emerge --unmerge --quiet --quiet-unmerge-warn --ask n' \
|
cmd = 'emerge --unmerge --quiet --quiet-unmerge-warn --ask n' \
|
||||||
'{0}'.format(' '.join(targets))
|
'{0}'.format(' '.join(targets))
|
||||||
__salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
__salt__['cmd.run_all'](cmd,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
__context__.pop('pkg.list_pkgs', None)
|
__context__.pop('pkg.list_pkgs', None)
|
||||||
new = list_pkgs()
|
new = list_pkgs()
|
||||||
return salt.utils.compare_dicts(old, new)
|
return salt.utils.compare_dicts(old, new)
|
||||||
@ -867,7 +876,9 @@ def depclean(name=None, slot=None, fromrepo=None, pkgs=None):
|
|||||||
targets = [x for x in pkg_params if x in old]
|
targets = [x for x in pkg_params if x in old]
|
||||||
|
|
||||||
cmd = 'emerge --depclean --ask n --quiet {0}'.format(' '.join(targets))
|
cmd = 'emerge --depclean --ask n --quiet {0}'.format(' '.join(targets))
|
||||||
__salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
__salt__['cmd.run_all'](cmd,
|
||||||
|
output_loglevel='trace',
|
||||||
|
python_shell=False)
|
||||||
__context__.pop('pkg.list_pkgs', None)
|
__context__.pop('pkg.list_pkgs', None)
|
||||||
new = list_pkgs()
|
new = list_pkgs()
|
||||||
return salt.utils.compare_dicts(old, new)
|
return salt.utils.compare_dicts(old, new)
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user