Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 08:58:59 +00:00)

Commit 146f8abc4c: sync with upstream
@@ -1,5 +1,5 @@
{
    "skipTitle": "Merge forward",
    "userBlacklist": []
    "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"]
}
.travis.yml (35 lines removed)
@@ -1,35 +0,0 @@
language: python

python:
  - '2.6'
  - '2.7'

before_install:
  - sudo apt-get update
  - sudo apt-get install --fix-broken --ignore-missing -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" swig rabbitmq-server ruby python-apt mysql-server libmysqlclient-dev
  - (git describe && git fetch --tags) || (git remote add upstream git://github.com/saltstack/salt.git && git fetch --tags upstream)
  - pip install mock
  - pip install --allow-external http://dl.dropbox.com/u/174789/m2crypto-0.20.1.tar.gz
  - pip install --upgrade pep8 'pylint<=1.2.0'
  - pip install --upgrade coveralls
  - "if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then pip install unittest2 ordereddict; fi"
  - pip install git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting

install:
  - pip install -r requirements/zeromq.txt -r requirements/cloud.txt
  - pip install --allow-all-external -r requirements/opt.txt

before_script:
  - "/home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/pylint --rcfile=.testing.pylintrc salt/ && echo 'Finished Pylint Check Cleanly' || echo 'Finished Pylint Check With Errors'"
  - "/home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/pep8 --ignore=E501,E12 salt/ && echo 'Finished PEP-8 Check Cleanly' || echo 'Finished PEP-8 Check With Errors'"

script: "sudo -E /home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/python setup.py test --runtests-opts='--run-destructive --sysinfo -v --coverage'"

after_success:
  - coveralls

notifications:
  irc:
    channels: "irc.freenode.org#salt-devel"
    on_success: change
    on_failure: change
@@ -362,6 +362,15 @@
#
#token_expire_user_override: False

# Set to True to enable keeping the calculated user's auth list in the token
# file. This is disabled by default and the auth list is calculated or requested
# from the eauth driver each time.
#keep_acl_in_token: False

# Auth subsystem module to use to get authorized access list for a user. By default it's
# the same module used for external authentication.
#eauth_acl_module: django

# Allow minions to push files to the master. This is disabled by default, for
# security purposes.
#file_recv: False
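To see how the new options above fit together, here is an illustrative master-config sketch. It is only a sketch: the ``django`` value is simply the example module named in the comment above, and nothing here is part of the diff itself.

.. code-block:: yaml

    # illustrative sketch only, not part of the diff
    keep_acl_in_token: True     # cache the computed auth list inside the token file
    eauth_acl_module: django    # resolve the ACL through the django eauth module
    file_recv: False            # minion file push stays disabled (the default)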
doc/_themes/saltstack2/layout.html (vendored, 3 lines changed)
@@ -164,6 +164,9 @@
<!--start navbar-->
<div class="row">
<div class="col-sm-12 col-md-11 col-md-offset-1 col-lg-10 col-lg-offset-1">
<nav class="pull-left text-muted">
<a href="https://github.com/saltstack/salt/edit/develop/doc/{{pagename}}.rst">Edit on GitHub</a>
</nav>
<nav id="localnav">
<ul class="nav navbar-nav">
{%- block relbar_small %}{{ relbar() }}{% endblock %}
@@ -268,7 +268,7 @@ derivatives start the service after the package installation by default.
To prevent this, we need to create a policy layer which will prevent the Minion
service from restarting right after the upgrade:

.. code-block:: yaml
.. code-block:: jinja

    {%- if grains['os_family'] == 'Debian' %}

@@ -316,7 +316,7 @@ Restart using states
Now we can apply the workaround to restart the Minion in a reliable way.
The following example works on both UNIX-like and Windows operating systems:

.. code-block:: yaml
.. code-block:: jinja

    Restart Salt Minion:
      cmd.run:

@@ -333,7 +333,7 @@ However, it requires more advanced tricks to upgrade from legacy version of
Salt (before ``2016.3.0``), where executing commands in the background is not
supported:

.. code-block:: yaml
.. code-block:: jinja

    Restart Salt Minion:
      cmd.run:
@@ -68497,6 +68497,9 @@ key id to load with the keyserver argument
.B key_url
URL to a GPG key to add to the APT GPG keyring
.TP
.B key_text
GPG key in string form to add to the APT GPG keyring
.TP
.B consolidate
if \fBTrue\fP, will attempt to de\-dup and consolidate sources
.TP
@@ -23,3 +23,11 @@ Example minion configuration file

.. literalinclude:: ../../../conf/minion
    :language: yaml

.. _configuration-examples-proxy:

Example proxy minion configuration file
=======================================

.. literalinclude:: ../../../conf/proxy
    :language: yaml
@@ -51,6 +51,19 @@ After updating the configuration file, restart the Salt minion.
See the :ref:`minion configuration reference <configuration-salt-minion>`
for more details about other configurable options.

Proxy Minion Configuration
==========================

A proxy minion emulates the behaviour of a regular minion
and inherits its options.

Similarly, the configuration file is ``/etc/salt/proxy`` and the proxy
tries to connect to the DNS name "salt".

In addition to the regular minion options, there are several
proxy-specific options; see the
:ref:`proxy minion configuration reference <configuration-salt-proxy>`.
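As an illustration only, a minimal ``/etc/salt/proxy`` usually just points the proxy at its master; the hostname below is an assumed placeholder, and the proxy type itself is normally supplied through pillar rather than this file.

.. code-block:: yaml

    # /etc/salt/proxy, minimal sketch (values are assumptions)
    master: salt.example.com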

Running Salt
============
@@ -1031,6 +1031,35 @@ and usernames may be given:

    ldap:
      - gary

.. conf_master:: keep_acl_in_token

``keep_acl_in_token``
---------------------

Default: ``False``

Set to True to enable keeping the calculated user's auth list in the token
file. This is disabled by default and the auth list is calculated or requested
from the eauth driver each time.

.. code-block:: yaml

    keep_acl_in_token: False

.. conf_master:: eauth_acl_module

``eauth_acl_module``
--------------------

Default: ``''``

Auth subsystem module to use to get authorized access list for a user. By default it's
the same module used for external authentication.

.. code-block:: yaml

    eauth_acl_module: django

.. conf_master:: file_recv

``file_recv``
@@ -2287,17 +2287,6 @@ file.
files are prefixed with an underscore. A common example of this is the
``_schedule.conf`` file.

.. note::

    The configuration system supports adding the special token ``{id}`` to this
    option. At startup ``{id}`` will be replaced by the minion's ID, and the
    default_include directory will be set here. For example, if the minion's
    ID is 'webserver' and ``default_include`` is set to ``minion.d/{id}/*.conf``
    then the default_include directive will be set to ``minion.d/webserver/*.conf``.
    This is for situations when there are multiple minions or proxy minions
    running on a single machine that need different configurations, specifically for
    their schedulers.
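For reference, the ``{id}`` token described in that (now-removed) note works like this sketch, reusing the note's own 'webserver' example:

.. code-block:: yaml

    # minion config sketch, using the note's 'webserver' example
    default_include: minion.d/{id}/*.conf
    # at startup this expands to: minion.d/webserver/*.conf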

``include``
-----------
doc/ref/configuration/proxy.rst (new file, 120 lines)
@@ -0,0 +1,120 @@
.. _configuration-salt-proxy:

=================================
Configuring the Salt Proxy Minion
=================================

The Salt system is amazingly simple and easy to configure. The two components
of the Salt system each have a respective configuration file. The
:command:`salt-master` is configured via the master configuration file, and the
:command:`salt-proxy` is configured via the proxy configuration file.

.. seealso::
    :ref:`example proxy minion configuration file <configuration-examples-proxy>`

The Salt Minion configuration is very simple. Typically, the only value that
needs to be set is the master value so the proxy knows where to locate its master.

By default, the salt-proxy configuration will be in :file:`/etc/salt/proxy`.
A notable exception is FreeBSD, where the configuration will be in
:file:`/usr/local/etc/salt/proxy`.


Proxy-specific Configuration Options
====================================

.. conf_proxy:: add_proxymodule_to_opts

``add_proxymodule_to_opts``
---------------------------

.. versionadded:: 2015.8.2

.. versionchanged:: 2016.3.0

Default: ``False``

Add the proxymodule LazyLoader object to opts.

.. code-block:: yaml

    add_proxymodule_to_opts: True


.. conf_proxy:: proxy_merge_grains_in_module

``proxy_merge_grains_in_module``
--------------------------------

.. versionadded:: 2016.3.0

.. versionchanged:: Nitrogen

Default: ``True``

If a proxymodule has a function called ``grains``, then call it during
regular grains loading and merge the results with the proxy's grains
dictionary. Otherwise it is assumed that the module calls the grains
function in a custom way and returns the data elsewhere.

.. code-block:: yaml

    proxy_merge_grains_in_module: False


.. conf_proxy:: proxy_keep_alive

``proxy_keep_alive``
--------------------

.. versionadded:: Nitrogen

Default: ``True``

Whether the connection with the remote device should be restarted
when dead. The proxy module must implement the ``alive`` function,
otherwise the connection is considered alive.

.. code-block:: yaml

    proxy_keep_alive: False


.. conf_proxy:: proxy_keep_alive_interval

``proxy_keep_alive_interval``
-----------------------------

.. versionadded:: Nitrogen

Default: ``1``

The frequency of keepalive checks, in minutes. It requires the
:conf_minion:`proxy_keep_alive` option to be enabled
(and the proxy module to implement the ``alive`` function).

.. code-block:: yaml

    proxy_keep_alive_interval: 5


.. conf_proxy:: proxy_always_alive

``proxy_always_alive``
----------------------

.. versionadded:: Nitrogen

Default: ``True``

Whether the proxy should maintain the connection with the remote
device. Similarly to :conf_minion:`proxy_keep_alive`, this option
is very specific to the design of the proxy module.
When :conf_minion:`proxy_always_alive` is set to ``False``,
the connection with the remote device is not maintained and
has to be closed after every command.

.. code-block:: yaml

    proxy_always_alive: False
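Taken together, a sketch of how the keepalive options documented above might be combined in one proxy config; the interval value here is an illustrative assumption, not a recommendation from the file:

.. code-block:: yaml

    # proxy config sketch: check the connection every 5 minutes and restart it
    # when the proxy module's ``alive`` function reports it as dead
    proxy_keep_alive: True
    proxy_keep_alive_interval: 5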
@@ -201,6 +201,7 @@ execution modules
    localemod
    locate
    logadm
    logmod
    logrotate
    lvs
    lxc
doc/ref/modules/all/salt.modules.logmod.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
salt.modules.logmod module
==========================

.. automodule:: salt.modules.logmod
    :members:

doc/ref/states/all/salt.states.docker_container.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
============================
salt.states.docker_container
============================

.. automodule:: salt.states.docker_container
    :members:

doc/ref/states/all/salt.states.docker_image.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
========================
salt.states.docker_image
========================

.. automodule:: salt.states.docker_image
    :members:

doc/ref/states/all/salt.states.docker_network.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
==========================
salt.states.docker_network
==========================

.. automodule:: salt.states.docker_network
    :members:

doc/ref/states/all/salt.states.docker_volume.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
=========================
salt.states.docker_volume
=========================

.. automodule:: salt.states.docker_volume
    :members:
@@ -18,7 +18,7 @@ are run.
The most basic usage of Jinja in state files is using control structures to
wrap conditional or redundant state elements:

.. code-block:: yaml
.. code-block:: jinja

    {% if grains['os'] != 'FreeBSD' %}
    tcsh:

@@ -43,7 +43,7 @@ Writing **if-else** blocks can lead to very redundant state files however. In
this case, using :ref:`pillars<pillar>`, or using a previously
defined variable might be easier:

.. code-block:: yaml
.. code-block:: jinja

    {% set motd = ['/etc/motd'] %}
    {% if grains['os'] == 'Debian' %}

@@ -61,7 +61,7 @@ list of MOTD files to update, adding a state block for each file.

The filter_by function can also be used to set variables based on grains:

.. code-block:: yaml
.. code-block:: jinja

    {% set auditd = salt['grains.filter_by']({
    'RedHat': { 'package': 'audit' },

@@ -76,7 +76,7 @@ Include and Import
Includes and imports_ can be used to share common, reusable state configuration
between state files and between files.

.. code-block:: yaml
.. code-block:: jinja

    {% from 'lib.sls' import test %}

@@ -85,7 +85,7 @@ state element, from the file ``lib.sls``. In the case that the included file
performs checks against grains, or something else that requires context, passing
the context into the included file is required:

.. code-block:: yaml
.. code-block:: jinja

    {% from 'lib.sls' import test with context %}

@@ -95,7 +95,7 @@ Including Context During Include/Import
By adding ``with context`` to the include/import directive, the
current context can be passed to an included/imported template.

.. code-block:: yaml
.. code-block:: jinja

    {% import 'openssl/vars.sls' as ssl with context %}

@@ -110,7 +110,7 @@ mini-templates to repeat blocks of strings with a few parameterized variables.
Be aware that stripping whitespace from the template block, as well as
contained blocks, may be necessary to emulate a variable return from the macro.

.. code-block:: yaml
.. code-block:: jinja

    # init.sls
    {% from 'lib.sls' import pythonpkg with context %}

@@ -123,7 +123,7 @@ contained blocks, may be necessary to emulate a variable return from the macro.
    pkg.installed:
      - name: {{ pythonpkg('fabric') }}

.. code-block:: yaml
.. code-block:: jinja

    # lib.sls
    {% macro pythonpkg(pkg) -%}

@@ -161,13 +161,13 @@ strftime
:ref:`exhaustive list <python2:strftime-strptime-behavior>` can be found in
the official Python documentation.

.. code-block:: yaml
.. code-block:: jinja

    {% set curtime = None | strftime() %}

Fuzzy dates require that the `timelib`_ Python module is installed.

.. code-block:: yaml
.. code-block:: jinja

    {{ "2002/12/25"|strftime("%y") }}
    {{ "1040814000"|strftime("%Y-%m-%d") }}

@@ -184,7 +184,7 @@ yaml_encode
unicode. It will *not* work for multi-objects such as sequences or
maps.

.. code-block:: yaml
.. code-block:: jinja

    {%- set bar = 7 %}
    {%- set baz = none %}

@@ -209,7 +209,7 @@ yaml_dquote
resulting string will be emitted with opening and closing double
quotes.

.. code-block:: yaml
.. code-block:: jinja

    {%- set bar = '"The quick brown fox . . ."' %}
    {%- set baz = 'The word of the day is "salty".' %}

@@ -1087,7 +1087,7 @@ Jinja_ can be used in the same way in managed files:
      - context:
          bind: 127.0.0.1

.. code-block:: yaml
.. code-block:: jinja

    # lib.sls
    {% set port = 6379 %}

@@ -1165,7 +1165,7 @@ dictionary of :term:`execution function <Execution Function>`.

.. versionadded:: 2014.7.0

.. code-block:: yaml
.. code-block:: jinja

    # The following two function calls are equivalent.
    {{ salt['cmd.run']('whoami') }}

@@ -1179,7 +1179,7 @@ in the current Jinja context.

.. versionadded:: 2014.7.0

.. code-block:: yaml
.. code-block:: jinja

    Context is: {{ show_full_context() }}

@@ -1212,7 +1212,7 @@ distribute to Salt minions.
Functions in custom execution modules are available in the Salt execution
module dictionary just like the built-in execution modules:

.. code-block:: yaml
.. code-block:: jinja

    {{ salt['my_custom_module.my_custom_function']() }}
@@ -15,9 +15,10 @@ and others):
* External Job Cache
* Master Job Cache

The major difference between these two
mechanism is from where results are returned (from the Salt Master or Salt
Minion).
The major difference between these two mechanisms is from where results are
returned (from the Salt Master or Salt Minion). Configuring either of these
options will also cause the :py:mod:`Jobs Runner functions <salt.runners.jobs>`
to automatically query the remote stores for information.
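As a sketch of what that configuration looks like in the master config file; the ``redis`` returner used below is only an assumed example backend, any supported returner could be named instead:

.. code-block:: yaml

    # External Job Cache: minions send their returns directly to the returner
    ext_job_cache: redis
    # Master Job Cache: the master writes returns to the returner itself
    master_job_cache: redis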

External Job Cache - Minion-Side Returner
-----------------------------------------
@@ -90,7 +90,7 @@ by their ``os`` grain:

``/srv/pillar/packages.sls``

.. code-block:: yaml
.. code-block:: jinja

    {% if grains['os'] == 'RedHat' %}
    apache: httpd

@@ -116,13 +116,13 @@ of ``Foo Industries``.
Consequently this data can be used from within modules, renderers, State SLS
files, and more via the shared pillar :ref:`dict <python2:typesmapping>`:

.. code-block:: yaml
.. code-block:: jinja

    apache:
      pkg.installed:
        - name: {{ pillar['apache'] }}

.. code-block:: yaml
.. code-block:: jinja

    git:
      pkg.installed:

@@ -155,7 +155,7 @@ And the actual pillar file at '/srv/pillar/common_pillar.sls':
environment has its own top file, the jinja placeholder ``{{ saltenv }}``
can be used in place of the environment name:

.. code-block:: yaml
.. code-block:: jinja

    {{ saltenv }}:
      '*':
@@ -75,7 +75,7 @@ They differ because of the addition of the ``tag`` and ``data`` variables.

Here is a simple reactor sls:

.. code-block:: yaml
.. code-block:: jinja

    {% if data['id'] == 'mysql1' %}
    highstate_run:

@@ -92,7 +92,7 @@ API and the runner system. In this example, a command is published to the
``mysql1`` minion with a function of :py:func:`state.apply
<salt.modules.state.apply_>`. Similarly, a runner can be called:

.. code-block:: yaml
.. code-block:: jinja

    {% if data['data']['custom_var'] == 'runit' %}
    call_runit_orch:

@@ -161,7 +161,7 @@ so using the Reactor to kick off an Orchestrate run is a very common pairing.

For example:

.. code-block:: yaml
.. code-block:: jinja

    # /etc/salt/master.d/reactor.conf
    # A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"}

@@ -169,7 +169,7 @@ For example:
    - myco/custom/event:
      - /srv/reactor/some_event.sls

.. code-block:: yaml
.. code-block:: jinja

    # /srv/reactor/some_event.sls
    invoke_orchestrate_file:

@@ -180,7 +180,7 @@ For example:
    event_tag: {{ tag }}
    event_data: {{ data|json() }}

.. code-block:: yaml
.. code-block:: jinja

    # /srv/salt/_orch/do_complex_thing.sls
    {% set tag = salt.pillar.get('event_tag') %}

@@ -478,7 +478,7 @@ from the event to the state file via inline Pillar.

:file:`/srv/salt/haproxy/react_new_minion.sls`:

.. code-block:: yaml
.. code-block:: jinja

    {% if data['act'] == 'accept' and data['id'].startswith('web') %}
    add_new_minion_to_pool:

@@ -524,7 +524,7 @@ won't yet direct traffic to it.

:file:`/srv/salt/haproxy/refresh_pool.sls`:

.. code-block:: yaml
.. code-block:: jinja

    {% set new_minion = salt['pillar.get']('new_minion') %}

@@ -574,7 +574,7 @@ authentication every ten seconds by default.

:file:`/srv/reactor/auth-pending.sls`:

.. code-block:: yaml
.. code-block:: jinja

    {# Ink server failed to authenticate -- remove accepted key #}
    {% if not data['result'] and data['id'].startswith('ink') %}

@@ -600,7 +600,7 @@ Ink servers in the master configuration.

:file:`/srv/reactor/auth-complete.sls`:

.. code-block:: yaml
.. code-block:: jinja

    {# When an Ink server connects, run state.apply. #}
    highstate_run:

@@ -630,7 +630,7 @@ each minion fires when it first starts up and connects to the master.
On the master, create **/srv/reactor/sync_grains.sls** with the following
contents:

.. code-block:: yaml
.. code-block:: jinja

    sync_grains:
      local.saltutil.sync_grains:
doc/topics/releases/2016.3.6.rst (new file, 1188 lines)
File diff suppressed because it is too large.
@@ -61,6 +61,50 @@ State Module Changes
  start/stop the service using the ``--no-block`` flag in the ``systemctl``
  command. On non-systemd minions, a warning will be issued.
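A rough sketch of what using that flag from a state might look like; the ``no_block`` argument name is an assumption for illustration, since the excerpt above only mentions the underlying ``systemctl --no-block`` flag:

.. code-block:: yaml

    nginx:
      service.running:
        - enable: True
        - no_block: True   # assumed argument name; requests systemctl --no-block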

- The :py:func:`module.run <salt.states.module.run>` state has dropped its previous
  syntax with the ``m_`` prefix for reserved keywords. Additionally, it allows
  running several functions in a batch.

  .. note::
      It is necessary to explicitly turn on the new behaviour (see below).

  Before and after:

  .. code-block:: yaml

      # Before
      run_something:
        module.run:
          - name: mymodule.something
          - m_name: 'some name'
          - kwargs: {
              first_arg: 'one',
              second_arg: 'two',
              do_stuff: 'True'
            }

      # After
      run_something:
        module.run:
          mymodule.something:
            - name: some name
            - first_arg: one
            - second_arg: two
            - do_stuff: True

- The previous behaviour of :py:func:`module.run <salt.states.module.run>` is
  still the default and has to be explicitly overridden if you want to use the
  behaviour above. Please keep in mind that the old syntax will no longer be
  supported in the ``Oxygen`` release of Salt. To enable the new behavior, add
  the following to the minion config file:

  .. code-block:: yaml

      use_superseded:
        - module.run


Execution Module Changes
========================
@@ -119,6 +163,12 @@ Master Configuration Additions
  when connected to multiple masters to be able to send events to all connected
  masters.

- :conf_master:`eauth_acl_module` - When external auth is enabled, the master can
  authenticate with one module and fetch the authorization list from a different
  auth module.

- :conf_master:`keep_acl_in_token` - Option that allows the master to build the ACL
  once for each user being authenticated and keep it in the token.

Minion Configuration Additions
==============================
@@ -161,17 +211,93 @@ git_pillar "mountpoints" Feature Added

See :ref:`here <git-pillar-mountpoints>` for detailed documentation.

``dockerng`` State/Execution Module Renamed to ``docker``
==========================================================
Big Improvements to Docker Support
==================================

The old ``docker`` state and execution modules have been moved to
salt-contrib_. The ``dockerng`` state and execution module have been renamed to
``docker`` and now serve as the official Docker state and execution modules.
salt-contrib_. The ``dockerng`` execution module has been renamed to
:mod:`docker <salt.modules.docker>` and now serves as Salt's official Docker
execution module.

These state and execution modules can be used interchangeably both with
``docker`` and ``dockerng`` to preserve backward-compatibility, but it is
recommended to update your SLS files to use ``docker`` instead of ``dockerng``
in the event that the ``dockerng`` alias is dropped in a future release.
The old ``dockerng`` state module has been split into 4 state modules:

- :mod:`docker_container <salt.states.docker_container>` - States to manage
  Docker containers
- :mod:`docker_image <salt.states.docker_image>` - States to manage Docker
  images
- :mod:`docker_volume <salt.states.docker_volume>` - States to manage
  Docker volumes
- :mod:`docker_network <salt.states.docker_network>` - States to manage
  Docker networks

The reason for this change was to make states and requisites more clear. For
example, imagine this SLS:

.. code-block:: yaml

    myuser/appimage:
      docker.image_present:
        - sls: docker.images.appimage

    myapp:
      docker.running:
        - image: myuser/appimage
        - require:
          - docker: myuser/appimage

The new syntax would be:

.. code-block:: yaml

    myuser/appimage:
      docker_image.present:
        - sls: docker.images.appimage

    myapp:
      docker_container.running:
        - image: myuser/appimage
        - require:
          - docker_image: myuser/appimage

This is similar to how Salt handles MySQL, MongoDB, Zabbix, and other cases
where the same execution module is used to manage several different kinds
of objects (users, databases, roles, etc.).

The old syntax will continue to work until the **Fluorine** release of Salt.
The old ``dockerng`` naming will also continue to work until that release, so
no immediate changes need to be made to your SLS files (unless you were still
using the old docker states that have been moved to salt-contrib_).

The :py:func:`docker_container.running <salt.states.docker_container.running>`
state has undergone a significant change in how it determines whether or not a
container needs to be replaced. Rather than comparing individual arguments to
their corresponding values in the named container, a temporary container is
created (but not started) using the passed arguments. The two containers are
then compared to each other to determine whether or not there are changes, and
if so, the old container is stopped and destroyed, and the temporary container
is renamed and started.

Salt still needs to translate arguments into the format which docker-py
expects, but if it does not properly do so, the :ref:`skip_translate
<docker-container-running-skip-translate>` argument can be used to skip input
translation on an argument-by-argument basis, and you can then format your SLS
file to pass the data in the format that docker-py expects. This allows you
to work around any changes in Docker's API or issues with the input
translation, and continue to manage your Docker containers using Salt. Read the
documentation for :ref:`skip_translate
<docker-container-running-skip-translate>` for more information.
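As a rough sketch of how ``skip_translate`` might be used in practice; the ``port_bindings`` argument and the value format below are assumptions chosen purely for illustration:

.. code-block:: yaml

    myapp:
      docker_container.running:
        - image: myuser/appimage
        # skip Salt's input translation for this one argument and hand it
        # to docker-py as-is (argument name and value are illustrative)
        - skip_translate: port_bindings
        - port_bindings: {'4505/tcp': 4505}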

.. note::
    When running the :py:func:`docker_container.running
    <salt.states.docker_container.running>` state for the first time after
    upgrading to Nitrogen, your container(s) may be replaced. The changes may
    show diffs for certain parameters which say that the old value was an empty
    string, and the new value is ``None``. This is due to the fact that in
    prior releases Salt was passing empty strings for these values when
    creating the container if they were undefined in the SLS file, where now
    Salt simply does not pass any arguments not explicitly defined in the SLS
    file. Subsequent runs of the state should not replace the container if the
    configuration remains unchanged.

.. _salt-contrib: https://github.com/saltstack/salt-contrib
@@ -216,33 +216,42 @@ Using Salt Virt
===============

With hypervisors set up and virtual machine images ready, Salt can start
issuing cloud commands.
issuing cloud commands using the `virt runner`.

Start by running a Salt Virt hypervisor info command:

.. code-block:: bash

    salt-run virt.hyper_info
    salt-run virt.host_info

This will query what the running hypervisor stats are and display information
for all configured hypervisors. This command will also validate that the
hypervisors are properly configured.
This will query the running hypervisor(s) for stats and display useful
information such as the number of cpus and amount of memory.

Now that hypervisors are available a virtual machine can be provisioned. The
``virt.init`` routine will create a new virtual machine:
You can also list all VMs and their current states on all hypervisor
nodes:

.. code-block:: bash

    salt-run virt.list

Now that hypervisors are available a virtual machine can be provisioned.
The ``virt.init`` routine will create a new virtual machine:

.. code-block:: bash

    salt-run virt.init centos1 2 512 salt://centos.img

This command assumes that the CentOS virtual machine image is sitting in the
root of the Salt fileserver. Salt Virt will now select a hypervisor to deploy
the new virtual machine on and copy the virtual machine image down to the
hypervisor.
The Salt Virt runner will now automatically select a hypervisor to deploy
the new virtual machine on. Using ``salt://`` assumes that the CentOS virtual
machine image is located in the root of the :ref:`file-server` on the master.
When images are cloned (i.e. copied locally after retrieval from the file server)
the destination directory on the hypervisor minion is determined by the ``virt.images``
config option; by default this is ``/srv/salt/salt-images/``.
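A short sketch of overriding that option in the hypervisor minion's configuration; the path below is an arbitrary example, not a value taken from the document:

.. code-block:: yaml

    # hypervisor minion config: store cloned images on a larger volume
    virt.images: /data/salt-images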

Once the VM image has been copied down the new virtual machine will be seeded.
Seeding the VMs involves setting pre-authenticated Salt keys on the new VM and
if needed, will install the Salt Minion on the new VM before it is started.
When a VM is initialized using ``virt.init`` the image is copied to the hypervisor
using ``cp.cache_file`` and will be mounted and seeded with a minion. Seeding includes
setting pre-authenticated keys on the new machine. A minion will only be installed if
one can not be found on the image using the default arguments to ``seed.apply``.

.. note::

@@ -250,6 +259,13 @@ if needed, will install the Salt Minion on the new VM before it is started.
    installed. Making sure that the source VM images already have Salt
    installed will GREATLY speed up virtual machine deployment.

You can also deploy an image on a particular minion by directly calling the
`virt` execution module with an absolute image path. This can be quite handy for testing:

.. code-block:: bash

    salt 'hypervisor*' virt.init centos1 2 512 image=/var/lib/libvirt/images/centos.img

Now that the new VM has been prepared, it can be seen via the ``virt.query``
command:

@@ -285,11 +301,12 @@ opened on hypervisors:

:ref:`Opening the Firewall up for Salt <firewall>`

Salt also needs an additional flag to be turned on as well. The ``virt.tunnel``
option needs to be turned on. This flag tells Salt to run migrations securely
via the libvirt TLS tunnel and to use port 16514. Without ``virt.tunnel`` libvirt
tries to bind to random ports when running migrations. To turn on ``virt.tunnel``
simply apply it to the master config file:
Salt also needs the ``virt.tunnel`` option to be turned on.
This flag tells Salt to run migrations securely via the libvirt TLS tunnel and to
use port 16514. Without ``virt.tunnel`` libvirt tries to bind to random ports when
running migrations.

To turn on ``virt.tunnel`` simply apply it to the master config file:

.. code-block:: yaml

@@ -312,9 +329,11 @@ migrate routine:
VNC Consoles
============

Salt Virt also sets up VNC consoles by default, allowing for remote visual
consoles to be opened up. The information from a ``virt.query`` routine will
display the vnc console port for the specific vms:
Although not enabled by default, Salt Virt can also set up VNC consoles allowing for remote visual
consoles to be opened up. When creating a new VM using ``virt.init`` pass the ``enable_vnc=True``
parameter to have a console configured for the new VM.

The information from a ``virt.query`` routine will display the vnc console port for the specific vms:

.. code-block:: yaml
@@ -367,7 +367,7 @@ for the Grains to be accessed from within the template. A few examples:

``apache/init.sls:``

.. code-block:: yaml
.. code-block:: jinja

    apache:
      pkg.installed:

@@ -411,7 +411,7 @@ a MooseFS distributed filesystem chunkserver:

``moosefs/chunk.sls:``

.. code-block:: yaml
.. code-block:: jinja

    include:
      - moosefs
@@ -93,7 +93,7 @@ variable in a Salt state.

    MYENVVAR="world" salt-call state.template test.sls

.. code-block:: yaml
.. code-block:: jinja

    Create a file with contents from an environment variable:
      file.managed:

@@ -102,7 +102,7 @@ variable in a Salt state.

Error checking:

.. code-block:: yaml
.. code-block:: jinja

    {% set myenvvar = salt['environ.get']('MYENVVAR') %}
    {% if myenvvar %}
@@ -144,7 +144,7 @@ And finally, the SLS to deploy the website:

``/srv/salt/prod/webserver/foobarcom.sls:``

.. code-block:: yaml
.. code-block:: jinja

    {% if pillar.get('webserver_role', '') %}
    /var/www/foobarcom:
doc/topics/venafi/venafi.rst (new file, 191 lines)
@@ -0,0 +1,191 @@
=====================
Venafi Tools for Salt
=====================

Introduction
~~~~~~~~~~~~
Before using these modules you need to register an account with Venafi, and
configure it in your ``master`` configuration file.

First, you need to add a placeholder to the ``master`` file. This is because
the module will not load unless it finds an ``api_key`` setting, valid or not.
Open up ``/etc/salt/master`` and add:

.. code-block:: yaml

    venafi:
      api_key: None

Then register your email address with Venafi using the following command:

.. code-block:: bash

    salt-run venafi.register <youremail@yourdomain.com>

This command will not return an ``api_key`` to you; that will be sent to you
via email from Venafi. Once you have received that key, open up your ``master``
file and set the ``api_key`` to it:

.. code-block:: yaml

    venafi:
      api_key: abcdef01-2345-6789-abcd-ef0123456789

Example Usage
~~~~~~~~~~~~~
Generate a CSR and submit it to Venafi for issuance, using the 'Internet' zone:
salt-run venafi.request minion.example.com minion.example.com zone=Internet

Retrieve a certificate for a previously submitted request with request ID
aaa-bbb-ccc-dddd:
salt-run venafi.pickup aaa-bbb-ccc-dddd

Runner Functions
~~~~~~~~~~~~~~~~

gen_key
-------
Generate and return a ``private_key``. If a ``dns_name`` is passed in, the
``private_key`` will be cached under that name.

The key will be generated based on the policy values that were configured
by the Venafi administrator. A default Certificate Use Policy is associated
with a zone; the key type and key length parameters associated with this value
will be used.

:param str minion_id: Required. The name of the minion which hosts the domain
    name in question.

:param str dns_name: Required. The FQDN of the domain that will be hosted on
    the minion.

:param str zone: Required. Default value is "default". The zone on Venafi that
    the domain belongs to.

:param str password: Optional. If specified, the password to use to access the
    generated key.


gen_csr
-------
Generate a csr using the host's private_key. Analogous to:

.. code-block:: bash

    VCert gencsr -cn [CN Value] -o "Beta Organization" -ou "Beta Group" \
        -l "Palo Alto" -st "California" -c US

:param str minion_id: Required.

:param str dns_name: Required.

:param str zone: Optional. Default value is "default". The zone on Venafi that
    the domain belongs to.

:param str country=None: Optional. The two-letter ISO abbreviation for your
    country.

:param str state=None: Optional. The state/county/region where your
    organisation is legally located. Must not be abbreviated.

:param str loc=None: Optional. The city where your organisation is legally
    located.

:param str org=None: Optional. The exact legal name of your organisation. Do
    not abbreviate your organisation name.

:param str org_unit=None: Optional. Section of the organisation, can be left
    empty if this does not apply to your case.

:param str password=None: Optional. Password for the CSR.


request
-------
Request a new certificate. Analogous to:

.. code-block:: bash

    VCert enroll -z <zone> -k <api key> -cn <domain name>

:param str minion_id: Required.

:param str dns_name: Required.

:param str zone: Required. Default value is "default". The zone on Venafi that
    the certificate request will be submitted to.

:param str country=None: Optional. The two-letter ISO abbreviation for your
    country.

:param str state=None: Optional. The state/county/region where your
    organisation is legally located. Must not be abbreviated.

:param str loc=None: Optional. The city where your organisation is legally
    located.

:param str org=None: Optional. The exact legal name of your organisation. Do
    not abbreviate your organisation name.

:param str org_unit=None: Optional. Section of the organisation, can be left
    empty if this does not apply to your case.

:param str password=None: Optional. Password for the CSR.

:param str company_id=None: Required, but may be configured in ``master`` file
    instead.

register
--------
Register a new user account

:param str email: Required. The email address to use for the new Venafi account.


show_company
------------
Show company information, especially the company id

:param str domain: Required. The domain name to look up information for.


show_csrs
---------
Show certificate requests for the configured API key.


show_zones
----------
Show zones for the specified company id.

:param str company_id: Required. The company id to show the zones for.


pickup, show_cert
-----------------
Show certificate requests for the specified certificate id. Analogous to the
VCert pickup command.

:param str id_: Required. The id of the certificate to look up.


show_rsa
--------
Show a private RSA key.

:param str minion_id: The name of the minion to display the key for.

:param str dns_name: The domain name to display the key for.


list_domain_cache
-----------------
List domains that have been cached on this master.


del_cached_domain
-----------------
Delete a domain from this master's cache.

:param str domains: A domain name, or a comma-separated list of domain names,
    to delete from this master's cache.
@@ -21,8 +21,8 @@ pkgin -y rm salt py27-zmq
pip install --no-use-wheel --egg esky bbfreeze

## bbfreeze-loader
COMPILE="gcc -fno-strict-aliasing -O2 -pipe -O2 -DHAVE_DB_185_H -I/usr/include -I/opt/local/include -I/opt/local/include/db4 -I/opt/local/include/gettext -I/opt/local/include/ncurses -DNDEBUG -O2 -pipe -O2 -DHAVE_DB_185_H -I/usr/include -I/opt/local/include -I/opt/local/include/db4 -I/opt/local/include/gettext -I/opt/local/include/ncurses -fPIC -I/opt/local/include/python2.7 -static-libgcc"
LINK_OPTS="-L/opt/local/lib -L/opt/local/lib/python2.7/config -L/opt/local/lib -lsocket -lnsl -ldl -lrt -lm -static-libgcc"
COMPILE="gcc -fno-strict-aliasing -O2 -pipe -DHAVE_DB_185_H -I/usr/include -I/opt/local/include -I/opt/local/include/db4 -I/opt/local/include/gettext -I/opt/local/include/ncurses -DNDEBUG -fPIC -I/opt/local/include/python2.7 -static-libgcc"
LINK_OPTS="-L/opt/local/lib -L/opt/local/lib/python2.7/config -lsocket -lnsl -ldl -lrt -lm -lssp -static-libgcc"
mkdir -p ${BLDPATH}
cd ${BLDPATH}
curl -kO 'https://pypi.python.org/packages/source/b/bbfreeze-loader/bbfreeze-loader-1.1.0.zip'

@@ -30,12 +30,11 @@ unzip bbfreeze-loader-1.1.0.zip
${COMPILE} -c bbfreeze-loader-1.1.0/_bbfreeze_loader/console.c -o ${BLDPATH}/console.o
${COMPILE} -c bbfreeze-loader-1.1.0/_bbfreeze_loader/getpath.c -o ${BLDPATH}/getpath.o
gcc ${BLDPATH}/console.o ${BLDPATH}/getpath.o /opt/local/lib/python2.7/config/libpython2.7.a ${LINK_OPTS} -o ${BLDPATH}/console.exe
patchelf --set-rpath '$ORIGIN:$ORIGIN/../lib' ${BLDPATH}/console.exe
find /opt/local -name console.exe -exec cp ${BLDPATH}/console.exe {} \;

## clone saltstack repo
cd ${SALTBASE}
git clone git://github.com/saltstack/salt -b 2016.3
git clone git://github.com/saltstack/salt -b 2016.11

## salt requirements
cd ${SALTBASE}/salt
@@ -23,10 +23,13 @@ cp $PKG_DIR/*.xml $PKG_DIR/install.sh $BUILD_DIR/install
chmod +x $BUILD_DIR/install/install.sh
unzip -d $BUILD_DIR/bin dist/*.zip
cp $BUILD_DIR/bin/*/libgcc_s.so.1 $BUILD_DIR/bin/
cp $BUILD_DIR/bin/*/libssp.so.0 $BUILD_DIR/bin/
find build/output/salt/bin/ -mindepth 1 -maxdepth 1 -type d -not -name appdata -exec mv {} $BUILD_DIR/bin/appdata/ \;
PYZMQ=$(find ${BUILD_DIR}/bin/ -mindepth 1 -type d -name 'pyzmq-*.egg')/zmq
find /opt/local/lib/ -maxdepth 1 -type l -regextype sed -regex '.*/libzmq.so.[0-9]\+$' -exec cp {} ${PYZMQ}/ \;
find /opt/local/lib/ -maxdepth 1 -type l -regextype sed -regex '.*/libsodium.so.[0-9]\+$' -exec cp {} ${PYZMQ}/ \;
find ${PYZMQ}/ -maxdepth 1 -type f -name '*.so.*' -exec patchelf --set-rpath '$ORIGIN:$ORIGIN/../../:$ORIGIN/../lib' {} \;
find ${PYZMQ}/ -maxdepth 1 -type f -name '*.so.*' -exec cp {} ${PYZMQ}/../../ \;
find ${PYZMQ}/ -maxdepth 3 -type f -name '*.so' -exec patchelf --set-rpath '$ORIGIN:$ORIGIN/../../:$ORIGIN/../lib:$ORIGIN/../../../../' {} \;
gtar -C $BUILD_DIR/.. -czvf dist/salt-$(awk '/^Version:/{print $2}' < PKG-INFO)-esky-smartos.tar.gz salt
echo "tarball built"
@@ -5,6 +5,11 @@ apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
# httpretty needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
# satisfy other requirements, and httpretty 0.8.10 has bugs in setup.py that
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
GitPython>=0.3
pytest
@ -50,11 +50,12 @@ class LoadAuth(object):
|
||||
'''
|
||||
Wrap the authentication system to handle peripheral components
|
||||
'''
|
||||
def __init__(self, opts):
|
||||
def __init__(self, opts, ckminions=None):
|
||||
self.opts = opts
|
||||
self.max_fail = 1.0
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.auth = salt.loader.auth(opts)
|
||||
self.ckminions = ckminions or salt.utils.minions.CkMinions(opts)
|
||||
|
||||
def load_name(self, load):
|
||||
'''
|
||||
@ -66,11 +67,9 @@ class LoadAuth(object):
|
||||
fstr = '{0}.auth'.format(load['eauth'])
|
||||
if fstr not in self.auth:
|
||||
return ''
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
try:
|
||||
return fcall['args'][0]
|
||||
pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
|
||||
return load[pname_arg]
|
||||
except IndexError:
|
||||
return ''
|
||||
|
||||
@ -118,6 +117,45 @@ class LoadAuth(object):
|
||||
time.sleep(0.001)
|
||||
return False
|
||||
|
||||
def __get_acl(self, load):
|
||||
'''
|
||||
Returns ACL for a specific user.
|
||||
Returns None if eauth doesn't provide any for the user. I. e. None means: use acl declared
|
||||
in master config.
|
||||
'''
|
||||
if 'eauth' not in load:
|
||||
return None
|
||||
mod = self.opts['eauth_acl_module']
|
||||
if not mod:
|
||||
mod = load['eauth']
|
||||
fstr = '{0}.acl'.format(mod)
|
||||
if fstr not in self.auth:
|
||||
return None
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
try:
|
||||
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
|
||||
except Exception as e:
|
||||
log.debug('Authentication module threw {0}'.format(e))
|
||||
return None
|
||||
|
||||
def __process_acl(self, load, auth_list):
|
||||
'''
|
||||
Allows eauth module to modify the access list right before it'll be applied to the request.
|
||||
For example ldap auth module expands entries
|
||||
'''
|
||||
if 'eauth' not in load:
|
||||
return auth_list
|
||||
fstr = '{0}.process_acl'.format(load['eauth'])
|
||||
if fstr not in self.auth:
|
||||
return auth_list
|
||||
try:
|
||||
return self.auth[fstr](auth_list, self.opts)
|
||||
except Exception as e:
|
||||
log.debug('Authentication module threw {0}'.format(e))
|
||||
return auth_list
|
||||
|
||||
def get_groups(self, load):
|
||||
'''
|
||||
Read in a load and return the groups a user is a member of
|
||||
@ -159,10 +197,7 @@ class LoadAuth(object):
|
||||
'''
|
||||
Run time_auth and create a token. Return False or the token
|
||||
'''
|
||||
auth_ret = self.time_auth(load)
|
||||
if not isinstance(auth_ret, list) and not isinstance(auth_ret, bool):
|
||||
auth_ret = False
|
||||
if auth_ret is False:
|
||||
if not self.authenticate_eauth(load):
|
||||
return {}
|
||||
fstr = '{0}.auth'.format(load['eauth'])
|
||||
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
|
||||
@ -171,9 +206,6 @@ class LoadAuth(object):
|
||||
while os.path.isfile(t_path):
|
||||
tok = str(hash_type(os.urandom(512)).hexdigest())
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
|
||||
if self._allow_custom_expire(load):
|
||||
token_expire = load.pop('token_expire', self.opts['token_expire'])
|
||||
@ -183,18 +215,23 @@ class LoadAuth(object):
|
||||
|
||||
tdata = {'start': time.time(),
|
||||
'expire': time.time() + token_expire,
|
||||
'name': fcall['args'][0],
|
||||
'name': self.load_name(load),
|
||||
'eauth': load['eauth'],
|
||||
'token': tok}
|
||||
|
||||
if auth_ret is not True:
|
||||
tdata['auth_list'] = auth_ret
|
||||
if self.opts['keep_acl_in_token']:
|
||||
acl_ret = self.__get_acl(load)
|
||||
tdata['auth_list'] = acl_ret
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
|
||||
with salt.utils.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
try:
|
||||
with salt.utils.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
|
||||
return {}
|
||||
return tdata
|
||||
|
||||
def get_tok(self, tok):
|
||||
@ -205,8 +242,12 @@ class LoadAuth(object):
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
if not os.path.isfile(t_path):
|
||||
return {}
|
||||
with salt.utils.fopen(t_path, 'rb') as fp_:
|
||||
tdata = self.serial.loads(fp_.read())
|
||||
try:
|
||||
with salt.utils.fopen(t_path, 'rb') as fp_:
|
||||
tdata = self.serial.loads(fp_.read())
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not read token file "{0}".'.format(t_path))
|
||||
return {}
|
||||
rm_tok = False
|
||||
if 'expire' not in tdata:
|
||||
# invalid token, delete it!
|
||||
@ -221,6 +262,158 @@ class LoadAuth(object):
|
||||
pass
|
||||
return tdata
|
||||
|
||||
def authenticate_token(self, load):
|
||||
'''
|
||||
Authenticate a user by the token specified in load.
|
||||
Return the token object or False if auth failed.
|
||||
'''
|
||||
token = self.get_tok(load['token'])
|
||||
|
||||
# Bail if the token is empty or if the eauth type specified is not allowed
|
||||
if not token or token['eauth'] not in self.opts['external_auth']:
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return False
|
||||
|
||||
return token
|
||||
|
||||
def authenticate_eauth(self, load):
|
||||
'''
|
||||
Authenticate a user by the external auth module specified in load.
|
||||
Return True on success or False on failure.
|
||||
'''
|
||||
if 'eauth' not in load:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return False
|
||||
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return False
|
||||
|
||||
# Perform the actual authentication. If we fail here, do not
|
||||
# continue.
|
||||
if not self.time_auth(load):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def authenticate_key(self, load, key):
|
||||
'''
|
||||
Authenticate a user by the key passed in load.
|
||||
Return the effective user id (name) if it's differ from the specified one (for sudo).
|
||||
If the effective user id is the same as passed one return True on success or False on
|
||||
failure.
|
||||
'''
|
||||
auth_key = load.pop('key')
|
||||
if not auth_key:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
if 'user' in load:
|
||||
auth_user = AuthUser(load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
return auth_user.sudo_name()
|
||||
elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root':
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
elif auth_user.is_running_user():
|
||||
if auth_key != key.get(load['user']):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
elif auth_key == key.get('root'):
|
||||
pass
|
||||
else:
|
||||
if load['user'] in key:
|
||||
# User is authorised, check key and check perms
|
||||
if auth_key != key[load['user']]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
return load['user']
|
||||
else:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
log.warning('Authentication failure of type "other" occurred.')
|
||||
return False
|
||||
return True
|
||||
|
||||
    def get_auth_list(self, load):
        '''
        Retrieve access list for the user specified in load.
        The list is built by eauth module or from master eauth configuration.
        Return None if current configuration doesn't provide any ACL for the user. Return an empty
        list if the user has no rights to execute anything on this master and returns non-empty list
        if user is allowed to execute particular functions.
        '''
        # Get acl from eauth module.
        auth_list = self.__get_acl(load)
        if auth_list is not None:
            return auth_list

        if load['eauth'] not in self.opts['external_auth']:
            # No matching module is allowed in config
            log.warning('Authorization failure occurred.')
            return None

        name = self.load_name(load)  # The username we are attempting to auth with
        groups = self.get_groups(load)  # The groups this user belongs to
        eauth_config = self.opts['external_auth'][load['eauth']]
        if groups is None or groups is False:
            groups = []
        group_perm_keys = [item for item in eauth_config if item.endswith('%')]  # The configured auth groups

        # First we need to know if the user is allowed to proceed via any of their group memberships.
        group_auth_match = False
        for group_config in group_perm_keys:
            group_config = group_config.rstrip('%')
            for group in groups:
                if group == group_config:
                    group_auth_match = True
                    # If a group_auth_match is set it means only that we have a
                    # user which matches at least one or more of the groups defined
                    # in the configuration file.

        external_auth_in_db = False
        for entry in eauth_config:
            if entry.startswith('^'):
                external_auth_in_db = True
                break

        # If neither a catchall, a named membership or a group
        # membership is found, there is no need to continue. Simply
        # deny the user access.
        if not ((name in eauth_config) |
                ('*' in eauth_config) |
                group_auth_match | external_auth_in_db):
            # Auth successful, but no matching user found in config
            log.warning('Authorization failure occurred.')
            return None

        # We now have an authenticated session and it is time to determine
        # what the user has access to.
        auth_list = []
        if name in eauth_config:
            auth_list = eauth_config[name]
        elif '*' in eauth_config:
            auth_list = eauth_config['*']
        if group_auth_match:
            auth_list = self.ckminions.fill_auth_list_from_groups(
                eauth_config,
                groups,
                auth_list)

        auth_list = self.__process_acl(load, auth_list)

        log.trace("Compiled auth_list: {0}".format(auth_list))

        return auth_list

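For orientation, here is a small self-contained sketch (plain Python, not Salt's implementation; the eauth entries are invented) of the precedence get_auth_list applies above: a named user entry wins over the '*' catch-all, and matching '<group>%' entries are merged in afterwards. It is deliberately simplified relative to fill_auth_list_from_groups.

eauth_config = {
    'fred': ['test.ping', 'network.*'],
    '*': ['grains.items'],
    'admins%': [{'*': ['.*']}],
}

def resolve_auth_list(config, name, groups):
    # named user entry, else the '*' catch-all, then any matching group entries
    auth_list = list(config.get(name, config.get('*', [])))
    for group in groups:
        auth_list.extend(config.get(group + '%', []))
    return auth_list

print(resolve_auth_list(eauth_config, 'fred', []))           # ['test.ping', 'network.*']
print(resolve_auth_list(eauth_config, 'alice', ['admins']))  # ['grains.items', {'*': ['.*']}]
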
class Authorize(object):
    '''
@@ -288,7 +481,7 @@ class Authorize(object):
                                merge_lists=merge_lists)

        if 'ldap' in auth_data and __opts__.get('auth.ldap.activedirectory', False):
            auth_data['ldap'] = salt.auth.ldap.expand_ldap_entries(auth_data['ldap'])
            auth_data['ldap'] = salt.auth.ldap.__expand_ldap_entries(auth_data['ldap'])
            log.debug(auth_data['ldap'])

        #for auth_back in self.opts.get('external_auth_sources', []):
@@ -533,3 +726,10 @@ class AuthUser(object):
        this process and False if not.
        '''
        return self.user == salt.utils.get_user()

    def sudo_name(self):
        '''
        Returns the username of the sudoer, i.e. self.user without the
        'sudo_' prefix.
        '''
        return self.user.split('_', 1)[-1]

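The sudo_name helper added above simply strips the 'sudo_' prefix that Salt prepends to sudoing callers; a short illustration (plain Python, usernames invented):

# Salt stores sudoing callers as 'sudo_<name>'; split('_', 1)[-1] recovers '<name>'.
# sudo_name() is only reached after is_sudo() has already matched the 'sudo_' prefix.
for user in ('sudo_admin', 'sudo_deploy_bot'):
    print(user.split('_', 1)[-1])   # admin, deploy_bot
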
@ -90,7 +90,7 @@ def is_connection_usable():
|
||||
return True
|
||||
|
||||
|
||||
def django_auth_setup():
|
||||
def __django_auth_setup():
|
||||
'''
|
||||
Prepare the connection to the Django authentication framework
|
||||
'''
|
||||
@ -125,7 +125,7 @@ def auth(username, password):
|
||||
sys.path.append(django_auth_path)
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', __opts__['django_auth_settings'])
|
||||
|
||||
django_auth_setup()
|
||||
__django_auth_setup()
|
||||
|
||||
if not is_connection_usable():
|
||||
connection.close()
|
||||
@ -135,22 +135,19 @@ def auth(username, password):
|
||||
if user is not None:
|
||||
if user.is_active:
|
||||
log.debug('Django authentication successful')
|
||||
|
||||
auth_dict_from_db = retrieve_auth_entries(username)[username]
|
||||
if auth_dict_from_db is not None:
|
||||
return auth_dict_from_db
|
||||
|
||||
return True
|
||||
else:
|
||||
log.debug('Django authentication: the password is valid but the account is disabled.')
|
||||
else:
|
||||
log.debug('Django authentication failed.')
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def retrieve_auth_entries(u=None):
|
||||
def acl(username):
|
||||
'''
|
||||
|
||||
:param u: Username to filter for
|
||||
:param username: Username to filter for
|
||||
:return: Dictionary that can be slotted into the ``__opts__`` structure for
|
||||
eauth that designates the user associated ACL
|
||||
|
||||
@ -185,12 +182,12 @@ def retrieve_auth_entries(u=None):
|
||||
- .*
|
||||
|
||||
'''
|
||||
django_auth_setup()
|
||||
__django_auth_setup()
|
||||
|
||||
if u is None:
|
||||
if username is None:
|
||||
db_records = DJANGO_AUTH_CLASS.objects.all()
|
||||
else:
|
||||
db_records = DJANGO_AUTH_CLASS.objects.filter(user_fk__username=u)
|
||||
db_records = DJANGO_AUTH_CLASS.objects.filter(user_fk__username=username)
|
||||
auth_dict = {}
|
||||
|
||||
for a in db_records:
|
||||
|
@@ -390,7 +390,7 @@ def groups(username, **kwargs):
    return group_list


def expand_ldap_entries(entries, opts=None):
def __expand_ldap_entries(entries, opts=None):
    '''

    :param entries: ldap subtree in external_auth config option
@@ -456,5 +456,24 @@ def expand_ldap_entries(entries, opts=None):
        else:
            acl_tree.append({minion_or_ou: matchers})

    log.trace('expand_ldap_entries: {0}'.format(acl_tree))
    log.trace('__expand_ldap_entries: {0}'.format(acl_tree))
    return acl_tree


def process_acl(auth_list, opts=None):
    '''
    Query LDAP, retrieve list of minion_ids from an OU or other search.
    For each minion_id returned from the LDAP search, copy the perms
    matchers into the auth dictionary
    :param auth_list:
    :param opts: __opts__ for when __opts__ is not injected
    :return: Modified auth list.
    '''
    ou_names = []
    for item in auth_list:
        if isinstance(item, six.string_types):
            continue
        ou_names.extend([potential_ou for potential_ou in item.keys() if potential_ou.startswith('ldap(')])
    if ou_names:
        auth_list = __expand_ldap_entries(auth_list, opts)
    return auth_list

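process_acl only triggers the LDAP expansion when some entry in the auth list carries an 'ldap(...)' key. A sketch of what such a list might look like before expansion (the DN and matchers are invented; the real code uses six.string_types rather than str):

auth_list = [
    'test.ping',   # plain string entries are skipped by process_acl
    {'ldap(OU=webservers,DC=example,DC=com)': ['state.*', 'service.restart']},
]
ou_names = [key for item in auth_list if not isinstance(item, str)
            for key in item if key.startswith('ldap(')]
print(ou_names)   # ['ldap(OU=webservers,DC=example,DC=com)'] -> handed to __expand_ldap_entries
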
5
salt/cache/__init__.py
vendored
@@ -9,7 +9,8 @@ Loader mechanism for caching data, with data expiration, etc.
from __future__ import absolute_import
import time

# Import Salt lobs
# Import Salt libs
import salt.config
import salt.loader
import salt.syspaths
from salt.payload import Serial
@@ -54,7 +55,7 @@ class Cache(object):
            self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
        else:
            self.cachedir = cachedir
        self.driver = opts['cache']
        self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
        self.serial = Serial(opts)
        self._modules = None

5
salt/cache/localfs.py
vendored
@@ -138,7 +138,10 @@ def list_(bank, cachedir):
    )
    ret = []
    for item in items:
        ret.append(item.rstrip('.p'))
        if item.endswith('.p'):
            ret.append(item.rstrip(item[-2:]))
        else:
            ret.append(item)
    return ret

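One caveat worth keeping in mind when reading the list_ hunk above: str.rstrip() strips a set of characters rather than an exact suffix, so an entry whose base name itself ends in 'p' or '.' still loses extra characters; slicing off the last two characters is the usual way to drop a literal '.p'. A quick plain-Python illustration:

item = 'app.p'
print(item.rstrip('.p'))                            # 'a'   -- over-trimmed
print(item[:-2] if item.endswith('.p') else item)   # 'app' -- exact suffix removal
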
@ -166,7 +166,7 @@ class Batch(object):
|
||||
|
||||
if next_:
|
||||
if not self.quiet:
|
||||
print_cli('\nExecuting run on {0}\n'.format(next_))
|
||||
print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
|
||||
# create a new iterator for this batch of minions
|
||||
new_iter = self.local.cmd_iter_no_block(
|
||||
*args,
|
||||
|
@ -231,7 +231,7 @@ class LocalClient(object):
|
||||
Return the information about a given job
|
||||
'''
|
||||
log.debug('Checking whether jid {0} is still running'.format(jid))
|
||||
timeout = self.opts['gather_job_timeout']
|
||||
timeout = kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])
|
||||
|
||||
pub_data = self.run_job(tgt,
|
||||
'saltutil.find_job',
|
||||
@ -1113,6 +1113,7 @@ class LocalClient(object):
|
||||
|
||||
if timeout is None:
|
||||
timeout = self.opts['timeout']
|
||||
gather_job_timeout = kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])
|
||||
start = int(time.time())
|
||||
|
||||
# timeouts per minion, id_ -> timeout time
|
||||
@ -1213,7 +1214,7 @@ class LocalClient(object):
|
||||
jinfo_iter = []
|
||||
else:
|
||||
jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
|
||||
timeout_at = time.time() + self.opts['gather_job_timeout']
|
||||
timeout_at = time.time() + gather_job_timeout
|
||||
# if you are a syndic, wait a little longer
|
||||
if self.opts['order_masters']:
|
||||
timeout_at += self.opts.get('syndic_wait', 1)
|
||||
|
@@ -207,6 +207,13 @@ def _expand_node(node):
    zone = ret['extra']['zone']
    ret['extra']['zone'] = {}
    ret['extra']['zone'].update(zone.__dict__)

    # Remove unserializable GCENodeDriver objects
    if 'driver' in ret:
        del ret['driver']
    if 'driver' in ret['extra']['zone']:
        del ret['extra']['zone']['driver']

    return ret

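The _expand_node hunk above drops the libcloud driver handles because they cannot be serialized when the node data is returned. A toy illustration of the same idea (the class and keys are invented, not libcloud's):

import json

class FakeDriver(object):   # stands in for an unserializable GCENodeDriver
    pass

ret = {'name': 'gce-node', 'driver': FakeDriver(),
       'extra': {'zone': {'driver': FakeDriver(), 'name': 'us-central1-a'}}}
ret.pop('driver', None)
ret['extra']['zone'].pop('driver', None)
print(json.dumps(ret))   # serializes cleanly once the driver objects are removed
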
@@ -31,6 +31,8 @@ from __future__ import absolute_import
import time
import pprint
import logging
import re
import json

# Import salt libs
import salt.ext.six as six
@@ -44,6 +46,7 @@ from salt.exceptions import (
    SaltCloudExecutionFailure,
    SaltCloudExecutionTimeout
)
from salt.ext.six.moves import range

# Import Third Party Libs
try:
@@ -106,6 +109,7 @@ url = None
ticket = None
csrf = None
verify_ssl = None
api = None


def _authenticate():
@@ -617,6 +621,60 @@ def create(vm_):
    return ret


def _import_api():
    '''
    Download https://<url>/pve-docs/api-viewer/apidoc.js
    Extract content of pveapi var (json formatted)
    Load this json content into global variable "api"
    '''
    global api
    full_url = 'https://{0}:8006/pve-docs/api-viewer/apidoc.js'.format(url)
    returned_data = requests.get(full_url, verify=verify_ssl)

    re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE)
    api_json = re_filter.findall(returned_data.text)[0]
    api = json.loads(api_json)


def _get_properties(path="", method="GET", forced_params=None):
    '''
    Return the parameter list from the API for the defined path and HTTP method
    '''
    if api is None:
        _import_api()

    sub = api
    path_levels = [level for level in path.split('/') if level != '']
    search_path = ''
    props = []
    parameters = set([] if forced_params is None else forced_params)
    # Browse all path elements but last
    for elem in path_levels[:-1]:
        search_path += '/' + elem
        # Look up the dictionary with path == "search path" in the list and return its children
        sub = (item for item in sub if item["path"] == search_path).next()['children']
    # Get leaf element in path
    search_path += '/' + path_levels[-1]
    sub = next((item for item in sub if item["path"] == search_path))
    try:
        # get list of properties for requested method
        props = sub['info'][method]['parameters']['properties'].keys()
    except KeyError as exc:
        log.error('method not found: "{0}"'.format(str(exc)))
    except:
        raise
    for prop in props:
        numerical = re.match(r'(\w+)\[n\]', prop)
        # generate (arbitrarily) 10 properties for duplicatable properties identified by:
        # "prop[n]"
        if numerical:
            for i in range(10):
                parameters.add(numerical.group(1) + str(i))
        else:
            parameters.add(prop)
    return parameters


def create_node(vm_, newid):
    '''
    Build and submit the request data to create a new node
@@ -665,7 +723,11 @@ def create_node(vm_, newid):
        newnode['hostname'] = vm_['name']
        newnode['ostemplate'] = vm_['image']

        for prop in 'cpuunits', 'description', 'memory', 'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs':
        static_props = ('cpuunits', 'description', 'memory', 'onboot', 'net0',
                        'password', 'nameserver', 'swap', 'storage', 'rootfs')
        for prop in _get_properties('/nodes/{node}/lxc',
                                    'POST',
                                    static_props):
            if prop in vm_:  # if the property is set, use it for the VM request
                newnode[prop] = vm_[prop]

@@ -687,7 +749,10 @@ def create_node(vm_, newid):

    elif vm_['technology'] == 'qemu':
        # optional Qemu settings
        for prop in 'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0':
        static_props = ('acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0')
        for prop in _get_properties('/nodes/{node}/qemu',
                                    'POST',
                                    static_props):
            if prop in vm_:  # if the property is set, use it for the VM request
                newnode[prop] = vm_[prop]

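Both branches above follow the same pattern: union the hard-coded fall-back properties with whatever the Proxmox API advertises for the endpoint, then forward only the profile keys that appear in that set. A rough sketch of the filtering step with invented profile values (props stands in for the result of _get_properties):

vm_ = {'name': 'web01', 'image': 'debian-8', 'memory': 1024, 'bogus_key': True}
props = {'memory', 'swap', 'net0', 'password', 'hostname', 'ostemplate'}

newnode = {'hostname': vm_['name'], 'ostemplate': vm_['image']}
for prop in props:
    if prop in vm_:              # only forward options the API (or the fall-back list) accepts
        newnode[prop] = vm_[prop]
print(newnode)                   # 'bogus_key' never reaches the Proxmox request
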
@ -1983,15 +1983,24 @@ def start(name, call=None):
|
||||
return 'powered on'
|
||||
|
||||
|
||||
def stop(name, call=None):
|
||||
def stop(name, soft=False, call=None):
|
||||
'''
|
||||
To stop/power off a VM using its name
|
||||
|
||||
.. note::

    If ``soft=True`` then a command is issued to the guest operating system
    asking it to perform a clean shutdown of all services.
    The default is ``soft=False``.

    For ``soft=True``, VMware Tools must be installed on the guest system.

CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -a stop vmname
|
||||
salt-cloud -a stop vmname soft=True
|
||||
'''
|
||||
if call != 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
@ -2014,8 +2023,11 @@ def stop(name, call=None):
|
||||
return ret
|
||||
try:
|
||||
log.info('Stopping VM {0}'.format(name))
|
||||
task = vm["object"].PowerOff()
|
||||
salt.utils.vmware.wait_for_task(task, name, 'power off')
|
||||
if soft:
|
||||
vm["object"].ShutdownGuest()
|
||||
else:
|
||||
task = vm["object"].PowerOff()
|
||||
salt.utils.vmware.wait_for_task(task, name, 'power off')
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Error while powering off VM {0}: {1}'.format(
|
||||
@ -2081,15 +2093,24 @@ def suspend(name, call=None):
|
||||
return 'suspended'
|
||||
|
||||
|
||||
def reset(name, call=None):
|
||||
def reset(name, soft=False, call=None):
|
||||
'''
|
||||
To reset a VM using its name
|
||||
|
||||
.. note::

    If ``soft=True`` then a command is issued to the guest operating system
    asking it to perform a reboot. Otherwise the hypervisor will terminate
    the VM and start it again.
    The default is ``soft=False``.

    For ``soft=True``, VMware Tools must be installed on the guest system.

CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -a reset vmname
|
||||
salt-cloud -a reset vmname soft=True
|
||||
'''
|
||||
if call != 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
@ -2112,8 +2133,11 @@ def reset(name, call=None):
|
||||
return ret
|
||||
try:
|
||||
log.info('Resetting VM {0}'.format(name))
|
||||
task = vm["object"].ResetVM_Task()
|
||||
salt.utils.vmware.wait_for_task(task, name, 'reset')
|
||||
if soft:
|
||||
vm["object"].RebootGuest()
|
||||
else:
|
||||
task = vm["object"].ResetVM_Task()
|
||||
salt.utils.vmware.wait_for_task(task, name, 'reset')
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Error while resetting VM {0}: {1}'.format(
|
||||
|
@@ -563,6 +563,12 @@ VALID_OPTS = {
    # False in 2016.3.0
    'add_proxymodule_to_opts': bool,

    # In some particular cases, always alive proxies are not beneficial.
    # This option can be used in those less dynamic environments:
    # the user can request the connection
    # always alive, or init-shutdown per command.
    'proxy_always_alive': bool,

    # Poll the connection state with the proxy minion
    # If enabled, this option requires the function `alive`
    # to be implemented in the proxy module
@@ -688,6 +694,13 @@ VALID_OPTS = {
    'fileserver_limit_traversal': bool,
    'fileserver_verify_config': bool,

    # Optionally enables keeping the calculated user's auth list in the token file.
    'keep_acl_in_token': bool,

    # Auth subsystem module to use to get authorized access list for a user. By default it's the
    # same module used for external authentication.
    'eauth_acl_module': str,

    # The number of open files a daemon is allowed to have open. Frequently needs to be increased
    # higher than the system default in order to account for the way zeromq consumes file handles.
    'max_open_files': int,
@@ -1019,6 +1032,7 @@ DEFAULT_MINION_OPTS = {
    'master_failback': False,
    'master_failback_interval': 0,
    'verify_master_pubkey_sign': False,
    'sign_pub_messages': True,
    'always_verify_signature': False,
    'master_sign_key_name': 'master_sign',
    'syndic_finger': '',
@@ -1386,6 +1400,8 @@ DEFAULT_MASTER_OPTS = {
    'external_auth': {},
    'token_expire': 43200,
    'token_expire_user_override': False,
    'keep_acl_in_token': False,
    'eauth_acl_module': '',
    'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
    'file_recv': False,
    'file_recv_max_size': 100,
@ -1489,7 +1505,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'tcp_keepalive_idle': 300,
|
||||
'tcp_keepalive_cnt': -1,
|
||||
'tcp_keepalive_intvl': -1,
|
||||
'sign_pub_messages': False,
|
||||
'sign_pub_messages': True,
|
||||
'keysize': 2048,
|
||||
'transport': 'zeromq',
|
||||
'gather_job_timeout': 10,
|
||||
@ -1560,10 +1576,18 @@ DEFAULT_MASTER_OPTS = {
|
||||
DEFAULT_PROXY_MINION_OPTS = {
|
||||
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'proxy'),
|
||||
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'proxy'),
|
||||
'sign_pub_messages': True,
|
||||
'add_proxymodule_to_opts': False,
|
||||
'proxy_merge_grains_in_module': True,
|
||||
'append_minionid_config_dirs': ['cachedir', 'pidfile'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
# If this option is set to False,
|
||||
# the connection with the remote dumb device
|
||||
# is closed after each command request.
|
||||
'proxy_always_alive': True,
|
||||
|
||||
'proxy_keep_alive': True, # by default will try to keep alive the connection
|
||||
'proxy_keep_alive_interval': 1 # frequency of the proxy keepalive in minutes
|
||||
}
|
||||
@ -2051,7 +2075,6 @@ def minion_config(path,
|
||||
overrides = load_config(path, env_var, DEFAULT_MINION_OPTS['conf_file'])
|
||||
default_include = overrides.get('default_include',
|
||||
defaults['default_include'])
|
||||
|
||||
include = overrides.get('include', [])
|
||||
|
||||
overrides.update(include_config(default_include, path, verbose=False,
|
||||
@ -3234,9 +3257,6 @@ def apply_minion_config(overrides=None,
|
||||
newdirectory = os.path.join(opts[directory], opts['id'])
|
||||
opts[directory] = newdirectory
|
||||
|
||||
if 'default_include' in overrides and '{id}' in overrides['default_include']:
|
||||
opts['default_include'] = overrides['default_include'].replace('{id}', opts['id'])
|
||||
|
||||
# pidfile can be in the list of append_minionid_config_dirs, but pidfile
|
||||
# is the actual path with the filename, not a directory.
|
||||
if 'pidfile' in opts.get('append_minionid_config_dirs', []):
|
||||
|
@ -36,6 +36,7 @@ import salt.utils.verify
|
||||
import salt.utils.minions
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.jid
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.pillar import git_pillar
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import FileserverConfigError, SaltMasterError
|
||||
@ -499,16 +500,13 @@ class RemoteFuncs(object):
|
||||
for arg in load['arg']:
|
||||
arg_.append(arg.split())
|
||||
load['arg'] = arg_
|
||||
good = self.ckminions.auth_check(
|
||||
return self.ckminions.auth_check(
|
||||
perms,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
publish_validate=True)
|
||||
if not good:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _master_opts(self, load):
|
||||
'''
|
||||
@ -563,7 +561,7 @@ class RemoteFuncs(object):
|
||||
if fun not in self.opts.get('master_tops', {}):
|
||||
continue
|
||||
try:
|
||||
ret.update(self.tops[fun](opts=opts, grains=grains))
|
||||
ret = salt.utils.dictupdate.merge(ret, self.tops[fun](opts=opts, grains=grains), merge_lists=True)
|
||||
except Exception as exc:
|
||||
# If anything happens in the top generation, log it and move on
|
||||
log.error(
|
||||
@ -1057,105 +1055,43 @@ class LocalFuncs(object):
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
if 'token' in load:
|
||||
try:
|
||||
token = self.loadauth.get_tok(load['token'])
|
||||
except Exception as exc:
|
||||
msg = 'Exception occurred when generating auth token: {0}'.format(
|
||||
exc)
|
||||
log.error(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
if token['eauth'] not in self.opts['external_auth']:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
good = self.ckminions.runner_check(
|
||||
self.opts['external_auth'][token['eauth']][token['name']]
|
||||
if token['name'] in self.opts['external_auth'][token['eauth']]
|
||||
else self.opts['external_auth'][token['eauth']]['*'],
|
||||
load['fun'])
|
||||
if not good:
|
||||
msg = ('Authentication failure of type "token" occurred for '
|
||||
'user {0}.').format(token['name'])
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred '
|
||||
'for user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
|
||||
try:
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
return runner_client.async(
|
||||
fun,
|
||||
load.get('kwarg', {}),
|
||||
token['name'])
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
|
||||
if 'eauth' not in load:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if not self.ckminions.runner_check(auth_list, load['fun']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.').format(auth_type, username)))
|
||||
|
||||
try:
|
||||
name = self.loadauth.load_name(load)
|
||||
if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]):
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if not self.loadauth.time_auth(load):
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
good = self.ckminions.runner_check(
|
||||
self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'],
|
||||
load['fun'])
|
||||
if not good:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
try:
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
return runner_client.async(fun,
|
||||
load.get('kwarg', {}),
|
||||
load.get('username', 'UNKNOWN'))
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
return runner_client.async(fun,
|
||||
load.get('kwarg', {}),
|
||||
username)
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred in the runner system: {0}'.format(exc)
|
||||
)
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
@ -1166,96 +1102,42 @@ class LocalFuncs(object):
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
if 'token' in load:
|
||||
atype = 'token'
|
||||
try:
|
||||
token = self.loadauth.get_tok(load['token'])
|
||||
except Exception as exc:
|
||||
msg = 'Exception occurred when generating auth token: {0}'.format(
|
||||
exc)
|
||||
log.error(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
if token['eauth'] not in self.opts['external_auth']:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
elif 'eauth' in load:
|
||||
atype = 'eauth'
|
||||
try:
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
username = self.loadauth.load_name(load)
|
||||
if not ((username in self.opts['external_auth'][load['eauth']]) |
|
||||
('*' in self.opts['external_auth'][load['eauth']])):
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if not self.loadauth.time_auth(load):
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred in the wheel system: {0}'.format(exc)
|
||||
)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
else:
|
||||
atype = 'user'
|
||||
if 'key' not in load:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
key = load.pop('key')
|
||||
|
||||
if 'user' in load:
|
||||
username = load['user']
|
||||
auth_user = salt.auth.AuthUser(username)
|
||||
if auth_user.is_sudo:
|
||||
username = self.opts.get('user', 'root')
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
username = salt.utils.get_user()
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_key(load, self.key):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "user" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
|
||||
if username not in self.key and \
|
||||
key != self.key[username] and \
|
||||
key != self.key['root']:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}'.format(username)
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
if not atype == 'user':
|
||||
auth_list = []
|
||||
if '*' in self.opts['external_auth'][load['eauth']]:
|
||||
auth_list.extend(self.opts['external_auth'][load['eauth']]['*'])
|
||||
if username in self.opts['external_auth'][load['eauth']]:
|
||||
auth_list = self.opts['external_auth'][load['eauth']][username]
|
||||
good = self.ckminions.wheel_check(auth_list, load['fun'])
|
||||
if not good:
|
||||
msg = ('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(atype, load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if auth_type != 'user':
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(auth_type, username)))
|
||||
|
||||
# Authenticated. Do the job.
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
@ -1291,28 +1173,11 @@ class LocalFuncs(object):
|
||||
Create and return an authentication token, the clear load needs to
|
||||
contain the eauth key and the needed authentication creds.
|
||||
'''
|
||||
if 'eauth' not in load:
|
||||
token = self.loadauth.mk_token(load)
|
||||
if not token:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
try:
|
||||
name = self.loadauth.load_name(load)
|
||||
if not ((name in self.opts['external_auth'][load['eauth']]) |
|
||||
('*' in self.opts['external_auth'][load['eauth']])):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
if not self.loadauth.time_auth(load):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
return self.loadauth.mk_token(load)
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
)
|
||||
return ''
|
||||
return token
|
||||
|
||||
def get_token(self, load):
|
||||
'''
|
||||
@ -1329,13 +1194,10 @@ class LocalFuncs(object):
|
||||
'''
|
||||
extra = load.get('kwargs', {})
|
||||
|
||||
# check blacklist/whitelist
|
||||
# Check if the user is blacklisted
|
||||
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
|
||||
good = not publisher_acl.user_is_blacklisted(load['user']) and \
|
||||
not publisher_acl.cmd_is_blacklisted(load['fun'])
|
||||
|
||||
if good is False:
|
||||
if publisher_acl.user_is_blacklisted(load['user']) or \
|
||||
publisher_acl.cmd_is_blacklisted(load['fun']):
|
||||
log.error(
|
||||
'{user} does not have permissions to run {function}. Please '
|
||||
'contact your local administrator if you believe this is in '
|
||||
@ -1345,168 +1207,101 @@ class LocalFuncs(object):
|
||||
)
|
||||
)
|
||||
return ''
|
||||
# to make sure we don't step on anyone else's toes
|
||||
del good
|
||||
|
||||
# Retrieve the minions list
|
||||
delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
delimiter
|
||||
)
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
# A token was passed, check it
|
||||
try:
|
||||
token = self.loadauth.get_tok(extra['token'])
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred when generating auth token: {0}'.format(
|
||||
exc
|
||||
)
|
||||
)
|
||||
return ''
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(extra)
|
||||
if not token:
|
||||
log.warning('Authentication failure of type "token" occurred. \
|
||||
Token could not be retrieved.')
|
||||
return ''
|
||||
if token['eauth'] not in self.opts['external_auth']:
|
||||
log.warning('Authentication failure of type "token" occurred. \
|
||||
Authentication type of {0} not present.').format(token['eauth'])
|
||||
return ''
|
||||
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
|
||||
('*' in self.opts['external_auth'][token['eauth']])):
|
||||
log.warning('Authentication failure of type "token" occurred. \
|
||||
Token does not verify against eauth provider: {0}').format(
|
||||
self.opts['external_auth'])
|
||||
return ''
|
||||
good = self.ckminions.auth_check(
|
||||
self.opts['external_auth'][token['eauth']][token['name']]
|
||||
if token['name'] in self.opts['external_auth'][token['eauth']]
|
||||
else self.opts['external_auth'][token['eauth']]['*'],
|
||||
|
||||
# Get acl from eauth module.
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
extra['eauth'] = token['eauth']
|
||||
extra['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'))
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "token" occurred.'
|
||||
)
|
||||
return ''
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return ''
|
||||
load['user'] = token['name']
|
||||
log.debug('Minion tokenized user = "{0}"'.format(load['user']))
|
||||
elif 'eauth' in extra:
|
||||
if extra['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
# Authenticate.
|
||||
if not self.loadauth.authenticate_eauth(extra):
|
||||
return ''
|
||||
try:
|
||||
name = self.loadauth.load_name(extra)
|
||||
if not ((name in self.opts['external_auth'][extra['eauth']]) |
|
||||
('*' in self.opts['external_auth'][extra['eauth']])):
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
if not self.loadauth.time_auth(extra):
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
)
|
||||
return ''
|
||||
good = self.ckminions.auth_check(
|
||||
self.opts['external_auth'][extra['eauth']][name]
|
||||
if name in self.opts['external_auth'][extra['eauth']]
|
||||
else self.opts['external_auth'][extra['eauth']]['*'],
|
||||
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'))
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
load['user'] = name
|
||||
# Verify that the caller has root on master
|
||||
elif 'user' in load:
|
||||
if load['user'].startswith('sudo_'):
|
||||
# If someone can sudo, allow them to act as root
|
||||
if load.get('key', 'invalid') == self.key.get('root'):
|
||||
load.pop('key')
|
||||
elif load.pop('key') != self.key[self.opts.get('user', 'root')]:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
elif load['user'] == self.opts.get('user', 'root'):
|
||||
if load.pop('key') != self.key[self.opts.get('user', 'root')]:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
elif load['user'] == 'root':
|
||||
if load.pop('key') != self.key.get(self.opts.get('user', 'root')):
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
elif load['user'] == salt.utils.get_user():
|
||||
if load.pop('key') != self.key.get(load['user']):
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
else:
|
||||
if load['user'] in self.key:
|
||||
# User is authorised, check key and check perms
|
||||
if load.pop('key') != self.key[load['user']]:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
acl = salt.utils.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'],
|
||||
load['user'])
|
||||
if load['user'] not in acl:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
good = self.ckminions.auth_check(
|
||||
acl,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'))
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "user" '
|
||||
'occurred.'
|
||||
)
|
||||
return ''
|
||||
else:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
else:
|
||||
if load.pop('key') != self.key[salt.utils.get_user()]:
|
||||
log.warning(
|
||||
'Authentication failure of type "other" occurred.'
|
||||
)
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
# Retrieve the minions list
|
||||
minions = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob')
|
||||
)
|
||||
load['user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with
|
||||
# Verify that the caller has root on master
|
||||
else:
|
||||
auth_ret = self.loadauth.authenticate_key(load, self.key)
|
||||
if auth_ret is False:
|
||||
return ''
|
||||
|
||||
if auth_ret is not True:
|
||||
if salt.auth.AuthUser(load['user']).is_sudo():
|
||||
if not self.opts['sudo_acl'] or not self.opts['publisher_acl']:
|
||||
auth_ret = True
|
||||
|
||||
if auth_ret is not True:
|
||||
auth_list = salt.utils.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'],
|
||||
auth_ret)
|
||||
if not auth_list:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return ''
|
||||
|
||||
# If we order masters (via a syndic), don't short circuit if no minions
|
||||
# are found
|
||||
if not self.opts.get('order_masters'):
|
||||
|
@ -6,6 +6,7 @@ from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import contextlib
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import string
|
||||
@ -142,12 +143,19 @@ class Client(object):
|
||||
path)
|
||||
destdir = os.path.dirname(dest)
|
||||
cumask = os.umask(63)
|
||||
if not os.path.isdir(destdir):
|
||||
# remove destdir if it is a regular file to avoid an OSError when
|
||||
# running os.makedirs below
|
||||
if os.path.isfile(destdir):
|
||||
os.remove(destdir)
|
||||
|
||||
# remove destdir if it is a regular file to avoid an OSError when
|
||||
# running os.makedirs below
|
||||
if os.path.isfile(destdir):
|
||||
os.remove(destdir)
|
||||
|
||||
# ensure destdir exists
|
||||
try:
|
||||
os.makedirs(destdir)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.EEXIST: # ignore if it was there already
|
||||
raise
|
||||
|
||||
yield dest
|
||||
os.umask(cumask)
|
||||
|
||||
|
@ -1559,6 +1559,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
|
||||
self.loaded = True
|
||||
|
||||
def reload_modules(self):
|
||||
self.loaded_files = set()
|
||||
self._load_all()
|
||||
|
||||
def _apply_outputter(self, func, mod):
|
||||
'''
|
||||
Apply the __outputter__ variable to the functions
|
||||
|
580
salt/master.py
@ -17,7 +17,6 @@ import signal
|
||||
import stat
|
||||
import logging
|
||||
import multiprocessing
|
||||
import traceback
|
||||
|
||||
# Import third party libs
|
||||
from Crypto.PublicKey import RSA
|
||||
@ -1657,153 +1656,61 @@ class ClearFuncs(object):
|
||||
# Make a masterapi object
|
||||
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
|
||||
|
||||
def process_token(self, tok, fun, auth_type):
|
||||
'''
|
||||
Process a token and determine if a command is authorized
|
||||
'''
|
||||
try:
|
||||
token = self.loadauth.get_tok(tok)
|
||||
except Exception as exc:
|
||||
msg = 'Exception occurred when generating auth token: {0}'.format(
|
||||
exc)
|
||||
log.error(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
if not token:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
if token['eauth'] not in self.opts['external_auth']:
|
||||
msg = 'Authentication failure of type "token" occurred.'
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
check_fun = getattr(self.ckminions,
|
||||
'{auth}_check'.format(auth=auth_type))
|
||||
if token['name'] in self.opts['external_auth'][token['eauth']]:
|
||||
good = check_fun(self.opts['external_auth'][token['eauth']][token['name']], fun)
|
||||
elif any(key.endswith('%') for key in self.opts['external_auth'][token['eauth']]):
|
||||
for group in self.opts['external_auth'][token['eauth']]:
|
||||
if group.endswith('%'):
|
||||
for group in self.opts['external_auth'][token['eauth']]:
|
||||
good = check_fun(self.opts['external_auth'][token['eauth']][group], fun)
|
||||
if good:
|
||||
break
|
||||
else:
|
||||
good = check_fun(self.opts['external_auth'][token['eauth']]['*'], fun)
|
||||
if not good:
|
||||
msg = ('Authentication failure of type "token" occurred for '
|
||||
'user {0}.').format(token['name'])
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=msg))
|
||||
return None
|
||||
|
||||
def process_eauth(self, clear_load, auth_type):
|
||||
'''
|
||||
Process a clear load to determine eauth perms
|
||||
|
||||
Any return other than None is an eauth failure
|
||||
'''
|
||||
if 'eauth' not in clear_load:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
if clear_load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
name = self.loadauth.load_name(clear_load)
|
||||
if self.loadauth.time_auth(clear_load) is False:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
check_fun = getattr(self.ckminions,
|
||||
'{auth}_check'.format(auth=auth_type))
|
||||
if name in self.opts['external_auth'][clear_load['eauth']]:
|
||||
good = check_fun(self.opts['external_auth'][clear_load['eauth']][name], clear_load['fun'])
|
||||
elif any(key.endswith('%') for key in self.opts['external_auth'][clear_load['eauth']]):
|
||||
for group in self.opts['external_auth'][clear_load['eauth']]:
|
||||
if group.endswith('%'):
|
||||
good = check_fun(self.opts['external_auth'][clear_load['eauth']][group], clear_load['fun'])
|
||||
if good:
|
||||
break
|
||||
else:
|
||||
good = check_fun(self.opts['external_auth'][clear_load['eauth']]['*'], clear_load['fun'])
|
||||
if not good:
|
||||
msg = ('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=msg))
|
||||
return None
|
||||
|
||||
def runner(self, clear_load):
|
||||
'''
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
# All runner ops pass through eauth
|
||||
if 'token' in clear_load:
|
||||
auth_error = self.process_token(clear_load['token'],
|
||||
clear_load['fun'],
|
||||
'runner')
|
||||
if auth_error:
|
||||
return auth_error
|
||||
else:
|
||||
token = self.loadauth.get_tok(clear_load.pop('token'))
|
||||
username = token['name']
|
||||
elif 'eauth' in clear_load:
|
||||
try:
|
||||
eauth_error = self.process_eauth(clear_load, 'runner')
|
||||
if eauth_error:
|
||||
return eauth_error
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
username = clear_load.pop('username', 'UNKNOWN')
|
||||
clear_load.pop('password', None)
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred in the runner system: {0}'.format(exc)
|
||||
)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
if not token:
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
|
||||
# Authorize
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
clear_load['eauth'] = token['eauth']
|
||||
clear_load['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, clear_load['fun']):
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=('Authentication failure of type "token" occurred for '
|
||||
'user {0}.').format(token['name'])))
|
||||
clear_load.pop('token')
|
||||
username = token['name']
|
||||
elif 'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))))
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.runner_check(auth_list, clear_load['fun']):
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
username = clear_load.pop('username', 'UNKNOWN')
|
||||
clear_load.pop('password', None)
|
||||
else:
|
||||
if 'key' not in clear_load:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
log.warning(msg)
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
key = clear_load.pop('key')
|
||||
message='Authentication failure of type "user" occurred'))
|
||||
|
||||
if 'user' in clear_load:
|
||||
username = clear_load['user']
|
||||
auth_user = salt.auth.AuthUser(username)
|
||||
if auth_user.is_sudo:
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
username = self.opts.get('user', 'root')
|
||||
else:
|
||||
username = salt.utils.get_user()
|
||||
|
||||
if username not in self.key and \
|
||||
key != self.key[username] and \
|
||||
key != self.key['root']:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}'.format(username)
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
fun = clear_load.pop('fun')
|
||||
@ -1825,55 +1732,53 @@ class ClearFuncs(object):
|
||||
# All wheel ops pass through eauth
|
||||
username = None
|
||||
if 'token' in clear_load:
|
||||
auth_error = self.process_token(clear_load['token'],
|
||||
clear_load['fun'],
|
||||
'wheel')
|
||||
if auth_error:
|
||||
return auth_error
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
if not token:
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
|
||||
# Authorize
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
token = self.loadauth.get_tok(clear_load.pop('token'))
|
||||
clear_load['eauth'] = token['eauth']
|
||||
clear_load['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load['fun']):
|
||||
return dict(error=dict(name='TokenAuthenticationError',
|
||||
message=('Authentication failure of type "token" occurred for '
|
||||
'user {0}.').format(token['name'])))
|
||||
clear_load.pop('token')
|
||||
username = token['name']
|
||||
elif 'eauth' in clear_load:
|
||||
try:
|
||||
eauth_error = self.process_eauth(clear_load, 'wheel')
|
||||
if eauth_error:
|
||||
return eauth_error
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
clear_load.pop('password', None)
|
||||
username = clear_load.pop('username', 'UNKNOWN')
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred in the wheel system: {0}'.format(exc)
|
||||
)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load['fun']):
|
||||
return dict(error=dict(name='EauthAuthenticationError',
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
clear_load.pop('password', None)
|
||||
username = clear_load.pop('username', 'UNKNOWN')
|
||||
else:
|
||||
if 'key' not in clear_load:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
log.warning(msg)
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
key = clear_load.pop('key')
|
||||
message='Authentication failure of type "user" occurred'))
|
||||
|
||||
if 'user' in clear_load:
|
||||
username = clear_load['user']
|
||||
auth_user = salt.auth.AuthUser(username)
|
||||
if auth_user.is_sudo:
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
username = self.opts.get('user', 'root')
|
||||
else:
|
||||
username = salt.utils.get_user()
|
||||
|
||||
if username not in self.key and \
|
||||
key != self.key[username] and \
|
||||
key != self.key['root']:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}'.format(username)
|
||||
log.warning(msg)
|
||||
return dict(error=dict(name='UserAuthenticationError',
|
||||
message=msg))
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
@ -1909,42 +1814,11 @@ class ClearFuncs(object):
|
||||
Create and return an authentication token, the clear load needs to
|
||||
contain the eauth key and the needed authentication creds.
|
||||
'''
|
||||
|
||||
if 'eauth' not in clear_load:
|
||||
token = self.loadauth.mk_token(clear_load)
|
||||
if not token:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
if clear_load['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
try:
|
||||
name = self.loadauth.load_name(clear_load)
|
||||
groups = self.loadauth.get_groups(clear_load)
|
||||
eauth_config = self.opts['external_auth'][clear_load['eauth']]
|
||||
if '^model' not in eauth_config and '*' not in eauth_config and name not in eauth_config:
|
||||
found = False
|
||||
for group in groups:
|
||||
if "{0}%".format(group) in eauth_config:
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
|
||||
clear_load['groups'] = groups
|
||||
token = self.loadauth.mk_token(clear_load)
|
||||
if not token:
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
else:
|
||||
return token
|
||||
except Exception as exc:
|
||||
type_, value_, traceback_ = sys.exc_info()
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
)
|
||||
log.error(traceback.format_exception(type_, value_, traceback_))
|
||||
return ''
|
||||
return token
|
||||
|
||||
def get_token(self, clear_load):
|
||||
'''
|
||||
@ -1985,263 +1859,89 @@ class ClearFuncs(object):
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
# A token was passed, check it
|
||||
try:
|
||||
token = self.loadauth.get_tok(extra['token'])
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception occurred when generating auth token: {0}'.format(
|
||||
exc
|
||||
)
|
||||
)
|
||||
# Authenticate.
|
||||
token = self.loadauth.authenticate_token(extra)
|
||||
if not token:
|
||||
return ''
|
||||
|
||||
# Bail if the token is empty or if the eauth type specified is not allowed
if not token or token['eauth'] not in self.opts['external_auth']:
# Get acl
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
extra['eauth'] = token['eauth']
extra['username'] = token['name']
auth_list = self.loadauth.get_auth_list(extra)

# Authorize the request
if not self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
):
log.warning('Authentication failure of type "token" occurred.')
return ''
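The replacement block above reduces the old per-user/per-group matching to one decision: reuse the ACL frozen into the token when ``keep_acl_in_token`` is enabled, otherwise ask the eauth subsystem again through ``get_auth_list``. A minimal sketch of that decision in isolation; ``resolve_token_acl`` and its parameter list are illustrative, not part of the Salt source:

.. code-block:: python

    def resolve_token_acl(opts, token, loadauth):
        '''Return the authorization list for an already-validated token.'''
        if opts.get('keep_acl_in_token') and 'auth_list' in token:
            # The ACL was stored in the token file when the token was minted.
            return token['auth_list']
        # Otherwise recompute it through the configured eauth driver.
        load = {'eauth': token['eauth'], 'username': token['name']}
        return loadauth.get_auth_list(load)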
|
||||
# Fetch eauth config and collect users and groups configured for access
|
||||
eauth_config = self.opts['external_auth'][token['eauth']]
|
||||
eauth_users = []
|
||||
eauth_groups = []
|
||||
for entry in eauth_config:
|
||||
if entry.endswith('%'):
|
||||
eauth_groups.append(entry.rstrip('%'))
|
||||
else:
|
||||
eauth_users.append(entry)
|
||||
|
||||
# If there are groups in the token, check if any of them are listed in the eauth config
|
||||
group_auth_match = False
|
||||
try:
|
||||
if token.get('groups'):
|
||||
for group in token['groups']:
|
||||
if group in eauth_groups:
|
||||
group_auth_match = True
|
||||
break
|
||||
except KeyError:
|
||||
pass
|
||||
if '^model' not in eauth_users and '*' not in eauth_users and token['name'] not in eauth_users \
|
||||
and not group_auth_match:
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return ''
|
||||
|
||||
# Compile list of authorized actions for the user
|
||||
auth_list = token.get('auth_list')
|
||||
if auth_list is None:
|
||||
auth_list = []
|
||||
# Add permissions for '*' or user-specific to the auth list
|
||||
for user_key in ('*', token['name']):
|
||||
auth_list.extend(eauth_config.get(user_key, []))
|
||||
# Add any add'l permissions allowed by group membership
|
||||
if group_auth_match:
|
||||
auth_list = self.ckminions.fill_auth_list_from_groups(eauth_config, token['groups'], auth_list)
|
||||
auth_list = self.ckminions.fill_auth_list_from_ou(auth_list, self.opts)
|
||||
log.trace("Compiled auth_list: {0}".format(auth_list))
|
||||
|
||||
good = self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load['fun'],
|
||||
clear_load['arg'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'),
|
||||
minions=minions)
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if clear_load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "token" occurred.'
|
||||
)
|
||||
return ''
|
||||
clear_load['user'] = token['name']
|
||||
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
|
||||
elif 'eauth' in extra:
|
||||
if extra['eauth'] not in self.opts['external_auth']:
|
||||
# The eauth system is not enabled, fail
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
auth_list = None
|
||||
try:
|
||||
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
|
||||
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
|
||||
if groups is None or groups is False:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in self.opts['external_auth'][extra['eauth']] if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in groups:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
# If a group_auth_match is set it means only that we have a
|
||||
# user which matches at least one or more of the groups defined
|
||||
# in the configuration file.
|
||||
|
||||
external_auth_in_db = False
|
||||
for entry in self.opts['external_auth'][extra['eauth']]:
|
||||
if entry.startswith('^'):
|
||||
external_auth_in_db = True
|
||||
break
|
||||
|
||||
# If neither a catchall, a named membership or a group
|
||||
# membership is found, there is no need to continue. Simply
|
||||
# deny the user access.
|
||||
if not ((name in self.opts['external_auth'][extra['eauth']]) |
|
||||
('*' in self.opts['external_auth'][extra['eauth']]) |
|
||||
group_auth_match | external_auth_in_db):
|
||||
|
||||
# A group def is defined and the user is a member
|
||||
#[group for groups in ['external_auth'][extra['eauth']]]):
|
||||
# Auth successful, but no matching user found in config
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
|
||||
# Perform the actual authentication. If we fail here, do not
|
||||
# continue.
|
||||
auth_ret = self.loadauth.time_auth(extra)
|
||||
if isinstance(auth_ret, list):
|
||||
auth_list = auth_ret
|
||||
elif not auth_ret:
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
|
||||
except Exception as exc:
|
||||
type_, value_, traceback_ = sys.exc_info()
|
||||
log.error(
|
||||
'Exception occurred while authenticating: {0}'.format(exc)
|
||||
)
|
||||
log.error(traceback.format_exception(
|
||||
type_, value_, traceback_))
|
||||
# Authenticate.
|
||||
if not self.loadauth.authenticate_eauth(extra):
|
||||
return ''
|
||||
|
||||
# We now have an authenticated session and it is time to determine
|
||||
# what the user has access to.
|
||||
if auth_list is None:
|
||||
auth_list = []
|
||||
if '*' in self.opts['external_auth'][extra['eauth']]:
|
||||
auth_list.extend(self.opts['external_auth'][extra['eauth']]['*'])
|
||||
if name in self.opts['external_auth'][extra['eauth']]:
|
||||
auth_list = self.opts['external_auth'][extra['eauth']][name]
|
||||
if group_auth_match:
|
||||
auth_list = self.ckminions.fill_auth_list_from_groups(
|
||||
self.opts['external_auth'][extra['eauth']],
|
||||
groups,
|
||||
auth_list)
|
||||
if extra['eauth'] == 'ldap':
|
||||
auth_list = self.ckminions.fill_auth_list_from_ou(auth_list, self.opts)
|
||||
good = self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load['fun'],
|
||||
clear_load['arg'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'),
|
||||
minions=minions
|
||||
)
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if clear_load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "eauth" occurred.'
|
||||
)
|
||||
return ''
|
||||
clear_load['user'] = name
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load['fun'],
|
||||
clear_load['arg'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
clear_load['user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with
|
||||
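Everything removed in this hunk collapses into the three calls that remain: authenticate the eauth credentials, fetch the user's ACL, and check the requested function and target against it. A schematic helper gathering that flow in one place; the function name and signature are illustrative, while the method calls are the ones used above:

.. code-block:: python

    def authorize_eauth_call(loadauth, ckminions, extra, clear_load, minions):
        '''Sketch of the simplified eauth path: authenticate, get ACL, authorize.'''
        if not loadauth.authenticate_eauth(extra):
            return None                                    # bad credentials
        auth_list = loadauth.get_auth_list(extra)           # ACL from the eauth module
        allowed = ckminions.auth_check(
            auth_list,
            clear_load['fun'],
            clear_load['arg'],
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob'),
            minions=minions,
            whitelist=['saltutil.find_job'],                 # find_job is always accepted
        )
        if not allowed:
            return None                                      # call not covered by the ACL
        clear_load['user'] = loadauth.load_name(extra)
        return clear_load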
# Verify that the caller has root on master
|
||||
elif 'user' in clear_load:
|
||||
auth_user = salt.auth.AuthUser(clear_load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if clear_load.get('key', 'invalid') == self.key.get('root'):
|
||||
clear_load.pop('key')
|
||||
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
|
||||
else:
|
||||
auth_ret = self.loadauth.authenticate_key(clear_load, self.key)
|
||||
if auth_ret is False:
|
||||
return ''
|
||||
|
||||
if auth_ret is not True:
|
||||
if salt.auth.AuthUser(clear_load['user']).is_sudo():
|
||||
if not self.opts['sudo_acl'] or not self.opts['publisher_acl']:
|
||||
auth_ret = True
|
||||
|
||||
if auth_ret is not True:
|
||||
auth_list = salt.utils.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'],
|
||||
auth_ret)
|
||||
if not auth_list:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
publisher_acl = self.opts['publisher_acl']
|
||||
if self.opts['sudo_acl'] and publisher_acl:
|
||||
publisher_acl = salt.utils.get_values_of_matching_keys(
|
||||
publisher_acl,
|
||||
clear_load['user'].split('_', 1)[-1])
|
||||
good = self.ckminions.auth_check(
|
||||
publisher_acl,
|
||||
clear_load['fun'],
|
||||
clear_load['arg'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'),
|
||||
minions=minions)
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if clear_load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "user" '
|
||||
'occurred.'
|
||||
)
|
||||
return ''
|
||||
elif clear_load['user'] == self.opts.get('user', 'root') or clear_load['user'] == 'root':
|
||||
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
elif auth_user.is_running_user():
|
||||
if clear_load.pop('key') != self.key.get(clear_load['user']):
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
elif clear_load.get('key', 'invalid') == self.key.get('root'):
|
||||
clear_load.pop('key')
|
||||
else:
|
||||
if clear_load['user'] in self.key:
|
||||
# User is authorised, check key and check perms
|
||||
if clear_load.pop('key') != self.key[clear_load['user']]:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
# Build ACL matching the user name
|
||||
acl = salt.utils.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'],
|
||||
clear_load['user'])
|
||||
if not acl:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
good = self.ckminions.auth_check(
|
||||
acl,
|
||||
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load['fun'],
|
||||
clear_load['arg'],
|
||||
clear_load['tgt'],
|
||||
clear_load.get('tgt_type', 'glob'),
|
||||
minions=minions)
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if clear_load['fun'] != 'saltutil.find_job':
|
||||
log.warning(
|
||||
'Authentication failure of type "user" '
|
||||
'occurred.'
|
||||
)
|
||||
return ''
|
||||
else:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return ''
|
||||
else:
|
||||
if clear_load.pop('key') != self.key[salt.utils.get_user()]:
|
||||
log.warning(
|
||||
'Authentication failure of type "other" occurred.'
|
||||
)
|
||||
return ''
|
||||
|
||||
# If we order masters (via a syndic), don't short circuit if no minions
|
||||
# are found
|
||||
|
@ -133,7 +133,7 @@ log = logging.getLogger(__name__)
|
||||
# 6. Handle publications
|
||||
|
||||
|
||||
def resolve_dns(opts, fallback=True, connect=True):
|
||||
def resolve_dns(opts, fallback=True):
|
||||
'''
|
||||
Resolves the master_ip and master_uri options
|
||||
'''
|
||||
@ -150,7 +150,7 @@ def resolve_dns(opts, fallback=True, connect=True):
|
||||
if opts['master'] == '':
|
||||
raise SaltSystemExit
|
||||
ret['master_ip'] = \
|
||||
salt.utils.dns_check(opts['master'], opts['master_port'], True, opts['ipv6'], connect)
|
||||
salt.utils.dns_check(opts['master'], opts['master_port'], True, opts['ipv6'])
|
||||
except SaltClientError:
|
||||
if opts['retry_dns']:
|
||||
while True:
|
||||
@ -164,7 +164,7 @@ def resolve_dns(opts, fallback=True, connect=True):
|
||||
time.sleep(opts['retry_dns'])
|
||||
try:
|
||||
ret['master_ip'] = salt.utils.dns_check(
|
||||
opts['master'], opts['master_port'], True, opts['ipv6'], connect
|
||||
opts['master'], opts['master_port'], True, opts['ipv6']
|
||||
)
|
||||
break
|
||||
except SaltClientError:
|
||||
@ -1822,17 +1822,17 @@ class Minion(MinionBase):
|
||||
elif func == 'add':
|
||||
self.schedule.add_job(schedule, persist)
|
||||
elif func == 'modify':
|
||||
self.schedule.modify_job(name, schedule, persist, where)
|
||||
self.schedule.modify_job(name, schedule, persist)
|
||||
elif func == 'enable':
|
||||
self.schedule.enable_schedule()
|
||||
elif func == 'disable':
|
||||
self.schedule.disable_schedule()
|
||||
elif func == 'enable_job':
|
||||
self.schedule.enable_job(name, persist, where)
|
||||
self.schedule.enable_job(name, persist)
|
||||
elif func == 'run_job':
|
||||
self.schedule.run_job(name)
|
||||
elif func == 'disable_job':
|
||||
self.schedule.disable_job(name, persist, where)
|
||||
self.schedule.disable_job(name, persist)
|
||||
elif func == 'reload':
|
||||
self.schedule.reload(schedule)
|
||||
elif func == 'list':
|
||||
@ -3109,6 +3109,9 @@ class ProxyMinion(Minion):
|
||||
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
|
||||
self.proxy.pack['__utils__'] = self.utils
|
||||
|
||||
# Reload all modules so all dunder variables are injected
|
||||
self.proxy.reload_modules()
|
||||
|
||||
# Start engines here instead of in the Minion superclass __init__
|
||||
# This is because we need to inject the __proxy__ variable but
|
||||
# it is not setup until now.
|
||||
@ -3135,10 +3138,21 @@ class ProxyMinion(Minion):
|
||||
uid = salt.utils.get_uid(user=self.opts.get('user', None))
|
||||
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
|
||||
|
||||
self.schedule = salt.utils.schedule.Schedule(
|
||||
self.opts,
|
||||
self.functions,
|
||||
self.returners)
|
||||
if self.connected and self.opts['pillar']:
|
||||
# The pillar has changed due to the connection to the master.
|
||||
# Reload the functions so that they can use the new pillar data.
|
||||
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
|
||||
if hasattr(self, 'schedule'):
|
||||
self.schedule.functions = self.functions
|
||||
self.schedule.returners = self.returners
|
||||
|
||||
if not hasattr(self, 'schedule'):
|
||||
self.schedule = salt.utils.schedule.Schedule(
|
||||
self.opts,
|
||||
self.functions,
|
||||
self.returners,
|
||||
cleanup=[master_event(type='alive')],
|
||||
proxy=self.proxy)
|
||||
|
||||
# add default scheduling jobs to the minions scheduler
|
||||
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
|
||||
|
@ -2106,6 +2106,9 @@ def mod_repo(repo, saltenv='base', **kwargs):
key_url
URL to a GPG key to add to the APT GPG keyring

key_text
GPG key in string form to add to the APT GPG keyring

consolidate
if ``True``, will attempt to de-dup and consolidate sources

@ -2305,6 +2308,16 @@ def mod_repo(repo, saltenv='base', **kwargs):
'Error: failed to add key from {0}'.format(key_url)
)

elif 'key_text' in kwargs:
key_text = kwargs['key_text']
cmd = ['apt-key', 'add', '-']
out = __salt__['cmd.run_stdout'](cmd, stdin=key_text,
python_shell=False, **kwargs)
if not out.upper().startswith('OK'):
raise CommandExecutionError(
'Error: failed to add key:\n{0}'.format(key_text)
)

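The new ``key_text`` branch feeds the raw key to ``apt-key add -`` on standard input instead of downloading it from a URL. A standalone sketch of the same stdin-piping technique outside of Salt's ``__salt__`` plumbing (the helper name is made up):

.. code-block:: python

    import subprocess

    def add_apt_key_from_text(key_text):
        # Equivalent shell: echo "$key_text" | apt-key add -
        proc = subprocess.Popen(['apt-key', 'add', '-'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, _ = proc.communicate(key_text.encode())
        if not out.decode().upper().startswith('OK'):
            raise RuntimeError('failed to add key from text')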
if 'comps' in kwargs:
|
||||
kwargs['comps'] = kwargs['comps'].split(',')
|
||||
full_comp_list |= set(kwargs['comps'])
|
||||
|
@ -206,6 +206,22 @@ def _filedata(infile):
return f.read()


def _resolve_vpcconfig(conf, region=None, key=None, keyid=None, profile=None):
if isinstance(conf, six.string_types):
conf = json.loads(conf)
if not conf:
return None
if not isinstance(conf, dict):
raise SaltInvocationError('VpcConfig must be a dict.')
sns = [__salt__['boto_vpc.get_resource_id']('subnet', s, region=region, key=key,
keyid=keyid, profile=profile).get('id') for s in conf.pop('SubnetNames', [])]
sgs = [__salt__['boto_secgroup.get_group_id'](s, region=region, key=key, keyid=keyid,
profile=profile) for s in conf.pop('SecurityGroupNames', [])]
conf.setdefault('SubnetIds', []).extend(sns)
conf.setdefault('SecurityGroupIds', []).extend(sgs)
return conf
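``_resolve_vpcconfig`` lets callers pass friendly ``SubnetNames``/``SecurityGroupNames`` and have them swapped for the IDs boto3 actually requires, merging with any IDs already supplied. An illustrative before/after; every name and ID below is hypothetical:

.. code-block:: python

    vpc_config_in = {
        'SubnetNames': ['app-subnet-a', 'app-subnet-b'],       # resolved via boto_vpc
        'SecurityGroupNames': ['lambda-sg'],                   # resolved via boto_secgroup
        'SubnetIds': ['subnet-0aaa1111bbb22222c'],             # pre-resolved IDs pass through
    }
    # After _resolve_vpcconfig the dict would look roughly like:
    # {'SubnetIds': ['subnet-0aaa1111bbb22222c', 'subnet-...', 'subnet-...'],
    #  'SecurityGroupIds': ['sg-...']}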
|
||||
|
||||
def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
|
||||
S3Bucket=None, S3Key=None, S3ObjectVersion=None,
|
||||
Description="", Timeout=3, MemorySize=128, Publish=False,
|
||||
@ -260,7 +276,7 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
|
||||
code['S3ObjectVersion'] = S3ObjectVersion
|
||||
kwargs = {}
|
||||
if VpcConfig is not None:
|
||||
kwargs['VpcConfig'] = VpcConfig
|
||||
kwargs['VpcConfig'] = _resolve_vpcconfig(VpcConfig, region=region, key=key, keyid=keyid, profile=profile)
|
||||
if Environment is not None:
|
||||
kwargs['Environment'] = Environment
|
||||
if WaitForRole:
|
||||
@ -390,14 +406,15 @@ def update_function_config(FunctionName, Role=None, Handler=None,
|
||||
'VpcConfig': VpcConfig,
|
||||
'Environment': Environment}
|
||||
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
for val, var in six.iteritems(options):
|
||||
if var:
|
||||
args[val] = var
|
||||
if Role:
|
||||
role_arn = _get_role_arn(Role, region, key, keyid, profile)
|
||||
args['Role'] = role_arn
|
||||
args['Role'] = _get_role_arn(Role, region, key, keyid, profile)
|
||||
if VpcConfig:
|
||||
args['VpcConfig'] = _resolve_vpcconfig(VpcConfig, region=region, key=key, keyid=keyid, profile=profile)
|
||||
try:
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
if WaitForRole:
|
||||
retrycount = RoleRetries
|
||||
else:
|
||||
|
@ -276,10 +276,10 @@ def create(name, allocated_storage, db_instance_class, engine,
kwargs = {}
boto_params = set(boto3_param_map.keys())
keys = set(locals().keys())
for key in keys.intersection(boto_params):
val = locals()[key]
for param_key in keys.intersection(boto_params):
val = locals()[param_key]
if val is not None:
mapped = boto3_param_map[key]
mapped = boto3_param_map[param_key]
kwargs[mapped[0]] = mapped[1](val)
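The loop variable is renamed to ``param_key`` so it no longer shadows ``key`` elsewhere in the function (the boto modules in this changeset all take a ``key`` credential argument). A small, self-contained sketch of the same ``locals()``-to-boto3 mapping idiom, using a made-up parameter map:

.. code-block:: python

    def build_boto3_kwargs(boto3_param_map, local_vars):
        '''Translate Salt-style argument names into boto3 parameter names/types.'''
        kwargs = {}
        for param_key in set(local_vars) & set(boto3_param_map):
            val = local_vars[param_key]
            if val is not None:
                boto_name, cast_fn = boto3_param_map[param_key]
                kwargs[boto_name] = cast_fn(val)
        return kwargs

    # Hypothetical map: Salt argument name -> (boto3 name, coercion callable)
    example_map = {'allocated_storage': ('AllocatedStorage', int),
                   'multi_az': ('MultiAZ', bool)}
    print(build_boto3_kwargs(example_map, {'allocated_storage': '20', 'multi_az': None}))
    # -> {'AllocatedStorage': 20}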
|
||||
taglist = _tag_doc(tags)
|
||||
@ -551,7 +551,7 @@ def describe(name, tags=None, region=None, key=None, keyid=None,
|
||||
'CopyTagsToSnapshot', 'MonitoringInterval',
|
||||
'MonitoringRoleArn', 'PromotionTier',
|
||||
'DomainMemberships')
|
||||
return {'rds': dict([(k, rds.get('DBInstances', [{}])[0].get(k)) for k in keys])}
|
||||
return {'rds': dict([(k, rds.get(k)) for k in keys])}
|
||||
else:
|
||||
return {'rds': None}
|
||||
except ClientError as e:
|
||||
|
@ -149,7 +149,7 @@ _IP_FILEDS = [
|
||||
'next_ip'
|
||||
]
|
||||
|
||||
_DEFAULT_SERVICES = {}
|
||||
_SERVICES = {}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# helper functions -- will not be exported
|
||||
@ -208,8 +208,8 @@ def _get_services_mapping():
|
||||
services shortcut and they will need to specify the protocol / port combination
|
||||
using the source_port / destination_port & protocol fields.
|
||||
'''
|
||||
services = {}
|
||||
services.update(_DEFAULT_SERVICES)
|
||||
if _SERVICES:
|
||||
return _SERVICES
|
||||
services_txt = ''
|
||||
try:
|
||||
with salt.utils.fopen('/etc/services', 'r') as srv_f:
|
||||
@ -217,27 +217,38 @@ def _get_services_mapping():
|
||||
except IOError as ioe:
|
||||
log.error('Unable to read from /etc/services:')
|
||||
log.error(ioe)
|
||||
return services # no mapping possible, sorry
|
||||
return _SERVICES # no mapping possible, sorry
|
||||
# will return the default mapping
service_rgx = re.compile(r'^([a-zA-Z0-9-]+)\s+(\d+)\/(tcp|udp)(.*)$')
for line in services_txt.splitlines():
service_rgx_s = service_rgx.search(line)
if service_rgx_s and len(service_rgx_s.groups()) == 4:
srv_name, port, protocol, _ = service_rgx_s.groups()
if srv_name not in services:
services[srv_name] = {
if srv_name not in _SERVICES:
_SERVICES[srv_name] = {
'port': [],
'protocol': []
}
try:
services[srv_name]['port'].append(int(port))
_SERVICES[srv_name]['port'].append(int(port))
except ValueError as verr:
log.error(verr)
log.error('Did not read that properly:')
log.error(line)
log.error('Please report the above error: {port} does not seem a valid port value!'.format(port=port))
services[srv_name]['protocol'].append(protocol)
return services
_SERVICES[srv_name]['protocol'].append(protocol)
return _SERVICES
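The parser now fills the module-level ``_SERVICES`` cache rather than a throwaway local dict, so ``/etc/services`` is only read once per process. A quick sketch of what the regex used above pulls out of a typical line (the sample line is illustrative):

.. code-block:: python

    import re

    service_rgx = re.compile(r'^([a-zA-Z0-9-]+)\s+(\d+)\/(tcp|udp)(.*)$')

    sample = 'ntp             123/udp                         # Network Time Protocol'
    match = service_rgx.search(sample)
    if match:
        srv_name, port, protocol, _ = match.groups()
        print(srv_name, int(port), protocol)   # -> ntp 123 udp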


def _translate_port(port):
'''
Look into services and return the port value using the
service name as lookup value.
'''
services = _get_services_mapping()
if port in services and services[port]['port']:
return services[port]['port'][0]
return port
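With that cache in place, ``_translate_port`` maps a symbolic service name to the first numeric port recorded for it and leaves anything it cannot resolve untouched. Expected behaviour, assuming the host's ``/etc/services`` lists ``ntp``:

.. code-block:: python

    _translate_port('ntp')              # -> 123 (first port recorded for the service)
    _translate_port(123)                # -> 123 (integers pass straight through)
    _translate_port('no-such-service')  # -> 'no-such-service' (unknown names returned as-is)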
|
||||
|
||||
def _make_it_list(dict_, field_name, value):
|
||||
@ -269,9 +280,23 @@ def _make_it_list(dict_, field_name, value):
|
||||
portval.append((port, port))
|
||||
else:
|
||||
portval.append(port)
|
||||
return list(set(prev_value + portval))
|
||||
translated_portval = []
|
||||
# and the ports sent as string, e.g. ntp instead of 123
|
||||
# needs to be translated
|
||||
# again, using the same /etc/services
|
||||
for port_start, port_end in portval:
|
||||
if not isinstance(port_start, int):
|
||||
port_start = _translate_port(port_start)
|
||||
if not isinstance(port_end, int):
|
||||
port_end = _translate_port(port_end)
|
||||
translated_portval.append(
|
||||
(port_start, port_end)
|
||||
)
|
||||
return list(set(prev_value + translated_portval))
|
||||
return list(set(prev_value + list(value)))
|
||||
if field_name in ('source_port', 'destination_port'):
|
||||
if not isinstance(value, int):
|
||||
value = _translate_port(value)
|
||||
return list(set(prev_value + [(value, value)])) # a list of tuples
|
||||
# anything else will be enclosed in a list-type
|
||||
return list(set(prev_value + [value]))
|
||||
@ -420,7 +445,7 @@ def _get_policy_object(platform,
|
||||
]
|
||||
filter_options = filter_config.pop('options', None)
|
||||
if filter_options:
|
||||
filter_options = _make_it_list(filter_options, filter_name, filter_options)
|
||||
filter_options = _make_it_list({}, filter_name, filter_options)
|
||||
# make sure the filter options are sent as list
|
||||
target_opts.extend(filter_options)
|
||||
target = aclgen.policy.Target(target_opts)
|
||||
@ -584,7 +609,7 @@ def get_term_config(platform,
|
||||
To see what fields are supported, please consult the list of supported keywords_.
|
||||
Some platforms have few other optional_ keywords.
|
||||
|
||||
.. _keywords:https://github.com/google/capirca/wiki/Policy-format#keywords
|
||||
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
|
||||
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
|
||||
|
||||
.. note::
|
||||
@ -881,7 +906,7 @@ def get_filter_config(platform,
|
||||
# new terms won't be removed
|
||||
filters = {
|
||||
filter_name: {
|
||||
'options': _make_it_list(filter_options, filter_name, filter_options)
|
||||
'options': _make_it_list({}, filter_name, filter_options)
|
||||
}
|
||||
}
|
||||
filters[filter_name].update(terms)
|
||||
|
@ -439,7 +439,7 @@ def _run(cmd,
|
||||
env_encoded = env_encoded.encode(__salt_system_encoding__)
|
||||
env_runas = dict(list(zip(*[iter(env_encoded.split(b'\0'))]*2)))
|
||||
|
||||
env_runas = {sdecode(k): sdecode(v) for k, v in six.iteritems(env_runas)}
|
||||
env_runas = dict((sdecode(k), sdecode(v)) for k, v in six.iteritems(env_runas))
|
||||
env_runas.update(env)
|
||||
env = env_runas
|
||||
# Encode unicode kwargs to filesystem encoding to avoid a
|
||||
@ -768,10 +768,10 @@ def run(cmd,
|
||||
|
||||
:param str stdin: A string of standard input can be specified for the
|
||||
command to be run using the ``stdin`` parameter. This can be useful in cases
|
||||
where sensitive information must be read from standard input.:
|
||||
where sensitive information must be read from standard input.
|
||||
|
||||
:param str runas: User to run script as. If running on a Windows minion you
|
||||
must also pass a password
|
||||
must also pass a password.
|
||||
|
||||
:param str password: Windows only. Required when specifying ``runas``. This
|
||||
parameter will be ignored on non-Windows platforms.
|
||||
@ -781,10 +781,14 @@ def run(cmd,
|
||||
:param str shell: Shell to execute under. Defaults to the system default
|
||||
shell.
|
||||
|
||||
:param bool python_shell: If False, let python handle the positional
|
||||
arguments. Set to True to use shell features, such as pipes or redirection
|
||||
:param bool python_shell: If ``False``, let python handle the positional
|
||||
arguments. Set to ``True`` to use shell features, such as pipes or
|
||||
redirection.
|
||||
|
||||
:param bool bg: If True, run command in background and do not await or deliver it's results
|
||||
:param bool bg: If ``True``, run command in background and do not await or
|
||||
deliver its results
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:param list env: A list of environment variables to be set prior to
|
||||
execution.
|
||||
@ -817,7 +821,7 @@ def run(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1025,7 +1029,7 @@ def shell(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1210,7 +1214,7 @@ def run_stdout(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1391,7 +1395,7 @@ def run_stderr(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1573,7 +1577,7 @@ def run_all(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1609,14 +1613,23 @@ def run_all(cmd,
|
||||
``env`` represents the environment variables for the command, and
|
||||
should be formatted as a dict, or a YAML string which resolves to a dict.
|
||||
|
||||
redirect_stderr : False
|
||||
If set to ``True``, then stderr will be redirected to stdout. This is
|
||||
helpful for cases where obtaining both the retcode and output is
|
||||
desired, but it is not desired to have the output separated into both
|
||||
stdout and stderr.
|
||||
:param bool redirect_stderr: If set to ``True``, then stderr will be
|
||||
redirected to stdout. This is helpful for cases where obtaining both the
|
||||
retcode and output is desired, but it is not desired to have the output
|
||||
separated into both stdout and stderr.
|
||||
|
||||
.. versionadded:: 2015.8.2
|
||||
|
||||
:param str password: Windows only. Required when specifying ``runas``. This
|
||||
parameter will be ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
:param bool bg: If ``True``, run command in background and do not await or
|
||||
deliver its results
|
||||
|
||||
.. versionadded:: 2016.3.6
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1763,7 +1776,7 @@ def retcode(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -1999,7 +2012,7 @@ def script(source,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -2224,7 +2237,7 @@ def script_retcode(source,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -2388,7 +2401,7 @@ def exec_code_all(lang, code, cwd=None):
|
||||
return ret
|
||||
|
||||
|
||||
def tty(device, echo=None):
|
||||
def tty(device, echo=''):
|
||||
'''
|
||||
Echo a string to a specific tty
|
||||
|
||||
@ -2407,7 +2420,7 @@ def tty(device, echo=None):
|
||||
return {'Error': 'The specified device is not a valid TTY'}
|
||||
try:
|
||||
with salt.utils.fopen(teletype, 'wb') as tty_device:
|
||||
tty_device.write(echo)
|
||||
tty_device.write(salt.utils.to_bytes(echo))
|
||||
return {
|
||||
'Success': 'Message was successfully echoed to {0}'.format(teletype)
|
||||
}
|
||||
@ -2500,7 +2513,7 @@ def run_chroot(root,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -2956,7 +2969,7 @@ def powershell(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
@ -3142,7 +3155,7 @@ def run_bg(cmd,
|
||||
|
||||
One can still use the existing $PATH by using a bit of Jinja:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -124,7 +124,6 @@ except ImportError:
|
||||
USE_FILTERCLASS = False
|
||||
|
||||
MIN_DOCKERCOMPOSE = (1, 5, 0)
|
||||
MAX_DOCKERCOMPOSE = (1, 9, 0)
|
||||
VERSION_RE = r'([\d.]+)'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -139,7 +138,7 @@ def __virtual__():
|
||||
match = re.match(VERSION_RE, str(compose.__version__))
|
||||
if match:
|
||||
version = tuple([int(x) for x in match.group(1).split('.')])
|
||||
if version >= MIN_DOCKERCOMPOSE and version <= MAX_DOCKERCOMPOSE:
|
||||
if version >= MIN_DOCKERCOMPOSE:
|
||||
return __virtualname__
|
||||
return (False, 'The dockercompose execution module not loaded: '
|
||||
'compose python library not available.')
|
||||
|
@ -4567,6 +4567,8 @@ def manage_file(name,
|
||||
follow_symlinks=True,
|
||||
skip_verify=False,
|
||||
keep_mode=False,
|
||||
encoding=None,
|
||||
encoding_errors='strict',
|
||||
**kwargs):
|
||||
'''
|
||||
Checks the destination against what was retrieved with get_managed and
|
||||
@ -4633,6 +4635,21 @@ def manage_file(name,
|
||||
a local file on the minion), the mode of the destination file will be
|
||||
set to the mode of the source file.
|
||||
|
||||
encoding : None
|
||||
If None, str() will be applied to contents.
|
||||
If not None, specified encoding will be used.
|
||||
See https://docs.python.org/3/library/codecs.html#standard-encodings
|
||||
for the list of available encodings.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
encoding_errors : 'strict'
|
||||
Default is ``'strict'``.
|
||||
See https://docs.python.org/2/library/codecs.html#codec-base-classes
|
||||
for the error handling schemes.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -4751,7 +4768,11 @@ def manage_file(name,
if salt.utils.is_windows():
contents = os.linesep.join(contents.splitlines())
with salt.utils.fopen(tmp, 'w') as tmp_:
tmp_.write(str(contents))
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(str(contents))
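This hunk is the first of two places where ``manage_file`` writes ``contents`` to a temp file; both now honour the new ``encoding``/``encoding_errors`` arguments instead of always falling back to ``str()``. A standalone sketch of that branch (the helper name is hypothetical, and bytes mode is used so the example also runs on Python 3):

.. code-block:: python

    def _write_tmp(path, contents, encoding=None, encoding_errors='strict'):
        if encoding:
            # Explicit encoding requested: encode with the chosen error handler.
            with open(path, 'wb') as tmp_:
                tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
        else:
            # Historical behaviour: coerce to str and write as-is.
            with open(path, 'w') as tmp_:
                tmp_.write(str(contents))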
|
||||
# Compare contents of files to know if we need to replace
|
||||
with salt.utils.fopen(tmp, 'r') as src:
|
||||
@ -4955,7 +4976,12 @@ def manage_file(name,
|
||||
if salt.utils.is_windows():
|
||||
contents = os.linesep.join(contents.splitlines())
|
||||
with salt.utils.fopen(tmp, 'w') as tmp_:
|
||||
tmp_.write(str(contents))
|
||||
if encoding:
|
||||
log.debug('File will be encoded with {0}'.format(encoding))
|
||||
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
|
||||
else:
|
||||
tmp_.write(str(contents))
|
||||
|
||||
# Copy into place
|
||||
salt.utils.files.copyfile(tmp,
|
||||
name,
|
||||
@ -5805,7 +5831,7 @@ def pardir():
|
||||
|
||||
This can be useful when constructing Salt Formulas.
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set pardir = salt['file.pardir']() %}
|
||||
{% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}
|
||||
@ -5827,7 +5853,7 @@ def normpath(path):
|
||||
|
||||
This can be useful at the CLI but is frequently useful when scripting.
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}
|
||||
|
||||
@ -5848,7 +5874,7 @@ def basename(path):
|
||||
|
||||
This can be useful at the CLI but is frequently useful when scripting.
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- set filename = salt['file.basename'](source_file) %}
|
||||
|
||||
@ -5869,7 +5895,7 @@ def dirname(path):
|
||||
|
||||
This can be useful at the CLI but is frequently useful when scripting.
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}
|
||||
|
||||
@ -5891,7 +5917,7 @@ def join(*args):
|
||||
This can be useful at the CLI but is frequently useful when scripting
|
||||
combining path variables:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set www_root = '/var' %}
|
||||
{% set app_dir = 'myapp' %}
|
||||
|
@ -355,6 +355,7 @@ def _which_git_config(global_, cwd, user, password):
|
||||
def add(cwd,
|
||||
filename,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -378,6 +379,13 @@ def add(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``add``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -407,7 +415,8 @@ def add(cwd,
cwd = _expand_path(cwd, user)
if not isinstance(filename, six.string_types):
filename = str(filename)
command = ['git', 'add', '--verbose']
command = ['git'] + _format_opts(git_opts)
command.extend(['add', '--verbose'])
command.extend(
[x for x in _format_opts(opts) if x not in ('-v', '--verbose')]
)
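Every ``git.*`` wrapper in this changeset gets the treatment shown here for ``add``: ``git_opts`` is split and placed between the ``git`` binary and the subcommand, which is where options such as ``-c`` must appear, while ``opts`` continues to apply to the subcommand itself. A small sketch of the resulting argv, using ``shlex.split`` as a stand-in for Salt's ``_format_opts`` and a hypothetical ``-c`` value:

.. code-block:: python

    import shlex

    def format_opts(opts):
        # stand-in for salt.modules.git._format_opts
        return shlex.split(opts) if opts else []

    git_opts = '-c http.sslVerify=false'
    command = ['git'] + format_opts(git_opts)
    command.extend(['add', '--verbose'])
    print(command)   # ['git', '-c', 'http.sslVerify=false', 'add', '--verbose']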
@ -423,6 +432,7 @@ def archive(cwd,
|
||||
output,
|
||||
rev='HEAD',
|
||||
prefix=None,
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False,
|
||||
@ -491,6 +501,13 @@ def archive(cwd,
|
||||
specifying a prefix, if the prefix is intended to create a
|
||||
top-level directory.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the
|
||||
``archive`` subcommand), in a single string. This is useful for passing
|
||||
``-c`` to run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -527,7 +544,8 @@ def archive(cwd,
|
||||
if kwargs:
|
||||
salt.utils.invalid_kwargs(kwargs)
|
||||
|
||||
command = ['git', 'archive']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('archive')
|
||||
# If prefix was set to '' then we skip adding the --prefix option
|
||||
if prefix != '':
|
||||
if prefix:
|
||||
@ -557,6 +575,7 @@ def archive(cwd,
|
||||
def branch(cwd,
|
||||
name=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -584,6 +603,13 @@ def branch(cwd,
|
||||
examples below) to avoid causing errors with Salt's own argument
|
||||
parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``branch``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -617,7 +643,8 @@ def branch(cwd,
|
||||
salt myminion git.branch /path/to/repo newbranch opts='-m oldbranch'
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'branch']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('branch')
|
||||
command.extend(_format_opts(opts))
|
||||
if name is not None:
|
||||
command.append(name)
|
||||
@ -633,6 +660,7 @@ def checkout(cwd,
|
||||
rev=None,
|
||||
force=False,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -650,6 +678,14 @@ def checkout(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the
|
||||
``checkout`` subcommand), in a single string. This is useful for
|
||||
passing ``-c`` to run git with temporary changes to the git
|
||||
configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
rev
|
||||
The remote branch or revision to checkout.
|
||||
|
||||
@ -692,7 +728,8 @@ def checkout(cwd,
|
||||
salt myminion git.checkout /path/to/repo opts='-b newbranch'
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'checkout']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('checkout')
|
||||
if force:
|
||||
command.append('--force')
|
||||
opts = _format_opts(opts)
|
||||
@ -720,6 +757,7 @@ def clone(cwd,
|
||||
url=None, # Remove default value once 'repository' arg is removed
|
||||
name=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
identity=None,
|
||||
@ -752,6 +790,13 @@ def clone(cwd,
|
||||
opts
|
||||
Any additional options to add to the command line, in a single string
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``clone``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -827,7 +872,8 @@ def clone(cwd,
|
||||
except ValueError as exc:
|
||||
raise SaltInvocationError(exc.__str__())
|
||||
|
||||
command = ['git', 'clone']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('clone')
|
||||
command.extend(_format_opts(opts))
|
||||
command.extend(['--', url])
|
||||
if name is not None:
|
||||
@ -859,6 +905,7 @@ def clone(cwd,
|
||||
def commit(cwd,
|
||||
message,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
filename=None,
|
||||
@ -873,7 +920,8 @@ def commit(cwd,
|
||||
Commit message
|
||||
|
||||
opts
|
||||
Any additional options to add to the command line, in a single string
|
||||
Any additional options to add to the command line, in a single string.
|
||||
These opts will be added to the end of the git command being run.
|
||||
|
||||
.. note::
|
||||
On the Salt CLI, if the opts are preceded with a dash, it is
|
||||
@ -883,6 +931,13 @@ def commit(cwd,
|
||||
The ``-m`` option should not be passed here, as the commit message
|
||||
will be defined by the ``message`` argument.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``commit``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -921,7 +976,8 @@ def commit(cwd,
|
||||
salt myminion git.commit /path/to/repo 'The commit message' filename=foo/bar.py
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'commit', '-m', message]
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.extend(['commit', '-m', message])
|
||||
command.extend(_format_opts(opts))
|
||||
if filename:
|
||||
if not isinstance(filename, six.string_types):
|
||||
@ -1472,6 +1528,7 @@ def diff(cwd,
|
||||
item1=None,
|
||||
item2=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
no_index=False,
|
||||
@ -1501,6 +1558,13 @@ def diff(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``diff``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -1561,7 +1625,8 @@ def diff(cwd,
|
||||
'The \'no_index\' and \'cached\' options cannot be used together'
|
||||
)
|
||||
|
||||
command = ['git', 'diff']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('diff')
|
||||
command.extend(_format_opts(opts))
|
||||
|
||||
if paths is not None and not isinstance(paths, (list, tuple)):
|
||||
@ -1620,6 +1685,7 @@ def fetch(cwd,
|
||||
force=False,
|
||||
refspecs=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
identity=None,
|
||||
@ -1660,6 +1726,13 @@ def fetch(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``fetch``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -1715,7 +1788,8 @@ def fetch(cwd,
|
||||
salt myminion git.fetch /path/to/repo identity=/root/.ssh/id_rsa
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'fetch']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('fetch')
|
||||
if force:
|
||||
command.append('--force')
|
||||
command.extend(
|
||||
@ -1780,6 +1854,7 @@ def init(cwd,
|
||||
separate_git_dir=None,
|
||||
shared=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -1818,6 +1893,13 @@ def init(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``init``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -1849,7 +1931,8 @@ def init(cwd,
|
||||
salt myminion git.init /path/to/bare/repo.git bare=True
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'init']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('init')
|
||||
if bare:
|
||||
command.append('--bare')
|
||||
if template is not None:
|
||||
@ -2334,6 +2417,7 @@ def ls_remote(cwd=None,
|
||||
remote='origin',
|
||||
ref=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
identity=None,
|
||||
@ -2374,6 +2458,14 @@ def ls_remote(cwd=None,
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the
|
||||
``ls-remote`` subcommand), in a single string. This is useful for
|
||||
passing ``-c`` to run git with temporary changes to the git
|
||||
configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -2447,7 +2539,8 @@ def ls_remote(cwd=None,
|
||||
https_only=True)
|
||||
except ValueError as exc:
|
||||
raise SaltInvocationError(exc.__str__())
|
||||
command = ['git', 'ls-remote']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('ls-remote')
|
||||
command.extend(_format_opts(opts))
|
||||
if not isinstance(remote, six.string_types):
|
||||
remote = str(remote)
|
||||
@ -2476,6 +2569,7 @@ def ls_remote(cwd=None,
|
||||
def merge(cwd,
|
||||
rev=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False,
|
||||
@ -2500,6 +2594,13 @@ def merge(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``merge``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -2535,7 +2636,8 @@ def merge(cwd,
|
||||
salt.utils.invalid_kwargs(kwargs)
|
||||
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'merge']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('merge')
|
||||
command.extend(_format_opts(opts))
|
||||
if rev:
|
||||
if not isinstance(rev, six.string_types):
|
||||
@ -2555,6 +2657,7 @@ def merge_base(cwd,
|
||||
independent=False,
|
||||
fork_point=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False,
|
||||
@ -2619,6 +2722,14 @@ def merge_base(cwd,
|
||||
This option should not be necessary unless new CLI arguments are
|
||||
added to `git-merge-base(1)`_ and are not yet supported in Salt.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the
|
||||
``merge-base`` subcommand), in a single string. This is useful for
|
||||
passing ``-c`` to run git with temporary changes to the git
|
||||
configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -2708,7 +2819,8 @@ def merge_base(cwd,
|
||||
password=password,
|
||||
ignore_retcode=ignore_retcode) == first_commit
|
||||
|
||||
command = ['git', 'merge-base']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('merge-base')
|
||||
command.extend(_format_opts(opts))
|
||||
if all_:
|
||||
command.append('--all')
|
||||
@ -2814,6 +2926,7 @@ def merge_tree(cwd,
|
||||
|
||||
def pull(cwd,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
identity=None,
|
||||
@ -2833,6 +2946,13 @@ def pull(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``pull``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -2886,7 +3006,8 @@ def pull(cwd,
|
||||
salt myminion git.pull /path/to/repo opts='--rebase origin master'
|
||||
'''
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'pull']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('pull')
|
||||
command.extend(_format_opts(opts))
|
||||
return _git_run(command,
|
||||
cwd=cwd,
|
||||
@ -2901,6 +3022,7 @@ def push(cwd,
|
||||
remote=None,
|
||||
ref=None,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
identity=None,
|
||||
@ -2933,6 +3055,13 @@ def push(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``push``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -2996,7 +3125,8 @@ def push(cwd,
|
||||
salt.utils.invalid_kwargs(kwargs)
|
||||
|
||||
cwd = _expand_path(cwd, user)
|
||||
command = ['git', 'push']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('push')
|
||||
command.extend(_format_opts(opts))
|
||||
if not isinstance(remote, six.string_types):
|
||||
remote = str(remote)
|
||||
@ -3015,6 +3145,7 @@ def push(cwd,
|
||||
def rebase(cwd,
|
||||
rev='master',
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -3035,6 +3166,13 @@ def rebase(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``rebase``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
|
||||
by the user under which the minion is running.
|
||||
@ -3066,7 +3204,8 @@ def rebase(cwd,
|
||||
opts = _format_opts(opts)
|
||||
if any(x for x in opts if x in ('-i', '--interactive')):
|
||||
raise SaltInvocationError('Interactive rebases are not supported')
|
||||
command = ['git', 'rebase']
|
||||
command = ['git'] + _format_opts(git_opts)
|
||||
command.append('rebase')
|
||||
command.extend(opts)
|
||||
if not isinstance(rev, six.string_types):
|
||||
rev = str(rev)
|
||||
@ -3455,6 +3594,7 @@ def remotes(cwd,
|
||||
|
||||
def reset(cwd,
|
||||
opts='',
|
||||
git_opts='',
|
||||
user=None,
|
||||
password=None,
|
||||
ignore_retcode=False):
|
||||
@ -3472,6 +3612,13 @@ def reset(cwd,
|
||||
necessary to precede them with ``opts=`` (as in the CLI examples
|
||||
below) to avoid causing errors with Salt's own argument parsing.
|
||||
|
||||
git_opts
|
||||
Any additional options to add to git command itself (not the ``reset``
|
||||
subcommand), in a single string. This is useful for passing ``-c`` to
|
||||
run git with temporary changes to the git configuration.
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
user
|
||||
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -3501,7 +3648,8 @@ def reset(cwd,
salt myminion git.reset /path/to/repo opts='--hard origin/master'
'''
cwd = _expand_path(cwd, user)
command = ['git', 'reset']
command = ['git'] + _format_opts(git_opts)
command.append('reset')
command.extend(_format_opts(opts))
return _git_run(command,
cwd=cwd,
@ -3513,6 +3661,7 @@ def reset(cwd,
def rev_parse(cwd,
rev=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False):
@ -3534,6 +3683,14 @@ def rev_parse(cwd,
opts
Any additional options to add to the command line, in a single string

git_opts
Any additional options to add to git command itself (not the
``rev-parse`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -3569,7 +3726,8 @@ def rev_parse(cwd,
salt myminion git.rev_parse /path/to/repo opts='--is-bare-repository'
'''
cwd = _expand_path(cwd, user)
command = ['git', 'rev-parse']
command = ['git'] + _format_opts(git_opts)
command.append('rev-parse')
command.extend(_format_opts(opts))
if rev is not None:
if not isinstance(rev, six.string_types):
@ -3639,6 +3797,7 @@ def revision(cwd,
def rm_(cwd,
filename,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False):
@ -3663,6 +3822,13 @@ def rm_(cwd,
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.

git_opts
Any additional options to add to git command itself (not the ``rm``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -3691,7 +3857,8 @@ def rm_(cwd,
salt myminion git.rm /path/to/repo foo/baz opts='-r'
'''
cwd = _expand_path(cwd, user)
command = ['git', 'rm']
command = ['git'] + _format_opts(git_opts)
command.append('rm')
command.extend(_format_opts(opts))
command.extend(['--', filename])
return _git_run(command,
@ -3704,6 +3871,7 @@ def rm_(cwd,
def stash(cwd,
action='save',
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False):
@ -3720,6 +3888,13 @@ def stash(cwd,
``'show'``, etc.). Omitting this argument will simply run ``git
stash``.

git_opts
Any additional options to add to git command itself (not the ``stash``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -3753,7 +3928,8 @@ def stash(cwd,
# No numeric actions but this will prevent a traceback when the git
# command is run.
action = str(action)
command = ['git', 'stash', action]
command = ['git'] + _format_opts(git_opts)
command.extend(['stash', action])
command.extend(_format_opts(opts))
return _git_run(command,
cwd=cwd,
@ -3824,6 +4000,7 @@ def status(cwd,
def submodule(cwd,
command,
opts='',
git_opts='',
user=None,
password=None,
identity=None,
@ -3858,6 +4035,14 @@ def submodule(cwd,
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.

git_opts
Any additional options to add to git command itself (not the
``submodule`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.

.. versionadded:: Nitrogen

init : False
If ``True``, ensures that new submodules are initialized

@ -3943,7 +4128,8 @@ def submodule(cwd,
)
if not isinstance(command, six.string_types):
command = str(command)
cmd = ['git', 'submodule', command]
cmd = ['git'] + _format_opts(git_opts)
cmd.extend(['submodule', command])
cmd.extend(_format_opts(opts))
return _git_run(cmd,
cwd=cwd,
@ -3958,6 +4144,7 @@ def symbolic_ref(cwd,
ref,
value=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False):
@ -3983,6 +4170,14 @@ def symbolic_ref(cwd,
opts
Any additional options to add to the command line, in a single string

git_opts
Any additional options to add to git command itself (not the
``symbolic-refs`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -4014,7 +4209,8 @@ def symbolic_ref(cwd,
salt myminion git.symbolic_ref /path/to/repo FOO opts='--delete'
'''
cwd = _expand_path(cwd, user)
command = ['git', 'symbolic-ref']
command = ['git'] + _format_opts(git_opts)
command.append('symbolic-ref')
opts = _format_opts(opts)
if value is not None and any(x in opts for x in ('-d', '--delete')):
raise SaltInvocationError(
@ -4089,6 +4285,7 @@ def worktree_add(cwd,
force=None,
detach=False,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False,
@ -4141,6 +4338,14 @@ def worktree_add(cwd,
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.

git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -4179,7 +4384,8 @@ def worktree_add(cwd,
'Only one of \'branch\' and \'detach\' is allowed'
)

command = ['git', 'worktree', 'add']
command = ['git'] + _format_opts(git_opts)
command.extend(['worktree', 'add'])
if detach:
if force:
log.warning(
@ -4215,6 +4421,7 @@ def worktree_prune(cwd,
verbose=True,
expire=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False):
@ -4253,6 +4460,14 @@ def worktree_prune(cwd,
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.

git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.

.. versionadded:: Nitrogen

user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
@ -4283,7 +4498,8 @@ def worktree_prune(cwd,
'''
_check_worktree_support()
cwd = _expand_path(cwd, user)
command = ['git', 'worktree', 'prune']
command = ['git'] + _format_opts(git_opts)
command.append(['worktree', 'prune'])
if dry_run:
command.append('--dry-run')
if verbose:
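The hunks above all apply one refactor: instead of hard-coding ``['git', '<subcommand>']``, each function now builds the argv list from the new ``git_opts`` argument first and appends the subcommand afterwards. A minimal sketch of that assembly pattern (the simplified ``_format_opts`` helper and the sample option values here are illustrative, not taken from this commit):

.. code-block:: python

    def _format_opts(opts):
        # Split an opts string into a list of arguments; empty input yields [].
        return opts.split() if opts else []

    def build_git_command(subcommand, opts='', git_opts=''):
        # Global options (e.g. -c key=value) go right after 'git',
        # before the subcommand, mirroring the refactor above.
        command = ['git'] + _format_opts(git_opts)
        command.append(subcommand)
        command.extend(_format_opts(opts))
        return command

    # Example: git -c core.autocrlf=false reset --hard origin/master
    print(build_git_command('reset', opts='--hard origin/master',
                            git_opts='-c core.autocrlf=false'))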
@ -177,7 +177,6 @@ def persist(name, value, config=None):
'''
if config is None:
config = default_config()
running = show()
edited = False
# If the sysctl.conf is not present, add it
if not os.path.isfile(config):
@ -229,15 +228,11 @@ def persist(name, value, config=None):
# This is the line to edit
if str(comps[1]) == str(value):
# It is correct in the config, check if it is correct in /proc
if name in running:
if str(running[name]) != str(value):
assign(name, value)
return 'Updated'
else:
return 'Already set'
# It is missing from the running config. We can not set it.
if str(get(name)) != str(value):
assign(name, value)
return 'Updated'
else:
raise CommandExecutionError('sysctl {0} does not exist'.format(name))
return 'Already set'

nlines.append('{0} = {1}\n'.format(name, value))
edited = True
@ -12,6 +12,7 @@ from __future__ import absolute_import

# Import python libs
import copy
import functools
import json
import logging

@ -125,7 +126,8 @@ def list_pkgs(versions_as_list=False, **kwargs):
name_and_versions = line.split(' ')
name = name_and_versions[0]
installed_versions = name_and_versions[1:]
newest_version = sorted(installed_versions, cmp=salt.utils.version_cmp).pop()
key_func = functools.cmp_to_key(salt.utils.version_cmp)
newest_version = sorted(installed_versions, key=key_func).pop()
except ValueError:
continue
__salt__['pkg_resource.add_pkg'](ret, name, newest_version)
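The change above swaps the Python 2-only ``cmp=`` keyword of ``sorted()`` for ``functools.cmp_to_key``, which works on both Python 2 and 3. A small standalone illustration (the comparator below is a toy stand-in for ``salt.utils.version_cmp``):

.. code-block:: python

    import functools

    def version_cmp(a, b):
        # Toy comparator: compare dotted version strings numerically.
        pa = [int(x) for x in a.split('.')]
        pb = [int(x) for x in b.split('.')]
        return (pa > pb) - (pa < pb)

    versions = ['1.10.0', '1.2.0', '1.9.3']
    key_func = functools.cmp_to_key(version_cmp)
    newest = sorted(versions, key=key_func).pop()
    print(newest)  # 1.10.0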
@ -309,7 +309,8 @@ def glob(tgt, minion_id=None):
def filter_by(lookup,
tgt_type='compound',
minion_id=None,
expr_form=None):
expr_form=None,
default='default'):
'''
Return the first match in a dictionary of target patterns

@ -323,13 +324,13 @@ def filter_by(lookup,

Pillar Example:

.. code-block:: yaml
.. code-block:: jinja

# Filter the data for the current minion into a variable:
{% set roles = salt['match.filter_by']({
'web*': ['app', 'caching'],
'db*': ['db'],
}) %}
}, default='web*') %}

# Make the filtered data available to Pillar:
roles: {{ roles | yaml() }}
@ -353,7 +354,7 @@ def filter_by(lookup,
if expr_funcs[tgt_type](*params):
return lookup[key]

return None
return lookup.get(default, None)


def search_by(lookup, tgt_type='compound', minion_id=None):
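The new ``default`` argument changes the fallback behaviour: when no target pattern matches, ``filter_by`` now returns ``lookup.get(default)`` instead of ``None``. A hedged sketch of that lookup logic outside of Salt (the matching step is simplified to ``fnmatch``; the real function dispatches to Salt's targeting engines):

.. code-block:: python

    import fnmatch

    def filter_by(lookup, minion_id, default='default'):
        # Return the value of the first pattern that matches the minion ID,
        # otherwise fall back to the entry stored under `default`.
        for pattern, value in lookup.items():
            if fnmatch.fnmatch(minion_id, pattern):
                return value
        return lookup.get(default, None)

    roles = {'web*': ['app', 'caching'], 'db*': ['db']}
    print(filter_by(roles, 'web01'))                   # ['app', 'caching']
    print(filter_by(roles, 'mail01', default='web*'))  # falls back to ['app', 'caching']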
@ -281,7 +281,7 @@ def get(tgt,
:py:func:`saltutil.runner <salt.modules.saltutil.runner>` module. For
example:

.. code-block:: yaml
.. code-block:: jinja

{% set minion_ips = salt.saltutil.runner('mine.get',
tgt='*',
@ -50,7 +50,7 @@ When the key is defined in the master config you can use it from the nacl runner

Now you can create a pillar with protected data like:

.. code-block:: yaml
.. code-block:: jinja

pillarexample:
user: root
@ -58,7 +58,7 @@ Now you can create a pillar with protected data like:

Or do something interesting with grains like:

.. code-block:: yaml
.. code-block:: jinja

salt-call nacl.enc minionname:dbrole
AL24Z2C5OlkReer3DuQTFdrNLchLuz3NGIhGjZkLtKRYry/b/CksWM8O9yskLwH2AGVLoEXI5jAa
@ -92,7 +92,7 @@ Pillar data should look the same, even though the secret will be quite long. How
multiline encrypted secrets from pillar in a state, use the following format to avoid issues with /n
creating extra whitespace at the beginning of each line in the cert file:

.. code-block:: yaml
.. code-block:: jinja

secret.txt:
file.managed:
@ -219,7 +219,7 @@ def load_term_config(filter_name,
To see what fields are supported, please consult the list of supported keywords_.
Some platforms have few other optional_ keyworkds.

.. _keywords:https://github.com/google/capirca/wiki/Policy-format#keywords
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords

.. note::
@ -466,7 +466,8 @@ def load_filter_config(filter_name,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False):
debug=False,
**kwargs):  # pylint: disable=unused-argument
'''
Generate and load the configuration of a policy filter.

@ -656,7 +657,8 @@ def load_policy_config(filters=None,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False):
debug=False,
**kwargs):  # pylint: disable=unused-argument
'''
Generate and load the configuration of the whole policy.

@ -403,16 +403,28 @@ def cli(*commands, **kwargs):  # pylint: disable=unused-argument


@proxy_napalm_wrap
def traceroute(destination, source=None, ttl=None, timeout=None, **kwargs):  # pylint: disable=unused-argument
def traceroute(destination, source=None, ttl=None, timeout=None, vrf=None, **kwargs):  # pylint: disable=unused-argument

'''
Calls the method traceroute from the NAPALM driver object and returns a dictionary with the result of the traceroute
command executed on the device.

:param destination: Hostname or address of remote host
:param source: Source address to use in outgoing traceroute packets
:param ttl: IP maximum time-to-live value (or IPv6 maximum hop-limit value)
:param timeout: Number of seconds to wait for response (seconds)
destination
Hostname or address of remote host

source
Source address to use in outgoing traceroute packets

ttl
IP maximum time-to-live value (or IPv6 maximum hop-limit value)

timeout
Number of seconds to wait for response (seconds)

vrf
VRF (routing instance) for traceroute attempt

.. versionadded:: 2016.11.4

CLI Example:

@ -429,23 +441,40 @@ def traceroute(destination, source=None, ttl=None, timeout=None, **kwargs):  # p
'destination': destination,
'source': source,
'ttl': ttl,
'timeout': timeout
'timeout': timeout,
'vrf': vrf
}
)


@proxy_napalm_wrap
def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None, **kwargs):  # pylint: disable=unused-argument
def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None, vrf=None, **kwargs):  # pylint: disable=unused-argument

'''
Executes a ping on the network device and returns a dictionary as a result.

:param destination: Hostname or IP address of remote host
:param source: Source address of echo request
:param ttl: IP time-to-live value (IPv6 hop-limit value) (1..255 hops)
:param timeout: Maximum wait time after sending final packet (seconds)
:param size: Size of request packets (0..65468 bytes)
:param count: Number of ping requests to send (1..2000000000 packets)
destination
Hostname or IP address of remote host

source
Source address of echo request

ttl
IP time-to-live value (IPv6 hop-limit value) (1..255 hops)

timeout
Maximum wait time after sending final packet (seconds)

size
Size of request packets (0..65468 bytes)

count
Number of ping requests to send (1..2000000000 packets)

vrf
VRF (routing instance) for ping attempt

.. versionadded:: 2016.11.4

CLI Example:

@ -465,7 +494,8 @@ def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None
'ttl': ttl,
'timeout': timeout,
'size': size,
'count': count
'count': count,
'vrf': vrf
}
)

@ -1227,6 +1257,7 @@ def load_template(template_name,
_loaded['comment'] = 'Error while rendering the template.'
return _loaded
_rendered = open(_temp_tpl_file).read()
__salt__['file.remove'](_temp_tpl_file)
else:
return _loaded  # exit

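Both ``traceroute`` and ``ping`` gain a ``vrf`` argument that is simply forwarded to the underlying NAPALM driver call alongside the existing options. A minimal sketch of that kwargs pass-through (``call_napalm`` is a hypothetical stand-in for the proxy dispatch used by the module):

.. code-block:: python

    def call_napalm(method, **kwargs):
        # Hypothetical stand-in: a real implementation would dispatch to the
        # NAPALM driver; here we just show what gets forwarded.
        print(method, kwargs)

    def ping(destination, source=None, ttl=None, timeout=None,
             size=None, count=None, vrf=None):
        # The new vrf value rides along with the other optional parameters.
        call_napalm('ping', destination=destination, source=source, ttl=ttl,
                    timeout=timeout, size=size, count=count, vrf=vrf)

    ping('8.8.8.8', count=4, vrf='CUSTOMER-A')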
@ -216,7 +216,7 @@ def _ppid():
'''
ret = {}
if __grains__['kernel'] == 'SunOS':
cmd = 'ps -a -o pid,ppid | tail -n+2'
cmd = 'ps -a -o pid,ppid | tail +2'
else:
cmd = 'ps -ax -o pid,ppid | tail -n+2'
out = __salt__['cmd.run'](cmd, python_shell=True)
@ -308,10 +308,11 @@ def _netstat_sunos():
Return netstat information for SunOS flavors
'''
log.warning('User and program not (yet) supported on SunOS')

ret = []
for addr_family in ('inet', 'inet6'):
# Lookup TCP connections
cmd = 'netstat -f {0} -P tcp -an | tail -n+5'.format(addr_family)
cmd = 'netstat -f {0} -P tcp -an | tail +5'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
@ -323,7 +324,7 @@ def _netstat_sunos():
'remote-address': comps[1],
'state': comps[6]})
# Lookup UDP connections
cmd = 'netstat -f {0} -P udp -an | tail -n+5'.format(addr_family)
cmd = 'netstat -f {0} -P udp -an | tail +5'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
@ -480,7 +481,7 @@ def _netstat_route_sunos():
Return netstat routing information for SunOS
'''
ret = []
cmd = 'netstat -f inet -rn | tail -n+5'
cmd = 'netstat -f inet -rn | tail +5'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
@ -491,7 +492,7 @@ def _netstat_route_sunos():
'netmask': '',
'flags': comps[2],
'interface': comps[5] if len(comps) >= 6 else ''})
cmd = 'netstat -f inet6 -rn | tail -n+5'
cmd = 'netstat -f inet6 -rn | tail +5'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
@ -1511,7 +1512,7 @@ def ifacestartswith(cidr):

def iphexval(ip):
'''
Retrieve the interface name from a specific CIDR
Retrieve the hexadecimal representation of an IP address

.. versionadded:: 2016.11.0

@ -1522,7 +1523,5 @@ def iphexval(ip):
salt '*' network.iphexval 10.0.0.1
'''
a = ip.split('.')
hexval = ""
for val in a:
hexval = ''.join([hexval, hex(int(val))[2:].zfill(2)])
return hexval.upper()
hexval = ['%02X' % int(x) for x in a]  # pylint: disable=E1321
return ''.join(hexval)
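The rewritten ``iphexval`` replaces the manual loop with a single list comprehension using ``'%02X'`` formatting, which already produces zero-padded, upper-case hex. A standalone version of the same idea:

.. code-block:: python

    def iphexval(ip):
        # '%02X' zero-pads and upper-cases each octet, e.g. 10 -> '0A'.
        return ''.join('%02X' % int(octet) for octet in ip.split('.'))

    print(iphexval('10.0.0.1'))  # 0A000001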
@ -87,6 +87,7 @@ import salt.utils
import tempfile
import salt.utils.locales
import salt.utils.url
import salt.ext.six as six
from salt.ext.six import string_types, iteritems
from salt.exceptions import CommandExecutionError, CommandNotFoundError

@ -119,17 +120,20 @@ def _get_pip_bin(bin_env):
'''
if not bin_env:
which_result = __salt__['cmd.which_bin'](['pip', 'pip2', 'pip3', 'pip-python'])
if salt.utils.is_windows() and six.PY2:
which_result.encode('string-escape')
if which_result is None:
raise CommandNotFoundError('Could not find a `pip` binary')
if salt.utils.is_windows():
return which_result.encode('string-escape')
return which_result

# try to get pip bin from virtualenv, bin_env
if os.path.isdir(bin_env):
if salt.utils.is_windows():
pip_bin = os.path.join(
bin_env, 'Scripts', 'pip.exe').encode('string-escape')
if six.PY2:
pip_bin = os.path.join(
bin_env, 'Scripts', 'pip.exe').encode('string-escape')
else:
pip_bin = os.path.join(bin_env, 'Scripts', 'pip.exe')
else:
pip_bin = os.path.join(bin_env, 'bin', 'pip')
if os.path.isfile(pip_bin):
@ -1058,6 +1062,9 @@ def list_(prefix=None,
elif line.startswith('-e'):
line = line.split('-e ')[1]
version_, name = line.split('#egg=')
elif len(line.split('===')) >= 2:
name = line.split('===')[0]
version_ = line.split('===')[1]
elif len(line.split('==')) >= 2:
name = line.split('==')[0]
version_ = line.split('==')[1]
@ -300,7 +300,7 @@ def _merge_flags(new_flags, old_flags=None, conf='any'):

# Next sort is just aesthetic, can be commented for a small performance
# boost
tmp.sort(cmp=lambda x, y: cmp(x.lstrip('-'), y.lstrip('-')))
tmp.sort(key=lambda x: x.lstrip('-'))
return tmp


@ -349,7 +349,7 @@ def append_to_package_conf(conf, atom='', flags=None, string='', overwrite=False

# Next sort is just aesthetic, can be commented for a small performance
# boost
new_flags.sort(cmp=lambda x, y: cmp(x.lstrip('-'), y.lstrip('-')))
new_flags.sort(key=lambda x: x.lstrip('-'))

complete_file_path = _get_config_file(conf, atom)
pdir = os.path.dirname(complete_file_path)
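Here the Python 2-only ``cmp=`` lambda is replaced by an equivalent ``key=`` function: stripping the leading ``-`` before comparison keeps USE flags such as ``-doc`` and ``doc`` sorted together. For example:

.. code-block:: python

    flags = ['-doc', 'ssl', 'alsa']
    flags.sort(key=lambda x: x.lstrip('-'))
    print(flags)  # ['alsa', '-doc', 'ssl']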
@ -369,7 +369,7 @@ def _psql_prepare_and_run(cmd,


def psql_query(query, user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
password=None, runas=None, write=False):
'''
Run an SQL-Query and return the results as a list. This command
only supports SELECT statements. This limitation can be worked around
@ -399,6 +399,9 @@ def psql_query(query, user=None, host=None, port=None, maintenance_db=None,
runas
User to run the command as.

write
Mark query as READ WRITE transaction.

CLI Example:

.. code-block:: bash
@ -410,6 +413,11 @@ def psql_query(query, user=None, host=None, port=None, maintenance_db=None,
csv_query = 'COPY ({0}) TO STDOUT WITH CSV HEADER'.format(
query.strip().rstrip(';'))

# Mark transaction as R/W to achieve write will be allowed
# Commit is necessary due to transaction
if write:
csv_query = 'START TRANSACTION READ WRITE; {0}; COMMIT TRANSACTION;'.format(csv_query)

# always use the same datestyle settings to allow parsing dates
# regardless what server settings are configured
cmdret = _psql_prepare_and_run(['-v', 'datestyle=ISO,MDY',
@ -431,6 +439,10 @@ def psql_query(query, user=None, host=None, port=None, maintenance_db=None,
continue
ret.append(dict(zip(header, row)))

# Remove 'COMMIT' message if query is inside R/W transction
if write:
ret = ret[0:-1]

return ret

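With ``write=True`` the generated ``COPY ... TO STDOUT`` statement is wrapped in an explicit read-write transaction and the trailing ``COMMIT`` row is dropped from the parsed output. A sketch of just the query-wrapping step (the surrounding psql plumbing is omitted):

.. code-block:: python

    def build_csv_query(query, write=False):
        # psql_query always exports through COPY so results come back as CSV.
        csv_query = 'COPY ({0}) TO STDOUT WITH CSV HEADER'.format(
            query.strip().rstrip(';'))
        if write:
            # Explicitly mark the transaction read-write so statements with
            # side effects are allowed, and commit it at the end (see the
            # comment in the hunk above).
            csv_query = ('START TRANSACTION READ WRITE; {0}; '
                         'COMMIT TRANSACTION;'.format(csv_query))
        return csv_query

    print(build_csv_query('SELECT * FROM pg_stat_activity', write=True))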
@ -995,7 +995,7 @@ def build_interface(iface, iface_type, enabled, **settings):
if iface_type == 'bridge':
__salt__['pkg.install']('bridge-utils')

if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib']:
if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']:
opts = _parse_settings_eth(settings, iface_type, enabled, iface)
try:
template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major))
@ -599,7 +599,7 @@ def info(*packages, **attr):
# pick only latest versions
# (in case multiple packages installed, e.g. kernel)
ret = dict()
for pkg_data in reversed(sorted(_ret, cmp=lambda a_vrs, b_vrs: version_cmp(a_vrs['edition'], b_vrs['edition']))):
for pkg_data in reversed(sorted(_ret, key=lambda x: x['edition'])):
pkg_name = pkg_data.pop('name')
# Filter out GPG public keys packages
if pkg_name.startswith('gpg-pubkey'):
@ -27,9 +27,6 @@ try:
except ImportError:
HAS_CRYPT = False

# Import 3rd-party libs
import salt.ext.six as six


def __virtual__():
return __grains__.get('kernel', '') == 'Linux'
@ -293,8 +290,7 @@ def set_password(name, password, use_usermod=False):
lines = []
with salt.utils.fopen(s_file, 'rb') as fp_:
for line in fp_:
if six.PY3:
line = line.decode(__salt_system_encoding__)
line = salt.utils.to_str(line)
comps = line.strip().split(':')
if comps[0] != name:
lines.append(line)
@ -496,7 +496,10 @@ def get_computer_desc():
desc = None
hostname_cmd = salt.utils.which('hostnamectl')
if hostname_cmd:
desc = __salt__['cmd.run']('{0} status --pretty'.format(hostname_cmd))
desc = __salt__['cmd.run'](
[hostname_cmd, 'status', '--pretty'],
python_shell=False
)
else:
pattern = re.compile(r'^\s*PRETTY_HOSTNAME=(.*)$')
try:
@ -510,7 +513,7 @@ def get_computer_desc():
except IOError:
return False
if six.PY3:
desc = desc.replace('\\"', '"').decode('unicode_escape')
desc = desc.replace('\\"', '"')
else:
desc = desc.replace('\\"', '"').decode('string_escape')
return desc
@ -532,17 +535,20 @@ def set_computer_desc(desc):
salt '*' system.set_computer_desc "Michael's laptop"
'''
if six.PY3:
desc = desc.encode('unicode_escape').replace('"', '\\"')
desc = desc.replace('"', '\\"')
else:
desc = desc.encode('string_escape').replace('"', '\\"')
hostname_cmd = salt.utils.which('hostnamectl')
if hostname_cmd:
result = __salt__['cmd.retcode']('{0} set-hostname --pretty "{1}"'.format(hostname_cmd, desc))
result = __salt__['cmd.retcode'](
[hostname_cmd, 'set-hostname', '--pretty', desc],
python_shell=False
)
return True if result == 0 else False

if not os.path.isfile('/etc/machine-info'):
f = salt.utils.fopen('/etc/machine-info', 'a')
f.close()
with salt.utils.fopen('/etc/machine-info', 'w'):
pass

is_pretty_hostname_found = False
pattern = re.compile(r'^\s*PRETTY_HOSTNAME=(.*)$')
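Both ``hostnamectl`` invocations switch from an interpolated shell string to an argument list with ``python_shell=False``, so a description containing quotes or shell metacharacters can no longer break out of the command. The same pattern with the standard library, shown here purely for illustration:

.. code-block:: python

    import subprocess

    def set_pretty_hostname(desc):
        # Passing a list avoids shell interpolation entirely; `desc` is handed
        # to hostnamectl as a single argument even if it contains quotes.
        return subprocess.call(['hostnamectl', 'set-hostname', '--pretty', desc])

    # set_pretty_hostname('Michael\'s "lab" box')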
@ -237,7 +237,7 @@ def _gen_xml(name,
Generate the XML string to define a libvirt VM
'''
hypervisor = 'vmware' if hypervisor == 'esxi' else hypervisor
mem = mem * 1024  # MB
mem = int(mem) * 1024  # MB
context = {
'hypervisor': hypervisor,
'name': name,
@ -561,6 +561,7 @@ def init(name,
priv_key=None,
seed_cmd='seed.apply',
enable_vnc=False,
enable_qcow=False,
**kwargs):
'''
Initialize a new vm
@ -570,6 +571,7 @@ def init(name,
.. code-block:: bash

salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
salt 'hypervisor' virt.init vm_name 4 512 /var/lib/libvirt/images/img.raw
salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
'''
hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)
@ -603,23 +605,39 @@ def init(name,
)
elif hypervisor in ['qemu', 'kvm']:
img_dir = __salt__['config.option']('virt.images')
log.debug('Image directory from config option `virt.images` is {0}'
.format(img_dir))
img_dest = os.path.join(
img_dir,
name,
disk_file_name
)
log.debug('Image destination will be {0}'.format(img_dest))
img_dir = os.path.dirname(img_dest)
log.debug('Image destination directory is {0}'.format(img_dir))
sfn = __salt__['cp.cache_file'](image, saltenv)
log.debug('Image directory is {0}'.format(img_dir))

try:
os.makedirs(img_dir)
except OSError:
pass

qcow2 = False
if salt.utils.which('qemu-img'):
res = __salt__['cmd.run']('qemu-img info {}'.format(sfn))
imageinfo = yaml.load(res)
qcow2 = imageinfo['file format'] == 'qcow2'

try:
log.debug('Copying {0} to {1}'.format(sfn, img_dest))
salt.utils.files.copyfile(sfn, img_dest)
if enable_qcow and qcow2:
log.info('Cloning qcow2 image {} using copy on write'
.format(sfn))
__salt__['cmd.run'](
'qemu-img create -f qcow2 -o backing_file={} {}'
.format(sfn, img_dest).split())
else:
log.debug('Copying {0} to {1}'.format(sfn, img_dest))
salt.utils.files.copyfile(sfn, img_dest)
mask = os.umask(0)
os.umask(mask)
# Apply umask and remove exec bit
@ -660,14 +678,18 @@ def init(name,
xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
try:
define_xml_str(xml)
except libvirtError:
# This domain already exists
pass
except libvirtError as err:
# check if failure is due to this domain already existing
if "domain '{}' already exists".format(name) in str(err):
# continue on to seeding
log.warn(err)
else:
raise err  # a real error we should report upwards

if seed and seedable:
log.debug('Seed command is {0}'.format(seed_cmd))
__salt__[seed_cmd](
img_dest,
img_dest,
id_=name,
config=kwargs.get('config'),
install=install,
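When ``enable_qcow`` is set and the cached image is already qcow2, the VM disk is created as a copy-on-write overlay instead of a full copy, via ``qemu-img create -f qcow2 -o backing_file=...``. A hedged standalone sketch of that branch (paths are placeholders):

.. code-block:: python

    import subprocess

    def clone_disk(src_image, dest_image, use_cow=False):
        if use_cow:
            # Thin clone: dest_image only stores blocks that differ from the base.
            subprocess.check_call([
                'qemu-img', 'create', '-f', 'qcow2',
                '-o', 'backing_file={0}'.format(src_image), dest_image])
        else:
            # Full copy, as before.
            subprocess.check_call(['cp', src_image, dest_image])

    # clone_disk('/var/lib/libvirt/images/base.qcow2',
    #            '/var/lib/libvirt/images/vm1.qcow2', use_cow=True)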
@ -74,7 +74,7 @@ HAS_WIN_DACL = False
try:
if salt.utils.is_windows():
import salt.utils.win_dacl
HAS_WIN_DACL = salt.utils.win_dacl.HAS_WIN32
HAS_WIN_DACL = True
except ImportError:
HAS_WIN_DACL = False

@ -511,7 +511,7 @@ def list_bindings(site):


def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
sslflags=0):
sslflags=None):
'''
Create an IIS Web Binding.

@ -541,7 +541,6 @@ def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
'''
protocol = str(protocol).lower()
sslflags = int(sslflags)
name = _get_binding_info(hostheader, ipaddress, port)

if protocol not in _VALID_PROTOCOLS:
@ -549,10 +548,12 @@ def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
' {1}').format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message)

if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
if sslflags:
sslflags = int(sslflags)
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)

current_bindings = list_bindings(site)

@ -560,13 +561,21 @@ def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
log.debug('Binding already present: {0}'.format(name))
return True

ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(str(port)),
'-Protocol', "'{0}'".format(protocol),
'-SslFlags', '{0}'.format(sslflags)]
if sslflags:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(str(port)),
'-Protocol', "'{0}'".format(protocol),
'-SslFlags', '{0}'.format(sslflags)]
else:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(str(port)),
'-Protocol', "'{0}'".format(protocol)]

cmd_ret = _srvmgr(ps_cmd)

@ -1809,10 +1818,10 @@ def get_webapp_settings(name, site, settings):
for setting in settings:
if setting in availableSettings:
if setting == "userName" or setting == "password":
pscmd.append(r" $Property = Get-WebConfigurationProperty -Filter \"system.applicationHost/sites/site[@name='{0}']/application[@path='/{1}']/virtualDirectory[@path='/']\"".format(site, name))
pscmd.append(r" -Name \"{0}\" -ErrorAction Stop | select Value;".format(setting))
pscmd.append(r" $Property = $Property | Select-Object -ExpandProperty Value;")
pscmd.append(r" $Settings['{0}'] = [String] $Property;".format(setting))
pscmd.append(" $Property = Get-WebConfigurationProperty -Filter \"system.applicationHost/sites/site[@name='{0}']/application[@path='/{1}']/virtualDirectory[@path='/']\"".format(site, name))
pscmd.append(" -Name \"{0}\" -ErrorAction Stop | select Value;".format(setting))
pscmd.append(" $Property = $Property | Select-Object -ExpandProperty Value;")
pscmd.append(" $Settings['{0}'] = [String] $Property;".format(setting))
pscmd.append(r' $Property = $Null;')

if setting == "physicalPath" or setting == "applicationPool":
@ -4600,7 +4600,7 @@ def _lookup_admin_template(policy_name,
if len(adml_search_results) > 1:
multiple_adml_entries = True
for adml_search_result in adml_search_results:
if not adml_search_result.attrib['text'].strip() == policy_name:
if not getattr(adml_search_result, 'text', '').strip() == policy_name:
adml_search_results.remove(adml_search_result)
for adml_search_result in adml_search_results:
dmsg = 'found an ADML entry matching the string! {0} -- {1}'
@ -39,17 +39,12 @@ import logging
import os
import re
import time
from functools import cmp_to_key

# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
# pylint: disable=import-error
try:
import msgpack
except ImportError:
import msgpack_pure as msgpack
# pylint: enable=import-error

# Import salt libs
from salt.exceptions import (CommandExecutionError,
@ -57,8 +52,9 @@ from salt.exceptions import (CommandExecutionError,
SaltRenderError)
import salt.utils
import salt.syspaths
import salt.payload
from salt.exceptions import MinionError
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.utils.versions import LooseVersion

log = logging.getLogger(__name__)

@ -84,16 +80,22 @@ def latest_version(*names, **kwargs):
If the latest version of a given package is already installed, an empty
string will be returned for that package.

Args:
names (str): A single or multiple names to lookup

Kwargs:
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``True``

Returns:
dict: A dictionary of packages with the latest version available

CLI Example:

.. code-block:: bash

salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...

*Keyword Arguments (kwargs)*
:param str saltenv: Salt environment. Default ``base``
:param bool refresh: Refresh package metadata. Default ``True``
'''
if len(names) == 0:
return ''
@ -106,22 +108,24 @@ def latest_version(*names, **kwargs):
saltenv = kwargs.get('saltenv', 'base')
# Refresh before looking for the latest version available
refresh = salt.utils.is_true(kwargs.get('refresh', True))
# no need to call _refresh_db_conditional as list_pkgs will do it

installed_pkgs = list_pkgs(versions_as_list=True, saltenv=saltenv, refresh=refresh)
# no need to call _refresh_db_conditional as list_pkgs will do it
installed_pkgs = list_pkgs(
versions_as_list=True, saltenv=saltenv, refresh=refresh)
log.trace('List of installed packages: {0}'.format(installed_pkgs))

# iterate over all requested package names
for name in names:
latest_installed = '0'
latest_available = '0'

# get latest installed version of package
if name in installed_pkgs:
log.trace('Determining latest installed version of %s', name)
try:
latest_installed = sorted(
installed_pkgs[name], cmp=_reverse_cmp_pkg_versions).pop()
installed_pkgs[name],
key=cmp_to_key(_reverse_cmp_pkg_versions)
).pop()
except IndexError:
log.warning(
'%s was empty in pkg.list_pkgs return data, this is '
@ -161,13 +165,15 @@ def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package

:param name: The name of a single package
:return: Return True if newer version available
:rtype: bool
Args:
name (str): The name of a single package

*Keyword Arguments (kwargs)*
:param str saltenv: Salt environment
:param bool refresh: Refresh package metadata. Default ``True``
Kwargs:
refresh (bool): Refresh package metadata. Default ``True``
saltenv (str): The salt environment. Default ``base``

Returns:
bool: True if new version available, otherwise False

CLI Example:

@ -180,17 +186,24 @@ def upgrade_available(name, **kwargs):
# same default as latest_version
refresh = salt.utils.is_true(kwargs.get('refresh', True))

return latest_version(name, saltenv=saltenv, refresh=refresh) != ''
current = version(name, saltenv=saltenv, refresh=refresh).get(name)
latest = latest_version(name, saltenv=saltenv, refresh=False)

return compare_versions(latest, '>', current)


def list_upgrades(refresh=True, **kwargs):  # pylint: disable=W0613
def list_upgrades(refresh=True, **kwargs):
'''
List all available package upgrades on this system

:param bool refresh: Refresh package metadata. Default ``True``
Args:
refresh (bool): Refresh package metadata. Default ``True``

*Keyword Arguments (kwargs)*
:param str saltenv: Salt environment. Default ``base``
Kwargs:
saltenv (str): Salt environment. Default ``base``

Returns:
dict: A dictionary of packages with available upgrades

CLI Example:

@ -202,34 +215,46 @@ def list_upgrades(refresh=True, **kwargs):  # pylint: disable=W0613
refresh = salt.utils.is_true(refresh)
_refresh_db_conditional(saltenv, force=refresh)

ret = {}
for name, data in six.iteritems(get_repo_data(saltenv).get('repo', {})):
if version(name):
latest = latest_version(name, refresh=False, saltenv=saltenv)
if latest:
ret[name] = latest
return ret
installed_pkgs = list_pkgs(refresh=False, saltenv=saltenv)
available_pkgs = get_repo_data(saltenv).get('repo')
pkgs = {}
for pkg in installed_pkgs:
if pkg in available_pkgs:
latest_ver = latest_version(pkg, refresh=False, saltenv=saltenv)
install_ver = installed_pkgs[pkg]
if compare_versions(latest_ver, '>', install_ver):
pkgs[pkg] = latest_ver

return pkgs


def list_available(*names, **kwargs):
'''
Return a list of available versions of the specified package.

:param str name: One or more package names
:return:
For multiple package names listed returns dict of package names and versions
For single package name returns a version string
:rtype: dict or string
Args:
names (str): One or more package names

Kwargs:

saltenv (str): The salt environment to use. Default ``base``.

refresh (bool): Refresh package metadata. Default ``True``.

return_dict_always (bool):
Default ``False`` dict when a single package name is queried.

Returns:
dict: The package name with its available versions

.. code-block:: cfg

{'<package name>': ['<version>', '<version>', ]}

*Keyword Arguments (kwargs)*
:param str saltenv: The salt environment to use. Default ``base``.
:param bool refresh: Refresh package metadata. Default ``True``.
:param bool return_dict_always: Default ``False`` dict when a single package name is queried.

CLI Example:

.. code-block:: bash

salt '*' pkg.list_available <package name> return_dict_always=True
salt '*' pkg.list_available <package name01> <package name02>
'''
@ -237,24 +262,29 @@ def list_available(*names, **kwargs):
return ''

saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.is_true(kwargs.get('refresh', False))
_refresh_db_conditional(saltenv, force=refresh)
refresh = salt.utils.is_true(kwargs.get('refresh', True))
return_dict_always = \
salt.utils.is_true(kwargs.get('return_dict_always', False))

_refresh_db_conditional(saltenv, force=refresh)
if len(names) == 1 and not return_dict_always:
pkginfo = _get_package_info(names[0], saltenv=saltenv)
if not pkginfo:
return ''
versions = list(pkginfo.keys())
versions = sorted(versions, cmp=_reverse_cmp_pkg_versions)
versions = sorted(
list(pkginfo.keys()),
key=cmp_to_key(_reverse_cmp_pkg_versions)
)
else:
versions = {}
for name in names:
pkginfo = _get_package_info(name, saltenv=saltenv)
if not pkginfo:
continue
verlist = list(pkginfo.keys()) if pkginfo else []
verlist = sorted(verlist, cmp=_reverse_cmp_pkg_versions)
verlist = sorted(
list(pkginfo.keys()) if pkginfo else [],
key=cmp_to_key(_reverse_cmp_pkg_versions)
)
versions[name] = verlist
return versions

@ -263,52 +293,57 @@ def version(*names, **kwargs):
'''
Returns a version if the package is installed, else returns an empty string

:param str name: One or more package names
:return:
For multiple package names listed returns dict of package names and current version
For single package name returns a current version string
:rtype: dict or string
Args:
name (str): One or more package names

Kwargs:
saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``False``.

Returns:
dict: The package name(s) with the installed versions.

.. code-block:: cfg

{'<package name>': ['<version>', '<version>', ]}

*Keyword Arguments (kwargs)*
:param str saltenv: The salt environment to use. Default ``base``.
:param bool refresh: Refresh package metadata. Default ``False``.

CLI Example:

.. code-block:: bash

salt '*' pkg.version <package name>
salt '*' pkg.version <package name01> <package name02>
'''
# pkg_resource calls list_pkgs refresh kwargs will be passed on
saltenv = kwargs.get('saltenv', 'base')

installed_pkgs = list_pkgs(refresh=kwargs.get('refresh', False))
available_pkgs = get_repo_data(saltenv).get('repo')

ret = {}
if len(names) == 1:
val = __salt__['pkg_resource.version'](*names, **kwargs)
if len(val):
return val
return ''
if len(names) > 1:
reverse_dict = {}
nums = __salt__['pkg_resource.version'](*names, **kwargs)
if len(nums):
for num, val in six.iteritems(nums):
if len(val) > 0:
try:
ret[reverse_dict[num]] = val
except KeyError:
ret[num] = val
return ret
return dict([(x, '') for x in names])
for name in names:
if name in available_pkgs:
ret[name] = installed_pkgs.get(name, '')
else:
ret[name] = 'not available'

return ret


def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed in a dict::
List the packages currently installed

*Keyword Arguments (kwargs)*
:param str saltenv: The salt environment to use. Default ``base``.
:param bool refresh: Refresh package metadata. Default ``False`.
Args:
version_as_list (bool): Returns the versions as a list

Kwargs:
saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``False`.

Returns:
dict: A dictionary of installed software with versions installed

.. code-block:: cfg

{'<package_name>': '<version>'}

@ -353,9 +388,9 @@ def list_pkgs(versions_as_list=False, **kwargs):

def _search_software(target):
'''
This searches the msi product databases for name matches
of the list of target products, it will return a dict with
values added to the list passed in
This searches the msi product databases for name matches of the list of
target products, it will return a dict with values added to the list passed
in
'''
search_results = {}
software = dict(_get_reg_software().items())
@ -368,9 +403,9 @@ def _search_software(target):

def _get_reg_software():
'''
This searches the uninstall keys in the registry to find
a match in the sub keys, it will return a dict with the
display name as the key and the version as the value
This searches the uninstall keys in the registry to find a match in the sub
keys, it will return a dict with the display name as the key and the
version as the value
'''
ignore_list = ['AddressBook',
'Connection Manager',
@ -428,9 +463,22 @@ def _refresh_db_conditional(saltenv, **kwargs):
returns True or False. And supports check the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with

:param str saltenv: Salt environment
:return: True Fetched or Cache uptodate, False to indicate an issue
:rtype: bool
Args:
saltenv (str): Salt environment

Kwargs:

force (bool):
Force a refresh if the minimum age has been reached. Default is
False.

failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.

Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue

:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
@ -485,11 +533,14 @@ def refresh_db(**kwargs):
<salt.modules.win_pkg.genrepo>` to compile updated repository metadata.

Kwargs:

saltenv (str): Salt environment. Default: ``base``

verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'

failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary
@ -542,7 +593,7 @@ def refresh_db(**kwargs):
)

# Cache repo-ng locally
cached_files = __salt__['cp.cache_dir'](
__salt__['cp.cache_dir'](
repo_details.winrepo_source_dir,
saltenv,
include_pat='*.sls'
@ -578,7 +629,7 @@ def _get_repo_details(saltenv):
)
else:
log.error(
'minion cofiguration option \'winrepo_cachefile\' has been '
'minion configuration option \'winrepo_cachefile\' has been '
'ignored as its value (%s) is invalid. Please ensure this '
'option is set to a valid filename.',
__opts__['winrepo_cachefile']
@ -635,6 +686,23 @@ def genrepo(**kwargs):
'''
Generate package metedata db based on files within the winrepo_source_dir

Kwargs:

saltenv (str): Salt environment. Default: ``base``

verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'

failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed
to process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.

Returns:
dict: A dictionary of the results of the command

CLI Example:

.. code-block:: bash
@ -642,19 +710,6 @@ def genrepo(**kwargs):
salt-run pkg.genrepo
salt -G 'os:windows' pkg.genrepo verbose=true failhard=false
salt -G 'os:windows' pkg.genrepo saltenv=base

*Keyword Arguments (kwargs)*

:param str saltenv: Salt environment. Default: ``base``

:param bool verbose:
Return verbose data structure which includes 'success_list', a list of
all sls files and the package names contained within. Default 'False'

:param bool failhard:
If ``True``, an error will be raised if any repo SLS files failed to
proess. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
'''
saltenv = kwargs.pop('saltenv', 'base')
verbose = salt.utils.is_true(kwargs.pop('verbose', False))
@ -680,8 +735,10 @@ def genrepo(**kwargs):
ret,
successful_verbose
)
with salt.utils.fopen(repo_details.winrepo_file, 'w+b') as repo_cache:
repo_cache.write(msgpack.dumps(ret))
serial = salt.payload.Serial(__opts__)
mode = 'w+' if six.PY2 else 'wb+'
with salt.utils.fopen(repo_details.winrepo_file, mode) as repo_cache:
repo_cache.write(serial.dumps(ret))
# save reading it back again. ! this breaks due to utf8 issues
#__context__['winrepo.data'] = ret
successful_count = len(successful_verbose)
@ -832,56 +889,57 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
r'''
|
||||
Install the passed package(s) on the system using winrepo
|
||||
|
||||
:param name:
|
||||
The name of a single package, or a comma-separated list of packages to
|
||||
install. (no spaces after the commas)
|
||||
:type name: str, list, or None
|
||||
Args:
|
||||
|
||||
:param bool refresh: Boolean value representing whether or not to refresh
|
||||
the winrepo db
|
||||
name (str):
|
||||
The name of a single package, or a comma-separated list of packages
|
||||
to install. (no spaces after the commas)
|
||||
|
||||
:param pkgs: A list of packages to install from a software repository.
|
||||
All packages listed under ``pkgs`` will be installed via a single
|
||||
command.
|
||||
refresh (bool):
|
||||
Boolean value representing whether or not to refresh the winrepo db
|
||||
|
||||
:type pkgs: list or None
|
||||
pkgs (list):
|
||||
A list of packages to install from a software repository. All
|
||||
packages listed under ``pkgs`` will be installed via a single
|
||||
command.
|
||||
|
||||
*Keyword Arguments (kwargs)*
|
||||
Kwargs:
|
||||
|
||||
:param str version:
|
||||
The specific version to install. If omitted, the latest version will be
|
||||
installed. If passed with multiple install, the version will apply to
|
||||
all packages. Recommended for single installation only.
|
||||
version (str):
|
||||
The specific version to install. If omitted, the latest version
|
||||
will be installed. If passed with multiple install, the version
|
||||
will apply to all packages. Recommended for single installation
|
||||
only.
|
||||
|
||||
:param str cache_file:
|
||||
A single file to copy down for use with the installer. Copied to the
|
||||
same location as the installer. Use this over ``cache_dir`` if there
|
||||
are many files in the directory and you only need a specific file and
|
||||
don't want to cache additional files that may reside in the installer
|
||||
directory. Only applies to files on ``salt://``
|
||||
cache_file (str):
|
||||
A single file to copy down for use with the installer. Copied to
|
||||
the same location as the installer. Use this over ``cache_dir`` if
|
||||
there are many files in the directory and you only need a specific
|
||||
file and don't want to cache additional files that may reside in
|
||||
the installer directory. Only applies to files on ``salt://``
|
||||
|
||||
:param bool cache_dir:
|
||||
True will copy the contents of the installer directory. This is useful
|
||||
for installations that are not a single file. Only applies to
|
||||
directories on ``salt://``
|
||||
cache_dir (bool):
|
||||
True will copy the contents of the installer directory. This is
|
||||
useful for installations that are not a single file. Only applies
|
||||
to directories on ``salt://``
|
||||
|
||||
:param str saltenv: Salt environment. Default 'base'
|
||||
saltenv (str): Salt environment. Default 'base'
|
||||
|
||||
:param bool report_reboot_exit_codes:
|
||||
If the installer exits with a recognized exit code indicating that
|
||||
a reboot is required, the module function
|
||||
report_reboot_exit_codes (bool):
|
||||
If the installer exits with a recognized exit code indicating that
|
||||
a reboot is required, the module function
|
||||
|
||||
*win_system.set_reboot_required_witnessed*
|
||||
*win_system.set_reboot_required_witnessed*
|
||||
|
||||
will be called, preserving the knowledge of this event
|
||||
for the remainder of the current boot session. For the time being,
|
||||
3010 is the only recognized exit code. The value of this param
|
||||
defaults to True.
|
||||
will be called, preserving the knowledge of this event for the
|
||||
remainder of the current boot session. For the time being, 3010 is
|
||||
the only recognized exit code. The value of this param defaults to
|
||||
True.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
:return: Return a dict containing the new package names and versions::
|
||||
:rtype: dict
|
||||
Returns:
|
||||
dict: Return a dict containing the new package names and versions
|
||||
|
||||
If the package is installed by ``pkg.install``:
|
||||
|
||||
@ -896,8 +954,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
|
||||
{'<package>': {'current': '<current-version>'}}
|
||||
|
||||
The following example will refresh the winrepo and install a single package,
|
||||
7zip.
|
||||
The following example will refresh the winrepo and install a single
|
||||
package, 7zip.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -916,8 +974,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
WinRepo Definition File Examples:
|
||||
|
||||
The following example demonstrates the use of ``cache_file``. This would be
|
||||
used if you have multiple installers in the same directory that use the same
|
||||
``install.ini`` file and you don't want to download the additional
|
||||
used if you have multiple installers in the same directory that use the
|
||||
same ``install.ini`` file and you don't want to download the additional
|
||||
installers.
|
||||
|
||||
.. code-block:: bash
|
||||
@ -949,6 +1007,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
'''
|
||||
ret = {}
|
||||
saltenv = kwargs.pop('saltenv', 'base')
|
||||
|
||||
refresh = salt.utils.is_true(refresh)
|
||||
# no need to call _refresh_db_conditional as list_pkgs will do it
|
||||
|
||||
@ -994,14 +1053,15 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
# Get the version number passed or the latest available
|
||||
version_num = ''
|
||||
if options:
|
||||
version_num = options.get('version', False)
|
||||
if options.get('version') is not None:
|
||||
version_num = str(options.get('version'))
|
||||
|
||||
if not version_num:
|
||||
version_num = _get_latest_pkg_version(pkginfo)
|
||||
|
||||
# Check if the version is already installed
|
||||
if version_num in old.get(pkg_name, '').split(',') \
|
||||
or (pkg_name in old and old[pkg_name] == 'Not Found'):
|
||||
or (old.get(pkg_name) == 'Not Found'):
|
||||
# Desired version number already installed
|
||||
ret[pkg_name] = {'current': version_num}
|
||||
continue
|
||||
@ -1017,9 +1077,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
latest.append(pkg_name)
|
||||
|
||||
# Get the installer settings from winrepo.p
|
||||
installer = pkginfo[version_num].get('installer', False)
|
||||
installer = pkginfo[version_num].get('installer', '')
|
||||
cache_dir = pkginfo[version_num].get('cache_dir', False)
|
||||
cache_file = pkginfo[version_num].get('cache_file', False)
|
||||
cache_file = pkginfo[version_num].get('cache_file', '')
|
||||
|
||||
# Is there an installer configured?
|
||||
if not installer:
|
||||
@ -1083,8 +1143,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
if __salt__['cp.hash_file'](installer, saltenv) != \
|
||||
__salt__['cp.hash_file'](cached_pkg):
|
||||
try:
|
||||
cached_pkg = __salt__['cp.cache_file'](installer,
|
||||
saltenv)
|
||||
cached_pkg = __salt__['cp.cache_file'](installer, saltenv)
|
||||
except MinionError as exc:
|
||||
return '{0}: {1}'.format(exc, installer)
|
||||
|
||||
@ -1121,7 +1180,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
log.debug('Source hash matches package hash.')
|
||||
|
||||
# Get install flags
|
||||
install_flags = '{0}'.format(pkginfo[version_num].get('install_flags'))
|
||||
install_flags = pkginfo[version_num].get('install_flags', '')
|
||||
if options and options.get('extra_install_flags'):
|
||||
install_flags = '{0} {1}'.format(
|
||||
install_flags,
|
||||
@ -1133,7 +1192,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
if pkginfo[version_num].get('use_scheduler', False):
|
||||
|
||||
# Build Scheduled Task Parameters
|
||||
if pkginfo[version_num].get('msiexec'):
|
||||
if pkginfo[version_num].get('msiexec', False):
|
||||
cmd = 'msiexec.exe'
|
||||
arguments = ['/i', cached_pkg]
|
||||
if pkginfo['version_num'].get('allusers', True):
|
||||
@ -1164,7 +1223,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
else:
|
||||
# Build the install command
|
||||
cmd = []
|
||||
if pkginfo[version_num].get('msiexec'):
|
||||
if pkginfo[version_num].get('msiexec', False):
|
||||
cmd.extend(['msiexec', '/i', cached_pkg])
|
||||
if pkginfo[version_num].get('allusers', True):
|
||||
cmd.append('ALLUSERS="1"')
|
||||
@ -1183,9 +1242,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
elif result['retcode'] == 3010:
|
||||
# 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
|
||||
report_reboot_exit_codes = kwargs.pop(
|
||||
'report_reboot_exit_codes',
|
||||
True
|
||||
)
|
||||
'report_reboot_exit_codes', True)
|
||||
if report_reboot_exit_codes:
|
||||
__salt__['system.set_reboot_required_witnessed']()
|
||||
ret[pkg_name] = {'install status': 'success, reboot required'}
|
||||
@ -1222,13 +1279,16 @@ def upgrade(**kwargs):
|
||||
'''
|
||||
Upgrade all software. Currently not implemented
|
||||
|
||||
*Keyword Arguments (kwargs)*
|
||||
:param str saltenv: The salt environment to use. Default ``base``.
|
||||
:param bool refresh: Refresh package metadata. Default ``True``.
|
||||
Kwargs:
|
||||
saltenv (str): The salt environment to use. Default ``base``.
|
||||
refresh (bool): Refresh package metadata. Default ``True``.
|
||||
|
||||
.. note::
|
||||
This feature is not yet implemented for Windows.
|
||||
|
||||
Returns:
|
||||
dict: Empty dict, until implemented
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -1249,32 +1309,27 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
'''
|
||||
Remove the passed package(s) from the system using winrepo
|
||||
|
||||
:param name:
|
||||
The name of the package to be uninstalled.
|
||||
:type name: str, list, or None
|
||||
|
||||
:param str version:
|
||||
The version of the package to be uninstalled. If this option is used to
|
||||
to uninstall multiple packages, then this version will be applied to all
|
||||
targeted packages. Recommended using only when uninstalling a single
|
||||
package. If this parameter is omitted, the latest version will be
|
||||
uninstalled.
|
||||
|
||||
Multiple Package Options:
|
||||
|
||||
:param pkgs:
|
||||
A list of packages to delete. Must be passed as a python list. The
|
||||
``name`` parameter will be ignored if this option is passed.
|
||||
:type pkgs: list or None
|
||||
|
||||
.. versionadded:: 0.16.0
|
||||
|
||||
*Keyword Arguments (kwargs)*
|
||||
:param str saltenv: Salt environment. Default ``base``
|
||||
:param bool refresh: Refresh package metadata. Default ``False``
|
||||
Args:
|
||||
name (str): The name(s) of the package(s) to be uninstalled. Can be a
|
||||
single package or a comma delimited list of packages, no spaces.
|
||||
version (str):
|
||||
The version of the package to be uninstalled. If this option is
|
||||
used to uninstall multiple packages, then this version will be
|
||||
applied to all targeted packages. Recommended only when
|
||||
uninstalling a single package. If this parameter is omitted, the
|
||||
latest version will be uninstalled.
|
||||
pkgs (list):
|
||||
A list of packages to delete. Must be passed as a python list. The
|
||||
``name`` parameter will be ignored if this option is passed.
|
||||
|
||||
:return: Returns a dict containing the changes.
|
||||
:rtype: dict
|
||||
Kwargs:
|
||||
saltenv (str): Salt environment. Default ``base``
|
||||
refresh (bool): Refresh package metadata. Default ``False``
|
||||
|
||||
Returns:
|
||||
dict: Returns a dict containing the changes.
|
||||
|
||||
If the package is removed by ``pkg.remove``:
|
||||
|
||||
@ -1322,9 +1377,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
ret[pkgname] = msg
|
||||
continue
|
||||
|
||||
if version_num is not None \
|
||||
and version_num not in pkginfo \
|
||||
and 'latest' in pkginfo:
|
||||
if version_num is None and 'latest' in pkginfo:
|
||||
version_num = 'latest'
|
||||
|
||||
# Check to see if package is installed on the system
|
||||
@ -1337,7 +1390,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
if version_num is None:
|
||||
removal_targets.extend(old[pkgname])
|
||||
elif version_num not in old[pkgname] \
|
||||
and 'Not Found' not in old['pkgname'] \
|
||||
and 'Not Found' not in old[pkgname] \
|
||||
and version_num != 'latest':
|
||||
log.error('%s %s not installed', pkgname, version)
|
||||
ret[pkgname] = {
|
||||
@ -1348,12 +1401,14 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
removal_targets.append(version_num)
|
||||
|
||||
for target in removal_targets:
|
||||
|
||||
# Get the uninstaller
|
||||
uninstaller = pkginfo[target].get('uninstaller')
|
||||
uninstaller = pkginfo[target].get('uninstaller', '')
|
||||
cache_dir = pkginfo[target].get('cache_dir', False)
|
||||
|
||||
# If no uninstaller found, use the installer
|
||||
if not uninstaller:
|
||||
uninstaller = pkginfo[target].get('installer')
|
||||
uninstaller = pkginfo[target].get('installer', '')
|
||||
|
||||
# If still no uninstaller found, fail
|
||||
if not uninstaller:
|
||||
@ -1367,19 +1422,49 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
# Where is the uninstaller
|
||||
if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):
|
||||
|
||||
# Check for the 'cache_dir' parameter in the .sls file
|
||||
# If true, the entire directory will be cached instead of the
|
||||
# individual file. This is useful for installations that are not
|
||||
# single files
|
||||
if cache_dir and uninstaller.startswith('salt:'):
|
||||
path, _ = os.path.split(uninstaller)
|
||||
__salt__['cp.cache_dir'](path,
|
||||
saltenv,
|
||||
False,
|
||||
None,
|
||||
'E@init.sls$')
|
||||
|
||||
# Check to see if the uninstaller is cached
|
||||
cached_pkg = __salt__['cp.is_cached'](uninstaller)
|
||||
cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
|
||||
if not cached_pkg:
|
||||
# It's not cached. Cache it, mate.
|
||||
cached_pkg = __salt__['cp.cache_file'](uninstaller)
|
||||
cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)
|
||||
|
||||
# Check if the uninstaller was cached successfully
|
||||
if not cached_pkg:
|
||||
log.error('Unable to cache %s', uninstaller)
|
||||
ret[pkgname] = {'unable to cache': uninstaller}
|
||||
continue
|
||||
|
||||
# Compare the hash of the cached installer to the source only if
|
||||
# the file is hosted on salt:
|
||||
if uninstaller.startswith('salt:'):
|
||||
if __salt__['cp.hash_file'](uninstaller, saltenv) != \
|
||||
__salt__['cp.hash_file'](cached_pkg):
|
||||
try:
|
||||
cached_pkg = __salt__['cp.cache_file'](
|
||||
uninstaller, saltenv)
|
||||
except MinionError as exc:
|
||||
return '{0}: {1}'.format(exc, uninstaller)
|
||||
|
||||
# Check if the installer was cached successfully
|
||||
if not cached_pkg:
|
||||
log.error('Unable to cache {0}'.format(uninstaller))
|
||||
ret[pkgname] = {'unable to cache': uninstaller}
|
||||
continue
|
||||
else:
|
||||
# Run the uninstaller directly (not hosted on salt:, https:, etc.)
|
||||
# Run the uninstaller directly
|
||||
# (not hosted on salt:, https:, etc.)
|
||||
cached_pkg = uninstaller
|
||||
|
||||
# Fix non-windows slashes
|
||||
@ -1390,21 +1475,18 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
expanded_cached_pkg = str(os.path.expandvars(cached_pkg))
|
||||
|
||||
# Get uninstall flags
|
||||
uninstall_flags = '{0}'.format(
|
||||
pkginfo[target].get('uninstall_flags', '')
|
||||
)
|
||||
uninstall_flags = pkginfo[target].get('uninstall_flags', '')
|
||||
|
||||
if kwargs.get('extra_uninstall_flags'):
|
||||
uninstall_flags = '{0} {1}'.format(
|
||||
uninstall_flags,
|
||||
kwargs.get('extra_uninstall_flags', "")
|
||||
)
|
||||
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
|
||||
|
||||
# Uninstall the software
|
||||
# Check Use Scheduler Option
|
||||
if pkginfo[target].get('use_scheduler', False):
|
||||
|
||||
# Build Scheduled Task Parameters
|
||||
if pkginfo[target].get('msiexec'):
|
||||
if pkginfo[target].get('msiexec', False):
|
||||
cmd = 'msiexec.exe'
|
||||
arguments = ['/x']
|
||||
arguments.extend(salt.utils.shlex_split(uninstall_flags))
|
||||
@ -1433,16 +1515,17 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
else:
|
||||
# Build the install command
|
||||
cmd = []
|
||||
if pkginfo[target].get('msiexec'):
|
||||
if pkginfo[target].get('msiexec', False):
|
||||
cmd.extend(['msiexec', '/x', expanded_cached_pkg])
|
||||
else:
|
||||
cmd.append(expanded_cached_pkg)
|
||||
cmd.extend(salt.utils.shlex_split(uninstall_flags))
|
||||
# Launch the command
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False,
|
||||
redirect_stderr=True)
|
||||
result = __salt__['cmd.run_all'](
|
||||
cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False,
|
||||
redirect_stderr=True)
|
||||
if not result['retcode']:
|
||||
ret[pkgname] = {'uninstall status': 'success'}
|
||||
changed.append(pkgname)
|
||||
@ -1480,29 +1563,26 @@ def purge(name=None, pkgs=None, version=None, **kwargs):
|
||||
Package purges are not supported, this function is identical to
|
||||
``remove()``.
|
||||
|
||||
name
|
||||
The name of the package to be deleted.
|
||||
|
||||
version
|
||||
The version of the package to be deleted. If this option is used in
|
||||
combination with the ``pkgs`` option below, then this version will be
|
||||
applied to all targeted packages.
|
||||
|
||||
|
||||
Multiple Package Options:
|
||||
|
||||
pkgs
|
||||
A list of packages to delete. Must be passed as a python list. The
|
||||
``name`` parameter will be ignored if this option is passed.
|
||||
|
||||
*Keyword Arguments (kwargs)*
|
||||
:param str saltenv: Salt environment. Default ``base``
|
||||
:param bool refresh: Refresh package metadata. Default ``False``
|
||||
|
||||
.. versionadded:: 0.16.0
|
||||
|
||||
Args:
|
||||
|
||||
Returns a dict containing the changes.
|
||||
name (str): The name of the package to be deleted.
|
||||
|
||||
version (str): The version of the package to be deleted. If this option
|
||||
is used in combination with the ``pkgs`` option below, then this
|
||||
version will be applied to all targeted packages.
|
||||
|
||||
pkgs (list): A list of packages to delete. Must be passed as a python
|
||||
list. The ``name`` parameter will be ignored if this option is
|
||||
passed.
|
||||
|
||||
Kwargs:
|
||||
saltenv (str): Salt environment. Default ``base``
|
||||
refresh (bool): Refresh package metadata. Default ``False``
|
||||
|
||||
Returns:
|
||||
dict: A dict containing the changes.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -1520,13 +1600,14 @@ def purge(name=None, pkgs=None, version=None, **kwargs):
|
||||
|
||||
def get_repo_data(saltenv='base'):
|
||||
'''
|
||||
Returns the existing package meteadata db.
|
||||
Will create it, if it does not exist, however will not refresh it.
|
||||
Returns the existing package metadata db. Will create it if it does not
|
||||
exist; however, it will not refresh it.
|
||||
|
||||
:param str saltenv: Salt environment. Default ``base``
|
||||
Args:
|
||||
saltenv (str): Salt environment. Default ``base``
|
||||
|
||||
:return: Returns a dict containing contents of metadata db.
|
||||
:rtype: dict
|
||||
Returns:
|
||||
dict: A dict containing contents of metadata db.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -1538,6 +1619,7 @@ def get_repo_data(saltenv='base'):
|
||||
# the existing data even if its old, other parts of the code call this,
|
||||
# but they will call refresh if they need too.
|
||||
repo_details = _get_repo_details(saltenv)
|
||||
|
||||
if repo_details.winrepo_age == -1:
|
||||
# no repo meta db
|
||||
log.debug('No winrepo.p cache file. Refresh pkg db now.')
|
||||
@ -1550,9 +1632,10 @@ def get_repo_data(saltenv='base'):
|
||||
log.trace('get_repo_data called reading from disk')
|
||||
|
||||
try:
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
with salt.utils.fopen(repo_details.winrepo_file, 'rb') as repofile:
|
||||
try:
|
||||
repodata = msgpack.loads(repofile.read()) or {}
|
||||
repodata = serial.loads(repofile.read()) or {}
|
||||
__context__['winrepo.data'] = repodata
|
||||
return repodata
|
||||
except Exception as exc:
|
||||
@ -1570,6 +1653,10 @@ def _get_name_map(saltenv='base'):
|
||||
'''
|
||||
u_name_map = {}
|
||||
name_map = get_repo_data(saltenv).get('name_map', {})
|
||||
|
||||
if not six.PY2:
|
||||
return name_map
|
||||
|
||||
for k in name_map:
|
||||
u_name_map[k.decode('utf-8')] = name_map[k]
|
||||
return u_name_map
|
||||
@ -1577,8 +1664,7 @@ def _get_name_map(saltenv='base'):
|
||||
|
||||
def _get_package_info(name, saltenv='base'):
|
||||
'''
|
||||
Return package info.
|
||||
Returns empty map if package not available
|
||||
Return package info. Returns empty map if package not available
|
||||
TODO: Add option for version
|
||||
'''
|
||||
return get_repo_data(saltenv).get('repo', {}).get(name, {})
|
||||
@ -1588,17 +1674,17 @@ def _reverse_cmp_pkg_versions(pkg1, pkg2):
|
||||
'''
|
||||
Compare software package versions
|
||||
'''
|
||||
if _LooseVersion(pkg1) > _LooseVersion(pkg2):
|
||||
return 1
|
||||
else:
|
||||
return -1
|
||||
return 1 if LooseVersion(pkg1) > LooseVersion(pkg2) else -1
|
||||
|
||||
|
||||
def _get_latest_pkg_version(pkginfo):
|
||||
if len(pkginfo) == 1:
|
||||
return next(six.iterkeys(pkginfo))
|
||||
try:
|
||||
return sorted(pkginfo, cmp=_reverse_cmp_pkg_versions).pop()
|
||||
return sorted(
|
||||
pkginfo,
|
||||
key=cmp_to_key(_reverse_cmp_pkg_versions)
|
||||
).pop()
|
||||
except IndexError:
|
||||
return ''
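
The hunk above replaces the Python 2-only ``cmp=`` argument of ``sorted()`` with ``functools.cmp_to_key``, the portable way to keep a cmp-style comparator. A minimal sketch of the equivalence, assuming ``LooseVersion`` comes from ``distutils.version`` and ``cmp_to_key`` from ``functools`` (those imports are not shown in this hunk):

.. code-block:: python

    from functools import cmp_to_key
    from distutils.version import LooseVersion

    def _reverse_cmp_pkg_versions(pkg1, pkg2):
        # Newer version compares greater, so it sorts towards the end of the list
        return 1 if LooseVersion(pkg1) > LooseVersion(pkg2) else -1

    versions = ['1.2.0', '1.10.0', '1.9.1']
    # Python 2 allowed sorted(versions, cmp=_reverse_cmp_pkg_versions);
    # wrapping the comparator with cmp_to_key works on Python 2 and 3 alike.
    latest = sorted(versions, key=cmp_to_key(_reverse_cmp_pkg_versions)).pop()
    assert latest == '1.10.0'
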
|
||||
|
||||
@ -1612,7 +1698,8 @@ def compare_versions(ver1='', oper='==', ver2=''):
|
||||
oper (str): The operand to use to compare
|
||||
ver2 (str): A software version to compare
|
||||
|
||||
Returns (bool): True if the comparison is valid, otherwise False
|
||||
Returns:
|
||||
bool: True if the comparison is valid, otherwise False
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@ -355,7 +355,7 @@ def _get_private_key_obj(private_key, passphrase=None):
|
||||
Returns a private key object based on PEM text.
|
||||
'''
|
||||
private_key = _text_or_file(private_key)
|
||||
private_key = get_pem_entry(private_key, pem_type='RSA PRIVATE KEY')
|
||||
private_key = get_pem_entry(private_key, pem_type='(?:RSA )?PRIVATE KEY')
|
||||
rsaprivkey = M2Crypto.RSA.load_key_string(
|
||||
private_key, callback=_passphrase_callback(passphrase))
|
||||
evpprivkey = M2Crypto.EVP.PKey()
|
||||
@ -765,7 +765,7 @@ def write_pem(text, path, overwrite=True, pem_type=None):
|
||||
except salt.exceptions.SaltInvocationError:
|
||||
pass
|
||||
try:
|
||||
_private_key = get_pem_entry(_filecontents, 'RSA PRIVATE KEY')
|
||||
_private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
|
||||
except salt.exceptions.SaltInvocationError:
|
||||
pass
|
||||
with salt.utils.fopen(path, 'w') as _fp:
|
||||
@ -842,7 +842,7 @@ def create_private_key(path=None,
|
||||
return write_pem(
|
||||
text=bio.read_all(),
|
||||
path=path,
|
||||
pem_type='RSA PRIVATE KEY'
|
||||
pem_type='(?:RSA )?PRIVATE KEY'
|
||||
)
|
||||
else:
|
||||
return bio.read_all()
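
These hunks widen ``pem_type`` from the literal ``RSA PRIVATE KEY`` header to a pattern that also accepts PKCS#8 keys. This assumes ``get_pem_entry`` treats ``pem_type`` as a regular expression fragment when matching the PEM header, which the regex-style value suggests; a minimal sketch of what the new pattern matches:

.. code-block:: python

    import re

    # '(?:RSA )?PRIVATE KEY' matches both the PKCS#1 and PKCS#8 header forms.
    pem_header = re.compile(r'-----BEGIN (?:RSA )?PRIVATE KEY-----')

    assert pem_header.match('-----BEGIN RSA PRIVATE KEY-----')  # PKCS#1
    assert pem_header.match('-----BEGIN PRIVATE KEY-----')      # PKCS#8
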
|
||||
|
@ -1021,6 +1021,7 @@ def install(name=None,
|
||||
skip_verify=False,
|
||||
pkgs=None,
|
||||
sources=None,
|
||||
downloadonly=False,
|
||||
reinstall=False,
|
||||
normalize=True,
|
||||
update_holds=False,
|
||||
@ -1079,6 +1080,9 @@ def install(name=None,
|
||||
skip_verify
|
||||
Skip the GPG verification check (e.g., ``--nogpgcheck``)
|
||||
|
||||
downloadonly
|
||||
Only download the packages, do not install.
|
||||
|
||||
version
|
||||
Install a specific version of the package, e.g. 1.2.3-4.el5. Ignored
|
||||
if "pkgs" or "sources" is passed.
|
||||
@ -1356,6 +1360,8 @@ def install(name=None,
|
||||
cmd.extend(arg)
|
||||
if skip_verify:
|
||||
cmd.append('--nogpgcheck')
|
||||
if downloadonly:
|
||||
cmd.append('--downloadonly')
|
||||
|
||||
try:
|
||||
holds = list_holds(full=False)
|
||||
|
@ -2469,7 +2469,7 @@ class Webhook(object):
|
||||
|
||||
And finally deploy the new build:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
|
||||
{% set build = data.get('post', {}) %}
|
||||
|
@ -3,6 +3,8 @@ r'''
|
||||
Display Pony output data structure
|
||||
==================================
|
||||
|
||||
:depends: - ponysay CLI program
|
||||
|
||||
Display output from a pony. Ponies are better than cows
|
||||
because everybody wants a pony.
|
||||
|
||||
@ -41,15 +43,20 @@ Example output:
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils
|
||||
import salt.utils.locales
|
||||
|
||||
|
||||
__virtualname__ = 'pony'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
return os.path.isfile('/usr/bin/ponysay')
|
||||
if salt.utils.which('ponysay'):
|
||||
return __virtualname__
|
||||
return False
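
The ``__virtual__`` check above moves from a hard-coded ``/usr/bin/ponysay`` path to a PATH lookup. ``salt.utils.which`` behaves much like the standard library's ``shutil.which``; a rough stand-alone equivalent for illustration:

.. code-block:: python

    import shutil

    # Load only when the ponysay executable is found anywhere on PATH,
    # instead of requiring it to live at /usr/bin/ponysay.
    if shutil.which('ponysay'):
        print('ponysay is available')
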
|
||||
|
||||
|
||||
def output(data, **kwargs): # pylint: disable=unused-argument
|
||||
|
@ -3,6 +3,8 @@
|
||||
Display output in a table format
|
||||
=================================
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
This outputter displays a sequence of rows as table.
|
||||
|
||||
Example output::
|
||||
|
@ -40,10 +40,13 @@ def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
|
||||
'''
|
||||
Return the correct pillar driver based on the file_client option
|
||||
'''
|
||||
file_client = opts['file_client']
|
||||
if opts.get('master_type') == 'disable' and file_client == 'remote':
|
||||
file_client = 'local'
|
||||
ptype = {
|
||||
'remote': RemotePillar,
|
||||
'local': Pillar
|
||||
}.get(opts['file_client'], Pillar)
|
||||
}.get(file_client, Pillar)
|
||||
# If local pillar and we're caching, run through the cache system first
|
||||
log.debug('Determining pillar cache')
|
||||
if opts['pillar_cache']:
|
||||
@ -61,10 +64,13 @@ def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None
|
||||
'''
|
||||
Return the correct pillar driver based on the file_client option
|
||||
'''
|
||||
file_client = opts['file_client']
|
||||
if opts.get('master_type') == 'disable' and file_client == 'remote':
|
||||
file_client = 'local'
|
||||
ptype = {
|
||||
'remote': AsyncRemotePillar,
|
||||
'local': AsyncPillar,
|
||||
}.get(opts['file_client'], AsyncPillar)
|
||||
}.get(file_client, AsyncPillar)
|
||||
return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,
|
||||
pillar=pillar, pillarenv=pillarenv)
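
Both hunks add the same fallback: when the master is disabled (``master_type: disable``), a ``remote`` ``file_client`` is treated as ``local`` so a masterless minion still gets a working pillar driver. A self-contained sketch of the selection logic (string driver names stand in for the real classes):

.. code-block:: python

    def pick_pillar_driver(opts, drivers, default):
        # A masterless minion cannot use the remote pillar, so fall back to local
        file_client = opts['file_client']
        if opts.get('master_type') == 'disable' and file_client == 'remote':
            file_client = 'local'
        return drivers.get(file_client, default)

    drivers = {'remote': 'RemotePillar', 'local': 'Pillar'}
    opts = {'file_client': 'remote', 'master_type': 'disable'}
    assert pick_pillar_driver(opts, drivers, 'Pillar') == 'Pillar'
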
|
||||
|
||||
|
26
salt/pillar/gpg.py
Normal file
@ -0,0 +1,26 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
Decrypt pillar data through the builtin GPG renderer
|
||||
|
||||
In most cases, you'll want to make this the last external pillar used. For
|
||||
example, to pair with the builtin stack pillar you could do something like
|
||||
this:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- stack: /path/to/stack.cfg
|
||||
- gpg: {}
|
||||
|
||||
Set ``gpg_keydir`` in your config to adjust the homedir the renderer uses.
|
||||
|
||||
'''
|
||||
|
||||
from __future__ import absolute_import
|
||||
import salt.utils
|
||||
|
||||
|
||||
def ext_pillar(minion_id, pillar, *args, **kwargs):
|
||||
render_function = salt.loader.render(__opts__, __salt__).get("gpg")
|
||||
return render_function(pillar)
|
@ -211,7 +211,7 @@ This was designed to be run as a build job in Jenkins or similar tool. You can p
|
||||
|
||||
**File Example: host/validation/network.yaml**
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: jinja
|
||||
|
||||
network..dns..search:
|
||||
type: list
|
||||
|
45
salt/pillar/venafi.py
Normal file
@ -0,0 +1,45 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Venafi Pillar Certificates
|
||||
|
||||
This module will only return pillar data if the ``venafi`` runner module has
|
||||
already been used to create certificates.
|
||||
|
||||
To configure this module, set ``venafi`` to ``True`` in the ``ext_pillar``
|
||||
section of your ``master`` configuration file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- venafi: True
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import salt.utils
|
||||
import salt.cache
|
||||
import salt.syspaths as syspaths
|
||||
|
||||
__virtualname__ = 'venafi'
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
No special requirements outside of Salt itself
|
||||
'''
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def ext_pillar(minion_id, pillar, conf):
|
||||
'''
|
||||
Return an existing set of certificates
|
||||
'''
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
|
||||
ret = {}
|
||||
dns_names = cache.fetch('venafi/minions', minion_id)
|
||||
for dns_name in dns_names:
|
||||
data = cache.fetch('venafi/domains', dns_name)
|
||||
ret[dns_name] = data
|
||||
del ret[dns_name]['csr']
|
||||
return {'venafi': ret}
|
@ -89,7 +89,7 @@ def init(opts):
|
||||
'baud',
|
||||
'attempts',
|
||||
'auto_probe',
|
||||
'ssh_private_key',
|
||||
'ssh_private_key_file',
|
||||
'ssh_config',
|
||||
'normalize'
|
||||
]
|
||||
|
@ -74,7 +74,9 @@ class Roster(object):
|
||||
self.backends = backends
|
||||
if not backends:
|
||||
self.backends = ['flat']
|
||||
self.rosters = salt.loader.roster(opts, runner=salt.loader.runner(self.opts))
|
||||
utils = salt.loader.utils(self.opts)
|
||||
runner = salt.loader.runner(self.opts, utils=utils)
|
||||
self.rosters = salt.loader.roster(self.opts, runner=runner)
|
||||
|
||||
def _gen_back(self):
|
||||
'''
|
||||
|
@ -20,6 +20,7 @@ usually located at /etc/salt/cloud. For example, add the following:
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
import salt.loader
|
||||
@ -37,13 +38,15 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
|
||||
'''
|
||||
ret = {}
|
||||
|
||||
cloud_opts = salt.config.cloud_config('/etc/salt/cloud')
|
||||
cloud_opts = salt.config.cloud_config(
|
||||
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
|
||||
)
|
||||
|
||||
minions = __runner__['cache.cloud'](tgt)
|
||||
for minion_id, full_info in minions.items():
|
||||
profile, provider = full_info.get('profile', None), full_info.get('provider', None)
|
||||
vm_ = {
|
||||
'provider': provider,
|
||||
'driver': provider,
|
||||
'profile': profile,
|
||||
}
|
||||
public_ips = full_info.get('public_ips', [])
|
||||
@ -63,7 +66,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
|
||||
ret[minion_id] = __opts__.get('roster_defaults', {})
|
||||
ret[minion_id].update({'host': preferred_ip})
|
||||
|
||||
ssh_username = salt.utils.cloud.ssh_usernames({}, cloud_opts)
|
||||
ssh_username = salt.utils.cloud.ssh_usernames(vm_, cloud_opts)
|
||||
if isinstance(ssh_username, string_types):
|
||||
ret[minion_id]['user'] = ssh_username
|
||||
elif isinstance(ssh_username, list):
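
The first hunk stops hard-coding ``/etc/salt/cloud`` and derives the cloud configuration path from the directory of the master's own ``conf_file``; the second passes the per-minion ``vm_`` dict to ``ssh_usernames`` so profile and provider defaults are honored. A small sketch of the path derivation, with the ``conf_file`` value purely hypothetical:

.. code-block:: python

    import os

    conf_file = '/etc/salt/master'  # hypothetical __opts__['conf_file'] value
    cloud_conf = os.path.join(os.path.dirname(conf_file), 'cloud')
    # '/etc/salt/cloud' on a default install, but it follows a relocated conf_file
    assert cloud_conf == '/etc/salt/cloud'
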
|
||||
|
@ -6,6 +6,7 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import fnmatch
|
||||
import logging
|
||||
import os
|
||||
|
||||
# Import salt libs
|
||||
import salt.config
|
||||
@ -14,6 +15,7 @@ import salt.log
|
||||
import salt.utils
|
||||
import salt.utils.master
|
||||
import salt.payload
|
||||
import salt.cache
|
||||
from salt.exceptions import SaltInvocationError
|
||||
from salt.fileserver import clear_lock as _clear_lock
|
||||
from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES as __GITFS_OVERRIDES
|
||||
@ -386,13 +388,36 @@ def cloud(tgt, provider=None):
|
||||
'''
|
||||
if not isinstance(tgt, six.string_types):
|
||||
return {}
|
||||
ret = {}
|
||||
opts = salt.config.cloud_config('/etc/salt/cloud')
|
||||
|
||||
opts = salt.config.cloud_config(
|
||||
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
|
||||
)
|
||||
if not opts.get('update_cachedir'):
|
||||
return {}
|
||||
|
||||
cloud_cache = __utils__['cloud.list_cache_nodes_full'](opts=opts, provider=provider)
|
||||
for driver, providers in cloud_cache.items():
|
||||
for provider, servers in providers.items():
|
||||
for name, data in servers.items():
|
||||
if cloud_cache is None:
|
||||
return {}
|
||||
|
||||
ret = {}
|
||||
for driver, providers in six.iteritems(cloud_cache):
|
||||
for provider, servers in six.iteritems(providers):
|
||||
for name, data in six.iteritems(servers):
|
||||
if fnmatch.fnmatch(name, tgt):
|
||||
ret[name] = data
|
||||
ret['name']['provider'] = provider
|
||||
ret[name]['provider'] = provider
|
||||
return ret
|
||||
|
||||
|
||||
def fetch(bank, key, cachedir=None):
|
||||
'''
|
||||
Fetch data from a salt.cache bank
|
||||
'''
|
||||
if cachedir is None:
|
||||
cachedir = __opts__['cachedir']
|
||||
|
||||
try:
|
||||
cache = salt.cache.Cache(__opts__, cachedir)
|
||||
except TypeError:
|
||||
cache = salt.cache.Cache(__opts__)
|
||||
return cache.fetch(bank, key)
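
The new ``fetch`` helper above first constructs ``salt.cache.Cache`` with an explicit ``cachedir`` and falls back to the single-argument form if the installed cache class does not accept one. A self-contained sketch of that fallback pattern (the class here is a stand-in, not the real ``salt.cache.Cache``):

.. code-block:: python

    class LegacyCache(object):
        # Stand-in for an older cache implementation without a cachedir argument
        def __init__(self, opts):
            self.opts = opts

    def make_cache(cache_cls, opts, cachedir):
        try:
            return cache_cls(opts, cachedir)
        except TypeError:
            # Older signature: only opts is accepted
            return cache_cls(opts)

    cache = make_cache(LegacyCache, {'cachedir': '/var/cache/salt'}, '/var/cache/salt')
    assert cache.opts['cachedir'] == '/var/cache/salt'
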
|
||||
|
@ -34,7 +34,7 @@ FINGERPRINT_REGEX = re.compile(r'^([a-f0-9]{2}:){15}([a-f0-9]{2})$')
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _ping(tgt, tgt_type, timeout):
|
||||
def _ping(tgt, tgt_type, timeout, gather_job_timeout):
|
||||
client = salt.client.get_local_client(__opts__['conf_file'])
|
||||
pub_data = client.run_job(tgt, 'test.ping', (), tgt_type, '', timeout, '')
|
||||
|
||||
@ -47,7 +47,8 @@ def _ping(tgt, tgt_type, timeout):
|
||||
pub_data['minions'],
|
||||
client._get_timeout(timeout),
|
||||
tgt,
|
||||
tgt_type):
|
||||
tgt_type,
|
||||
gather_job_timeout=gather_job_timeout):
|
||||
|
||||
if fn_ret:
|
||||
for mid, _ in six.iteritems(fn_ret):
|
||||
@ -58,7 +59,7 @@ def _ping(tgt, tgt_type, timeout):
|
||||
return list(returned), list(not_returned)
|
||||
|
||||
|
||||
def status(output=True, tgt='*', tgt_type='glob', expr_form=None):
|
||||
def status(output=True, tgt='*', tgt_type='glob', expr_form=None, timeout=None, gather_job_timeout=None):
|
||||
'''
|
||||
.. versionchanged:: Nitrogen
|
||||
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
|
||||
@ -72,6 +73,7 @@ def status(output=True, tgt='*', tgt_type='glob', expr_form=None):
|
||||
|
||||
salt-run manage.status
|
||||
salt-run manage.status tgt="webservers" tgt_type="nodegroup"
|
||||
salt-run manage.status timeout=5 gather_job_timeout=10
|
||||
'''
|
||||
# remember to remove the expr_form argument from this function when
|
||||
# performing the cleanup on this deprecation.
|
||||
@ -85,7 +87,13 @@ def status(output=True, tgt='*', tgt_type='glob', expr_form=None):
|
||||
tgt_type = expr_form
|
||||
|
||||
ret = {}
|
||||
ret['up'], ret['down'] = _ping(tgt, tgt_type, __opts__['timeout'])
|
||||
|
||||
if not timeout:
|
||||
timeout = __opts__['timeout']
|
||||
if not gather_job_timeout:
|
||||
gather_job_timeout = __opts__['gather_job_timeout']
|
||||
|
||||
ret['up'], ret['down'] = _ping(tgt, tgt_type, timeout, gather_job_timeout)
|
||||
return ret
|
||||
|
||||
|
||||
@ -167,7 +175,7 @@ def down(removekeys=False, tgt='*', tgt_type='glob', expr_form=None):
|
||||
return ret
|
||||
|
||||
|
||||
def up(tgt='*', tgt_type='glob', expr_form=None): # pylint: disable=C0103
|
||||
def up(tgt='*', tgt_type='glob', expr_form=None, timeout=None, gather_job_timeout=None): # pylint: disable=C0103
|
||||
'''
|
||||
.. versionchanged:: Nitrogen
|
||||
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
|
||||
@ -181,8 +189,15 @@ def up(tgt='*', tgt_type='glob', expr_form=None): # pylint: disable=C0103
|
||||
|
||||
salt-run manage.up
|
||||
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
|
||||
salt-run manage.up timeout=5 gather_job_timeout=10
|
||||
'''
|
||||
ret = status(output=False, tgt=tgt, tgt_type=tgt_type).get('up', [])
|
||||
ret = status(
|
||||
output=False,
|
||||
tgt=tgt,
|
||||
tgt_type=tgt_type,
|
||||
timeout=timeout,
|
||||
gather_job_timeout=gather_job_timeout
|
||||
).get('up', [])
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -52,10 +52,8 @@ master and it will run the specified amount of commands per time period.
|
||||
schedule:
|
||||
runner queue:
|
||||
schedule:
|
||||
function: saltutil.runner
|
||||
function: queue.process_runner
|
||||
minutes: 1
|
||||
args:
|
||||
- queue.process_runner
|
||||
kwargs:
|
||||
quantity: 2
|
||||
|
||||
|
692
salt/runners/venafiapi.py
Normal file
@ -0,0 +1,692 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Support for Venafi
|
||||
|
||||
Before using this module you need to register an account with Venafi, and
|
||||
configure it in your ``master`` configuration file.
|
||||
|
||||
First, you need to add a placeholder to the ``master`` file. This is because
|
||||
the module will not load unless it finds an ``api_key`` setting, valid or not.
|
||||
Open up ``/etc/salt/master`` and add:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
venafi:
|
||||
api_key: None
|
||||
|
||||
Then register your email address with Venafi using the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.register <youremail@yourdomain.com>
|
||||
|
||||
This command will not return an ``api_key`` to you; that will be sent to you
|
||||
via email from Venafi. Once you have received that key, open up your ``master``
|
||||
file and set the ``api_key`` to it:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
venafi:
|
||||
api_key: abcdef01-2345-6789-abcd-ef0123456789
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import logging
|
||||
import tempfile
|
||||
from Crypto.PublicKey import RSA
|
||||
import json
|
||||
import salt.syspaths as syspaths
|
||||
import salt.cache
|
||||
import salt.utils
|
||||
import salt.utils.http
|
||||
import salt.ext.six as six
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
__virtualname__ = 'venafi'
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load the module if venafi is installed
|
||||
'''
|
||||
if __opts__.get('venafi', {}).get('api_key'):
|
||||
return __virtualname__
|
||||
return False
|
||||
|
||||
|
||||
def _base_url():
|
||||
'''
|
||||
Return the base_url
|
||||
'''
|
||||
return __opts__.get('venafi', {}).get(
|
||||
'base_url', 'https://api.venafi.io/v1'
|
||||
)
|
||||
|
||||
|
||||
def _api_key():
|
||||
'''
|
||||
Return the API key
|
||||
'''
|
||||
return __opts__.get('venafi', {}).get('api_key', '')
|
||||
|
||||
|
||||
def gen_key(minion_id, dns_name=None, zone='default', password=None):
|
||||
'''
|
||||
Generate and return a private_key. If a ``dns_name`` is passed in, the
|
||||
private_key will be cached under that name. The type of key and the
|
||||
parameters used to generate the key are based on the default certificate
|
||||
use policy associated with the specified zone.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password]
|
||||
'''
|
||||
# Get the default certificate use policy associated with the zone
|
||||
# so we can generate keys that conform with policy
|
||||
|
||||
# The /v1/zones/tag/{name} API call is a shortcut to get the zoneID
|
||||
# directly from the name
|
||||
|
||||
qdata = salt.utils.http.query(
|
||||
'{0}/zones/tag/{1}'.format(_base_url(), zone),
|
||||
method='GET',
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
)
|
||||
|
||||
zone_id = qdata['dict']['id']
|
||||
|
||||
# the /v1/certificatepolicies?zoneId API call returns the default
|
||||
# certificate use and certificate identity policies
|
||||
|
||||
qdata = salt.utils.http.query(
|
||||
'{0}/certificatepolicies?zoneId={1}'.format(_base_url(), zone_id),
|
||||
method='GET',
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
)
|
||||
|
||||
policies = qdata['dict']['certificatePolicies']
|
||||
|
||||
# Extract the key length and key type from the certificate use policy
|
||||
# and generate the private key accordingly
|
||||
|
||||
for policy in policies:
|
||||
if policy['certificatePolicyType'] == "CERTIFICATE_USE":
|
||||
keyTypes = policy['keyTypes']
|
||||
# in case multiple keytypes and key lengths are supported
|
||||
# always use the first key type and key length
|
||||
keygen_type = keyTypes[0]['keyType']
|
||||
key_len = keyTypes[0]['keyLengths'][0]
|
||||
|
||||
if int(key_len) < 2048:
|
||||
key_len = 2048
|
||||
|
||||
if keygen_type == "RSA":
|
||||
gen = RSA.generate(bits=key_len)
|
||||
private_key = gen.exportKey('PEM', password)
|
||||
if dns_name is not None:
|
||||
bank = 'venafi/domains'
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
try:
|
||||
data = cache.fetch(bank, dns_name)
|
||||
data['private_key'] = private_key
|
||||
data['minion_id'] = minion_id
|
||||
except TypeError:
|
||||
data = {'private_key': private_key,
|
||||
'minion_id': minion_id}
|
||||
cache.store(bank, dns_name, data)
|
||||
return private_key
|
||||
|
||||
|
||||
def gen_csr(
|
||||
minion_id,
|
||||
dns_name,
|
||||
zone='default',
|
||||
country=None,
|
||||
state=None,
|
||||
loc=None,
|
||||
org=None,
|
||||
org_unit=None,
|
||||
password=None,
|
||||
):
|
||||
'''
|
||||
Generate a csr using the host's private_key.
|
||||
Analogous to:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
VCert gencsr -cn [CN Value] -o "Beta Organization" -ou "Beta Group" \
|
||||
-l "Palo Alto" -st "California" -c US
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.gen_csr <minion_id> <dns_name>
|
||||
'''
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
os.chmod(tmpdir, 0o700)
|
||||
|
||||
bank = 'venafi/domains'
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
data = cache.fetch(bank, dns_name)
|
||||
if data is None:
|
||||
data = {}
|
||||
if 'private_key' not in data:
|
||||
data['private_key'] = gen_key(minion_id, dns_name, zone, password)
|
||||
|
||||
tmppriv = '{0}/priv'.format(tmpdir)
|
||||
tmpcsr = '{0}/csr'.format(tmpdir)
|
||||
with salt.utils.fopen(tmppriv, 'w') as if_:
|
||||
if_.write(data['private_key'])
|
||||
|
||||
if country is None:
|
||||
country = __opts__.get('venafi', {}).get('country')
|
||||
|
||||
if state is None:
|
||||
state = __opts__.get('venafi', {}).get('state')
|
||||
|
||||
if loc is None:
|
||||
loc = __opts__.get('venafi', {}).get('loc')
|
||||
|
||||
if org is None:
|
||||
org = __opts__.get('venafi', {}).get('org')
|
||||
|
||||
if org_unit is None:
|
||||
org_unit = __opts__.get('venafi', {}).get('org_unit')
|
||||
|
||||
subject = '/C={0}/ST={1}/L={2}/O={3}/OU={4}/CN={5}'.format(
|
||||
country,
|
||||
state,
|
||||
loc,
|
||||
org,
|
||||
org_unit,
|
||||
dns_name,
|
||||
)
|
||||
|
||||
cmd = "openssl req -new -sha256 -key {0} -out {1} -subj '{2}'".format(
|
||||
tmppriv,
|
||||
tmpcsr,
|
||||
subject
|
||||
)
|
||||
output = __salt__['salt.cmd']('cmd.run', cmd)
|
||||
|
||||
if 'problems making Certificate Request' in output:
|
||||
raise CommandExecutionError(
|
||||
'There was a problem generating the CSR. Please ensure that you '
|
||||
'have the following variables set either on the command line, or '
|
||||
'in the venafi section of your master configuration file: '
|
||||
'country, state, loc, org, org_unit'
|
||||
)
|
||||
|
||||
with salt.utils.fopen(tmpcsr, 'r') as of_:
|
||||
csr = of_.read()
|
||||
|
||||
data['minion_id'] = minion_id
|
||||
data['csr'] = csr
|
||||
cache.store(bank, dns_name, data)
|
||||
return csr
|
||||
|
||||
|
||||
def request(
|
||||
minion_id,
|
||||
dns_name=None,
|
||||
zone='default',
|
||||
request_id=None,
|
||||
country='US',
|
||||
state='California',
|
||||
loc='Palo Alto',
|
||||
org='Beta Organization',
|
||||
org_unit='Beta Group',
|
||||
password=None,
|
||||
zone_id=None,
|
||||
):
|
||||
'''
|
||||
Request a new certificate
|
||||
|
||||
Uses the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
VCert enroll -z <zone> -k <api key> -cn <domain name>
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.request <minion_id> <dns_name>
|
||||
'''
|
||||
if password is not None:
|
||||
if password.startswith('sdb://'):
|
||||
password = __salt__['sdb.get'](password)
|
||||
|
||||
if zone_id is None:
|
||||
zone_id = __opts__.get('venafi', {}).get('zone_id')
|
||||
|
||||
if zone_id is None and zone is not None:
|
||||
zone_id = get_zone_id(zone)
|
||||
|
||||
if zone_id is None:
|
||||
raise CommandExecutionError(
|
||||
'Either a zone or a zone_id must be passed in or '
|
||||
'configured in the master file. This id can be retrieved using '
|
||||
'venafi.show_company <domain>'
|
||||
)
|
||||
|
||||
private_key = gen_key(minion_id, dns_name, zone, password)
|
||||
|
||||
csr = gen_csr(
|
||||
minion_id,
|
||||
dns_name,
|
||||
zone=zone,
|
||||
country=country,
|
||||
state=state,
|
||||
loc=loc,
|
||||
org=org,
|
||||
org_unit=org_unit,
|
||||
)
|
||||
|
||||
pdata = json.dumps({
|
||||
'zoneId': zone_id,
|
||||
'certificateSigningRequest': csr,
|
||||
})
|
||||
|
||||
qdata = salt.utils.http.query(
|
||||
'{0}/certificaterequests'.format(_base_url()),
|
||||
method='POST',
|
||||
data=pdata,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
)
|
||||
|
||||
request_id = qdata['dict']['certificateRequests'][0]['id']
|
||||
ret = {
|
||||
'request_id': request_id,
|
||||
'private_key': private_key,
|
||||
'csr': csr,
|
||||
'zone': zone,
|
||||
}
|
||||
|
||||
bank = 'venafi/domains'
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
data = cache.fetch(bank, dns_name)
|
||||
if data is None:
|
||||
data = {}
|
||||
data.update({
|
||||
'minion_id': minion_id,
|
||||
'request_id': request_id,
|
||||
'private_key': private_key,
|
||||
'zone': zone,
|
||||
'csr': csr,
|
||||
})
|
||||
cache.store(bank, dns_name, data)
|
||||
_id_map(minion_id, dns_name)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
# Request and renew are the same, so far as this module is concerned
|
||||
renew = request
|
||||
|
||||
|
||||
def _id_map(minion_id, dns_name):
|
||||
'''
|
||||
Maintain a relationship between a minion and a dns name
|
||||
'''
|
||||
bank = 'venafi/minions'
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
dns_names = cache.fetch(bank, minion_id)
|
||||
if dns_names is None:
|
||||
dns_names = []
|
||||
if dns_name not in dns_names:
|
||||
dns_names.append(dns_name)
|
||||
cache.store(bank, minion_id, dns_names)
|
||||
|
||||
|
||||
def register(email):
|
||||
'''
|
||||
Register a new user account
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.register email@example.com
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/useraccounts'.format(_base_url()),
|
||||
method='POST',
|
||||
data=json.dumps({
|
||||
'username': email,
|
||||
'userAccountType': 'API',
|
||||
}),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data.get('dict', {})
|
||||
|
||||
|
||||
def show_company(domain):
|
||||
'''
|
||||
Show company information, especially the company id
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_company example.com
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/companies/domain/{1}'.format(_base_url(), domain),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data.get('dict', {})
|
||||
|
||||
|
||||
def show_csrs():
|
||||
'''
|
||||
Show certificate requests for this API key
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_csrs
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/certificaterequests'.format(_base_url()),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data.get('dict', {})
|
||||
|
||||
|
||||
def get_zone_id(zone_name):
|
||||
'''
|
||||
Get the zone ID for the given zone name
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.get_zone_id default
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/zones/tag/{1}'.format(_base_url(), zone_name),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
},
|
||||
)
|
||||
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data['dict']['id']
|
||||
|
||||
|
||||
def show_policies():
|
||||
'''
|
||||
Show certificate policy details for the API key owner's company
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_policies
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/certificatepolicies'.format(_base_url()),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data['dict']
|
||||
|
||||
|
||||
def show_zones():
|
||||
'''
|
||||
Show zone details for the API key owner's company
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_zones
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/zones'.format(_base_url()),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={
|
||||
'tppl-api-key': _api_key(),
|
||||
},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
return data['dict']
|
||||
|
||||
|
||||
def show_cert(id_):
|
||||
'''
|
||||
Show the certificate issued for the given certificate request ID
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_cert 01234567-89ab-cdef-0123-456789abcdef
|
||||
'''
|
||||
data = salt.utils.http.query(
|
||||
'{0}/certificaterequests/{1}/certificate'.format(_base_url(), id_),
|
||||
params={
|
||||
'format': 'PEM',
|
||||
'chainOrder': 'ROOT_FIRST'
|
||||
},
|
||||
status=True,
|
||||
text=True,
|
||||
header_dict={'tppl-api-key': _api_key()},
|
||||
)
|
||||
status = data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(data['error'])
|
||||
)
|
||||
data = data.get('body', '')
|
||||
csr_data = salt.utils.http.query(
|
||||
'{0}/certificaterequests/{1}'.format(_base_url(), id_),
|
||||
status=True,
|
||||
decode=True,
|
||||
decode_type='json',
|
||||
header_dict={'tppl-api-key': _api_key()},
|
||||
)
|
||||
status = csr_data['status']
|
||||
if str(status).startswith('4') or str(status).startswith('5'):
|
||||
raise CommandExecutionError(
|
||||
'There was an API error: {0}'.format(csr_data['error'])
|
||||
)
|
||||
csr_data = csr_data.get('dict', {})
|
||||
certs = _parse_certs(data)
|
||||
dns_name = ''
|
||||
for item in csr_data['certificateName'].split(','):
|
||||
if item.startswith('cn='):
|
||||
dns_name = item.split('=')[1]
|
||||
#certs['CSR Data'] = csr_data
|
||||
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
domain_data = cache.fetch('venafi/domains', dns_name)
|
||||
if domain_data is None:
|
||||
domain_data = {}
|
||||
certs['private_key'] = domain_data.get('private_key')
|
||||
domain_data.update(certs)
|
||||
cache.store('venafi/domains', dns_name, domain_data)
|
||||
|
||||
certs['request_id'] = id_
|
||||
return certs
|
||||
|
||||
|
||||
pickup = show_cert
|
||||
|
||||
|
||||
def show_rsa(minion_id, dns_name):
|
||||
'''
|
||||
Show a private RSA key
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.show_rsa myminion domain.example.com
|
||||
'''
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
bank = 'venafi/domains'
|
||||
data = cache.fetch(
|
||||
bank, dns_name
|
||||
)
|
||||
return data['private_key']
|
||||
|
||||
|
||||
def list_domain_cache():
|
||||
'''
|
||||
List domains that have been cached
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.list_domain_cache
|
||||
'''
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
return cache.list('venafi/domains')
|
||||
|
||||
|
||||
def del_cached_domain(domains):
|
||||
'''
|
||||
Delete cached domains from the master
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run venafi.del_cached_domain domain1.example.com,domain2.example.com
|
||||
'''
|
||||
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
|
||||
if isinstance(domains, six.string_types):
|
||||
domains = domains.split(',')
|
||||
if not isinstance(domains, list):
|
||||
raise CommandExecutionError(
|
||||
'You must pass in either a string containing one or more domains '
|
||||
'separated by commas, or a list of single domain strings'
|
||||
)
|
||||
success = []
|
||||
failed = []
|
||||
for domain in domains:
|
||||
try:
|
||||
cache.flush('venafi/domains', domain)
|
||||
success.append(domain)
|
||||
except CommandExecutionError:
|
||||
failed.append(domain)
|
||||
return {'Succeeded': success, 'Failed': failed}
|
||||
|
||||
|
||||
def _parse_certs(data):
|
||||
cert_mode = False
|
||||
cert = ''
|
||||
certs = []
|
||||
rsa_key = ''
|
||||
for line in data.splitlines():
|
||||
if not line.strip():
|
||||
continue
|
||||
if 'Successfully posted request' in line:
|
||||
comps = line.split(' for ')
|
||||
request_id = comps[-1].strip()
|
||||
continue
|
||||
if 'END CERTIFICATE' in line or 'END RSA private_key' in line:
|
||||
if 'RSA' in line:
|
||||
rsa_key = rsa_key + line
|
||||
else:
|
||||
cert = cert + line
|
||||
certs.append(cert)
|
||||
cert_mode = False
|
||||
continue
|
||||
if 'BEGIN CERTIFICATE' in line or 'BEGIN RSA private_key' in line:
|
||||
if 'RSA' in line:
|
||||
rsa_key = line + '\n'
|
||||
else:
|
||||
cert = line + '\n'
|
||||
cert_mode = True
|
||||
continue
|
||||
if cert_mode is True:
|
||||
cert = cert + line + '\n'
|
||||
continue
|
||||
|
||||
rcert = certs.pop(0)
|
||||
eecert = certs.pop(-1)
|
||||
ret = {
|
||||
'end_entity_certificate': eecert,
|
||||
'private_key': rsa_key,
|
||||
'root_certificate': rcert,
|
||||
'intermediate_certificates': certs
|
||||
}
|
||||
|
||||
return ret
|
@ -185,7 +185,7 @@ def init(
|
||||
The number of cpus to allocate to this new virtual machine.
|
||||
|
||||
mem
|
||||
The amount of memory to allocate tot his virtual machine. The number
|
||||
The amount of memory to allocate to this virtual machine. The number
|
||||
is interpreted in megabytes.
|
||||
|
||||
image
|
||||
@ -370,7 +370,7 @@ def force_off(name):
|
||||
try:
|
||||
cmd_ret = client.cmd_iter(
|
||||
host,
|
||||
'virt.destroy',
|
||||
'virt.stop',
|
||||
[name],
|
||||
timeout=600)
|
||||
except SaltClientError as client_error:
|
||||
|
@ -1680,7 +1680,12 @@ class State(object):
|
||||
troot = os.path.join(self.opts['cachedir'], self.jid)
|
||||
tfile = os.path.join(troot, tag)
|
||||
if not os.path.isdir(troot):
|
||||
os.makedirs(troot)
|
||||
try:
|
||||
os.makedirs(troot)
|
||||
except OSError:
|
||||
# Looks like the directory was created between the check
|
||||
# and the attempt, we are safe to pass
|
||||
pass
|
||||
with salt.utils.fopen(tfile, 'wb+') as fp_:
|
||||
fp_.write(msgpack.dumps(ret))
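
The change above wraps ``os.makedirs`` in a try/except because the ``isdir`` check and the create are not atomic: another worker can create ``troot`` in between. The same idiom in isolation:

.. code-block:: python

    import os

    def ensure_dir(path):
        # Check-then-create is racy: the directory may appear between the
        # isdir() check and makedirs(), so tolerate that specific failure.
        if not os.path.isdir(path):
            try:
                os.makedirs(path)
            except OSError:
                pass  # created by someone else in the meantime; safe to continue
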
|
||||
|
||||
|
@ -147,12 +147,24 @@ def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None,
|
||||
64 MB.
|
||||
|
||||
VpcConfig
|
||||
If your Lambda function accesses resources in a VPC, you provide this
|
||||
parameter identifying the list of security group IDs and subnet IDs.
|
||||
These must belong to the same VPC. You must provide at least one
|
||||
security group and one subnet ID.
|
||||
If your Lambda function accesses resources in a VPC, you must provide this parameter
|
||||
identifying the list of security group IDs/Names and subnet IDs/Names. These must all belong
|
||||
to the same VPC. This is a dict of the form:
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
.. code-block:: yaml
|
||||
VpcConfig:
|
||||
SecurityGroupNames:
|
||||
- mysecgroup1
|
||||
- mysecgroup2
|
||||
SecurityGroupIds:
|
||||
- sg-abcdef1234
|
||||
SubnetNames:
|
||||
- mysubnet1
|
||||
SubnetIds:
|
||||
- subnet-1234abcd
|
||||
- subnet-abcd1234
|
||||
|
||||
If VpcConfig is provided at all, you MUST pass at least one security group and one subnet.
|
||||
|
||||
Permissions
|
||||
A list of permission definitions to be added to the function's policy
|
||||
@ -305,6 +317,22 @@ def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
|
||||
return 'arn:aws:iam::{0}:role/{1}'.format(account_id, name)
|
||||
|
||||
|
||||
def _resolve_vpcconfig(conf, region=None, key=None, keyid=None, profile=None):
|
||||
if isinstance(conf, six.string_types):
|
||||
conf = json.loads(conf)
|
||||
if not conf:
|
||||
return None
|
||||
if not isinstance(conf, dict):
|
||||
raise SaltInvocationError('VpcConfig must be a dict.')
|
||||
sns = [__salt__['boto_vpc.get_resource_id']('subnet', s, region=region, key=key,
|
||||
keyid=keyid, profile=profile).get('id') for s in conf.pop('SubnetNames', [])]
|
||||
sgs = [__salt__['boto_secgroup.get_group_id'](s, region=region, key=key, keyid=keyid,
|
||||
profile=profile) for s in conf.pop('SecurityGroupNames', [])]
|
||||
conf.setdefault('SubnetIds', []).extend(sns)
|
||||
conf.setdefault('SecurityGroupIds', []).extend(sgs)
|
||||
return conf
|
||||
|
||||
|
||||
def _function_config_present(FunctionName, Role, Handler, Description, Timeout,
|
||||
MemorySize, VpcConfig, Environment, region,
|
||||
key, keyid, profile, RoleRetries):
|
||||
@ -329,7 +357,8 @@ def _function_config_present(FunctionName, Role, Handler, Description, Timeout,
|
||||
oldval = func.get('VpcConfig')
|
||||
if oldval is not None:
|
||||
oldval.pop('VpcId', None)
|
||||
if oldval != VpcConfig:
|
||||
fixed_VpcConfig = _resolve_vpcconfig(VpcConfig, region, key, keyid, profile)
|
||||
if __utils__['boto3.ordered'](oldval) != __utils__['boto3.ordered'](fixed_VpcConfig):
|
||||
need_update = True
|
||||
ret['changes'].setdefault('new', {})['VpcConfig'] = VpcConfig
|
||||
ret['changes'].setdefault(
|
||||
|
@ -767,6 +767,12 @@ def run(name,
|
||||
interactively to the console and the logs.
|
||||
This is experimental.
|
||||
|
||||
bg
|
||||
If ``True``, run command in background and do not await or deliver its
|
||||
results.
|
||||
|
||||
.. versionadded:: 2016.3.6
|
||||
|
||||
.. note::
|
||||
|
||||
cmd.run supports the usage of ``reload_modules``. This functionality
|
||||
@ -787,10 +793,10 @@ def run(name,
|
||||
- reload_modules: True
|
||||
|
||||
'''
|
||||
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
|
||||
### cannot be removed from the function definition, otherwise the use
|
||||
### of unsupported arguments in a cmd.run state will result in a
|
||||
### traceback.
|
||||
### NOTE: The keyword arguments in **kwargs are passed directly to the
|
||||
### ``cmd.run_all`` function and cannot be removed from the function
|
||||
### definition, otherwise the use of unsupported arguments in a
|
||||
### ``cmd.run`` state will result in a traceback.
|
||||
|
||||
test_name = None
|
||||
if not isinstance(stateful, list):
|
||||
|
File diff suppressed because it is too large
2036
salt/states/docker_container.py
Normal file
File diff suppressed because it is too large
427
salt/states/docker_image.py
Normal file
@ -0,0 +1,427 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management of Docker images
|
||||
|
||||
.. versionadded:: Nitrogen
|
||||
|
||||
:depends: docker_ Python module
|
||||
|
||||
.. note::
|
||||
Older releases of the Python bindings for Docker were called docker-py_ in
|
||||
PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are
|
||||
supported. These python bindings can easily be installed using
|
||||
:py:func:`pip.install <salt.modules.pip.install>`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt myminion pip.install docker
|
||||
|
||||
To upgrade from docker-py_ to docker_, you must first uninstall docker-py_,
|
||||
and then install docker_:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt myminion pip.uninstall docker-py
|
||||
salt myminion pip.install docker
|
||||
|
||||
.. _docker: https://pypi.python.org/pypi/docker
|
||||
.. _docker-py: https://pypi.python.org/pypi/docker-py
|
||||
|
||||
These states were moved from the :mod:`docker <salt.states.docker>` state
|
||||
module (formerly called **dockerng**) in the Nitrogen release.
|
||||
|
||||
.. note::
|
||||
To pull from a Docker registry, authentication must be configured. See
|
||||
:ref:`here <docker-authentication>` for more information on how to
|
||||
configure access to docker registries in :ref:`Pillar <pillar>` data.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.docker
|
||||
|
||||
# Enable proper logging
|
||||
log = logging.getLogger(__name__) # pylint: disable=invalid-name
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'docker_image'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if the docker execution module is available
|
||||
'''
|
||||
if 'docker.version' in __salt__:
|
||||
return __virtualname__
|
||||
return (False, __salt__.missing_fun_string('docker.version'))
|
||||
|
||||
|
||||
def present(name,
|
||||
build=None,
|
||||
load=None,
|
||||
force=False,
|
||||
insecure_registry=False,
|
||||
client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
|
||||
dockerfile=None,
|
||||
sls=None,
|
||||
base='opensuse/python',
|
||||
saltenv='base',
|
||||
**kwargs):
|
||||
'''
|
||||
Ensure that an image is present. The image can either be pulled from a
|
||||
Docker registry, built from a Dockerfile, or loaded from a saved image.
|
||||
Image names can be specified either using ``repo:tag`` notation, or just
|
||||
the repo name (in which case a tag of ``latest`` is assumed).
|
||||
A repo identifier is mandatory; we don't assume the default repository
|
||||
is Docker Hub.
|
||||
|
||||
If neither of the ``build`` or ``load`` arguments are used, then Salt will
|
||||
pull from the :ref:`configured registries <docker-authentication>`. If the
|
||||
specified image already exists, it will not be pulled unless ``force`` is
|
||||
set to ``True``. Here is an example of a state that will pull an image from
|
||||
the Docker Hub:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
docker_image.present
|
||||
|
||||
build
|
||||
Path to directory on the Minion containing a Dockerfile
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
docker_image.present:
|
||||
- build: /home/myuser/docker/myimage
|
||||
|
||||
|
||||
myuser/myimage:mytag:
|
||||
docker_image.present:
|
||||
- build: /home/myuser/docker/myimage
|
||||
- dockerfile: Dockerfile.alternative
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
The image will be built using :py:func:`docker.build
|
||||
<salt.modules.docker.build>` and the specified image name and tag
|
||||
will be applied to it.
|
||||
|
||||
load
|
||||
Loads a tar archive created with :py:func:`docker.load
|
||||
<salt.modules.docker.load>` (or the ``docker load`` Docker CLI
|
||||
command), and assigns it the specified repo and tag.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
docker_image.present:
|
||||
- load: salt://path/to/image.tar
|
||||
|
||||
force : False
|
||||
Set this parameter to ``True`` to force Salt to pull/build/load the
|
||||
image even if it is already present.
|
||||
|
||||
client_timeout
|
||||
Timeout in seconds for the Docker client. This is not a timeout for
|
||||
the state, but for receiving a response from the API.
|
||||
|
||||
dockerfile
|
||||
Allows for an alternative Dockerfile to be specified. Path to alternative
|
||||
Dockefile is relative to the build path for the Docker container.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
sls
|
||||
Allow for building images with ``dockerng.sls_build`` by specify the
|
||||
SLS files to build with. This can be a list or comma-seperated string.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
dockerng.image_present:
|
||||
- sls:
|
||||
- webapp1
|
||||
- webapp2
|
||||
- base: centos
|
||||
- saltenv: base
|
||||
|
||||
.. versionadded: Nitrogen
|
||||
|
||||
base
|
||||
Base image with which to start ``dockerng.sls_build``
|
||||
|
||||
.. versionadded: Nitrogen
|
||||
|
||||
saltenv
|
||||
environment from which to pull sls files for ``dockerng.sls_build``.
|
||||
|
||||
.. versionadded: Nitrogen
|
||||
'''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if build is not None and load is not None:
        ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.'
        return ret

    # Ensure that we have repo:tag notation
    image = ':'.join(salt.utils.docker.get_repo_tag(name))
    all_tags = __salt__['docker.list_tags']()

    if image in all_tags:
        if not force:
            ret['result'] = True
            ret['comment'] = 'Image \'{0}\' already present'.format(name)
            return ret
        else:
            try:
                image_info = __salt__['docker.inspect_image'](name)
            except Exception as exc:
                ret['comment'] = \
                    'Unable to get info for image \'{0}\': {1}'.format(name, exc)
                return ret
    else:
        image_info = None

    if build or sls:
        action = 'built'
    elif load:
        action = 'loaded'
    else:
        action = 'pulled'

    if __opts__['test']:
        ret['result'] = None
        if (image in all_tags and force) or image not in all_tags:
            ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action)
            return ret

    if build:
        try:
            image_update = __salt__['docker.build'](path=build,
                                                    image=image,
                                                    dockerfile=dockerfile)
        except Exception as exc:
            ret['comment'] = (
                'Encountered error building {0} as {1}: {2}'
                .format(build, image, exc)
            )
            return ret
        if image_info is None or image_update['Id'] != image_info['Id'][:12]:
            ret['changes'] = image_update

    elif sls:
        if isinstance(sls, list):
            sls = ','.join(sls)
        try:
            image_update = __salt__['dockerng.sls_build'](name=image,
                                                          base=base,
                                                          mods=sls,
                                                          saltenv=saltenv)
        except Exception as exc:
            ret['comment'] = (
                'Encountered error using sls {0} for building {1}: {2}'
                .format(sls, image, exc)
            )
            return ret
        if image_info is None or image_update['Id'] != image_info['Id'][:12]:
            ret['changes'] = image_update

    elif load:
        try:
            image_update = __salt__['docker.load'](path=load, image=image)
        except Exception as exc:
            ret['comment'] = (
                'Encountered error loading {0} as {1}: {2}'
                .format(load, image, exc)
            )
            return ret
        if image_info is None or image_update.get('Layers', []):
            ret['changes'] = image_update

    else:
        try:
            image_update = __salt__['docker.pull'](
                image,
                insecure_registry=insecure_registry,
                client_timeout=client_timeout
            )
        except Exception as exc:
            ret['comment'] = (
                'Encountered error pulling {0}: {1}'
                .format(image, exc)
            )
            return ret
        if (image_info is not None and image_info['Id'][:12] == image_update
                .get('Layers', {})
                .get('Already_Pulled', [None])[0]):
            # Image was pulled again (because of force) but was also
            # already there. No new image was available on the registry.
            pass
        elif image_info is None or image_update.get('Layers', {}).get('Pulled'):
            # Only add to the changes dict if layers were pulled
            ret['changes'] = image_update

    ret['result'] = image in __salt__['docker.list_tags']()

    if not ret['result']:
        # This shouldn't happen, failure to pull should be caught above
        ret['comment'] = 'Image \'{0}\' could not be {1}'.format(name, action)
    elif not ret['changes']:
        ret['comment'] = (
            'Image \'{0}\' was {1}, but there were no changes'
            .format(name, action)
        )
    else:
        ret['comment'] = 'Image \'{0}\' was {1}'.format(name, action)
    return ret

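# Illustrative sketch (registry, image name and values are examples only): a
# state that always re-pulls an image from the configured registry, with a
# longer client timeout, would combine the force and client_timeout arguments
# documented above:
#
#   myregistry.example.com/myimage:latest:
#       docker_image.present:
#           - force: True
#           - client_timeout: 120
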
def absent(name=None, images=None, force=False):
    '''
    Ensure that an image is absent from the Minion. Image names can be
    specified either using ``repo:tag`` notation, or just the repo name (in
    which case a tag of ``latest`` is assumed).

    images
        Run this state on more than one image at a time. The following two
        examples accomplish the same thing:

        .. code-block:: yaml

            remove_images:
                docker_image.absent:
                    - names:
                        - busybox
                        - centos:6
                        - nginx

        .. code-block:: yaml

            remove_images:
                docker_image.absent:
                    - images:
                        - busybox
                        - centos:6
                        - nginx

        However, the second example will be a bit quicker since Salt will do
        all the deletions in a single run, rather than executing the state
        separately on each image (as it would in the first example).

    force : False
        Salt will fail to remove any images currently in use by a container.
        Set this option to ``True`` to remove the image even if it is
        currently in use.

        .. note::

            This option can also be overridden by Pillar data. If the Minion
            has a pillar variable named ``docker.force`` which is set to
            ``True``, it will turn on this option. This pillar variable can
            even be set at runtime. For example:

            .. code-block:: bash

                salt myminion state.sls docker_stuff pillar="{docker.force: True}"

            If this pillar variable is present and set to ``False``, then it
            will turn off this option.

            For more granular control, setting a pillar variable named
            ``docker.force.image_name`` will affect only the named image.
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if not name and not images:
        ret['comment'] = 'One of \'name\' and \'images\' must be provided'
        return ret
    elif images is not None:
        targets = []
        for target in images:
            try:
                targets.append(':'.join(salt.utils.docker.get_repo_tag(target)))
            except TypeError:
                # Don't stomp on images with unicode characters in Python 2,
                # only force image to be a str if it wasn't already (which is
                # very unlikely).
                targets.append(':'.join(salt.utils.docker.get_repo_tag(str(target))))
    elif name:
        try:
            targets = [':'.join(salt.utils.docker.get_repo_tag(name))]
        except TypeError:
            targets = [':'.join(salt.utils.docker.get_repo_tag(str(name)))]

    pre_tags = __salt__['docker.list_tags']()
    to_delete = [x for x in targets if x in pre_tags]
    log.debug('targets = {0}'.format(targets))
    log.debug('to_delete = {0}'.format(to_delete))

    if not to_delete:
        ret['result'] = True
        if len(targets) == 1:
            ret['comment'] = 'Image \'{0}\' is not present'.format(name)
        else:
            ret['comment'] = 'All specified images are not present'
        return ret

    if __opts__['test']:
        ret['result'] = None
        if len(to_delete) == 1:
            ret['comment'] = ('Image \'{0}\' will be removed'
                              .format(to_delete[0]))
        else:
            ret['comment'] = ('The following images will be removed: {0}'
                              .format(', '.join(to_delete)))
        return ret

    result = __salt__['docker.rmi'](*to_delete, force=force)
    post_tags = __salt__['docker.list_tags']()
    failed = [x for x in to_delete if x in post_tags]

    if failed:
        if [x for x in to_delete if x not in post_tags]:
            ret['changes'] = result
            ret['comment'] = (
                'The following image(s) failed to be removed: {0}'
                .format(', '.join(failed))
            )
        else:
            ret['comment'] = 'None of the specified images were removed'
            if 'Errors' in result:
                ret['comment'] += (
                    '. The following errors were encountered: {0}'
                    .format('; '.join(result['Errors']))
                )
    else:
        ret['changes'] = result
        if len(to_delete) == 1:
            ret['comment'] = 'Image \'{0}\' was removed'.format(to_delete[0])
        else:
            ret['comment'] = (
                'The following images were removed: {0}'
                .format(', '.join(to_delete))
            )
        ret['result'] = True

    return ret

def mod_watch(name, sfun=None, **kwargs):
    if sfun == 'present':
        # Force image to be updated
        kwargs['force'] = True
        return present(name, **kwargs)

    return {'name': name,
            'changes': {},
            'result': False,
            'comment': ('watch requisite is not'
                        ' implemented for {0}'.format(sfun))}
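# Illustrative sketch (paths and image names are examples only): mod_watch
# above means that a watch requisite on this state re-runs present() with
# force=True, so an image is rebuilt whenever the watched source changes, e.g.:
#
#   /srv/docker/myimage/Dockerfile:
#       file.managed:
#           - source: salt://docker/myimage/Dockerfile
#
#   myuser/myimage:mytag:
#       docker_image.present:
#           - build: /srv/docker/myimage
#           - watch:
#               - file: /srv/docker/myimage/Dockerfile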
salt/states/docker_network.py  (new file, 169 lines)
@@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
'''
Management of Docker networks

.. versionadded:: Nitrogen

:depends: docker_ Python module

.. note::
    Older releases of the Python bindings for Docker were called docker-py_ in
    PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are
    supported. These python bindings can easily be installed using
    :py:func:`pip.install <salt.modules.pip.install>`:

    .. code-block:: bash

        salt myminion pip.install docker

    To upgrade from docker-py_ to docker_, you must first uninstall docker-py_,
    and then install docker_:

    .. code-block:: bash

        salt myminion pip.uninstall docker-py
        salt myminion pip.install docker

.. _docker: https://pypi.python.org/pypi/docker
.. _docker-py: https://pypi.python.org/pypi/docker-py

These states were moved from the :mod:`docker <salt.states.docker>` state
module (formerly called **dockerng**) in the Nitrogen release.
'''
from __future__ import absolute_import
import logging

# Enable proper logging
log = logging.getLogger(__name__)  # pylint: disable=invalid-name

# Define the module's virtual name
__virtualname__ = 'docker_network'


def __virtual__():
    '''
    Only load if the docker execution module is available
    '''
    if 'docker.version' in __salt__:
        return __virtualname__
    return (False, __salt__.missing_fun_string('docker.version'))

def present(name, driver=None, containers=None):
    '''
    Ensure that a network is present.

    name
        Name of the network

    driver
        Type of driver for that network.

    containers
        List of container names that should be part of this network

    Usage Examples:

    .. code-block:: yaml

        network_foo:
            docker_network.present

    .. code-block:: yaml

        network_bar:
            docker_network.present:
                - name: bar
                - containers:
                    - cont1
                    - cont2

    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if containers is None:
        containers = []
    # map containers to container's Ids.
    containers = [__salt__['docker.inspect_container'](c)['Id'] for c in containers]
    networks = __salt__['docker.networks'](names=[name])
    if networks:
        network = networks[0]  # we expect network's name to be unique
        if all(c in network['Containers'] for c in containers):
            ret['result'] = True
            ret['comment'] = 'Network \'{0}\' already exists.'.format(name)
            return ret
        result = True
        for container in containers:
            if container not in network['Containers']:
                try:
                    ret['changes']['connected'] = __salt__['docker.connect_container_to_network'](
                        container, name)
                except Exception as exc:
                    ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
                        container, name, exc))
                    result = False
        ret['result'] = result

    else:
        try:
            ret['changes']['created'] = __salt__['docker.create_network'](
                name, driver=driver)
        except Exception as exc:
            ret['comment'] = ('Failed to create network \'{0}\': {1}'
                              .format(name, exc))
        else:
            result = True
            for container in containers:
                try:
                    ret['changes']['connected'] = __salt__['docker.connect_container_to_network'](
                        container, name)
                except Exception as exc:
                    ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
                        container, name, exc))
                    result = False
            ret['result'] = result
    return ret

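# Illustrative sketch (network and container names are examples only):
# present() first creates the network if needed and then connects the listed
# containers, so a bridge network shared by two containers could be declared
# as:
#
#   appnet:
#       docker_network.present:
#           - driver: bridge
#           - containers:
#               - web1
#               - db1
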
def absent(name, driver=None):
    '''
    Ensure that a network is absent.

    name
        Name of the network

    Usage Examples:

    .. code-block:: yaml

        network_foo:
            docker_network.absent
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    networks = __salt__['docker.networks'](names=[name])
    if not networks:
        ret['result'] = True
        ret['comment'] = 'Network \'{0}\' already absent'.format(name)
        return ret

    for container in networks[0]['Containers']:
        try:
            ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, name)
        except Exception as exc:
            ret['comment'] = ('Failed to disconnect container \'{0}\' from network \'{1}\' {2}'.format(
                container, name, exc))
    try:
        ret['changes']['removed'] = __salt__['docker.remove_network'](name)
        ret['result'] = True
    except Exception as exc:
        ret['comment'] = ('Failed to remove network \'{0}\': {1}'
                          .format(name, exc))
    return ret
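# Illustrative sketch (network name is an example only): absent() disconnects
# any containers still attached before removing the network, so a single state
# is enough to tear it down:
#
#   appnet:
#       docker_network.absent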
Some files were not shown because too many files have changed in this diff.