Merge branch 'develop' into boto_rds_parameter_group_fix

Commit: e216cc17c2
.github/stale.yml | 4 (vendored)
@@ -1,8 +1,8 @@
 # Probot Stale configuration file
 
 # Number of days of inactivity before an issue becomes stale
-# 1030 is approximately 2 years and 10 months
-daysUntilStale: 1030
+# 1000 is approximately 2 years and 9 months
+daysUntilStale: 1000
 
 # Number of days of inactivity before a stale issue is closed
 daysUntilClose: 7
@@ -3,7 +3,7 @@
 # directory is identical.
 
 #my-digitalocean-config:
-#  driver: digital_ocean
+#  driver: digitalocean
 #  client_key: wFGEwgregeqw3435gDger
 #  api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
 #  location: New York 1
@@ -1,5 +1,5 @@
 #my-digitalocean-config:
-#  driver: digital_ocean
+#  driver: digitalocean
 #  client_key: wFGEwgregeqw3435gDger
 #  api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
 #  location: New York 1
@@ -689,6 +689,12 @@
 # for a full explanation.
 #multiprocessing: True
 
+# Limit the maximum amount of processes or threads created by salt-minion.
+# This is useful to avoid resource exhaustion in case the minion receives more
+# publications than it is able to handle, as it limits the number of spawned
+# processes or threads. -1 is the default and disables the limit.
+#process_count_max: -1
+
 
 #####   Logging settings   #####
 ##########################################
@@ -10795,6 +10795,7 @@ cmd_whitelist_glob:
 .UNINDENT
 .UNINDENT
 .SS Thread Settings
 .SS \fBmultiprocessing\fP
 .sp
 Default: \fBTrue\fP
 .sp
@@ -136,7 +136,7 @@ Query Options
 .. versionadded:: 2014.7.0
 
 Display a list of configured profiles. Pass in a cloud provider to view
-the provider's associated profiles, such as ``digital_ocean``, or pass in
+the provider's associated profiles, such as ``digitalocean``, or pass in
 ``all`` to list all the configured profiles.
 
 
@@ -13,7 +13,7 @@ Full list of Salt Cloud modules
 aliyun
 azurearm
 cloudstack
-digital_ocean
+digitalocean
 dimensiondata
 ec2
 gce
@@ -1,6 +1,6 @@
 ===============================
-salt.cloud.clouds.digital_ocean
+salt.cloud.clouds.digitalocean
 ===============================
 
-.. automodule:: salt.cloud.clouds.digital_ocean
+.. automodule:: salt.cloud.clouds.digitalocean
     :members:
@@ -4175,7 +4175,9 @@ information.
 
 .. code-block:: yaml
 
-    reactor: []
+    reactor:
+      - 'salt/minion/*/start':
+        - salt://reactor/startup_tasks.sls
 
 .. conf_master:: reactor_refresh_interval
 
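As an illustrative aside (not part of the commit): the ``reactor`` option accepts any number of tag-to-SLS mappings, so several events can be wired up side by side. The second tag and SLS path below are hypothetical:

.. code-block:: yaml

    reactor:
      - 'salt/minion/*/start':
        - salt://reactor/startup_tasks.sls
      # Hypothetical second mapping for cloud-created VMs
      - 'salt/cloud/*/created':
        - salt://reactor/new_vm_setup.sls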
@@ -2404,11 +2404,14 @@ Thread Settings
 
 .. conf_minion:: multiprocessing
 
+``multiprocessing``
+-------------------
+
 Default: ``True``
 
-If `multiprocessing` is enabled when a minion receives a
+If ``multiprocessing`` is enabled when a minion receives a
 publication a new process is spawned and the command is executed therein.
-Conversely, if `multiprocessing` is disabled the new publication will be run
+Conversely, if ``multiprocessing`` is disabled the new publication will be run
 executed in a thread.
 
@@ -2416,6 +2419,23 @@ executed in a thread.
 
     multiprocessing: True
 
+.. conf_minion:: process_count_max
+
+``process_count_max``
+---------------------
+
+.. versionadded:: Oxygen
+
+Default: ``-1``
+
+Limit the maximum amount of processes or threads created by ``salt-minion``.
+This is useful to avoid resource exhaustion in case the minion receives more
+publications than it is able to handle, as it limits the number of spawned
+processes or threads. ``-1`` is the default and disables the limit.
+
+.. code-block:: yaml
+
+    process_count_max: -1
+
 .. _minion-logging-settings:
 
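For illustration only (not from the commit): enabling the new option alongside ``multiprocessing`` in a minion config might look like the sketch below; the cap of ``10`` is an arbitrary example value:

.. code-block:: yaml

    # /etc/salt/minion (hypothetical excerpt)
    multiprocessing: True
    # Never spawn more than 10 concurrent job processes (example value)
    process_count_max: 10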
@@ -327,6 +327,7 @@ execution modules
 ps
 publish
 puppet
+purefa
 pushbullet
 pushover_notify
 pw_group
doc/ref/modules/all/salt.modules.purefa.rst | 6 (new file)
@@ -0,0 +1,6 @@
+===================
+salt.modules.purefa
+===================
+
+.. automodule:: salt.modules.purefa
+    :members:
@@ -25,6 +25,9 @@ configuration:
       - web*:
        - test.*
        - pkg.*
+    # Allow managers to use saltutil module functions
+    manager_.*:
+      - saltutil.*
 
 Permission Issues
 -----------------
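For context, a sketch of the full ``external_auth`` block this ACL would live in; the ``pam`` backend and the ``thatch`` user are assumptions borrowed from the surrounding eauth docs, not part of this hunk:

.. code-block:: yaml

    external_auth:
      pam:
        thatch:
          - web*:
            - test.*
            - pkg.*
        # Allow managers to use saltutil module functions
        manager_.*:
          - saltutil.*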
@@ -1,5 +1,5 @@
-salt.runners.auth module
-========================
+salt.runners.auth
+=================
 
 .. automodule:: salt.runners.auth
     :members:
@@ -1,5 +1,5 @@
-salt.runners.digicertapi module
-===============================
+salt.runners.digicertapi
+========================
 
 .. automodule:: salt.runners.digicertapi
     :members:
@@ -1,5 +1,5 @@
-salt.runners.event module
-=========================
+salt.runners.event
+==================
 
 .. automodule:: salt.runners.event
     :members:
@@ -1,5 +1,11 @@
-salt.runners.mattermost module
-==============================
+salt.runners.mattermost
+=======================
 
+**Note for 2017.7 releases!**
+
+Due to the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module not being available in this release series, importing the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module from the develop branch is required to make this module work.
+
+Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 <https://github.com/saltstack/salt/issues/43479>`_
+
 .. automodule:: salt.runners.mattermost
     :members:
@@ -1,5 +1,5 @@
-salt.runners.smartos_vmadm module
-=================================
+salt.runners.smartos_vmadm
+==========================
 
 .. automodule:: salt.runners.smartos_vmadm
     :members:
@@ -1,5 +1,5 @@
-salt.runners.vault module
-=========================
+salt.runners.vault
+==================
 
 .. automodule:: salt.runners.vault
     :members:
@@ -1,5 +1,5 @@
-salt.runners.venafiapi module
-=============================
+salt.runners.venafiapi
+======================
 
 .. automodule:: salt.runners.venafiapi
     :members:
@@ -1,5 +1,5 @@
-salt.runners.vistara module
-===========================
+salt.runners.vistara
+====================
 
 .. automodule:: salt.runners.vistara
     :members:
@@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``:
 
 .. note::
     You can have only one top level ``reactor`` section, so if one already
-    exists, add this code to the existing section. See :ref:`Understanding the
-    Structure of Reactor Formulas <reactor-structure>` to learn more about
-    reactor SLS syntax.
+    exists, add this code to the existing section. See :ref:`here
+    <reactor-sls>` to learn more about reactor SLS syntax.
 
 
 Start the Salt Master in Debug Mode
@@ -183,7 +183,7 @@ imports should be absent from the Salt Cloud module.
 
 A good example of a non-libcloud driver is the DigitalOcean driver:
 
-https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digital_ocean.py
+https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digitalocean.py
 
 The ``create()`` Function
 -------------------------
@@ -444,7 +444,7 @@ under the API Access tab.
 .. code-block:: yaml
 
     my-digitalocean-config:
-      driver: digital_ocean
+      driver: digitalocean
       personal_access_token: xxx
       location: New York 1
 
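A hedged companion example (not in the diff): a cloud profile consumes this provider via its ``provider`` key; the profile name, image, and size below are assumptions:

.. code-block:: yaml

    my-do-droplet:
      provider: my-digitalocean-config
      image: ubuntu-16-04-x64
      size: 2gb
      location: New York 1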
@@ -19,7 +19,7 @@ under the "SSH Keys" section.
     # /etc/salt/cloud.providers.d/ directory.
 
     my-digitalocean-config:
-      driver: digital_ocean
+      driver: digitalocean
       personal_access_token: xxx
       ssh_key_file: /path/to/ssh/key/file
       ssh_key_names: my-key-name,my-key-name-2
@@ -63,7 +63,7 @@ command:
     # salt-cloud --list-locations my-digitalocean-config
     my-digitalocean-config:
         ----------
-        digital_ocean:
+        digitalocean:
             ----------
             Amsterdam 1:
                 ----------
@@ -87,7 +87,7 @@ command:
     # salt-cloud --list-sizes my-digitalocean-config
     my-digitalocean-config:
         ----------
-        digital_ocean:
+        digitalocean:
             ----------
             512MB:
                 ----------
@@ -117,7 +117,7 @@ command:
     # salt-cloud --list-images my-digitalocean-config
     my-digitalocean-config:
         ----------
-        digital_ocean:
+        digitalocean:
             ----------
             10.1:
                 ----------
@@ -142,7 +142,7 @@ Profile Specifics:
 ssh_username
 ------------
 
-If using a FreeBSD image from Digital Ocean, you'll need to set the ``ssh_username``
+If using a FreeBSD image from DigitalOcean, you'll need to set the ``ssh_username``
 setting to ``freebsd`` in your profile configuration.
 
 .. code-block:: yaml
 
@@ -219,7 +219,7 @@ the default cloud provider configuration file for DigitalOcean looks like this:
 .. code-block:: yaml
 
     digitalocean-config:
-      driver: digital_ocean
+      driver: digitalocean
       client_key: ''
       api_key: ''
       location: New York 1
@@ -230,7 +230,7 @@ must be provided:
 .. code-block:: yaml
 
     digitalocean-config:
-      driver: digital_ocean
+      driver: digitalocean
       client_key: wFGEwgregeqw3435gDger
       api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
       location: New York 1
|
@ -541,7 +541,7 @@ provider configuration file in the integration test file directory located at
|
||||
``tests/integration/files/conf/cloud.*.d/``.
|
||||
|
||||
The following is an example of the default profile configuration file for Digital
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf``:
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digitalocean.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@@ -557,12 +557,12 @@ be provided by the user by editing the provider configuration file before running
 tests.
 
 The following is an example of the default provider configuration file for Digital
-Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digital_ocean.conf``:
+Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digitalocean.conf``:
 
 .. code-block:: yaml
 
     digitalocean-config:
-      driver: digital_ocean
+      driver: digitalocean
       client_key: ''
       api_key: ''
       location: New York 1
 
@@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
       port: 5959
       proto: tcp
 
-Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
+Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
+
+.. code-block:: yaml
+
+    engines_dirs:
+      - /home/bob/engines
 
 Writing an Engine
 =================
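Since ``engines_dirs`` is a list, multiple search paths can be given; a small assumed extension of the example above (the second path is hypothetical):

.. code-block:: yaml

    engines_dirs:
      - /home/bob/engines
      - /srv/salt/custom_engines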
@@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described
 Installation from the Official SaltStack Repository
 ===================================================
 
-Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the
+Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the
 Official SaltStack repository.
 
 Instructions are at https://repo.saltstack.com/#debian.
@@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and
 other systems about operations.
 
 The event system fires events with a very specific criteria. Every event has a
-:strong:`tag`. Event tags allow for fast top level filtering of events. In
-addition to the tag, each event has a data structure. This data structure is a
-dict, which contains information about the event.
+**tag**. Event tags allow for fast top-level filtering of events. In addition
+to the tag, each event has a data structure. This data structure is a
+dictionary, which contains information about the event.
 
 .. _reactor-mapping-events:
 
@@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run.
    the :ref:`querystring syntax <querystring-syntax>` (e.g.
    ``salt://reactor/mycustom.sls?saltenv=reactor``).
 
-Reactor sls files are similar to state and pillar sls files. They are
-by default yaml + Jinja templates and are passed familiar context variables.
+Reactor SLS files are similar to State and Pillar SLS files. They are by
+default YAML + Jinja templates and are passed familiar context variables.
+Click :ref:`here <reactor-jinja-context>` for more detailed information on the
+variables available in Jinja templating.
 
 They differ because of the addition of the ``tag`` and ``data`` variables.
 
 - The ``tag`` variable is just the tag in the fired event.
 - The ``data`` variable is the event's data dict.
 
-Here is a simple reactor sls:
+Here is the SLS for a simple reaction:
 
 .. code-block:: jinja
 
@@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor
 system. The only difference is that the data is matched up to the salt command
 API and the runner system. In this example, a command is published to the
 ``mysql1`` minion with a function of :py:func:`state.apply
-<salt.modules.state.apply_>`. Similarly, a runner can be called:
+<salt.modules.state.apply_>`, which performs a :ref:`highstate
+<running-highstate>`. Similarly, a runner can be called:
 
 .. code-block:: jinja
 
     {% if data['data']['custom_var'] == 'runit' %}
     call_runit_orch:
       runner.state.orchestrate:
-        - mods: _orch.runit
+        - args:
+          - mods: orchestrate.runit
     {% endif %}
 
 This example will execute the state.orchestrate runner and initiate an execution
-of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using
-``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate"
-as this is most likely to cause confusion.
+of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
 
-Writing SLS Files
------------------
+Types of Reactions
+==================
 
-Reactor SLS files are stored in the same location as State SLS files. This means
-that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are
-available to the reactor and orchestrator.
+============================== ==================================================================================
+Name                           Description
+============================== ==================================================================================
+:ref:`local <reactor-local>`   Runs a :ref:`remote-execution function <all-salt.modules>` on targeted minions
+:ref:`runner <reactor-runner>` Executes a :ref:`runner function <all-salt.runners>`
+:ref:`wheel <reactor-wheel>`   Executes a :ref:`wheel function <all-salt.wheel>` on the master
+:ref:`caller <reactor-caller>` Runs a :ref:`remote-execution function <all-salt.modules>` on a masterless minion
+============================== ==================================================================================
 
-It is recommended to keep reactor and orchestrator SLS files in their own uniquely
-named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``,
-``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to
-read through this a few years down the road.
+.. note::
+    The ``local`` and ``caller`` reaction types will be renamed for the Oxygen
+    release. These reaction types were named after Salt's internal client
+    interfaces, and are not intuitively named. Both ``local`` and ``caller``
+    will continue to work in Reactor SLS files, but for the Oxygen release the
+    documentation will be updated to reflect the new preferred naming.
 
-The Goal of Writing Reactor SLS Files
-=====================================
+Where to Put Reactor SLS Files
+==============================
 
-Reactor SLS files share the familiar syntax from Salt States but there are
-important differences. The goal of a Reactor file is to process a Salt event as
-quickly as possible and then to optionally start a **new** process in response.
+Reactor SLS files can come both from files local to the master, and from any of
+backends enabled via the :conf_master:`fileserver_backend` config option. Files
+placed in the Salt fileserver can be referenced using a ``salt://`` URL, just
+like they can in State SLS files.
 
-1. The Salt Reactor watches Salt's event bus for new events.
-2. The event tag is matched against the list of event tags under the
-   ``reactor`` section in the Salt Master config.
-3. The SLS files for any matches are Rendered into a data structure that
-   represents one or more function calls.
-4. That data structure is given to a pool of worker threads for execution.
+It is recommended to place reactor and orchestrator SLS files in their own
+uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``,
+``reactor/``, etc., to keep them organized.
+
+.. _reactor-sls:
+
+Writing Reactor SLS
+===================
+
+The different reaction types were developed separately and have historically
+had different methods for passing arguments. For the 2017.7.2 release a new,
+unified configuration schema has been introduced, which applies to all reaction
+types.
+
+The old config schema will continue to be supported, and there is no plan to
+deprecate it at this time.
+
+.. _reactor-local:
+
+Local Reactions
+---------------
+
+A ``local`` reaction runs a :ref:`remote-execution function <all-salt.modules>`
+on the targeted minions.
+
+The old config schema required the positional and keyword arguments to be
+manually separated by the user under ``arg`` and ``kwarg`` parameters. However,
+this is not very user-friendly, as it forces the user to distinguish which type
+of argument is which, and make sure that positional arguments are ordered
+properly. Therefore, the new config schema is recommended if the master is
+running a supported release.
+
+The below two examples are equivalent:
+
++---------------------------------+-------------------------------+
+| Supported in 2017.7.2 and later | Supported in all releases     |
++=================================+===============================+
+| ::                              | ::                            |
+|                                 |                               |
+|     install_zsh:                |     install_zsh:              |
+|       local.state.single:       |       local.state.single:     |
+|         - tgt: 'kernel:Linux'   |         - tgt: 'kernel:Linux' |
+|         - tgt_type: grain       |         - tgt_type: grain     |
+|         - args:                 |         - arg:                |
+|           - fun: pkg.installed  |           - pkg.installed     |
+|           - name: zsh           |           - zsh               |
+|           - fromrepo: updates   |         - kwarg:              |
+|                                 |             fromrepo: updates |
++---------------------------------+-------------------------------+
+
+This reaction would be equivalent to running the following Salt command:
+
+.. code-block:: bash
+
+    salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates
+
+.. note::
+    Any other parameters in the :py:meth:`LocalClient().cmd_async()
+    <salt.client.LocalClient.cmd_async>` method can be passed at the same
+    indentation level as ``tgt``.
+
+.. note::
+    ``tgt_type`` is only required when the target expression defined in ``tgt``
+    uses a :ref:`target type <targeting>` other than a minion ID glob.
+
+    The ``tgt_type`` argument was named ``expr_form`` in releases prior to
+    2017.7.0.
+
+.. _reactor-runner:
+
+Runner Reactions
+----------------
+
+Runner reactions execute :ref:`runner functions <all-salt.runners>` locally on
+the master.
+
+The old config schema called for passing arguments to the reaction directly
+under the name of the runner function. However, this can cause unpredictable
+interactions with the Reactor system's internal arguments. It is also possible
+to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above
+in :ref:`local reactions <reactor-local>`, but as noted above this is not very
+user-friendly. Therefore, the new config schema is recommended if the master
+is running a supported release.
+
+The below two examples are equivalent:
+
++---------------------------------------------------+---------------------------------------------------+
+| Supported in 2017.7.2 and later                   | Supported in all releases                          |
++===================================================+====================================================+
+| ::                                                | ::                                                 |
+|                                                   |                                                    |
+|     deploy_app:                                   |     deploy_app:                                    |
+|       runner.state.orchestrate:                   |       runner.state.orchestrate:                    |
+|         - args:                                   |         - mods: orchestrate.deploy_app             |
+|           - mods: orchestrate.deploy_app          |         - kwarg:                                   |
+|           - pillar:                               |             pillar:                                |
+|               event_tag: {{ tag }}                |               event_tag: {{ tag }}                 |
+|               event_data: {{ data['data']|json }} |               event_data: {{ data['data']|json }}  |
++---------------------------------------------------+---------------------------------------------------+
+
+Assuming that the event tag is ``foo``, and the data passed to the event is
+``{'bar': 'baz'}``, then this reaction is equivalent to running the following
+Salt command:
+
+.. code-block:: bash
+
+    salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}'
+
+.. _reactor-wheel:
+
+Wheel Reactions
+---------------
+
+Wheel reactions run :ref:`wheel functions <all-salt.wheel>` locally on the
+master.
+
+Like :ref:`runner reactions <reactor-runner>`, the old config schema called for
+wheel reactions to have arguments passed directly under the name of the
+:ref:`wheel function <all-salt.wheel>` (or in ``arg`` or ``kwarg`` parameters).
+
+The below two examples are equivalent:
+
++-------------------------------------+-----------------------------------+
+| Supported in 2017.7.2 and later     | Supported in all releases         |
++=====================================+===================================+
+| ::                                  | ::                                |
+|                                     |                                   |
+|     remove_key:                     |     remove_key:                   |
+|       wheel.key.delete:             |       wheel.key.delete:           |
+|         - args:                     |         - match: {{ data['id'] }} |
+|           - match: {{ data['id'] }} |                                   |
++-------------------------------------+-----------------------------------+
+
+.. _reactor-caller:
+
+Caller Reactions
+----------------
+
+Caller reactions run :ref:`remote-execution functions <all-salt.modules>` on a
+minion daemon's Reactor system. To run a Reactor on the minion, it is necessary
+to configure the :mod:`Reactor Engine <salt.engines.reactor>` in the minion
+config file, and then set up your watched events in a ``reactor`` section in the
+minion config file as well.
+
+.. note:: Masterless Minions use this Reactor
+
+    This is the only way to run the Reactor if you use masterless minions.
+
+Both the old and new config schemas involve passing arguments under an ``args``
+parameter. However, the old config schema only supports positional arguments.
+Therefore, the new config schema is recommended if the masterless minion is
+running a supported release.
+
+The below two examples are equivalent:
+
++---------------------------------+---------------------------+
+| Supported in 2017.7.2 and later | Supported in all releases |
++=================================+===========================+
+| ::                              | ::                        |
+|                                 |                           |
+|     touch_file:                 |     touch_file:           |
+|       caller.file.touch:        |       caller.file.touch:  |
+|         - args:                 |         - args:           |
+|           - name: /tmp/foo      |           - /tmp/foo      |
++---------------------------------+---------------------------+
+
+This reaction is equivalent to running the following Salt command:
+
+.. code-block:: bash
+
+    salt-call file.touch name=/tmp/foo
+
+Best Practices for Writing Reactor SLS Files
+============================================
+
+The Reactor works as follows:
+
+1. The Salt Reactor watches Salt's event bus for new events.
+2. Each event's tag is matched against the list of event tags configured under
+   the :conf_master:`reactor` section in the Salt Master config.
+3. The SLS files for any matches are rendered into a data structure that
+   represents one or more function calls.
+4. That data structure is given to a pool of worker threads for execution.
+
 Matching and rendering Reactor SLS files is done sequentially in a single
-process. Complex Jinja that calls out to slow Execution or Runner modules slows
-down the rendering and causes other reactions to pile up behind the current
-one. The worker pool is designed to handle complex and long-running processes
-such as Salt Orchestrate.
+process. For that reason, reactor SLS files should contain few individual
+reactions (one, if at all possible). Also, keep in mind that reactions are
+fired asynchronously (with the exception of :ref:`caller <reactor-caller>`) and
+do *not* support :ref:`requisites <requisites>`.
 
-tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process
-started by the worker threads can be long-running. Using the reactor to fire
-an orchestrate runner would be ideal.
+Complex Jinja templating that calls out to slow :ref:`remote-execution
+<all-salt.modules>` or :ref:`runner <all-salt.runners>` functions slows down
+the rendering and causes other reactions to pile up behind the current one. The
+worker pool is designed to handle complex and long-running processes like
+:ref:`orchestration <orchestrate-runner>` jobs.
+
+Therefore, when complex tasks are in order, :ref:`orchestration
+<orchestrate-runner>` is a natural fit. Orchestration SLS files can be more
+complex, and use requisites. Performing a complex task using orchestration lets
+the Reactor system fire off the orchestration job and proceed with processing
+other reactions.
 
 .. _reactor-jinja-context:
 
 Jinja Context
--------------
+=============
 
-Reactor files only have access to a minimal Jinja context. ``grains`` and
-``pillar`` are not available. The ``salt`` object is available for calling
-Runner and Execution modules but it should be used sparingly and only for quick
-tasks for the reasons mentioned above.
+Reactor SLS files only have access to a minimal Jinja context. ``grains`` and
+``pillar`` are *not* available. The ``salt`` object is available for calling
+:ref:`remote-execution <all-salt.modules>` or :ref:`runner <all-salt.runners>`
+functions, but it should be used sparingly and only for quick tasks for the
+reasons mentioned above.
+
+In addition to the ``salt`` object, the following variables are available in
+the Jinja context:
+
+- ``tag`` - the tag from the event that triggered execution of the Reactor SLS
+  file
+- ``data`` - the event's data dictionary
+
+The ``data`` dict will contain an ``id`` key containing the minion ID, if the
+event was fired from a minion, and a ``data`` key containing the data passed to
+the event.
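A minimal sketch of putting these context variables to work, added for illustration (the SLS body and the minion-ID prefix are assumptions, not taken from the commit):

.. code-block:: jinja

    {# Hypothetical reactor SLS: only react to minions whose ID starts with 'web' #}
    {% if data['id'].startswith('web') %}
    highstate_new_webserver:
      local.state.apply:
        - tgt: {{ data['id'] }}
    {% endif %}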
 
 Advanced State System Capabilities
-----------------------------------
+==================================
 
-Reactor SLS files, by design, do not support Requisites, ordering,
-``onlyif``/``unless`` conditionals and most other powerful constructs from
-Salt's State system.
+Reactor SLS files, by design, do not support :ref:`requisites <requisites>`,
+ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs
+from Salt's State system.
 
 Complex Master-side operations are best performed by Salt's Orchestrate system
 so using the Reactor to kick off an Orchestrate run is a very common pairing.
@@ -166,7 +370,7 @@ For example:
     # /etc/salt/master.d/reactor.conf
     # A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"}
     reactor:
-      - myco/custom/event:
+      - my/custom/event:
        - /srv/reactor/some_event.sls
 
 .. code-block:: jinja
@@ -174,15 +378,15 @@ For example:
     # /srv/reactor/some_event.sls
     invoke_orchestrate_file:
       runner.state.orchestrate:
-        - mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls
-        - kwarg:
-            pillar:
-              event_tag: {{ tag }}
-              event_data: {{ data|json() }}
+        - args:
+          - mods: orchestrate.do_complex_thing
+          - pillar:
+              event_tag: {{ tag }}
+              event_data: {{ data|json }}
 
 .. code-block:: jinja
 
-    # /srv/salt/_orch/do_complex_thing.sls
+    # /srv/salt/orchestrate/do_complex_thing.sls
     {% set tag = salt.pillar.get('event_tag') %}
     {% set data = salt.pillar.get('event_data') %}
 
|
||||
.. _beacons-and-reactors:
|
||||
|
||||
Beacons and Reactors
|
||||
--------------------
|
||||
====================
|
||||
|
||||
An event initiated by a beacon, when it arrives at the master will be wrapped
|
||||
inside a second event, such that the data object containing the beacon
|
||||
@@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file,
 you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{
 data['id'] }}`` as for events initiated directly on the event bus.
 
+Similarly, the data dictionary attached to the event would be located in
+``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``.
+
 See the :ref:`beacon documentation <beacon-example>` for examples.
 
-Fire an event
-=============
+Manually Firing an Event
+========================
 
-To fire an event from a minion call ``event.send``
+From the Master
+---------------
+
+Use the :py:func:`event.send <salt.runners.event.send>` runner:
 
 .. code-block:: bash
 
-    salt-call event.send 'foo' '{orchestrate: refresh}'
+    salt-run event.send foo '{orchestrate: refresh}'
 
-After this is called, any reactor sls files matching event tag ``foo`` will
-execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``.
+From the Minion
+---------------
 
-See :py:mod:`salt.modules.event` for more information.
+To fire an event to the master from a minion, call :py:func:`event.send
+<salt.modules.event.send>`:
 
-Knowing what event is being fired
-=================================
+.. code-block:: bash
+
+    salt-call event.send foo '{orchestrate: refresh}'
+
+To fire an event to the minion's local event bus, call :py:func:`event.fire
+<salt.modules.event.fire>`:
+
+.. code-block:: bash
+
+    salt-call event.fire '{orchestrate: refresh}' foo
+
+Referencing Data Passed in Events
+---------------------------------
+
+Assuming any of the above examples, any reactor SLS files triggered by watching
+the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}``
+equal to ``'refresh'``.
+
+Getting Information About Events
+================================
 
-The best way to see exactly what events are fired and what data is available in
-each event is to use the :py:func:`state.event runner
+The best way to see exactly what events have been fired and what data is
+available in each event is to use the :py:func:`state.event runner
 <salt.runners.state.event>`.
 
 .. seealso:: :ref:`Common Salt Events <event-master_events>`
@@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file).
 view the result of referencing Jinja variables. If the result is empty then
 Jinja produced an empty result and the Reactor will ignore it.
 
-.. _reactor-structure:
+Passing Event Data to Minions or Orchestration as Pillar
+--------------------------------------------------------
 
-Understanding the Structure of Reactor Formulas
-===============================================
-
-**I.e., when to use `arg` and `kwarg` and when to specify the function
-arguments directly.**
-
-While the reactor system uses the same basic data structure as the state
-system, the functions that will be called using that data structure are
-different functions than are called via Salt's state system. The Reactor can
-call Runner modules using the `runner` prefix, Wheel modules using the `wheel`
-prefix, and can also cause minions to run Execution modules using the `local`
-prefix.
-
-.. versionchanged:: 2014.7.0
-    The ``cmd`` prefix was renamed to ``local`` for consistency with other
-    parts of Salt. A backward-compatible alias was added for ``cmd``.
-
-The Reactor runs on the master and calls functions that exist on the master. In
-the case of Runner and Wheel functions the Reactor can just call those
-functions directly since they exist on the master and are run on the master.
-
-In the case of functions that exist on minions and are run on minions, the
-Reactor still needs to call a function on the master in order to send the
-necessary data to the minion so the minion can execute that function.
-
-The Reactor calls functions exposed in :ref:`Salt's Python API documentation
-<client-apis>`. and thus the structure of Reactor files very transparently
-reflects the function signatures of those functions.
-
-Calling Execution modules on Minions
-------------------------------------
-
-The Reactor sends commands down to minions in the exact same way Salt's CLI
-interface does. It calls a function locally on the master that sends the name
-of the function as well as a list of any arguments and a dictionary of any
-keyword arguments that the minion should use to execute that function.
-
-Specifically, the Reactor calls the async version of :py:meth:`this function
-<salt.client.LocalClient.cmd>`. You can see that function has 'arg' and 'kwarg'
-parameters which are both values that are sent down to the minion.
-
-Executing remote commands maps to the :strong:`LocalClient` interface which is
-used by the :strong:`salt` command. This interface more specifically maps to
-the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This
-means that the arguments passed are being passed to the :strong:`cmd_async`
-method, not the remote method. A field starts with :strong:`local` to use the
-:strong:`LocalClient` subsystem. The result is, to execute a remote command,
-a reactor formula would look like this:
-
-.. code-block:: yaml
-
-    clean_tmp:
-      local.cmd.run:
-        - tgt: '*'
-        - arg:
-          - rm -rf /tmp/*
-
-The ``arg`` option takes a list of arguments as they would be presented on the
-command line, so the above declaration is the same as running this salt
-command:
-
-.. code-block:: bash
-
-    salt '*' cmd.run 'rm -rf /tmp/*'
-
-Use the ``tgt_type`` argument to specify a matcher:
-
-.. code-block:: yaml
-
-    clean_tmp:
-      local.cmd.run:
-        - tgt: 'os:Ubuntu'
-        - tgt_type: grain
-        - arg:
-          - rm -rf /tmp/*
-
-
-    clean_tmp:
-      local.cmd.run:
-        - tgt: 'G@roles:hbase_master'
-        - tgt_type: compound
-        - arg:
-          - rm -rf /tmp/*
-
-.. note::
-    The ``tgt_type`` argument was named ``expr_form`` in releases prior to
-    2017.7.0 (2016.11.x and earlier).
-
-Any other parameters in the :py:meth:`LocalClient().cmd()
-<salt.client.LocalClient.cmd>` method can be specified as well.
-
-Executing Reactors from the Minion
-----------------------------------
-
-The minion can be setup to use the Reactor via a reactor engine. This just
-sets up and listens to the minions event bus, instead of to the masters.
-
-The biggest difference is that you have to use the caller method on the
-Reactor, which is the equivalent of salt-call, to run your commands.
-
-:mod:`Reactor Engine setup <salt.engines.reactor>`
-
-.. code-block:: yaml
-
-    clean_tmp:
-      caller.cmd.run:
-        - arg:
-          - rm -rf /tmp/*
-
-.. note:: Masterless Minions use this Reactor
-
-    This is the only way to run the Reactor if you use masterless minions.
-
-Calling Runner modules and Wheel modules
-----------------------------------------
-
-Calling Runner modules and Wheel modules from the Reactor uses a more direct
-syntax since the function is being executed locally instead of sending a
-command to a remote system to be executed there. There are no 'arg' or 'kwarg'
-parameters (unless the Runner function or Wheel function accepts a parameter
-with either of those names.)
-
-For example:
-
-.. code-block:: yaml
-
-    clear_the_grains_cache_for_all_minions:
-      runner.cache.clear_grains
-
-If the :py:func:`the runner takes arguments <salt.runners.cloud.profile>` then
-they must be specified as keyword arguments.
-
-.. code-block:: yaml
-
-    spin_up_more_web_machines:
-      runner.cloud.profile:
-        - prof: centos_6
-        - instances:
-          - web11       # These VM names would be generated via Jinja in a
-          - web12       # real-world example.
-
-To determine the proper names for the arguments, check the documentation
-or source code for the runner function you wish to call.
-
-Passing event data to Minions or Orchestrate as Pillar
-------------------------------------------------------
-
-An interesting trick to pass data from the Reactor script to
+An interesting trick to pass data from the Reactor SLS file to
 :py:func:`state.apply <salt.modules.state.apply_>` is to pass it as inline
 Pillar data since both functions take a keyword argument named ``pillar``.
@@ -484,10 +567,9 @@ from the event to the state file via inline Pillar.
 
     add_new_minion_to_pool:
       local.state.apply:
        - tgt: 'haproxy*'
-        - arg:
-          - haproxy.refresh_pool
-        - kwarg:
-            pillar:
-              new_minion: {{ data['id'] }}
+        - args:
+          - mods: haproxy.refresh_pool
+          - pillar:
+              new_minion: {{ data['id'] }}
    {% endif %}
@@ -503,17 +585,16 @@ This works with Orchestrate files as well:
 
     call_some_orchestrate_file:
       runner.state.orchestrate:
-        - mods: _orch.some_orchestrate_file
-        - pillar:
-            stuff: things
+        - args:
+          - mods: orchestrate.some_orchestrate_file
+          - pillar:
+              stuff: things
 
 Which is equivalent to the following command at the CLI:
 
 .. code-block:: bash
 
-    salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}'
-
-This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls.
+    salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}'
 
 Finally, that data is available in the state file using the normal Pillar
 lookup syntax. The following example is grabbing web server names and IP
@@ -564,7 +645,7 @@ includes the minion id, which we can use for matching.
     - 'salt/minion/ink*/start':
       - /srv/reactor/auth-complete.sls
 
-In this sls file, we say that if the key was rejected we will delete the key on
+In this SLS file, we say that if the key was rejected we will delete the key on
 the master and then also tell the master to ssh in to the minion and tell it to
 restart the minion, since a minion process will die if the key is rejected.
|
||||
{% if not data['result'] and data['id'].startswith('ink') %}
|
||||
minion_remove:
|
||||
wheel.key.delete:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
minion_rejoin:
|
||||
local.cmd.run:
|
||||
- tgt: salt-master.domain.tld
|
||||
- arg:
|
||||
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
- args:
|
||||
- cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
{% endif %}
|
||||
|
||||
{# Ink server is sending new key -- accept this key #}
|
||||
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %}
|
||||
minion_add:
|
||||
wheel.key.accept:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
No if statements are needed here because we already limited this action to just
|
||||
|
doc/topics/releases/2016.11.8.rst | 1719 (new file; diff suppressed because it is too large)
@@ -116,6 +116,14 @@ Newer PyWinRM Versions
 Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self
 signed certificates.  :ref:`Here<new-pywinrm>` for more information.
 
+DigitalOcean
+------------
+
+The DigitalOcean driver has been renamed to conform to the company's name. The
+new driver name is ``digitalocean``. The old name ``digital_ocean`` and a
+short one ``do`` will still be supported through virtual aliases, this is mostly
+cosmetic.
+
 Solaris Logical Domains In Virtual Grain
 ----------------------------------------
 
@@ -13,7 +13,7 @@ Using Apache Libcloud for declarative and procedural multi-cloud orchestration
 
 Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows
 you to manage different cloud resources through a unified and easy to use API. Apache Libcloud supports over
-60 cloud platforms, including Amazon, Microsoft Azure, Digital Ocean, Google Cloud Platform and OpenStack.
+60 cloud platforms, including Amazon, Microsoft Azure, DigitalOcean, Google Cloud Platform and OpenStack.
 
 Execution and state modules are available for Compute, DNS, Storage and Load Balancer drivers from Apache Libcloud in
 SaltStack.
@@ -132,7 +132,7 @@ fi
 ###############################################################################
 # Remove the salt from the paths.d
 ###############################################################################
-if [ ! -f "/etc/paths.d/salt" ]; then
+if [ -f "/etc/paths.d/salt" ]; then
     echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
     rm "/etc/paths.d/salt"
     echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
@@ -35,8 +35,9 @@ _salt_get_keys(){
 }
 
 _salt(){
-    local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:-"$HOME/.cache/salt-comp-cache_functions"}
-    local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:-"last hour"}
+    CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
+    local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
+    local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
 
     if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then
         mkdir -p "$(dirname ${_salt_cache_functions})"
@@ -369,46 +369,13 @@ class LoadAuth(object):
        eauth_config = self.opts['external_auth'][eauth]
        if not groups:
            groups = []
-        group_perm_keys = [item for item in eauth_config if item.endswith('%')]  # The configured auth groups
-
-        # First we need to know if the user is allowed to proceed via any of their group memberships.
-        group_auth_match = False
-        for group_config in group_perm_keys:
-            if group_config.rstrip('%') in groups:
-                group_auth_match = True
-                break
-        # If a group_auth_match is set it means only that we have a
-        # user which matches at least one or more of the groups defined
-        # in the configuration file.
-
-        external_auth_in_db = False
-        for entry in eauth_config:
-            if entry.startswith('^'):
-                external_auth_in_db = True
-                break
-
-        # If neither a catchall, a named membership or a group
-        # membership is found, there is no need to continue. Simply
-        # deny the user access.
-        if not ((name in eauth_config) |
-                ('*' in eauth_config) |
-                group_auth_match | external_auth_in_db):
-            # Auth successful, but no matching user found in config
-            log.warning('Authorization failure occurred.')
-            return None
-
-        # We now have an authenticated session and it is time to determine
-        # what the user has access to.
-        auth_list = []
-        if name in eauth_config:
-            auth_list = eauth_config[name]
-        elif '*' in eauth_config:
-            auth_list = eauth_config['*']
-        if group_auth_match:
-            auth_list = self.ckminions.fill_auth_list_from_groups(
-                eauth_config,
-                groups,
-                auth_list)
+        auth_list = self.ckminions.fill_auth_list(
+            eauth_config,
+            name,
+            groups)
 
        auth_list = self.__process_acl(load, auth_list)
 
@@ -10,14 +10,19 @@ Beacon to fire events at failed login of users
 
 # Import python libs
 from __future__ import absolute_import
+import logging
 import os
 import struct
 import time
 
 # Import Salt Libs
 import salt.utils.files
 
 # Import 3rd-party libs
-from salt.ext import six
+import salt.ext.six
+# pylint: disable=import-error
 from salt.ext.six.moves import map
+# pylint: enable=import-error
 
 __virtualname__ = 'btmp'
 BTMP = '/var/log/btmp'
@@ -37,6 +42,15 @@ FIELDS = [
 SIZE = struct.calcsize(FMT)
 LOC_KEY = 'btmp.loc'
 
+log = logging.getLogger(__name__)
+
+# pylint: disable=import-error
+try:
+    import dateutil.parser as dateutil_parser
+    _TIME_SUPPORTED = True
+except ImportError:
+    _TIME_SUPPORTED = False
+
 
 def __virtual__():
     if os.path.isfile(BTMP):
|
||||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
@@ -60,6 +88,45 @@ def validate(config):
     if not isinstance(config, list):
         return False, ('Configuration for btmp beacon must '
                        'be a list.')
+    else:
+        _config = {}
+        list(map(_config.update, config))
+
+        if 'users' in _config:
+            if not isinstance(_config['users'], dict):
+                return False, ('User configuration for btmp beacon must '
+                               'be a dictionary.')
+            else:
+                for user in _config['users']:
+                    if _config['users'][user] and \
+                       'time_range' in _config['users'][user]:
+                        _time_range = _config['users'][user]['time_range']
+                        if not isinstance(_time_range, dict):
+                            return False, ('The time_range parameter for '
+                                           'btmp beacon must '
+                                           'be a dictionary.')
+                        else:
+                            if not all(k in _time_range for k in ('start', 'end')):
+                                return False, ('The time_range parameter for '
+                                               'btmp beacon must contain '
+                                               'start & end options.')
+        if 'defaults' in _config:
+            if not isinstance(_config['defaults'], dict):
+                return False, ('Defaults configuration for btmp beacon must '
+                               'be a dictionary.')
+            else:
+                if 'time_range' in _config['defaults']:
+                    _time_range = _config['defaults']['time_range']
+                    if not isinstance(_time_range, dict):
+                        return False, ('The time_range parameter for '
+                                       'btmp beacon must '
+                                       'be a dictionary.')
+                    else:
+                        if not all(k in _time_range for k in ('start', 'end')):
+                            return False, ('The time_range parameter for '
+                                           'btmp beacon must contain '
+                                           'start & end options.')
 
     return True, 'Valid beacon configuration'
 
@@ -72,8 +139,40 @@ def beacon(config):
 
        beacons:
          btmp: []
+
+        beacons:
+          btmp:
+            - users:
+                gareth:
+            - defaults:
+                time_range:
+                    start: '8am'
+                    end: '4pm'
+
+        beacons:
+          btmp:
+            - users:
+                gareth:
+                    time_range:
+                        start: '8am'
+                        end: '4pm'
+            - defaults:
+                time_range:
+                    start: '8am'
+                    end: '4pm'
    '''
    ret = []
 
+    users = None
+    defaults = None
+
+    for config_item in config:
+        if 'users' in config_item:
+            users = config_item['users']
+
+        if 'defaults' in config_item:
+            defaults = config_item['defaults']
+
    with salt.utils.files.fopen(BTMP, 'rb') as fp_:
        loc = __context__.get(LOC_KEY, 0)
        if loc == 0:
@@ -83,6 +182,7 @@ def beacon(config):
        else:
            fp_.seek(loc)
        while True:
+            now = int(time.time())
            raw = fp_.read(SIZE)
            if len(raw) != SIZE:
                return ret
@@ -91,7 +191,30 @@ def beacon(config):
            event = {}
            for ind, field in enumerate(FIELDS):
                event[field] = pack[ind]
-                if isinstance(event[field], six.string_types):
-                    event[field] = event[field].strip('\x00')
-            ret.append(event)
+                if isinstance(event[field], salt.ext.six.string_types):
+                    if isinstance(event[field], bytes):
+                        event[field] = event[field].decode()
+                        event[field] = event[field].strip('b\x00')
+                    else:
+                        event[field] = event[field].strip('\x00')
+
+            if users:
+                if event['user'] in users:
+                    _user = users[event['user']]
+                    if isinstance(_user, dict) and 'time_range' in _user:
+                        if _check_time_range(_user['time_range'], now):
+                            ret.append(event)
+                    else:
+                        if defaults and 'time_range' in defaults:
+                            if _check_time_range(defaults['time_range'],
+                                                 now):
+                                ret.append(event)
+                        else:
+                            ret.append(event)
+            else:
+                if defaults and 'time_range' in defaults:
+                    if _check_time_range(defaults['time_range'], now):
+                        ret.append(event)
+                else:
+                    ret.append(event)
            return ret
@@ -23,7 +23,9 @@ import re
 
 # Import salt libs
 import salt.ext.six
+# pylint: disable=import-error
 from salt.ext.six.moves import map
+# pylint: enable=import-error
 
 # Import third party libs
 try:
@@ -10,14 +10,19 @@ Beacon to fire events at login of users as registered in the wtmp file
 
 # Import Python libs
 from __future__ import absolute_import
+import logging
 import os
 import struct
 import time
 
 # Import salt libs
 import salt.utils.files
 
 # Import 3rd-party libs
-from salt.ext import six
+import salt.ext.six
+# pylint: disable=import-error
 from salt.ext.six.moves import map
+# pylint: enable=import-error
 
 __virtualname__ = 'wtmp'
 WTMP = '/var/log/wtmp'
@@ -37,9 +42,15 @@ FIELDS = [
 SIZE = struct.calcsize(FMT)
 LOC_KEY = 'wtmp.loc'
 
-import logging
 log = logging.getLogger(__name__)
 
+# pylint: disable=import-error
+try:
+    import dateutil.parser as dateutil_parser
+    _TIME_SUPPORTED = True
+except ImportError:
+    _TIME_SUPPORTED = False
+
 
 def __virtual__():
     if os.path.isfile(WTMP):
@@ -47,6 +58,20 @@ def __virtual__():
     return False
 
 
+def _check_time_range(time_range, now):
+    '''
+    Check time range
+    '''
+    if _TIME_SUPPORTED:
+        _start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
+        _end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
+
+        return bool(_start <= now <= _end)
+    else:
+        log.error('Dateutil is required.')
+        return False
+
+
 def _get_loc():
     '''
     return the active file location
@@ -62,6 +87,44 @@ def validate(config):
     # Configuration for wtmp beacon should be a list of dicts
     if not isinstance(config, list):
         return False, ('Configuration for wtmp beacon must be a list.')
+    else:
+        _config = {}
+        list(map(_config.update, config))
+
+        if 'users' in _config:
+            if not isinstance(_config['users'], dict):
+                return False, ('User configuration for btmp beacon must '
+                               'be a dictionary.')
+            else:
+                for user in _config['users']:
+                    if _config['users'][user] and \
+                       'time_range' in _config['users'][user]:
+                        _time_range = _config['users'][user]['time_range']
+                        if not isinstance(_time_range, dict):
+                            return False, ('The time_range parameter for '
+                                           'btmp beacon must '
+                                           'be a dictionary.')
+                        else:
+                            if not all(k in _time_range for k in ('start', 'end')):
+                                return False, ('The time_range parameter for '
+                                               'btmp beacon must contain '
+                                               'start & end options.')
+        if 'defaults' in _config:
+            if not isinstance(_config['defaults'], dict):
+                return False, ('Defaults configuration for btmp beacon must '
+                               'be a dictionary.')
+            else:
+                if 'time_range' in _config['defaults']:
+                    _time_range = _config['defaults']['time_range']
+                    if not isinstance(_time_range, dict):
+                        return False, ('The time_range parameter for '
+                                       'btmp beacon must '
+                                       'be a dictionary.')
+                    else:
+                        if not all(k in _time_range for k in ('start', 'end')):
+                            return False, ('The time_range parameter for '
+                                           'btmp beacon must contain '
+                                           'start & end options.')
     return True, 'Valid beacon configuration'
 
 
@ -74,8 +137,40 @@ def beacon(config):

beacons:
wtmp: []
'''

beacons:
wtmp:
- users:
gareth:
- defaults:
time_range:
start: '8am'
end: '4pm'

beacons:
wtmp:
- users:
gareth:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
'''
ret = []

users = None
defaults = None

for config_item in config:
if 'users' in config_item:
users = config_item['users']

if 'defaults' in config_item:
defaults = config_item['defaults']

with salt.utils.files.fopen(WTMP, 'rb') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:

@ -85,6 +180,7 @@ def beacon(config):

else:
fp_.seek(loc)
while True:
now = int(time.time())
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret

@ -93,7 +189,30 @@ def beacon(config):

event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], six.string_types):
event[field] = event[field].strip('\x00')
ret.append(event)
if isinstance(event[field], salt.ext.six.string_types):
if isinstance(event[field], bytes):
event[field] = event[field].decode()
event[field] = event[field].strip('b\x00')
else:
event[field] = event[field].strip('\x00')

if users:
if event['user'] in users:
_user = users[event['user']]
if isinstance(_user, dict) and 'time_range' in _user:
if _check_time_range(_user['time_range'], now):
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'],
now):
ret.append(event)
else:
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'], now):
ret.append(event)
else:
ret.append(event)
return ret

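For reference, the read loop above unpacks fixed-size wtmp records with ``struct``; a hedged sketch of the same pattern (the real ``FMT``/``FIELDS`` are defined earlier in the module — the values below are illustrative only):

.. code-block:: python

    import struct

    FMT = 'hi32s4s32s'          # hypothetical subset of the wtmp record layout
    FIELDS = ['type', 'PID', 'line', 'inittab', 'user']
    SIZE = struct.calcsize(FMT)

    with open('/var/log/wtmp', 'rb') as fp_:
        raw = fp_.read(SIZE)
        if len(raw) == SIZE:
            pack = struct.unpack(FMT, raw)
            event = dict(zip(FIELDS, pack))  # one login/logout record
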
2
salt/cache/__init__.py
vendored
@ -73,7 +73,7 @@ class Cache(object):

self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
else:
self.cachedir = cachedir
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
self.serial = Serial(opts)
self._modules = None
self._kwargs = kwargs

21
salt/cache/redis_cache.py
vendored
@ -481,18 +481,17 @@ def list_(bank):

Lists entries stored in the specified bank.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
banks = redis_server.smembers(bank_redis_key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
if not banks:
return []
return list(bank_keys)
return list(banks)


def contains(bank, key):

@ -500,15 +499,11 @@ def contains(bank, key):

Checks if the specified bank contains the specified key.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
return redis_server.sismember(bank_redis_key, key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
return False
return key in bank_keys

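The fix above swaps a separate per-bank "keys" set for the bank set itself; underneath, these are plain Redis set operations. A sketch, assuming a local Redis and the ``redis`` Python client (the key name is hypothetical):

.. code-block:: python

    import redis

    server = redis.StrictRedis(host='localhost', port=6379)
    server.sadd('$BANK_minions', 'minion1', 'minion2')   # populate the bank set
    print(server.smembers('$BANK_minions'))              # {b'minion1', b'minion2'}
    print(server.sismember('$BANK_minions', 'minion1'))  # True
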
@ -364,29 +364,19 @@ class SyncClientMixin(object):

# packed into the top level object. The plan is to move away from
# that since the caller knows what is an arg vs a kwarg, but while
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in
f_call = None
if u'arg' not in low:
# there are no kwargs in the low object passed in.

if u'arg' in low and u'kwarg' in low:
args = low[u'arg']
kwargs = low[u'kwarg']
else:
f_call = salt.utils.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get(u'args', ())
else:
args = low[u'arg']

if u'kwarg' not in low:
log.critical(
u'kwargs must be passed inside the low data within the '
u'\'kwarg\' key. See usage of '
u'salt.utils.args.parse_input() and '
u'salt.minion.load_args_and_kwargs() elsewhere in the '
u'codebase.'
)
kwargs = {}
else:
kwargs = low[u'kwarg']
kwargs = f_call.get(u'kwargs', {})

# Update the event data with loaded args and kwargs
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])

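For context, a "low" chunk carrying explicit ``arg``/``kwarg`` keys — the shape the new branch above expects — looks roughly like this (illustrative values):

.. code-block:: python

    low = {
        'fun': 'state.sls',
        'arg': ['apache'],
        'kwarg': {'saltenv': 'base'},
    }
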
@ -20,7 +20,7 @@ under the "SSH Keys" section.

personal_access_token: xxx
ssh_key_file: /path/to/ssh/key/file
ssh_key_names: my-key-name,my-key-name-2
driver: digital_ocean
driver: digitalocean

:depends: requests
'''

@ -59,10 +59,11 @@ except ImportError:

# Get logging started
log = logging.getLogger(__name__)

__virtualname__ = 'digital_ocean'
__virtualname__ = 'digitalocean'
__virtual_aliases__ = ('digital_ocean', 'do')


# Only load in this module if the DIGITAL_OCEAN configurations are in place
# Only load in this module if the DIGITALOCEAN configurations are in place
def __virtual__():
'''
Check for DigitalOcean configurations

@ -274,7 +275,7 @@ def create(vm_):

try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'digital_ocean',
__active_provider_name__ or 'digitalocean',
vm_['profile'],
vm_=vm_) is False:
return False

@ -441,7 +442,7 @@ def create(vm_):

ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on DIGITAL_OCEAN\n\n'
'Error creating {0} on DIGITALOCEAN\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
vm_['name'],

@ -716,12 +717,12 @@ def import_keypair(kwargs=None, call=None):

with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename:
public_key_content = public_key_filename.read()

digital_ocean_kwargs = {
digitalocean_kwargs = {
'name': kwargs['keyname'],
'public_key': public_key_content
}

created_result = create_key(digital_ocean_kwargs, call=call)
created_result = create_key(digitalocean_kwargs, call=call)
return created_result


@ -938,11 +939,11 @@ def show_pricing(kwargs=None, call=None):

if not profile:
return {'Error': 'The requested profile was not found'}

# Make sure the profile belongs to Digital Ocean
# Make sure the profile belongs to DigitalOcean
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'digital_ocean':
return {'Error': 'The requested profile does not belong to Digital Ocean'}
if len(comps) < 2 or comps[1] != 'digitalocean':
return {'Error': 'The requested profile does not belong to DigitalOcean'}

raw = {}
ret = {}

@ -968,7 +969,7 @@ def list_floating_ips(call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f list_floating_ips my-digitalocean-config
'''

@ -1008,7 +1009,7 @@ def show_floating_ip(kwargs=None, call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
'''

@ -1041,7 +1042,7 @@ def create_floating_ip(kwargs=None, call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2'

@ -1083,7 +1084,7 @@ def delete_floating_ip(kwargs=None, call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f delete_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
'''

@ -1118,7 +1119,7 @@ def assign_floating_ip(kwargs=None, call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f assign_floating_ip my-digitalocean-config droplet_id=1234567 floating_ip='45.55.96.47'
'''

@ -1151,7 +1152,7 @@ def unassign_floating_ip(kwargs=None, call=None):

CLI Examples:

... code-block:: bash
.. code-block:: bash

salt-cloud -f unassign_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
'''

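With the rename, a provider definition uses the new driver name (the old ``digital_ocean`` spelling survives as an alias, per ``__virtual_aliases__`` above); for example:

.. code-block:: yaml

    my-digitalocean-config:
      driver: digitalocean
      personal_access_token: xxx
      ssh_key_file: /path/to/ssh/key/file
      ssh_key_names: my-key-name,my-key-name-2
      location: New York 1
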
@ -3543,16 +3543,15 @@ def list_nodes_min(location=None, call=None):

for instance in instances:
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
items = instance['instancesSet']['item']
else:
item = instance['instancesSet']['item']
items = [instance['instancesSet']['item']]

for item in items:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
ret[name] = {'state': state, 'id': id}
ret[name] = {'state': state, 'id': id}
return ret

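The refactor above applies the usual "normalize to a list, then loop once" pattern, which removes the duplicated loop body; distilled as a minimal sketch:

.. code-block:: python

    def as_list(value):
        # Wrap single items so callers can always iterate.
        return value if isinstance(value, list) else [value]

    for item in as_list({'instanceId': 'i-123'}):  # a single dict or a list both work
        print(item['instanceId'])
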
@ -2643,7 +2643,7 @@ def show_pricing(kwargs=None, call=None):

if not profile:
return {'Error': 'The requested profile was not found'}

# Make sure the profile belongs to Digital Ocean
# Make sure the profile belongs to DigitalOcean
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'gce':

@ -41,6 +41,7 @@ Example profile:

master_port: 5506

Tested on:
- Fedora 26 (libvirt 3.2.1, qemu 2.9.1)
- Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1)
- Fedora 23 (libvirt 1.2.18, qemu 2.4.1)
- Centos 7 (libvirt 1.2.17, qemu 1.5.3)

@ -82,9 +83,6 @@ from salt.exceptions import (

SaltCloudSystemExit
)

# Get logging started
log = logging.getLogger(__name__)

VIRT_STATE_NAME_MAP = {0: 'running',
1: 'running',
2: 'running',

@ -99,6 +97,20 @@ IP_LEARNING_XML = """<filterref filter='clean-traffic'>

__virtualname__ = 'libvirt'

# Set up logging
log = logging.getLogger(__name__)


def libvirt_error_handler(ctx, error):  # pylint: disable=unused-argument
'''
Redirect stderr prints from libvirt to salt logging.
'''
log.debug("libvirt error {0}".format(error))


if HAS_LIBVIRT:
libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None)


def __virtual__():
'''

@ -280,7 +292,7 @@ def create(vm_):

validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True

log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))

try:
# Check for required profile parameters before sending any API calls.

@ -516,7 +528,7 @@ def destroy(name, call=None):

'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name},
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)

@ -527,7 +539,7 @@ def destroy(name, call=None):

'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name},
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)

@ -337,6 +337,9 @@ VALID_OPTS = {

# Whether or not processes should be forked when needed. The alternative is to use threading.
'multiprocessing': bool,

# Maximum number of concurrently active processes at any given point in time
'process_count_max': int,

# Whether or not the salt minion should run scheduled mine updates
'mine_enabled': bool,

@ -746,6 +749,10 @@ VALID_OPTS = {

'fileserver_limit_traversal': bool,
'fileserver_verify_config': bool,

# Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
# applied only if the user isn't matched by other matchers.
'permissive_acl': bool,

# Optionally enables keeping the calculated user's auth list in the token file.
'keep_acl_in_token': bool,

@ -1258,6 +1265,7 @@ DEFAULT_MINION_OPTS = {

'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': True,
'process_count_max': -1,
'mine_enabled': True,
'mine_return_job': False,
'mine_interval': 60,

@ -1526,6 +1534,7 @@ DEFAULT_MASTER_OPTS = {

'external_auth': {},
'token_expire': 43200,
'token_expire_user_override': False,
'permissive_acl': False,
'keep_acl_in_token': False,
'eauth_acl_module': '',
'eauth_tokens': 'localfs',

@ -2721,7 +2730,7 @@ def old_to_new(opts):

providers = (
'AWS',
'CLOUDSTACK',
'DIGITAL_OCEAN',
'DIGITALOCEAN',
'EC2',
'GOGRID',
'IBMSCE',

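A sketch of the new option in a minion configuration file (``-1``, the default registered above, disables the limit):

.. code-block:: yaml

    # /etc/salt/minion
    multiprocessing: True
    process_count_max: 10   # queue further publications once 10 jobs are running
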
@ -4,7 +4,7 @@


salt.config.schemas.esxcluster
~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

ESX Cluster configuration schemas
'''

@ -14,9 +14,180 @@ from __future__ import absolute_import

# Import Salt libs
from salt.utils.schema import (Schema,
DefinitionsSchema,
ComplexSchemaItem,
DictItem,
ArrayItem,
IntegerItem,
StringItem)
BooleanItem,
StringItem,
AnyOfItem)


class OptionValueItem(ComplexSchemaItem):
'''Schema item of the OptionValue'''

title = 'OptionValue'
key = StringItem(title='Key', required=True)
value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()])


class AdmissionControlPolicyItem(ComplexSchemaItem):
'''
Schema item of the HA admission control policy
'''

title = 'Admission Control Policy'

cpu_failover_percent = IntegerItem(
title='CPU Failover Percent',
minimum=0, maximum=100)
memory_failover_percent = IntegerItem(
title='Memory Failover Percent',
minimum=0, maximum=100)


class DefaultVmSettingsItem(ComplexSchemaItem):
'''
Schema item of the HA default vm settings
'''

title = 'Default VM Settings'

isolation_response = StringItem(
title='Isolation Response',
enum=['clusterIsolationResponse', 'none', 'powerOff', 'shutdown'])
restart_priority = StringItem(
title='Restart Priority',
enum=['clusterRestartPriority', 'disabled', 'high', 'low', 'medium'])


class HAConfigItem(ComplexSchemaItem):
'''
Schema item of ESX cluster high availability
'''

title = 'HA Configuration'
description = 'ESX cluster HA configuration json schema item'

enabled = BooleanItem(
title='Enabled',
description='Specifies if HA should be enabled')
admission_control_enabled = BooleanItem(
title='Admission Control Enabled')
admission_control_policy = AdmissionControlPolicyItem()
default_vm_settings = DefaultVmSettingsItem()
hb_ds_candidate_policy = StringItem(
title='Heartbeat Datastore Candidate Policy',
enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference',
'userSelectedDs'])
host_monitoring = StringItem(title='Host Monitoring',
choices=['enabled', 'disabled'])
options = ArrayItem(min_items=1, items=OptionValueItem())
vm_monitoring = StringItem(
title='Vm Monitoring',
choices=['vmMonitoringDisabled', 'vmAndAppMonitoring',
'vmMonitoringOnly'])


class vSANClusterConfigItem(ComplexSchemaItem):
'''
Schema item of the ESX cluster vSAN configuration
'''

title = 'vSAN Configuration'
description = 'ESX cluster vSAN configuration item'

enabled = BooleanItem(
title='Enabled',
description='Specifies if vSAN should be enabled')
auto_claim_storage = BooleanItem(
title='Auto Claim Storage',
description='Specifies whether the storage of member ESXi hosts should '
'be automatically claimed for vSAN')
dedup_enabled = BooleanItem(
title='Enabled',
description='Specifies if dedup should be enabled')
compression_enabled = BooleanItem(
title='Enabled',
description='Specifies if compression should be enabled')


class DRSConfigItem(ComplexSchemaItem):
'''
Schema item of the ESX cluster DRS configuration
'''

title = 'DRS Configuration'
description = 'ESX cluster DRS configuration item'

enabled = BooleanItem(
title='Enabled',
description='Specifies if DRS should be enabled')
vmotion_rate = IntegerItem(
title='vMotion rate',
description='Aggressiveness to do automatic vMotions: '
'1 (least aggressive) - 5 (most aggressive)',
minimum=1,
maximum=5)
default_vm_behavior = StringItem(
title='Default VM DRS Behavior',
description='Specifies the default VM DRS behavior',
enum=['fullyAutomated', 'partiallyAutomated', 'manual'])


class ESXClusterConfigSchema(DefinitionsSchema):
'''
Schema of the ESX cluster config
'''

title = 'ESX Cluster Configuration Schema'
description = 'ESX cluster configuration schema'

ha = HAConfigItem()
vsan = vSANClusterConfigItem()
drs = DRSConfigItem()
vm_swap_placement = StringItem(title='VM Swap Placement')


class ESXClusterEntitySchema(Schema):
'''Schema of the ESX cluster entity'''

title = 'ESX Cluster Entity Schema'
description = 'ESX cluster entity schema'

type = StringItem(title='Type',
description='Specifies the entity type',
required=True,
enum=['cluster'])

datacenter = StringItem(title='Datacenter',
description='Specifies the cluster datacenter',
required=True,
pattern=r'\w+')

cluster = StringItem(title='Cluster',
description='Specifies the cluster name',
required=True,
pattern=r'\w+')


class LicenseSchema(Schema):
'''
Schema of the license configuration
'''

title = 'Licenses schema'
description = 'License configuration schema'

licenses = DictItem(
title='Licenses',
description='Dictionary containing the license name to key mapping',
required=True,
additional_properties=StringItem(
title='License Key',
description='Specifies the license key',
pattern=r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$'))


class EsxclusterProxySchema(Schema):

33
salt/config/schemas/vcenter.py
Normal file
@ -0,0 +1,33 @@

# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)`
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`

salt.config.schemas.vcenter
~~~~~~~~~~~~~~~~~~~~~~~~~~~

VCenter configuration schemas
'''

# Import Python libs
from __future__ import absolute_import

# Import Salt libs
from salt.utils.schema import (Schema,
StringItem)


class VCenterEntitySchema(Schema):
'''
Entity Schema for a VCenter.
'''
title = 'VCenter Entity Schema'
description = 'VCenter entity schema'
type = StringItem(title='Type',
description='Specifies the entity type',
required=True,
enum=['vcenter'])

vcenter = StringItem(title='vCenter',
description='Specifies the vcenter hostname',
required=True)

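Assuming ``salt.utils.schema.Schema`` exposes the usual ``serialize()`` class method, the new entity schema can be rendered to a JSON-schema dict for validation; a hedged sketch:

.. code-block:: python

    from salt.config.schemas.vcenter import VCenterEntitySchema

    schema = VCenterEntitySchema.serialize()
    print(schema['properties']['vcenter'])  # expected: the StringItem definition
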
@ -170,6 +170,14 @@ def clean_old_jobs(opts):

def mk_key(opts, user):
if HAS_PWD:
uid = None
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
# User doesn't exist in the system
if opts['client_acl_verify']:
return None
if salt.utils.platform.is_windows():
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.

@ -197,9 +205,9 @@ def mk_key(opts, user):

# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
if HAS_PWD and uid is not None:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
os.chown(keyfile, uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file

@ -214,27 +222,26 @@ def access_keys(opts):

'''
# TODO: Need a way to get all available users for systems not supported by pwd module.
# For now users pattern matching will not work for publisher_acl.
users = []
keys = {}
publisher_acl = opts['publisher_acl']
acl_users = set(publisher_acl.keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
key = mk_key(opts, user)
if key is not None:
keys[user] = key

# Check other users matching ACL patterns
if opts['client_acl_verify'] and HAS_PWD:
log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
for user in pwd.getpwall():
users.append(user.pw_name)
log.profile('End pwd.getpwall() call in masterapi access_keys function')
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
keys[user] = mk_key(opts, user)

# Check other users matching ACL patterns
if HAS_PWD:
for user in users:
user = user.pw_name
if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users):
keys[user] = mk_key(opts, user)
log.profile('End pwd.getpwall() call in masterapi access_keys function')

return keys

@ -266,6 +266,12 @@ class SaltCacheError(SaltException):

'''


class TimeoutError(SaltException):
'''
Thrown when an operation cannot be completed within a given time limit.
'''


class SaltReqTimeoutError(SaltException):
'''
Thrown when a salt master request call fails to return within the timeout

@ -403,6 +409,12 @@ class ArgumentValueError(CommandExecutionError):

'''


class InvalidEntityError(CommandExecutionError):
'''
Used when an entity fails validation
'''


# VMware related exceptions
class VMwareSaltError(CommandExecutionError):
'''

@ -163,4 +163,3 @@ def WaitForTasks(tasks, si):

finally:
if filter:
filter.Destroy()

143
salt/ext/vsan/vsanmgmtObjects.py
Normal file
File diff suppressed because one or more lines are too long

@ -185,12 +185,13 @@ class Client(object):

'''
raise NotImplementedError

def cache_file(self, path, saltenv=u'base', cachedir=None):
def cache_file(self, path, saltenv=u'base', cachedir=None, source_hash=None):
'''
Pull a file down from the file server and store it in the minion
file cache
'''
return self.get_url(path, u'', True, saltenv, cachedir=cachedir)
return self.get_url(
path, u'', True, saltenv, cachedir=cachedir, source_hash=source_hash)

def cache_files(self, paths, saltenv=u'base', cachedir=None):
'''

@ -470,7 +471,7 @@ class Client(object):

return ret

def get_url(self, url, dest, makedirs=False, saltenv=u'base',
no_cache=False, cachedir=None):
no_cache=False, cachedir=None, source_hash=None):
'''
Get a single file from a URL.
'''

@ -525,6 +526,18 @@ class Client(object):

return u''
elif not no_cache:
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
if source_hash is not None:
try:
source_hash = source_hash.split('=')[-1]
form = salt.utils.files.HASHES_REVMAP[len(source_hash)]
if salt.utils.get_hash(dest, form) == source_hash:
log.debug(
'Cached copy of %s (%s) matches source_hash %s, '
'skipping download', url, dest, source_hash
)
return dest
except (AttributeError, KeyError, IOError, OSError):
pass
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)

@ -532,7 +545,9 @@ class Client(object):

if url_data.scheme == u's3':
try:
def s3_opt(key, default=None):
u'''Get value of s3.<key> from Minion config or from Pillar'''
'''
Get value of s3.<key> from Minion config or from Pillar
'''
if u's3.' + key in self.opts:
return self.opts[u's3.' + key]
try:

@ -785,7 +800,7 @@ class Client(object):


def _extrn_path(self, url, saltenv, cachedir=None):
'''
Return the extn_filepath for a given url
Return the extrn_filepath for a given url
'''
url_data = urlparse(url)
if salt.utils.platform.is_windows():

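The ``HASHES_REVMAP`` lookup above resolves the hash algorithm from the digest length; conceptually it works like this (a sketch, not the literal table from ``salt.utils.files``):

.. code-block:: python

    import hashlib

    HASHES_REVMAP = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}

    def matches(path, source_hash):
        digest = source_hash.split('=')[-1]
        form = HASHES_REVMAP[len(digest)]  # KeyError means an unrecognized length
        with open(path, 'rb') as fh_:
            return hashlib.new(form, fh_.read()).hexdigest() == digest
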
@ -16,6 +16,7 @@ import os

import json
import socket
import sys
import glob
import re
import platform
import logging

@ -65,6 +66,7 @@ __salt__ = {

'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
'cmd.run_ps': salt.modules.cmdmod.powershell,
}
log = logging.getLogger(__name__)

@ -1205,6 +1207,10 @@ _OS_FAMILY_MAP = {

'Raspbian': 'Debian',
'Devuan': 'Debian',
'antiX': 'Debian',
'Kali': 'Debian',
'neon': 'Debian',
'Cumulus': 'Debian',
'Deepin': 'Debian',
'NILinuxRT': 'NILinuxRT',
'NILinuxRT-XFCE': 'NILinuxRT',
'KDE neon': 'Debian',

@ -2446,7 +2452,7 @@ def default_gateway():

ip_gw: True  # True if either of the above is True, False otherwise
'''
grains = {}
if not salt.utils.which('ip'):
if not salt.utils.path.which('ip'):
return {}
grains['ip_gw'] = False
grains['ip4_gw'] = False

@ -2468,3 +2474,119 @@ def default_gateway():

except Exception as exc:
pass
return grains


def fc_wwn():
'''
Return list of Fibre Channel HBA WWNs
'''
grains = {}
grains['fc_wwn'] = False
if salt.utils.platform.is_linux():
grains['fc_wwn'] = _linux_wwns()
elif salt.utils.platform.is_windows():
grains['fc_wwn'] = _windows_wwns()
return grains


def iscsi_iqn():
'''
Return iSCSI IQN
'''
grains = {}
grains['iscsi_iqn'] = False
if salt.utils.platform.is_linux():
grains['iscsi_iqn'] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains['iscsi_iqn'] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains['iscsi_iqn'] = _aix_iqn()
return grains


def _linux_iqn():
'''
Return iSCSI IQN from a Linux host.
'''
ret = []

initiator = '/etc/iscsi/initiatorname.iscsi'

if os.path.isfile(initiator):
with salt.utils.files.fopen(initiator, 'r') as _iscsi:
for line in _iscsi:
if line.find('InitiatorName') != -1:
iqn = line.split('=')
ret.extend([iqn[1]])
return ret


def _aix_iqn():
'''
Return iSCSI IQN from an AIX host.
'''
ret = []

aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name'

aixret = __salt__['cmd.run'](aixcmd)
if aixret[0].isalpha():
iqn = aixret.split()
ret.extend([iqn[1]])
return ret


def _linux_wwns():
'''
Return Fibre Channel port WWNs from a Linux host.
'''
ret = []

for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
with salt.utils.files.fopen(fcfile, 'r') as _wwn:
for line in _wwn:
ret.extend([line[2:]])
return ret


def _windows_iqn():
'''
Return iSCSI IQN from a Windows host.
'''
ret = []

wmic = salt.utils.path.which('wmic')

if not wmic:
return ret

namespace = r'\\root\WMI'
mspath = 'MSiSCSIInitiator_MethodClass'
get = 'iSCSINodeName'

cmdret = __salt__['cmd.run_all'](
'{0} /namespace:{1} path {2} get {3} /format:table'.format(
wmic, namespace, mspath, get))

for line in cmdret['stdout'].splitlines():
if line[0].isalpha():
continue
ret.extend([line])

return ret


def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'

ret = []

cmdret = __salt__['cmd.run_ps'](ps_cmd)

for line in cmdret:
ret.append(line)

return ret

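Once these grains are loaded, they can be queried like any other grain; a usage sketch (output depends on the host's HBAs and initiator configuration):

.. code-block:: bash

    salt '*' grains.get iscsi_iqn
    salt '*' grains.get fc_wwn
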
@ -11,6 +11,7 @@ import logging

# Import salt libs
import salt.utils.files
import salt.utils.platform

log = logging.getLogger(__name__)

@ -21,7 +22,14 @@ def shell():

'''
# Provides:
#   shell
return {'shell': os.environ.get('SHELL', '/bin/sh')}
if salt.utils.platform.is_windows():
env_var = 'COMSPEC'
default = r'C:\Windows\system32\cmd.exe'
else:
env_var = 'SHELL'
default = '/bin/sh'

return {'shell': os.environ.get(env_var, default)}


def config():

@ -11,7 +11,18 @@

Fluent Logging Handler
----------------------

In the salt configuration file:
In the `fluent` configuration file:

.. code-block:: text

<source>
type forward
bind localhost
port 24224
</source>

Then, to send logs via fluent in Logstash format, add the
following to the salt (master and/or minion) configuration file:

.. code-block:: yaml

@ -19,14 +30,32 @@

host: localhost
port: 24224

In the `fluent`_ configuration file:
To send logs via fluent in the Graylog raw json format, add the
following to the salt (master and/or minion) configuration file:

.. code-block:: text
.. code-block:: yaml

<source>
type forward
port 24224
</source>
fluent_handler:
host: localhost
port: 24224
payload_type: graylog
tags:
- salt_master.SALT

The above also illustrates the `tags` option, which allows
one to set descriptive (or useful) tags on records being
sent. If not provided, this defaults to the single tag:
'salt'. Also note that, via Graylog "magic", the 'facility'
of the logged message is set to 'SALT' (the portion of the
tag after the first period), while the tag itself will be
set to simply 'salt_master'. This is a feature, not a bug :)

Note:
There is a third emitter, for the GELF format, but it is
largely untested, and I don't currently have a setup supporting
this config, so while it runs cleanly and outputs what LOOKS to
be valid GELF, any real-world feedback on its usefulness, and
correctness, will be appreciated.

Log Level
.........

@ -53,7 +82,7 @@ import time

import datetime
import socket
import threading

import types

# Import salt libs
from salt.log.setup import LOG_LEVELS

@ -91,6 +120,19 @@ __virtualname__ = 'fluent'

_global_sender = None

# Python logger's idea of "level" is wildly at variance with
# Graylog's (and, incidentally, the rest of the civilized world).
syslog_levels = {
'EMERG': 0,
'ALERT': 2,
'CRIT': 2,
'ERR': 3,
'WARNING': 4,
'NOTICE': 5,
'INFO': 6,
'DEBUG': 7
}


def setup(tag, **kwargs):
host = kwargs.get('host', 'localhost')

@ -116,55 +158,133 @@ def __virtual__():


def setup_handlers():
host = port = address = None
host = port = None

if 'fluent_handler' in __opts__:
host = __opts__['fluent_handler'].get('host', None)
port = __opts__['fluent_handler'].get('port', None)
version = __opts__['fluent_handler'].get('version', 1)
payload_type = __opts__['fluent_handler'].get('payload_type', None)
# in general, you want the value of tag to ALSO be a member of tags
tags = __opts__['fluent_handler'].get('tags', ['salt'])
tag = tags[0] if len(tags) else 'salt'
if payload_type == 'graylog':
version = 0
elif payload_type == 'gelf':
# We only support version 1.1 (the latest) of GELF...
version = 1.1
else:
# Default to logstash for backwards compat
payload_type = 'logstash'
version = __opts__['fluent_handler'].get('version', 1)

if host is None and port is None:
log.debug(
'The required \'fluent_handler\' configuration keys, '
'\'host\' and/or \'port\', are not properly configured. Not '
'configuring the fluent logging handler.'
'enabling the fluent logging handler.'
)
else:
logstash_formatter = LogstashFormatter(version=version)
fluent_handler = FluentHandler('salt', host=host, port=port)
fluent_handler.setFormatter(logstash_formatter)
formatter = MessageFormatter(payload_type=payload_type, version=version, tags=tags)
fluent_handler = FluentHandler(tag, host=host, port=port)
fluent_handler.setFormatter(formatter)
fluent_handler.setLevel(
LOG_LEVELS[
__opts__['fluent_handler'].get(
'log_level',
# Not set? Get the main salt log_level setting on the
# configuration file
__opts__.get(
'log_level',
# Also not set?! Default to 'error'
'error'
)
)
]
LOG_LEVELS[__opts__['fluent_handler'].get('log_level', __opts__.get('log_level', 'error'))]
)
yield fluent_handler

if host is None and port is None and address is None:
if host is None and port is None:
yield False


class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, msg_type='logstash', msg_path='logstash', version=1):
self.msg_path = msg_path
self.msg_type = msg_type
class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None):
self.payload_type = payload_type
self.version = version
self.format = getattr(self, 'format_v{0}'.format(version))
super(LogstashFormatter, self).__init__(fmt=None, datefmt=None)
self.tag = tags[0] if len(tags) else 'salt'  # 'salt' for backwards compat
self.tags = tags
self.msg_path = msg_path if msg_path else payload_type
self.msg_type = msg_type if msg_type else payload_type
format_func = 'format_{0}_v{1}'.format(payload_type, version).replace('.', '_')
self.format = getattr(self, format_func)
super(MessageFormatter, self).__init__(fmt=None, datefmt=None)

def formatTime(self, record, datefmt=None):
if self.payload_type == 'gelf':  # GELF uses epoch times
return record.created
return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'

def format_v0(self, record):
def format_graylog_v0(self, record):
'''
Graylog 'raw' format is essentially the raw record, minimally munged to provide
the bare minimum that td-agent requires to accept and route the event. This is
well suited to a config where the client td-agents log directly to Graylog.
'''
message_dict = {
'message': record.getMessage(),
'timestamp': self.formatTime(record),
# Graylog uses syslog levels, not whatever it is Python does...
'level': syslog_levels.get(record.levelname, 'ALERT'),
'tag': self.tag
}

if record.exc_info:
exc_info = self.formatException(record.exc_info)
message_dict.update({'full_message': exc_info})

# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
'msecs', 'message', 'msg', 'relativeCreated', 'version'):
# These are already handled above or explicitly pruned.
continue

if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)):  # pylint: disable=W1699
val = value
else:
val = repr(value)
message_dict.update({'{0}'.format(key): val})
return message_dict

def format_gelf_v1_1(self, record):
'''
If your agent is (or can be) configured to forward pre-formed GELF to Graylog
with ZERO fluent processing, this function is for YOU, pal...
'''
message_dict = {
'version': self.version,
'host': salt.utils.network.get_fqhostname(),
'short_message': record.getMessage(),
'timestamp': self.formatTime(record),
'level': syslog_levels.get(record.levelname, 'ALERT'),
"_tag": self.tag
}

if record.exc_info:
exc_info = self.formatException(record.exc_info)
message_dict.update({'full_message': exc_info})

# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
'msecs', 'message', 'msg', 'relativeCreated', 'version'):
# These are already handled above or explicitly avoided.
continue

if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)):  # pylint: disable=W1699
val = value
else:
val = repr(value)
# GELF spec requires "non-standard" fields to be prefixed with '_' (underscore).
message_dict.update({'_{0}'.format(key): val})

return message_dict

def format_logstash_v0(self, record):
'''
Messages are formatted in logstash's expected format.
'''
host = salt.utils.network.get_fqhostname()
message_dict = {
'@timestamp': self.formatTime(record),

@ -186,7 +306,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):

),
'@source_host': host,
'@source_path': self.msg_path,
'@tags': ['salt'],
'@tags': self.tags,
'@type': self.msg_type,
}

@ -216,7 +336,10 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):

message_dict['@fields'][key] = repr(value)
return message_dict

def format_v1(self, record):
def format_logstash_v1(self, record):
'''
Messages are formatted in logstash's expected format.
'''
message_dict = {
'@version': 1,
'@timestamp': self.formatTime(record),

@ -230,7 +353,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):

'funcName': record.funcName,
'processName': record.processName,
'message': record.getMessage(),
'tags': ['salt'],
'tags': self.tags,
'type': self.msg_type
}

@ -1333,6 +1333,7 @@ class Minion(MinionBase):

self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
return True

@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data

@ -1365,6 +1366,15 @@ class Minion(MinionBase):

self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners

process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))

# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other

@ -1643,13 +1653,24 @@ class Minion(MinionBase):

minion side execution.
'''
salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid']))
ret = {
u'return': {},
u'retcode': {},
u'success': {}
}
for ind in range(0, len(data[u'fun'])):
ret[u'success'][data[u'fun'][ind]] = False
multifunc_ordered = opts.get(u'multifunc_ordered', False)
num_funcs = len(data[u'fun'])
if multifunc_ordered:
ret = {
u'return': [None] * num_funcs,
u'retcode': [None] * num_funcs,
u'success': [False] * num_funcs
}
else:
ret = {
u'return': {},
u'retcode': {},
u'success': {}
}

for ind in range(0, num_funcs):
if not multifunc_ordered:
ret[u'success'][data[u'fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):

@ -1673,16 +1694,27 @@ class Minion(MinionBase):

data[u'arg'][ind],
data)
minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][data[u'fun'][ind]] = True
if multifunc_ordered:
ret[u'return'][ind] = func(*args, **kwargs)
ret[u'retcode'][ind] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][ind] = True
else:
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][data[u'fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(u'The minion function caused an exception: %s', exc)
ret[u'return'][data[u'fun'][ind]] = trb
if multifunc_ordered:
ret[u'return'][ind] = trb
else:
ret[u'return'][data[u'fun'][ind]] = trb
ret[u'jid'] = data[u'jid']
ret[u'fun'] = data[u'fun']
ret[u'fun_args'] = data[u'arg']

@ -2674,6 +2706,8 @@ class SyndicManager(MinionBase):

'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(

@ -2684,15 +2718,15 @@ class SyndicManager(MinionBase):

try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
successful = True
except SaltClientError:
log.error(
u'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
continue
log.critical(u'Unable to call %s on any masters!', func)
if not successful:
log.critical(u'Unable to call %s on any masters!', func)

def _return_pub_syndic(self, values, master_id=None):
'''

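The throttle loop added to ``_handle_decoded_payload`` above is a plain bounded-concurrency wait; the same idea as a standalone coroutine sketch (assuming Tornado, as the surrounding code does):

.. code-block:: python

    import tornado.gen

    @tornado.gen.coroutine
    def wait_for_slot(count_running, process_count_max):
        # Sleep until the number of running jobs drops below the limit.
        while count_running() >= process_count_max:
            yield tornado.gen.sleep(10)
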
@ -97,11 +97,15 @@ __virtualname__ = 'pkg'

def __virtual__():
'''
Confirm this module is on a Debian based system
Confirm this module is on a Debian-based system
'''
if __grains__.get('os_family') in ('Kali', 'Debian', 'neon', 'Deepin'):
return __virtualname__
elif __grains__.get('os_family', False) == 'Cumulus':
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
if __grains__.get('os_family') == 'Debian':
return __virtualname__
return (False, 'The pkg module could not be loaded: unsupported OS family')

|
||||
strip_components=None,
|
||||
clean=False,
|
||||
verbose=False,
|
||||
saltenv='base'):
|
||||
saltenv='base',
|
||||
source_hash=None):
|
||||
'''
|
||||
.. versionadded:: 2016.11.0
|
||||
.. versionchanged:: 2016.11.2
|
||||
@ -149,6 +150,14 @@ def list_(name,
|
||||
``archive``. This is only applicable when ``archive`` is a file from
|
||||
the ``salt://`` fileserver.
|
||||
|
||||
source_hash
|
||||
If ``name`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
.. _tarfile: https://docs.python.org/2/library/tarfile.html
|
||||
.. _xz: http://tukaani.org/xz/
|
||||
|
||||
@ -160,6 +169,7 @@ def list_(name,
|
||||
salt '*' archive.list /path/to/myfile.tar.gz strip_components=1
|
||||
salt '*' archive.list salt://foo.tar.gz
|
||||
salt '*' archive.list https://domain.tld/myfile.zip
|
||||
salt '*' archive.list https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
|
||||
salt '*' archive.list ftp://10.1.2.3/foo.rar
|
||||
'''
|
||||
def _list_tar(name, cached, decompress_cmd, failhard=False):
|
||||
@ -309,7 +319,7 @@ def list_(name,
|
||||
)
|
||||
return dirs, files, []
|
||||
|
||||
cached = __salt__['cp.cache_file'](name, saltenv)
|
||||
cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
|
||||
if not cached:
|
||||
raise CommandExecutionError('Failed to cache {0}'.format(name))
|
||||
|
||||
@ -1094,7 +1104,7 @@ def unzip(zip_file,
|
||||
return _trim_files(cleaned_files, trim_output)
|
||||
|
||||
|
||||
def is_encrypted(name, clean=False, saltenv='base'):
|
||||
def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
|
||||
'''
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
@ -1113,6 +1123,18 @@ def is_encrypted(name, clean=False, saltenv='base'):
|
||||
If there is an error listing the archive's contents, the cached
|
||||
file will not be removed, to allow for troubleshooting.
|
||||
|
||||
saltenv : base
|
||||
Specifies the fileserver environment from which to retrieve
|
||||
``archive``. This is only applicable when ``archive`` is a file from
|
||||
the ``salt://`` fileserver.
|
||||
|
||||
source_hash
|
||||
If ``name`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Examples:
|
||||
|
||||
@ -1122,9 +1144,10 @@ def is_encrypted(name, clean=False, saltenv='base'):
|
||||
salt '*' archive.is_encrypted salt://foo.zip
|
||||
salt '*' archive.is_encrypted salt://foo.zip saltenv=dev
|
||||
salt '*' archive.is_encrypted https://domain.tld/myfile.zip clean=True
|
||||
salt '*' archive.is_encrypted https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
|
||||
salt '*' archive.is_encrypted ftp://10.1.2.3/foo.zip
|
||||
'''
|
||||
cached = __salt__['cp.cache_file'](name, saltenv)
|
||||
cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
|
||||
if not cached:
|
||||
raise CommandExecutionError('Failed to cache {0}'.format(name))
|
||||
|
||||
|
@ -161,10 +161,9 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):

salt myminion boto_elb.exists myelb region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
wait = 60
orig_wait = wait
retries = 30

while True:
while retries:
try:
lb = conn.get_all_load_balancers(load_balancer_names=[name])
lb = lb[0]

@ -205,16 +204,15 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):

ret['policies'] = policies
return ret
except boto.exception.BotoServerError as error:
if getattr(error, 'error_code', '') == 'Throttling':
if wait > 0:
sleep = wait if wait % 5 == wait else 5
log.info('Throttled by AWS API, will retry in 5 seconds.')
time.sleep(sleep)
wait -= sleep
continue
log.error('API still throttling us after {0} seconds!'.format(orig_wait))
if error.error_code == 'Throttling':
log.debug('Throttled by AWS API, will retry in 5 seconds.')
time.sleep(5)
retries -= 1
continue
log.error('Error fetching config for ELB {0}: {1}'.format(name, error.message))
log.error(error)
return {}
return {}


def listener_dict_to_tuple(listener):

@ -515,31 +513,38 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):

salt myminion boto_elb.get_attributes myelb
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30

try:
lbattrs = conn.get_all_lb_attributes(name)
ret = odict.OrderedDict()
ret['access_log'] = odict.OrderedDict()
ret['cross_zone_load_balancing'] = odict.OrderedDict()
ret['connection_draining'] = odict.OrderedDict()
ret['connecting_settings'] = odict.OrderedDict()
al = lbattrs.access_log
czlb = lbattrs.cross_zone_load_balancing
cd = lbattrs.connection_draining
cs = lbattrs.connecting_settings
ret['access_log']['enabled'] = al.enabled
ret['access_log']['s3_bucket_name'] = al.s3_bucket_name
ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix
ret['access_log']['emit_interval'] = al.emit_interval
ret['cross_zone_load_balancing']['enabled'] = czlb.enabled
ret['connection_draining']['enabled'] = cd.enabled
ret['connection_draining']['timeout'] = cd.timeout
ret['connecting_settings']['idle_timeout'] = cs.idle_timeout
return ret
except boto.exception.BotoServerError as error:
log.debug(error)
log.error('ELB {0} does not exist: {1}'.format(name, error))
return {}
while retries:
try:
lbattrs = conn.get_all_lb_attributes(name)
ret = odict.OrderedDict()
ret['access_log'] = odict.OrderedDict()
ret['cross_zone_load_balancing'] = odict.OrderedDict()
ret['connection_draining'] = odict.OrderedDict()
ret['connecting_settings'] = odict.OrderedDict()
al = lbattrs.access_log
czlb = lbattrs.cross_zone_load_balancing
cd = lbattrs.connection_draining
cs = lbattrs.connecting_settings
ret['access_log']['enabled'] = al.enabled
ret['access_log']['s3_bucket_name'] = al.s3_bucket_name
ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix
ret['access_log']['emit_interval'] = al.emit_interval
ret['cross_zone_load_balancing']['enabled'] = czlb.enabled
ret['connection_draining']['enabled'] = cd.enabled
ret['connection_draining']['timeout'] = cd.timeout
ret['connecting_settings']['idle_timeout'] = cs.idle_timeout
return ret
except boto.exception.BotoServerError as e:
if e.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('ELB {0} does not exist: {1}'.format(name, e.message))
return {}
return {}


def set_attributes(name, attributes, region=None, key=None, keyid=None,

@ -78,7 +78,7 @@ def __virtual__():
|
||||
'''
|
||||
if not HAS_BOTO3:
|
||||
return (False, 'The boto_sqs module could not be loaded: boto3 libraries not found')
|
||||
__utils__['boto3.assign_funcs'](__name__, 'sqs', pack=__salt__)
|
||||
__utils__['boto3.assign_funcs'](__name__, 'sqs')
|
||||
return True
|
||||
|
||||
|
||||
|
@ -599,9 +599,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
try:
vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(vpc_ids)}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return False.
return {'exists': False}
return {'error': boto_err}

return {'exists': bool(vpc_ids)}

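The pattern this hunk introduces, reduced to its core: inspect the
normalized error dict and treat one specific AWS code as a negative lookup
rather than a failure. A sketch (the helper name is hypothetical; the error
shape follows ``salt.utils.boto.get_error`` as used above):

.. code-block:: python

    def _not_found_to_false(boto_err):
        # 'InvalidVpcID.NotFound' means the VPC simply does not exist;
        # any other code is a genuine API failure and is surfaced as-is.
        if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
            return {'exists': False}
        return {'error': boto_err}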
def create(cidr_block, instance_tenancy=None, vpc_name=None,
|
||||
@ -723,27 +728,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
|
||||
try:
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
|
||||
if not vpc_id:
|
||||
except BotoServerError as err:
|
||||
boto_err = salt.utils.boto.get_error(err)
|
||||
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
|
||||
# VPC was not found: handle the error and return None.
|
||||
return {'vpc': None}
|
||||
return {'error': boto_err}
|
||||
|
||||
filter_parameters = {'vpc_ids': vpc_id}
|
||||
if not vpc_id:
|
||||
return {'vpc': None}
|
||||
|
||||
filter_parameters = {'vpc_ids': vpc_id}
|
||||
|
||||
try:
|
||||
vpcs = conn.get_all_vpcs(**filter_parameters)
|
||||
except BotoServerError as err:
|
||||
return {'error': salt.utils.boto.get_error(err)}
|
||||
|
||||
if vpcs:
|
||||
vpc = vpcs[0] # Found!
|
||||
log.debug('Found VPC: {0}'.format(vpc.id))
|
||||
if vpcs:
|
||||
vpc = vpcs[0] # Found!
|
||||
log.debug('Found VPC: {0}'.format(vpc.id))
|
||||
|
||||
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
|
||||
'dhcp_options_id', 'instance_tenancy')
|
||||
_r = dict([(k, getattr(vpc, k)) for k in keys])
|
||||
_r.update({'region': getattr(vpc, 'region').name})
|
||||
return {'vpc': _r}
|
||||
else:
|
||||
return {'vpc': None}
|
||||
|
||||
except BotoServerError as e:
|
||||
return {'error': salt.utils.boto.get_error(e)}
|
||||
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
|
||||
'dhcp_options_id', 'instance_tenancy')
|
||||
_r = dict([(k, getattr(vpc, k)) for k in keys])
|
||||
_r.update({'region': getattr(vpc, 'region').name})
|
||||
return {'vpc': _r}
|
||||
else:
|
||||
return {'vpc': None}
|
||||
|
||||
|
||||
def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
|
||||
@ -809,7 +821,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
Given subnet properties, find and return matching subnet ids
'''

if not any(subnet_name, tags, cidr):
if not any([subnet_name, tags, cidr]):
raise SaltInvocationError('At least one of the following must be '
'specified: subnet_name, cidr or tags.')

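The bracket change above fixes a real bug: ``any()`` accepts a single
iterable, so passing three positional arguments raises a ``TypeError``.
A quick illustration:

.. code-block:: python

    subnet_name, tags, cidr = None, None, '10.0.0.0/24'

    # any(subnet_name, tags, cidr)  # TypeError: any() takes exactly one argument
    if not any([subnet_name, tags, cidr]):
        raise ValueError('At least one of subnet_name, cidr or tags is required.')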
@ -927,34 +939,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
|
||||
|
||||
try:
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
filter_parameters = {'filters': {}}
|
||||
except BotoServerError as err:
|
||||
return {'error': salt.utils.boto.get_error(err)}
|
||||
|
||||
if subnet_id:
|
||||
filter_parameters['subnet_ids'] = [subnet_id]
|
||||
|
||||
if subnet_name:
|
||||
filter_parameters['filters']['tag:Name'] = subnet_name
|
||||
|
||||
if cidr:
|
||||
filter_parameters['filters']['cidr'] = cidr
|
||||
|
||||
if tags:
|
||||
for tag_name, tag_value in six.iteritems(tags):
|
||||
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
|
||||
|
||||
if zones:
|
||||
filter_parameters['filters']['availability_zone'] = zones
|
||||
filter_parameters = {'filters': {}}
|
||||
if subnet_id:
|
||||
filter_parameters['subnet_ids'] = [subnet_id]
|
||||
if subnet_name:
|
||||
filter_parameters['filters']['tag:Name'] = subnet_name
|
||||
if cidr:
|
||||
filter_parameters['filters']['cidr'] = cidr
|
||||
if tags:
|
||||
for tag_name, tag_value in six.iteritems(tags):
|
||||
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
|
||||
if zones:
|
||||
filter_parameters['filters']['availability_zone'] = zones
|
||||
|
||||
try:
|
||||
subnets = conn.get_all_subnets(**filter_parameters)
|
||||
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
|
||||
if subnets:
|
||||
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
|
||||
return {'exists': True}
|
||||
else:
|
||||
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
|
||||
except BotoServerError as err:
|
||||
boto_err = salt.utils.boto.get_error(err)
|
||||
if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
|
||||
# Subnet was not found: handle the error and return False.
|
||||
return {'exists': False}
|
||||
except BotoServerError as e:
|
||||
return {'error': salt.utils.boto.get_error(e)}
|
||||
return {'error': boto_err}
|
||||
|
||||
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
|
||||
if subnets:
|
||||
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
|
||||
return {'exists': True}
|
||||
else:
|
||||
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
|
||||
return {'exists': False}
|
||||
|
||||
|
||||
def get_subnet_association(subnets, region=None, key=None, keyid=None,
|
||||
@ -2456,11 +2472,10 @@ def describe_route_table(route_table_id=None, route_table_name=None,
|
||||
salt myminion boto_vpc.describe_route_table route_table_id='rtb-1f382e7d'
|
||||
|
||||
'''
|
||||
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'The \'describe_route_table\' method has been deprecated and '
|
||||
'replaced by \'describe_route_tables\'.'
|
||||
'Neon',
|
||||
'The \'describe_route_table\' method has been deprecated and '
|
||||
'replaced by \'describe_route_tables\'.'
|
||||
)
|
||||
if not any((route_table_id, route_table_name, tags)):
|
||||
raise SaltInvocationError('At least one of the following must be specified: '
|
||||
|
@ -352,7 +352,7 @@ def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs):
|
||||
return _client().get_dir(path, dest, saltenv, gzip)
|
||||
|
||||
|
||||
def get_url(path, dest='', saltenv='base', makedirs=False):
|
||||
def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None):
|
||||
'''
|
||||
.. versionchanged:: Oxygen
|
||||
``dest`` can now be a directory
|
||||
@ -386,6 +386,13 @@ def get_url(path, dest='', saltenv='base', makedirs=False):
|
||||
Salt fileserver environment from which to retrieve the file. Ignored if
|
||||
``path`` is not a ``salt://`` URL.
|
||||
|
||||
source_hash
|
||||
If ``path`` is an http(s) or ftp URL and the file exists in the
|
||||
minion's file cache, this option can be passed to keep the minion from
|
||||
re-downloading the file if the cached copy matches the specified hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -394,9 +401,11 @@ def get_url(path, dest='', saltenv='base', makedirs=False):
|
||||
salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
|
||||
'''
|
||||
if isinstance(dest, six.string_types):
|
||||
result = _client().get_url(path, dest, makedirs, saltenv)
|
||||
result = _client().get_url(
|
||||
path, dest, makedirs, saltenv, source_hash=source_hash)
|
||||
else:
|
||||
result = _client().get_url(path, None, makedirs, saltenv, no_cache=True)
|
||||
result = _client().get_url(
|
||||
path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash)
|
||||
if not result:
|
||||
log.error(
|
||||
'Unable to fetch file {0} from saltenv {1}.'.format(
|
||||
@ -429,11 +438,18 @@ def get_file_str(path, saltenv='base'):
|
||||
return fn_
|
||||
|
||||
|
||||
def cache_file(path, saltenv='base'):
|
||||
def cache_file(path, saltenv='base', source_hash=None):
|
||||
'''
|
||||
Used to cache a single file on the Minion
|
||||
|
||||
Returns the location of the new cached file on the Minion.
|
||||
Returns the location of the new cached file on the Minion
|
||||
|
||||
source_hash
|
||||
If ``name`` is an http(s) or ftp URL and the file exists in the
|
||||
minion's file cache, this option can be passed to keep the minion from
|
||||
re-downloading the file if the cached copy matches the specified hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -485,7 +501,7 @@ def cache_file(path, saltenv='base'):
|
||||
if senv:
|
||||
saltenv = senv
|
||||
|
||||
result = _client().cache_file(path, saltenv)
|
||||
result = _client().cache_file(path, saltenv, source_hash=source_hash)
|
||||
if not result:
|
||||
log.error(
|
||||
u'Unable to cache file \'%s\' from saltenv \'%s\'.',
|
||||
|
@ -88,6 +88,7 @@ def _get_instance(hosts=None, profile=None):
|
||||
ca_certs = None
|
||||
verify_certs = True
|
||||
http_auth = None
|
||||
timeout = 10
|
||||
|
||||
if profile is None:
|
||||
profile = 'elasticsearch'
|
||||
@ -106,6 +107,7 @@ def _get_instance(hosts=None, profile=None):
|
||||
verify_certs = _profile.get('verify_certs', True)
|
||||
username = _profile.get('username', None)
|
||||
password = _profile.get('password', None)
|
||||
timeout = _profile.get('timeout', 10)
|
||||
|
||||
if username and password:
|
||||
http_auth = (username, password)
|
||||
@ -131,6 +133,7 @@ def _get_instance(hosts=None, profile=None):
|
||||
ca_certs=ca_certs,
|
||||
verify_certs=verify_certs,
|
||||
http_auth=http_auth,
|
||||
timeout=timeout,
|
||||
)
|
||||
else:
|
||||
es = elasticsearch.Elasticsearch(
|
||||
@ -139,6 +142,7 @@ def _get_instance(hosts=None, profile=None):
|
||||
ca_certs=ca_certs,
|
||||
verify_certs=verify_certs,
|
||||
http_auth=http_auth,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Try the connection
|
||||
|
@ -60,6 +60,7 @@ import salt.utils.stringutils
|
||||
import salt.utils.templates
|
||||
import salt.utils.url
|
||||
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
|
||||
from salt.utils.files import HASHES, HASHES_REVMAP
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -67,16 +68,6 @@ __func_alias__ = {
|
||||
'makedirs_': 'makedirs'
|
||||
}
|
||||
|
||||
HASHES = {
|
||||
'sha512': 128,
|
||||
'sha384': 96,
|
||||
'sha256': 64,
|
||||
'sha224': 56,
|
||||
'sha1': 40,
|
||||
'md5': 32,
|
||||
}
|
||||
HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)])
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
@ -3767,14 +3758,8 @@ def source_list(source, source_hash, saltenv):
|
||||
ret = (single_src, single_hash)
|
||||
break
|
||||
elif proto.startswith('http') or proto == 'ftp':
|
||||
try:
|
||||
if __salt__['cp.cache_file'](single_src):
|
||||
ret = (single_src, single_hash)
|
||||
break
|
||||
except MinionError as exc:
|
||||
# Error downloading file. Log the caught exception and
|
||||
# continue on to the next source.
|
||||
log.exception(exc)
|
||||
ret = (single_src, single_hash)
|
||||
break
|
||||
elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
|
||||
ret = (single_src, single_hash)
|
||||
break
|
||||
@ -3794,9 +3779,8 @@ def source_list(source, source_hash, saltenv):
|
||||
ret = (single, source_hash)
|
||||
break
|
||||
elif proto.startswith('http') or proto == 'ftp':
|
||||
if __salt__['cp.cache_file'](single):
|
||||
ret = (single, source_hash)
|
||||
break
|
||||
ret = (single, source_hash)
|
||||
break
|
||||
elif single.startswith('/') and os.path.exists(single):
|
||||
ret = (single, source_hash)
|
||||
break
|
||||
@ -4007,11 +3991,14 @@ def get_managed(
|
||||
else:
|
||||
sfn = cached_dest
|
||||
|
||||
# If we didn't have the template or remote file, let's get it
|
||||
# Similarly when the file has been updated and the cache has to be refreshed
|
||||
# If we didn't have the template or remote file, or the file has been
|
||||
# updated and the cache has to be refreshed, download the file.
|
||||
if not sfn or cache_refetch:
|
||||
try:
|
||||
sfn = __salt__['cp.cache_file'](source, saltenv)
|
||||
sfn = __salt__['cp.cache_file'](
|
||||
source,
|
||||
saltenv,
|
||||
source_hash=source_sum.get('hsum'))
|
||||
except Exception as exc:
|
||||
# A 404 or other error code may raise an exception, catch it
|
||||
# and return a comment that will fail the calling state.
|
||||
@ -4675,7 +4662,7 @@ def check_file_meta(
|
||||
'''
|
||||
changes = {}
|
||||
if not source_sum:
|
||||
source_sum = dict()
|
||||
source_sum = {}
|
||||
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
|
||||
if not lstats:
|
||||
changes['newfile'] = name
|
||||
@ -4683,7 +4670,10 @@ def check_file_meta(
|
||||
if 'hsum' in source_sum:
|
||||
if source_sum['hsum'] != lstats['sum']:
|
||||
if not sfn and source:
|
||||
sfn = __salt__['cp.cache_file'](source, saltenv)
|
||||
sfn = __salt__['cp.cache_file'](
|
||||
source,
|
||||
saltenv,
|
||||
source_hash=source_sum['hsum'])
|
||||
if sfn:
|
||||
try:
|
||||
changes['diff'] = get_diff(
|
||||
@ -4750,7 +4740,9 @@ def get_diff(file1,
|
||||
saltenv='base',
|
||||
show_filenames=True,
|
||||
show_changes=True,
|
||||
template=False):
|
||||
template=False,
|
||||
source_hash_file1=None,
|
||||
source_hash_file2=None):
|
||||
'''
|
||||
Return unified diff of two files
|
||||
|
||||
@ -4785,6 +4777,22 @@ def get_diff(file1,
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
source_hash_file1
|
||||
If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
source_hash_file2
|
||||
If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Examples:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -4793,14 +4801,17 @@ def get_diff(file1,
|
||||
salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
|
||||
'''
|
||||
files = (file1, file2)
|
||||
source_hashes = (source_hash_file1, source_hash_file2)
|
||||
paths = []
|
||||
errors = []
|
||||
|
||||
for filename in files:
|
||||
for filename, source_hash in zip(files, source_hashes):
|
||||
try:
|
||||
# Local file paths will just return the same path back when passed
|
||||
# to cp.cache_file.
|
||||
cached_path = __salt__['cp.cache_file'](filename, saltenv)
|
||||
cached_path = __salt__['cp.cache_file'](filename,
|
||||
saltenv,
|
||||
source_hash=source_hash)
|
||||
if cached_path is False:
|
||||
errors.append(
|
||||
u'File {0} not found'.format(
|
||||
|
@ -27,6 +27,7 @@ except ImportError:
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils.files
|
||||
from salt.ext import six
|
||||
|
||||
# Juniper interface libraries
|
||||
# https://github.com/Juniper/py-junos-eznc
|
||||
@ -176,6 +177,10 @@ def rpc(cmd=None, dest=None, format='xml', **kwargs):
|
||||
if kwargs['__pub_arg']:
|
||||
if isinstance(kwargs['__pub_arg'][-1], dict):
|
||||
op.update(kwargs['__pub_arg'][-1])
|
||||
elif '__pub_schedule' in kwargs:
|
||||
for key, value in six.iteritems(kwargs):
|
||||
if not key.startswith('__pub_'):
|
||||
op[key] = value
|
||||
else:
|
||||
op.update(kwargs)
|
||||
op['dev_timeout'] = str(op.pop('timeout', conn.timeout))
|
||||
|
@ -40,11 +40,16 @@ import base64
|
||||
import logging
|
||||
import yaml
|
||||
import tempfile
|
||||
import signal
|
||||
from time import sleep
|
||||
from contextlib import contextmanager
|
||||
|
||||
from salt.exceptions import CommandExecutionError
|
||||
from salt.ext.six import iteritems
|
||||
import salt.utils.files
|
||||
import salt.utils.templates
|
||||
from salt.exceptions import TimeoutError
|
||||
from salt.ext.six.moves import range # pylint: disable=import-error
|
||||
|
||||
try:
|
||||
import kubernetes # pylint: disable=import-self
|
||||
@ -78,6 +83,21 @@ def __virtual__():
|
||||
return False, 'python kubernetes library not found'
|
||||
|
||||
|
||||
if not salt.utils.is_windows():
    @contextmanager
    def _time_limit(seconds):
        def signal_handler(signum, frame):
            raise TimeoutError
        signal.signal(signal.SIGALRM, signal_handler)
        signal.alarm(seconds)
        try:
            yield
        finally:
            signal.alarm(0)

POLLING_TIME_LIMIT = 30
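A short usage sketch of the ``_time_limit`` context manager defined above,
bounding a polling loop on POSIX systems (``resource_is_gone`` is a
hypothetical predicate):

.. code-block:: python

    from time import sleep

    try:
        with _time_limit(POLLING_TIME_LIMIT):
            while not resource_is_gone():  # hypothetical predicate
                sleep(1)
    except TimeoutError:
        # SIGALRM fired: stop waiting and let the caller report the timeout.
        pass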
|
||||
|
||||
# pylint: disable=no-member
|
||||
def _setup_conn(**kwargs):
|
||||
'''
|
||||
@ -692,7 +712,30 @@ def delete_deployment(name, namespace='default', **kwargs):
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
body=body)
|
||||
return api_response.to_dict()
|
||||
mutable_api_response = api_response.to_dict()
|
||||
if not salt.utils.is_windows():
|
||||
try:
|
||||
with _time_limit(POLLING_TIME_LIMIT):
|
||||
while show_deployment(name, namespace) is not None:
|
||||
sleep(1)
|
||||
else: # pylint: disable=useless-else-on-loop
|
||||
mutable_api_response['code'] = 200
|
||||
except TimeoutError:
|
||||
pass
|
||||
else:
|
||||
# Windows has no signal.alarm implementation, so we are just falling
|
||||
# back to loop-counting.
|
||||
for i in range(60):
|
||||
if show_deployment(name, namespace) is None:
|
||||
mutable_api_response['code'] = 200
|
||||
break
|
||||
else:
|
||||
sleep(1)
|
||||
if mutable_api_response['code'] != 200:
|
||||
log.warning('Reached polling time limit. Deployment is not yet '
|
||||
'deleted, but we are backing off. Sorry, but you\'ll '
|
||||
'have to check manually.')
|
||||
return mutable_api_response
|
||||
except (ApiException, HTTPError) as exc:
|
||||
if isinstance(exc, ApiException) and exc.status == 404:
|
||||
return None
|
||||
|
@ -1,6 +1,9 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Support for Linux File Access Control Lists
|
||||
|
||||
The Linux ACL module requires the `getfacl` and `setfacl` binaries.
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -356,3 +356,40 @@ def assemble(name,
|
||||
return cmd
|
||||
elif test_mode is False:
|
||||
return __salt__['cmd.run'](cmd, python_shell=False)
|
||||
|
||||
|
||||
def examine(device):
|
||||
'''
|
||||
Show detail for a specified RAID component device
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' raid.examine '/dev/sda1'
|
||||
'''
|
||||
res = __salt__['cmd.run_stdout']('mdadm -Y -E {0}'.format(device), output_loglevel='trace', python_shell=False)
|
||||
ret = {}
|
||||
|
||||
for line in res.splitlines():
|
||||
name, var = line.partition("=")[::2]
|
||||
ret[name] = var
|
||||
return ret
|
||||
|
||||
|
||||
def add(name, device):
|
||||
'''
|
||||
Add new device to RAID array.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' raid.add /dev/md0 /dev/sda1
|
||||
|
||||
'''
|
||||
|
||||
cmd = 'mdadm --manage {0} --add {1}'.format(name, device)
|
||||
if __salt__['cmd.retcode'](cmd) == 0:
|
||||
return True
|
||||
return False
|
||||
|
@ -688,11 +688,20 @@ def file_query(database, file_name, **connection_args):
|
||||
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
database
|
||||
|
||||
database to run script inside
|
||||
|
||||
file_name
|
||||
|
||||
File name of the script. This can be on the minion, or a file that is reachable by the fileserver
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
|
||||
salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql
|
||||
|
||||
Return data:
|
||||
|
||||
@ -701,6 +710,9 @@ def file_query(database, file_name, **connection_args):
|
||||
{'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
|
||||
|
||||
'''
|
||||
if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')):
|
||||
file_name = __salt__['cp.cache_file'](file_name)
|
||||
|
||||
if os.path.exists(file_name):
|
||||
with salt.utils.files.fopen(file_name, 'r') as ifile:
|
||||
contents = ifile.read()
|
||||
@ -709,7 +721,7 @@ def file_query(database, file_name, **connection_args):
|
||||
return False
|
||||
|
||||
query_string = ""
ret = {'rows returned': 0, 'columns': 0, 'results': 0, 'rows affected': 0, 'query time': {'raw': 0}}
ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
for line in contents.splitlines():
if re.match(r'--', line): # ignore sql comments
continue
@ -729,16 +741,16 @@ def file_query(database, file_name, **connection_args):
if 'rows returned' in query_result:
ret['rows returned'] += query_result['rows returned']
if 'columns' in query_result:
ret['columns'] += query_result['columns']
ret['columns'].append(query_result['columns'])
if 'results' in query_result:
ret['results'] += query_result['results']
ret['results'].append(query_result['results'])
if 'rows affected' in query_result:
ret['rows affected'] += query_result['rows affected']
ret['query time']['human'] = str(round(float(ret['query time']['raw']), 2)) + 's'
ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)

# Remove empty keys in ret
ret = dict((k, v) for k, v in six.iteritems(ret) if v)
ret = {k: v for k, v in six.iteritems(ret) if v}

return ret
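The switch from ``+=`` to ``append()`` changes the shape of the aggregate:
``+=`` splices each statement's column tuple into one flat list, while
``append()`` keeps one entry per statement. A small demonstration with two
hypothetical query results:

.. code-block:: python

    results = [{'columns': ('a', 'b'), 'results': ((1, 2),)},
               {'columns': ('c',), 'results': ((3,),)}]

    ret = {'columns': [], 'results': []}
    for query_result in results:
        ret['columns'].append(query_result['columns'])
        ret['results'].append(query_result['results'])

    assert ret['columns'] == [('a', 'b'), ('c',)]  # one entry per statement
    # with +=, this would have flattened to ['a', 'b', 'c']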
|
||||
|
@ -926,6 +926,30 @@ def get_platform():
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_security_rule(rulename=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified rule.
|
||||
|
||||
rulename(str): The name of the security rule.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_security_rule rule01
|
||||
salt '*' panos.get_security_rule rule01 3
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'rulebase/security/rules/entry[@name=\'{1}\']'.format(vsys, rulename)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_session_info():
|
||||
'''
|
||||
Show device session statistics.
|
||||
|
@ -27,10 +27,27 @@ Installation Prerequisites
|
||||
|
||||
pip install purestorage
|
||||
|
||||
- Configure Pure Storage FlashArray authentication. Use one of the following
  three methods.

  1) From the minion config

  .. code-block:: yaml

      pure_tags:
        fa:
          san_ip: management vip or hostname for the FlashArray
          api_token: A valid api token for the FlashArray being managed

  2) From environment (PUREFA_IP and PUREFA_API)
  3) From the pillar (PUREFA_IP and PUREFA_API)

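For option 2, a minimal sketch of seeding the environment before the minion
process starts (host and token are placeholders, not real credentials):

.. code-block:: python

    import os

    # Hypothetical FlashArray endpoint and API token picked up from the
    # environment when neither minion config nor pillar supplies them.
    os.environ['PUREFA_IP'] = 'flasharray.example.com'
    os.environ['PUREFA_API'] = '6e1b80a1-cd63-de90-b74a-7fc16d034016'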
:maintainer: Simon Dodsley (simon@purestorage.com)
|
||||
:maturity: new
|
||||
:requires: purestorage
|
||||
:platform: all
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
@ -192,7 +209,7 @@ def snap_create(name, suffix=None):
|
||||
|
||||
Will return False if the volume selected to snap does not exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume to snapshot
|
||||
@ -228,7 +245,7 @@ def snap_delete(name, suffix=None, eradicate=False):
|
||||
|
||||
Will return False if selected snapshot does not exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -270,7 +287,7 @@ def snap_eradicate(name, suffix=None):
|
||||
|
||||
Will return False if snapshot is not in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -303,7 +320,7 @@ def volume_create(name, size=None):
|
||||
|
||||
Will return False if volume already exists.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume (truncated to 63 characters)
|
||||
@ -341,7 +358,7 @@ def volume_delete(name, eradicate=False):
|
||||
|
||||
Will return False if volume doesn't exist or is already in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -380,7 +397,7 @@ def volume_eradicate(name):
|
||||
|
||||
Will return False if volume is not in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -410,7 +427,7 @@ def volume_extend(name, size):
|
||||
|
||||
Will return False if new size is less than or equal to existing size.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -448,7 +465,7 @@ def snap_volume_create(name, target, overwrite=False):
|
||||
Will return False if target volume already exists and
|
||||
overwrite is not specified, or selected snapshot doesn't exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume snapshot
|
||||
@ -494,7 +511,7 @@ def volume_clone(name, target, overwrite=False):
|
||||
Will return False if source volume doesn't exist, or
|
||||
target volume already exists and overwrite not specified.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -538,7 +555,7 @@ def volume_attach(name, host):
|
||||
|
||||
Host and volume must exist or else will return False.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -571,7 +588,7 @@ def volume_detach(name, host):
|
||||
Will return False if either host or volume do not exist, or
|
||||
if selected volume isn't already connected to the host.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of volume
|
||||
@ -605,7 +622,7 @@ def host_create(name, iqn=None, wwn=None):
|
||||
Fibre Channel parameters are not in a valid format.
|
||||
See Pure Storage FlashArray documentation.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of host (truncated to 63 characters)
|
||||
@ -656,7 +673,7 @@ def host_update(name, iqn=None, wwn=None):
|
||||
by another host, or are not in a valid format.
|
||||
See Pure Storage FlashArray documentation.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of host
|
||||
@ -696,7 +713,7 @@ def host_delete(name):
|
||||
|
||||
Will return False if the host doesn't exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of host
|
||||
@ -732,7 +749,7 @@ def hg_create(name, host=None, volume=None):
|
||||
Will return False if hostgroup already exists, or if
|
||||
named host or volume do not exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of hostgroup (truncated to 63 characters)
|
||||
@ -788,7 +805,7 @@ def hg_update(name, host=None, volume=None):
|
||||
Will return False if hostgroup doesn't exist, or host
|
||||
or volume do not exist.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of hostgroup
|
||||
@ -834,7 +851,7 @@ def hg_delete(name):
|
||||
|
||||
Will return False if hostgroup is already in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of hostgroup
|
||||
@ -872,7 +889,7 @@ def hg_remove(name, volume=None, host=None):
|
||||
Will return False if hostgroup does not exist, or named host or volume are
|
||||
not in the hostgroup.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of hostgroup
|
||||
@ -933,7 +950,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True):
|
||||
hostgroups, hosts or volumes
|
||||
* Named type for protection group does not exist
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of protection group
|
||||
@ -1026,7 +1043,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None):
|
||||
* Incorrect type selected for current protection group type
|
||||
* Specified type does not exist
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of protection group
|
||||
@ -1116,7 +1133,7 @@ def pg_delete(name, eradicate=False):
|
||||
|
||||
Will return False if protection group is already in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of protection group
|
||||
@ -1153,7 +1170,7 @@ def pg_eradicate(name):
|
||||
|
||||
Will return False if protection group is not in a deleted state.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of protection group
|
||||
@ -1185,7 +1202,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None):
|
||||
* Protection group does not exist
|
||||
* Specified type is not currently associated with the protection group
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name : string
|
||||
name of hostgroup
|
||||
|
@ -375,8 +375,10 @@ def list_semod():
|
||||
|
||||
def _validate_filetype(filetype):
|
||||
'''
|
||||
Checks if the given filetype is a valid SELinux filetype specification.
|
||||
Throws a SaltInvocationError if it isn't.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Checks if the given filetype is a valid SELinux filetype
|
||||
specification. Throws a SaltInvocationError if it isn't.
|
||||
'''
|
||||
if filetype not in _SELINUX_FILETYPES.keys():
|
||||
raise SaltInvocationError('Invalid filetype given: {0}'.format(filetype))
|
||||
@ -385,6 +387,8 @@ def _validate_filetype(filetype):
|
||||
|
||||
def _context_dict_to_string(context):
|
||||
'''
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Converts an SELinux file context from a dict to a string.
|
||||
'''
|
||||
return '{sel_user}:{sel_role}:{sel_type}:{sel_level}'.format(**context)
|
||||
@ -392,6 +396,8 @@ def _context_dict_to_string(context):
|
||||
|
||||
def _context_string_to_dict(context):
|
||||
'''
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Converts an SELinux file context from string to dict.
|
||||
'''
|
||||
if not re.match('[^:]+:[^:]+:[^:]+:[^:]+$', context):
|
||||
@ -406,8 +412,11 @@ def _context_string_to_dict(context):
|
||||
|
||||
def filetype_id_to_string(filetype='a'):
|
||||
'''
|
||||
Translates SELinux filetype single-letter representation
|
||||
to a more human-readable version (which is also used in `semanage fcontext -l`).
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Translates SELinux filetype single-letter representation to a more
|
||||
human-readable version (which is also used in `semanage fcontext
|
||||
-l`).
|
||||
'''
|
||||
_validate_filetype(filetype)
|
||||
return _SELINUX_FILETYPES.get(filetype, 'error')
|
||||
@ -415,20 +424,27 @@ def filetype_id_to_string(filetype='a'):
|
||||
|
||||
def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
|
||||
'''
|
||||
Returns the current entry in the SELinux policy list as a dictionary.
|
||||
Returns None if no exact match was found
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Returns the current entry in the SELinux policy list as a
|
||||
dictionary. Returns None if no exact match was found.
|
||||
|
||||
Returned keys are:
|
||||
- filespec (the name supplied and matched)
|
||||
- filetype (the descriptive name of the filetype supplied)
|
||||
- sel_user, sel_role, sel_type, sel_level (the selinux context)
|
||||
|
||||
* filespec (the name supplied and matched)
|
||||
* filetype (the descriptive name of the filetype supplied)
|
||||
* sel_user, sel_role, sel_type, sel_level (the selinux context)
|
||||
|
||||
For a more in-depth explanation of the selinux context, go to
|
||||
https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Security-Enhanced_Linux/chap-Security-Enhanced_Linux-SELinux_Contexts.html
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
filetype: The SELinux filetype specification.
|
||||
Use one of [a, f, d, c, b, s, l, p].
|
||||
See also `man semanage-fcontext`.
|
||||
Defaults to 'a' (all files)
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
filetype
|
||||
The SELinux filetype specification. Use one of [a, f, d, c, b,
|
||||
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
|
||||
(all files).
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -461,20 +477,34 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
|
||||
|
||||
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
|
||||
'''
|
||||
Sets or deletes the SELinux policy for a given filespec and other optional parameters.
|
||||
Returns the result of the call to semanage.
|
||||
Note that you don't have to remove an entry before setting a new one for a given
|
||||
filespec and filetype, as adding one with semanage automatically overwrites a
|
||||
previously configured SELinux context.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
file_type: The SELinux filetype specification.
|
||||
Use one of [a, f, d, c, b, s, l, p].
|
||||
See also ``man semanage-fcontext``.
|
||||
Defaults to 'a' (all files)
|
||||
sel_type: SELinux context type. There are many.
|
||||
sel_user: SELinux user. Use ``semanage login -l`` to determine which ones are available to you
|
||||
sel_level: The MLS range of the SELinux context.
|
||||
Sets or deletes the SELinux policy for a given filespec and other
|
||||
optional parameters.
|
||||
|
||||
Returns the result of the call to semanage.
|
||||
|
||||
Note that you don't have to remove an entry before setting a new
|
||||
one for a given filespec and filetype, as adding one with semanage
|
||||
automatically overwrites a previously configured SELinux context.
|
||||
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
file_type
|
||||
The SELinux filetype specification. Use one of [a, f, d, c, b,
|
||||
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
|
||||
(all files).
|
||||
|
||||
sel_type
|
||||
SELinux context type. There are many.
|
||||
|
||||
sel_user
|
||||
SELinux user. Use ``semanage login -l`` to determine which ones
|
||||
are available to you.
|
||||
|
||||
sel_level
|
||||
The MLS range of the SELinux context.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -500,10 +530,14 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
|
||||
|
||||
def fcontext_policy_is_applied(name, recursive=False):
|
||||
'''
|
||||
Returns an empty string if the SELinux policy for a given filespec is applied,
|
||||
returns string with differences in policy and actual situation otherwise.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
Returns an empty string if the SELinux policy for a given filespec
|
||||
is applied, returns string with differences in policy and actual
|
||||
situation otherwise.
|
||||
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -520,11 +554,17 @@ def fcontext_policy_is_applied(name, recursive=False):
|
||||
|
||||
def fcontext_apply_policy(name, recursive=False):
|
||||
'''
|
||||
Applies SElinux policies to filespec using `restorecon [-R] filespec`.
|
||||
Returns dict with changes if successful, the output of the restorecon command otherwise.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
recursive: Recursively apply SELinux policies.
|
||||
Applies SElinux policies to filespec using `restorecon [-R]
|
||||
filespec`. Returns dict with changes if successful, the output of
|
||||
the restorecon command otherwise.
|
||||
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
recursive
|
||||
Recursively apply SELinux policies.
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@ -132,7 +132,7 @@ def procs():
|
||||
uind = 0
|
||||
pind = 0
|
||||
cind = 0
|
||||
plines = __salt__['cmd.run'](__grains__['ps']).splitlines()
|
||||
plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
|
||||
guide = plines.pop(0).split()
|
||||
if 'USER' in guide:
|
||||
uind = guide.index('USER')
|
||||
@ -1417,7 +1417,7 @@ def pid(sig):
|
||||
'''
|
||||
|
||||
cmd = __grains__['ps']
|
||||
output = __salt__['cmd.run_stdout'](cmd)
|
||||
output = __salt__['cmd.run_stdout'](cmd, python_shell=True)
|
||||
|
||||
pids = ''
|
||||
for line in output.splitlines():
|
||||
|
File diff suppressed because it is too large
@ -51,7 +51,7 @@ def rehash():
|
||||
|
||||
CLI Example:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' win_path.rehash
|
||||
'''
|
||||
|
@ -1280,10 +1280,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
arguments = ['/i', cached_pkg]
if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.shlex_split(install_flags))
arguments.extend(salt.utils.shlex_split(install_flags, posix=False))
else:
cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags)
arguments = salt.utils.shlex_split(install_flags, posix=False)
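Why ``posix=False`` matters here: in POSIX mode ``shlex`` treats backslashes
as escape characters and silently mangles Windows paths embedded in the
flags. A quick check with the standard library (the flag string is a
made-up example):

.. code-block:: python

    import shlex

    flags = r'/norestart TARGETDIR=C:\Program_Files\App'
    print(shlex.split(flags))               # ['/norestart', 'TARGETDIR=C:Program_FilesApp']
    print(shlex.split(flags, posix=False))  # ['/norestart', 'TARGETDIR=C:\\Program_Files\\App']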
|
||||
# Install the software
|
||||
# Check Use Scheduler Option
|
||||
@ -1356,7 +1356,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
||||
# Launch the command
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cache_path,
|
||||
output_loglevel='quiet',
|
||||
python_shell=False,
|
||||
redirect_stderr=True)
|
||||
if not result['retcode']:
|
||||
@ -1615,19 +1614,19 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
#Compute msiexec string
|
||||
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
|
||||
|
||||
# Build cmd and arguments
|
||||
# cmd and arguments must be separated for use with the task scheduler
|
||||
if use_msiexec:
|
||||
cmd = msiexec
|
||||
arguments = ['/x']
|
||||
arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False))
|
||||
else:
|
||||
cmd = expanded_cached_pkg
|
||||
arguments = salt.utils.shlex_split(uninstall_flags, posix=False)
|
||||
|
||||
# Uninstall the software
|
||||
# Check Use Scheduler Option
|
||||
if pkginfo[target].get('use_scheduler', False):
|
||||
|
||||
# Build Scheduled Task Parameters
|
||||
if use_msiexec:
|
||||
cmd = msiexec
|
||||
arguments = ['/x']
|
||||
arguments.extend(salt.utils.args.shlex_split(uninstall_flags))
|
||||
else:
|
||||
cmd = expanded_cached_pkg
|
||||
arguments = salt.utils.args.shlex_split(uninstall_flags)
|
||||
|
||||
# Create Scheduled Task
|
||||
__salt__['task.create_task'](name='update-salt-software',
|
||||
user_name='System',
|
||||
@ -1648,16 +1647,12 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
|
||||
ret[pkgname] = {'uninstall status': 'failed'}
|
||||
else:
|
||||
# Build the install command
|
||||
cmd = []
|
||||
if use_msiexec:
|
||||
cmd.extend([msiexec, '/x', expanded_cached_pkg])
|
||||
else:
|
||||
cmd.append(expanded_cached_pkg)
|
||||
cmd.extend(salt.utils.args.shlex_split(uninstall_flags))
|
||||
cmd = [cmd]
|
||||
cmd.extend(arguments)
|
||||
|
||||
# Launch the command
|
||||
result = __salt__['cmd.run_all'](
|
||||
cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False,
|
||||
redirect_stderr=True)
|
||||
if not result['retcode']:
|
||||
|
@ -110,7 +110,7 @@ def available(software=True,
|
||||
Include software updates in the results (default is True)
|
||||
|
||||
drivers (bool):
|
||||
Include driver updates in the results (default is False)
|
||||
Include driver updates in the results (default is True)
|
||||
|
||||
summary (bool):
|
||||
- True: Return a summary of updates available for each category.
|
||||
|
@ -61,7 +61,7 @@ from salt.ext import six
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__HOLD_PATTERN = r'\w+(?:[.-][^-]+)*'
|
||||
__HOLD_PATTERN = r'[\w+]+(?:[.-][^-]+)*'
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'pkg'
|
||||
@ -1347,6 +1347,7 @@ def install(name=None,
|
||||
to_install = []
|
||||
to_downgrade = []
|
||||
to_reinstall = []
|
||||
_available = {}
|
||||
# The above three lists will be populated with tuples containing the
|
||||
# package name and the string being used for this particular package
|
||||
# modification. The reason for this method is that the string we use for
|
||||
|
@ -77,6 +77,9 @@ def __virtual__():
|
||||
) == 0:
|
||||
return 'zfs'
|
||||
|
||||
if __grains__['kernel'] == 'OpenBSD':
|
||||
return False
|
||||
|
||||
_zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse')
|
||||
if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')):
|
||||
return 'zfs'
|
||||
|
@ -185,7 +185,7 @@ def lock_holders(path,
|
||||
|
||||
Example:
|
||||
|
||||
... code-block: bash
|
||||
.. code-block: bash
|
||||
|
||||
salt minion zk_concurrency.lock_holders /lock/path host1:1234,host2:1234
|
||||
'''
|
||||
@ -237,7 +237,7 @@ def lock(path,
|
||||
|
||||
Example:
|
||||
|
||||
... code-block: bash
|
||||
.. code-block: bash
|
||||
|
||||
salt minion zk_concurrency.lock /lock/path host1:1234,host2:1234
|
||||
'''
|
||||
@ -298,7 +298,7 @@ def unlock(path,
|
||||
|
||||
Example:
|
||||
|
||||
... code-block: bash
|
||||
.. code-block: bash
|
||||
|
||||
salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234
|
||||
'''
|
||||
@ -348,7 +348,7 @@ def party_members(path,
|
||||
|
||||
Example:
|
||||
|
||||
... code-block: bash
|
||||
.. code-block: bash
|
||||
|
||||
salt minion zk_concurrency.party_members /lock/path host1:1234,host2:1234
|
||||
salt minion zk_concurrency.party_members /lock/path host1:1234,host2:1234 min_nodes=3 blocking=True
|
||||
|
163
salt/pillar/rethinkdb_pillar.py
Normal file
@ -0,0 +1,163 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Provide external pillar data from RethinkDB
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
:depends: rethinkdb (on the salt-master)
|
||||
|
||||
|
||||
salt master rethinkdb configuration
|
||||
===================================
|
||||
These variables must be configured in your master configuration file.
|
||||
* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
|
||||
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
|
||||
Defaults to ``'28015'``
|
||||
* ``rethinkdb.database`` - The database to connect to.
|
||||
Defaults to ``'salt'``
|
||||
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
|
||||
Defaults to ``''``
|
||||
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
|
||||
Defaults to ``''``
|
||||
|
||||
|
||||
salt-master ext_pillar configuration
|
||||
====================================
|
||||
|
||||
The ext_pillar function arguments are given in single line dictionary notation.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}
|
||||
|
||||
In the example above the following happens (see the sketch below):
* The salt-master will look for external pillars in the 'ext_pillar' table
  on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'

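For illustration, a document that would satisfy the configuration above
might look like this (the minion id and pillar values are hypothetical):

.. code-block:: python

    # One row in the 'ext_pillar' table:
    doc = {
        'minion_id': 'web01',                    # matched against id_field
        'pillar_root': {'app': {'port': 8080}},  # nested field used as pillar data
    }
    # Resulting minion pillar after the merge:
    # {'external_pillar': {'app': {'port': 8080}}}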
|
||||
Module Documentation
|
||||
====================
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libraries
|
||||
import logging
|
||||
|
||||
# Import 3rd party libraries
|
||||
try:
|
||||
import rethinkdb
|
||||
HAS_RETHINKDB = True
|
||||
except ImportError:
|
||||
HAS_RETHINKDB = False
|
||||
|
||||
__virtualname__ = 'rethinkdb'
|
||||
|
||||
__opts__ = {
|
||||
'rethinkdb.host': 'salt',
|
||||
'rethinkdb.port': '28015',
|
||||
'rethinkdb.database': 'salt',
|
||||
'rethinkdb.username': None,
|
||||
'rethinkdb.password': None
|
||||
}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if not HAS_RETHINKDB:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# Configure logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ext_pillar(minion_id,
|
||||
pillar,
|
||||
table='pillar',
|
||||
id_field=None,
|
||||
field=None,
|
||||
pillar_key=None):
|
||||
'''
|
||||
Collect minion external pillars from a RethinkDB database
|
||||
|
||||
Arguments:
|
||||
* `table`: The RethinkDB table containing external pillar information.
|
||||
Defaults to ``'pillar'``
|
||||
* `id_field`: Field in document containing the minion id.
|
||||
If blank then we assume the table index matches minion ids
|
||||
* `field`: Specific field in the document used for pillar data, if blank
|
||||
then the entire document will be used
|
||||
* `pillar_key`: The salt-master will nest found external pillars under
|
||||
this key before merging into the minion pillars. If blank, external
|
||||
pillars will be merged at top level
|
||||
'''
|
||||
host = __opts__['rethinkdb.host']
|
||||
port = __opts__['rethinkdb.port']
|
||||
database = __opts__['rethinkdb.database']
|
||||
username = __opts__['rethinkdb.username']
|
||||
password = __opts__['rethinkdb.password']
|
||||
|
||||
log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar'
|
||||
.format(host, port, username))
|
||||
|
||||
# Connect to the database
|
||||
conn = rethinkdb.connect(host=host,
|
||||
port=port,
|
||||
db=database,
|
||||
user=username,
|
||||
password=password)
|
||||
|
||||
data = None
|
||||
|
||||
try:
|
||||
|
||||
if id_field:
|
||||
log.debug('ext_pillar.rethinkdb: looking up pillar. '
|
||||
'table: {0}, field: {1}, minion: {2}'.format(
|
||||
table, id_field, minion_id))
|
||||
|
||||
if field:
|
||||
data = rethinkdb.table(table).filter(
|
||||
{id_field: minion_id}).pluck(field).run(conn)
|
||||
else:
|
||||
data = rethinkdb.table(table).filter(
|
||||
{id_field: minion_id}).run(conn)
|
||||
|
||||
else:
|
||||
log.debug('ext_pillar.rethinkdb: looking up pillar. '
|
||||
'table: {0}, field: id, minion: {1}'.format(
|
||||
table, minion_id))
|
||||
|
||||
if field:
|
||||
data = rethinkdb.table(table).get(minion_id).pluck(field).run(
|
||||
conn)
|
||||
else:
|
||||
data = rethinkdb.table(table).get(minion_id).run(conn)
|
||||
|
||||
finally:
|
||||
if conn.is_open():
|
||||
conn.close()
|
||||
|
||||
if data.items:
|
||||
|
||||
# Return nothing if multiple documents are found for a minion
|
||||
if len(data.items) > 1:
|
||||
log.error('ext_pillar.rethinkdb: ambiguous documents found for '
|
||||
'minion {0}'.format(minion_id))
|
||||
return {}
|
||||
|
||||
else:
|
||||
result = data.items.pop()
|
||||
|
||||
if pillar_key:
|
||||
return {pillar_key: result}
|
||||
return result
|
||||
|
||||
else:
|
||||
# No document found in the database
|
||||
log.debug('ext_pillar.rethinkdb: no document found')
|
||||
return {}
|
@ -153,6 +153,7 @@ import os
|
||||
# Import Salt Libs
|
||||
import salt.exceptions
|
||||
from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema
|
||||
from salt.utils.dictupdate import merge
|
||||
|
||||
# This must be present or the Salt loader won't load this module.
|
||||
__proxyenabled__ = ['esxdatacenter']
|
||||
@ -195,42 +196,44 @@ def init(opts):
|
||||
log.trace('Validating esxdatacenter proxy input')
|
||||
schema = EsxdatacenterProxySchema.serialize()
|
||||
log.trace('schema = {}'.format(schema))
|
||||
proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
|
||||
log.trace('proxy_conf = {0}'.format(proxy_conf))
|
||||
try:
|
||||
jsonschema.validate(opts['proxy'], schema)
|
||||
jsonschema.validate(proxy_conf, schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise salt.exceptions.InvalidConfigError(exc)
|
||||
|
||||
# Save mandatory fields in cache
|
||||
for key in ('vcenter', 'datacenter', 'mechanism'):
|
||||
DETAILS[key] = opts['proxy'][key]
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
|
||||
# Additional validation
|
||||
if DETAILS['mechanism'] == 'userpass':
|
||||
if 'username' not in opts['proxy']:
|
||||
if 'username' not in proxy_conf:
|
||||
raise salt.exceptions.InvalidConfigError(
|
||||
'Mechanism is set to \'userpass\', but no '
|
||||
'\'username\' key found in proxy config.')
|
||||
if 'passwords' not in opts['proxy']:
|
||||
if 'passwords' not in proxy_conf:
|
||||
raise salt.exceptions.InvalidConfigError(
|
||||
'Mechanism is set to \'userpass\', but no '
|
||||
'\'passwords\' key found in proxy config.')
|
||||
for key in ('username', 'passwords'):
|
||||
DETAILS[key] = opts['proxy'][key]
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
else:
|
||||
if 'domain' not in opts['proxy']:
|
||||
if 'domain' not in proxy_conf:
|
||||
raise salt.exceptions.InvalidConfigError(
|
||||
'Mechanism is set to \'sspi\', but no '
|
||||
'\'domain\' key found in proxy config.')
|
||||
if 'principal' not in opts['proxy']:
|
||||
if 'principal' not in proxy_conf:
|
||||
raise salt.exceptions.InvalidConfigError(
|
||||
'Mechanism is set to \'sspi\', but no '
|
||||
'\'principal\' key found in proxy config.')
|
||||
for key in ('domain', 'principal'):
|
||||
DETAILS[key] = opts['proxy'][key]
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
|
||||
# Save optional
|
||||
DETAILS['protocol'] = opts['proxy'].get('protocol')
|
||||
DETAILS['port'] = opts['proxy'].get('port')
|
||||
DETAILS['protocol'] = proxy_conf.get('protocol')
|
||||
DETAILS['port'] = proxy_conf.get('port')
|
||||
|
||||
# Test connection
|
||||
if DETAILS['mechanism'] == 'userpass':
|
||||
|
@ -37,7 +37,6 @@ Run the salt proxy via the following command:
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import logging
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -47,6 +46,11 @@ try:
|
||||
import jnpr.junos.utils
|
||||
import jnpr.junos.utils.config
|
||||
import jnpr.junos.utils.sw
|
||||
from jnpr.junos.exception import RpcTimeoutError
|
||||
from jnpr.junos.exception import ConnectClosedError
|
||||
from jnpr.junos.exception import RpcError
|
||||
from jnpr.junos.exception import ConnectError
|
||||
from ncclient.operations.errors import TimeoutExpiredError
|
||||
except ImportError:
|
||||
HAS_JUNOS = False
|
||||
|
||||
@ -118,11 +122,31 @@ def conn():
|
||||
|
||||
def alive(opts):
|
||||
'''
|
||||
Return the connection status with the remote device.
|
||||
Validate and return the connection status with the remote device.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
'''
|
||||
return thisproxy['conn'].connected
|
||||
|
||||
dev = conn()
|
||||
|
||||
# Check that the underlying netconf connection still exists.
|
||||
if dev._conn is None:
|
||||
return False
|
||||
|
||||
# call rpc only if the ncclient queue is empty; if it is not empty,
# another rpc call is in progress.
|
||||
if hasattr(dev._conn, '_session'):
|
||||
if dev._conn._session._transport.is_active():
|
||||
# there is no on going rpc call.
|
||||
if dev._conn._session._q.empty():
|
||||
thisproxy['conn'].connected = ping()
|
||||
else:
|
||||
# ssh connection is lost
|
||||
dev.connected = False
|
||||
else:
|
||||
# other connection modes, like telnet
|
||||
thisproxy['conn'].connected = ping()
|
||||
return dev.connected
|
||||
|
||||
|
||||
def proxytype():
|
||||
@ -150,7 +174,16 @@ def ping():
|
||||
'''
|
||||
Ping? Pong!
|
||||
'''
|
||||
return thisproxy['conn'].connected
|
||||
|
||||
dev = conn()
|
||||
try:
|
||||
dev.rpc.file_list(path='/dev/null', dev_timeout=2)
|
||||
return True
|
||||
except (RpcTimeoutError, ConnectClosedError):
|
||||
try:
|
||||
dev.close()
|
||||
except (RpcError, ConnectError, TimeoutExpiredError):
|
||||
return False
|
||||
|
||||
|
||||
def shutdown(opts):
|
||||
|
@ -22,8 +22,7 @@ To set things up, first generate a keypair. On the master, run the following:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# salt-call --local nacl.keygen keyfile=/root/.nacl
|
||||
# salt-call --local nacl.keygen_pub keyfile_pub=/root/.nacl.pub
|
||||
# salt-call --local nacl.keygen sk_file=/root/.nacl
|
||||
|
||||
|
||||
Using encrypted pillar
|
||||
@ -33,7 +32,7 @@ To encrypt secrets, copy the public key to your local machine and run:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ salt-call --local nacl.enc_pub datatoenc keyfile_pub=/root/.nacl.pub
|
||||
$ salt-call --local nacl.enc datatoenc pk_file=/root/.nacl.pub
|
||||
|
||||
|
||||
To apply the renderer on a file-by-file basis add the following line to the
|
||||
@ -80,7 +79,7 @@ def _decrypt_object(obj, **kwargs):
|
||||
return _decrypt_object(obj.getvalue(), **kwargs)
|
||||
if isinstance(obj, six.string_types):
|
||||
if re.search(NACL_REGEX, obj) is not None:
|
||||
return __salt__['nacl.dec_pub'](re.search(NACL_REGEX, obj).group(1), **kwargs)
|
||||
return __salt__['nacl.dec'](re.search(NACL_REGEX, obj).group(1), **kwargs)
|
||||
else:
|
||||
return obj
|
||||
elif isinstance(obj, dict):
|
||||
|
@ -26,6 +26,7 @@ import os
|
||||
|
||||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
import salt.utils.files
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
@ -55,8 +56,20 @@ def zbx():
|
||||
|
||||
|
||||
def zabbix_send(key, host, output):
    cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\""
    __salt__['cmd.shell'](cmd)
    with salt.utils.fopen(zbx()['zabbix_config'], 'r') as file_handle:
        for line in file_handle:
            if "ServerActive" in line:
                flag = "true"
                server = line.rsplit('=')
                server = server[1].rsplit(',')
                for s in server:
                    cmd = zbx()['sender'] + " -z " + s.replace('\n', '') + " -s " + host + " -k " + key + " -o \"" + output +"\""
                    __salt__['cmd.shell'](cmd)
                break
            else:
                flag = "false"
    if flag == 'false':
        cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\""
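The parsing above targets ``zabbix_agentd.conf`` lines of the form
``ServerActive=host1,host2``. A quick check of the split logic in isolation
(hostnames are made up):

.. code-block:: python

    line = 'ServerActive=zbx1.example.com,zbx2.example.com\n'
    server = line.rsplit('=')[1].rsplit(',')
    print([s.replace('\n', '') for s in server])
    # ['zbx1.example.com', 'zbx2.example.com']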
|
||||
|
||||
def returner(ret):
|
||||
|
@ -28,6 +28,11 @@ def update(branch=None, repo=None):
|
||||
fetched, and ``False`` if there were errors or no new commits were
|
||||
fetched.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The return for a given git_pillar remote will now be ``None`` when no
|
||||
changes were fetched. ``False`` now is reserved only for instances in
|
||||
which there were errors.
|
||||
|
||||
Fetch one or all configured git_pillar remotes.
|
||||
|
||||
.. note::
|
||||
|
@ -6,9 +6,10 @@ Module for sending messages to Mattermost

:configuration: This module can be used by either passing an api_url and hook
    directly or by specifying both in a configuration profile in the salt
    master/minion config.
    For example:
    master/minion config. For example:

    .. code-block:: yaml

        mattermost:
          hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
          api_url: https://example.com
@ -25,14 +25,16 @@ import salt.client
import salt.config
import salt.loader
import salt.cache
import salt.utils.files
import salt.utils.http as http
import salt.syspaths as syspaths
from salt.ext import six
from salt.ext.six import string_types
from salt.ext.six.moves import input
from salt.ext.six.moves import filter
from salt.template import compile_template
import salt.utils.files
import salt.utils.http as http
import salt.utils.platform
import salt.utils.win_functions
from salt.utils.yamldumper import SafeOrderedDumper

# Get logging started
@ -493,9 +495,7 @@ class SPMClient(object):

        # No defaults for this in config.py; default to the current running
        # user and group
        import salt.utils
        if salt.utils.is_windows():
            import salt.utils.win_functions
        if salt.utils.platform.is_windows():
            uname = gname = salt.utils.win_functions.get_current_user()
            uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
            uid = self.opts.get('spm_uid', uname_sid)
@ -64,16 +64,30 @@ def _gen_checksum(path):
            'hash_type': __opts__['hash_type']}


def _update_checksum(cached_source):
    cached_source_sum = '.'.join((cached_source, 'hash'))
    source_sum = _gen_checksum(cached_source)
def _checksum_file_path(path):
    relpath = '.'.join((os.path.relpath(path, __opts__['cachedir']), 'hash'))
    if re.match(r'..[/\\]', relpath):
        # path is a local file
        relpath = salt.utils.path.join(
            'local',
            os.path.splitdrive(path)[-1].lstrip('/\\'),
        )
    return salt.utils.path.join(__opts__['cachedir'], 'archive_hash', relpath)
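
A minimal standalone sketch of the path mapping ``_checksum_file_path``
performs, assuming a hard-coded cache directory in place of
``__opts__['cachedir']`` (all names below are illustrative):

.. code-block:: python

    import os
    import re

    CACHEDIR = '/var/cache/salt/minion'  # stand-in for __opts__['cachedir']

    def checksum_file_path(path):
        # Files already under the cachedir keep their relative layout;
        # anything outside it (a local archive) is filed under 'local/'.
        relpath = '.'.join((os.path.relpath(path, CACHEDIR), 'hash'))
        if re.match(r'..[/\\]', relpath):
            relpath = os.path.join(
                'local', os.path.splitdrive(path)[-1].lstrip('/\\'))
        return os.path.join(CACHEDIR, 'archive_hash', relpath)

    # A cached archive maps alongside the cache, a local one under 'local/':
    print(checksum_file_path(CACHEDIR + '/files/base/archive.tar.gz'))
    print(checksum_file_path('/srv/archives/archive.tar.gz'))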


def _update_checksum(path):
    checksum_file = _checksum_file_path(path)
    checksum_dir = os.path.dirname(checksum_file)
    if not os.path.isdir(checksum_dir):
        os.makedirs(checksum_dir)
    source_sum = _gen_checksum(path)
    hash_type = source_sum.get('hash_type')
    hsum = source_sum.get('hsum')
    if hash_type and hsum:
        lines = []
        try:
            try:
                with salt.utils.files.fopen(cached_source_sum, 'r') as fp_:
                with salt.utils.files.fopen(checksum_file, 'r') as fp_:
                    for line in fp_:
                        try:
                            lines.append(line.rstrip('\n').split(':', 1))
@ -83,7 +97,7 @@ def _update_checksum(cached_source):
            if exc.errno != errno.ENOENT:
                raise

            with salt.utils.files.fopen(cached_source_sum, 'w') as fp_:
            with salt.utils.files.fopen(checksum_file, 'w') as fp_:
                for line in lines:
                    if line[0] == hash_type:
                        line[1] = hsum
@ -93,16 +107,16 @@ def _update_checksum(cached_source):
    except (IOError, OSError) as exc:
        log.warning(
            'Failed to update checksum for %s: %s',
            cached_source, exc.__str__()
            path, exc.__str__(), exc_info=True
        )


def _read_cached_checksum(cached_source, form=None):
def _read_cached_checksum(path, form=None):
    if form is None:
        form = __opts__['hash_type']
    path = '.'.join((cached_source, 'hash'))
    checksum_file = _checksum_file_path(path)
    try:
        with salt.utils.files.fopen(path, 'r') as fp_:
        with salt.utils.files.fopen(checksum_file, 'r') as fp_:
            for line in fp_:
                # Should only be one line in this file but just in case it
                # isn't, read only a single line to avoid overuse of memory.
@ -117,9 +131,9 @@ def _read_cached_checksum(cached_source, form=None):
    return {'hash_type': hash_type, 'hsum': hsum}


def _compare_checksum(cached_source, source_sum):
def _compare_checksum(cached, source_sum):
    cached_sum = _read_cached_checksum(
        cached_source,
        cached,
        form=source_sum.get('hash_type', __opts__['hash_type'])
    )
    return source_sum == cached_sum
@ -155,7 +169,6 @@ def extracted(name,
              user=None,
              group=None,
              if_missing=None,
              keep=False,
              trim_output=False,
              use_cmd_unzip=None,
              extract_perms=True,
@ -391,6 +404,22 @@ def extracted(name,

    .. versionadded:: 2016.3.4

    keep_source : True
        For ``source`` archives not local to the minion (i.e. from the Salt
        fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt
        will need to download the archive to the minion cache before it can
        be extracted. To remove the downloaded archive after extraction, set
        this argument to ``False``.

        .. versionadded:: 2017.7.3

    keep : True
        Same as ``keep_source``, kept for backward-compatibility.

        .. note::
            If both ``keep_source`` and ``keep`` are used, ``keep`` will be
            ignored.

    password
        **For ZIP archives only.** Password used for extraction.

@ -518,13 +547,6 @@ def extracted(name,
        simply checked for existence and extraction will be skipped if
        it is present.

    keep : False
        For ``source`` archives not local to the minion (i.e. from the Salt
        fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt
        will need to download the archive to the minion cache before it can
        be extracted. After extraction, these source archives will be removed
        unless this argument is set to ``True``.

    trim_output : False
        Useful for archives with many files in them. This can either be set to
        ``True`` (in which case only the first 100 files extracted will be
@ -626,6 +648,36 @@ def extracted(name,
    # Remove pub kwargs as they're irrelevant here.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    if 'keep_source' in kwargs and 'keep' in kwargs:
        ret.setdefault('warnings', []).append(
            'Both \'keep_source\' and \'keep\' were used. Since these both '
            'do the same thing, \'keep\' was ignored.'
        )
        keep_source = bool(kwargs.pop('keep_source'))
        kwargs.pop('keep')
    elif 'keep_source' in kwargs:
        keep_source = bool(kwargs.pop('keep_source'))
    elif 'keep' in kwargs:
        keep_source = bool(kwargs.pop('keep'))
    else:
        # Neither was passed, default is True
        keep_source = True

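The precedence logic above is easiest to see in isolation. A minimal sketch,
assuming a plain dict in place of the state's keyword arguments:

.. code-block:: python

    def resolve_keep(kwargs):
        # Mirrors the precedence above: keep_source wins over keep; both
        # default to True when neither is passed.
        if 'keep_source' in kwargs and 'keep' in kwargs:
            kwargs.pop('keep')  # ignored when both are present
            return bool(kwargs.pop('keep_source'))
        if 'keep_source' in kwargs:
            return bool(kwargs.pop('keep_source'))
        if 'keep' in kwargs:
            return bool(kwargs.pop('keep'))
        return True

    print(resolve_keep({'keep_source': False, 'keep': True}))  # False
    print(resolve_keep({}))                                    # True
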
    if not _path_is_abs(name):
        ret['comment'] = '{0} is not an absolute path'.format(name)
        return ret
@ -721,10 +773,10 @@ def extracted(name,
    urlparsed_source = _urlparse(source_match)
    source_hash_basename = urlparsed_source.path or urlparsed_source.netloc

    source_is_local = urlparsed_source.scheme in ('', 'file')
    source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS
    if source_is_local:
        # Get rid of "file://" from start of source_match
        source_match = urlparsed_source.path
        source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path))
        if not os.path.isfile(source_match):
            ret['comment'] = 'Source file \'{0}\' does not exist'.format(source_match)
            return ret
@ -858,95 +910,59 @@ def extracted(name,
        source_sum = {}

    if source_is_local:
        cached_source = source_match
        cached = source_match
    else:
        cached_source = os.path.join(
            __opts__['cachedir'],
            'files',
            __env__,
            re.sub(r'[:/\\]', '_', source_hash_basename),
        )

        if os.path.isdir(cached_source):
            # Prevent a traceback from attempting to read from a directory path
            salt.utils.files.rm_rf(cached_source)

        existing_cached_source_sum = _read_cached_checksum(cached_source)

    if source_is_local:
        # No need to download archive, it's local to the minion
        update_source = False
    else:
        if not os.path.isfile(cached_source):
            # Archive not cached, we need to download it
            update_source = True
        else:
            # Archive is cached, keep=True likely used in prior run. If we need
            # to verify the hash, then we *have* to update the source archive
            # to know whether or not the hash changed. Hence the below
            # statement. bool(source_hash) will be True if source_hash was
            # passed, and otherwise False.
            update_source = bool(source_hash)

    if update_source:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = (
                'Archive {0} would be downloaded to cache and checked to '
                'discover if extraction is necessary'.format(
                'Archive {0} would be cached (if necessary) and checked to '
                'discover if extraction is needed'.format(
                    salt.utils.url.redact_http_basic_auth(source_match)
                )
            )
            return ret

        # NOTE: This will result in more than one copy of the source archive on
        # the minion. The reason this is necessary is because if we are
        # tracking the checksum using source_hash_update, we need a location
        # where we can place the checksum file alongside the cached source
        # file, where it won't be overwritten by caching a file with the same
        # name in the same parent dir as the source file. Long term, we should
        # come up with a better solution for this.
        file_result = __states__['file.managed'](cached_source,
                                                 source=source_match,
                                                 source_hash=source_hash,
                                                 source_hash_name=source_hash_name,
                                                 makedirs=True,
                                                 skip_verify=skip_verify)
        log.debug('file.managed: {0}'.format(file_result))

        # Prevent a traceback if errors prevented the above state from getting
        # off the ground.
        if isinstance(file_result, list):
            try:
                ret['comment'] = '\n'.join(file_result)
            except TypeError:
                ret['comment'] = '\n'.join([str(x) for x in file_result])
        if 'file.cached' not in __states__:
            # Shouldn't happen unless there is a traceback keeping
            # salt/states/file.py from being processed through the loader. If
            # that is the case, we have much more important problems as _all_
            # file states would be unavailable.
            ret['comment'] = (
                'Unable to cache {0}, file.cached state not available'.format(
                    source_match
                )
            )
            return ret

        try:
            if not file_result['result']:
                log.debug(
                    'failed to download %s',
                    salt.utils.url.redact_http_basic_auth(source_match)
                )
                return file_result
        except TypeError:
            if not file_result:
                log.debug(
                    'failed to download %s',
                    salt.utils.url.redact_http_basic_auth(source_match)
                )
                return file_result
            result = __states__['file.cached'](source_match,
                                               source_hash=source_hash,
                                               source_hash_name=source_hash_name,
                                               skip_verify=skip_verify,
                                               saltenv=__env__)
        except Exception as exc:
            msg = 'Failed to cache {0}: {1}'.format(source_match, exc.__str__())
            log.exception(msg)
            ret['comment'] = msg
            return ret
        else:
            log.debug('file.cached: {0}'.format(result))

    else:
        log.debug(
            'Archive %s is already in cache',
            salt.utils.url.redact_http_basic_auth(source_match)
        )

    if result['result']:
        # Get the path of the file in the minion cache
        cached = __salt__['cp.is_cached'](source_match)
    else:
        log.debug(
            'failed to download %s',
            salt.utils.url.redact_http_basic_auth(source_match)
        )
        return result

    existing_cached_source_sum = _read_cached_checksum(cached)

    if source_hash and source_hash_update and not skip_verify:
        # Create local hash sum file if we're going to track sum update
        _update_checksum(cached_source)
        _update_checksum(cached)

    if archive_format == 'zip' and not password:
        log.debug('Checking %s to see if it is password-protected',
@ -955,7 +971,7 @@ def extracted(name,
        # implicitly enabled by setting the "options" argument.
        try:
            encrypted_zip = __salt__['archive.is_encrypted'](
                cached_source,
                cached,
                clean=False,
                saltenv=__env__)
        except CommandExecutionError:
@ -973,7 +989,7 @@ def extracted(name,
            return ret

    try:
        contents = __salt__['archive.list'](cached_source,
        contents = __salt__['archive.list'](cached,
                                            archive_format=archive_format,
                                            options=list_options,
                                            strip_components=strip_components,
@ -1142,7 +1158,7 @@ def extracted(name,
    if not extraction_needed \
            and source_hash_update \
            and existing_cached_source_sum is not None \
            and not _compare_checksum(cached_source, existing_cached_source_sum):
            and not _compare_checksum(cached, existing_cached_source_sum):
        extraction_needed = True
        source_hash_trigger = True
    else:
@ -1200,13 +1216,13 @@ def extracted(name,
            __states__['file.directory'](name, user=user, makedirs=True)
            created_destdir = True

        log.debug('Extracting {0} to {1}'.format(cached_source, name))
        log.debug('Extracting {0} to {1}'.format(cached, name))
        try:
            if archive_format == 'zip':
                if use_cmd_unzip:
                    try:
                        files = __salt__['archive.cmd_unzip'](
                            cached_source,
                            cached,
                            name,
                            options=options,
                            trim_output=trim_output,
@ -1216,7 +1232,7 @@ def extracted(name,
                        ret['comment'] = exc.strerror
                        return ret
                else:
                    files = __salt__['archive.unzip'](cached_source,
                    files = __salt__['archive.unzip'](cached,
                                                      name,
                                                      options=options,
                                                      trim_output=trim_output,
@ -1225,7 +1241,7 @@ def extracted(name,
                                                      **kwargs)
            elif archive_format == 'rar':
                try:
                    files = __salt__['archive.unrar'](cached_source,
                    files = __salt__['archive.unrar'](cached,
                                                      name,
                                                      trim_output=trim_output,
                                                      **kwargs)
@ -1235,7 +1251,7 @@ def extracted(name,
            else:
                if options is None:
                    try:
                        with closing(tarfile.open(cached_source, 'r')) as tar:
                        with closing(tarfile.open(cached, 'r')) as tar:
                            tar.extractall(name)
                            files = tar.getnames()
                            if trim_output:
@ -1243,7 +1259,7 @@ def extracted(name,
                    except tarfile.ReadError:
                        if salt.utils.path.which('xz'):
                            if __salt__['cmd.retcode'](
                                    ['xz', '-t', cached_source],
                                    ['xz', '-t', cached],
                                    python_shell=False,
                                    ignore_retcode=True) == 0:
                                # XZ-compressed data
@ -1259,7 +1275,7 @@ def extracted(name,
                                # pipe it to tar for extraction.
                                cmd = 'xz --decompress --stdout {0} | tar xvf -'
                                results = __salt__['cmd.run_all'](
                                    cmd.format(_cmd_quote(cached_source)),
                                    cmd.format(_cmd_quote(cached)),
                                    cwd=name,
                                    python_shell=True)
                                if results['retcode'] != 0:
@ -1329,7 +1345,7 @@ def extracted(name,

        tar_cmd.append(tar_shortopts)
        tar_cmd.extend(tar_longopts)
        tar_cmd.extend(['-f', cached_source])
        tar_cmd.extend(['-f', cached])

        results = __salt__['cmd.run_all'](tar_cmd,
                                          cwd=name,
@ -1500,18 +1516,15 @@ def extracted(name,
        for item in enforce_failed:
            ret['comment'] += '\n- {0}'.format(item)

    if not source_is_local and not keep:
        for path in (cached_source, __salt__['cp.is_cached'](source_match)):
            if not path:
                continue
            log.debug('Cleaning cached source file %s', path)
            try:
                os.remove(path)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    log.error(
                        'Failed to clean cached source file %s: %s',
                        cached_source, exc.__str__()
                    )
    if not source_is_local:
        if keep_source:
            log.debug('Keeping cached source file %s', cached)
        else:
            log.debug('Cleaning cached source file %s', cached)
            result = __states__['file.not_cached'](source_match, saltenv=__env__)
            if not result['result']:
                # Don't let failure to delete cached file cause the state
                # itself to fail, just drop it in the warnings.
                ret.setdefault('warnings', []).append(result['comment'])

    return ret

@ -517,7 +517,8 @@ def register_instances(name, instances, region=None, key=None, keyid=None,

    health = __salt__['boto_elb.get_instance_health'](
        name, region, key, keyid, profile)
    nodes = [value['instance_id'] for value in health]
    nodes = [value['instance_id'] for value in health
             if value['description'] != 'Instance deregistration currently in progress.']
    new = [value for value in instances if value not in nodes]
    if not len(new):
        msg = 'Instance/s {0} already exist.'.format(str(instances).strip('[]'))
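
A quick sketch of the new health filter with made-up
``get_instance_health`` output: an instance that is mid-deregistration no
longer counts as registered, so it is picked up for re-registration:

.. code-block:: python

    # Illustrative data; the real values come from boto_elb.
    health = [
        {'instance_id': 'i-111', 'description': 'N/A'},
        {'instance_id': 'i-222',
         'description': 'Instance deregistration currently in progress.'},
    ]
    instances = ['i-111', 'i-222']

    nodes = [v['instance_id'] for v in health
             if v['description'] != 'Instance deregistration currently in progress.']
    new = [v for v in instances if v not in nodes]
    print(new)  # ['i-222'] -- will be registered again
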
@ -97,29 +97,61 @@ def installed(name, version=None, source=None, force=False, pre_versions=False,
            ret['changes'] = {name: 'Version {0} will be installed'
                                    ''.format(version)}
        else:
            ret['changes'] = {name: 'Will be installed'}
            ret['changes'] = {name: 'Latest version will be installed'}

    # Package installed
    else:
        version_info = __salt__['chocolatey.version'](name, check_remote=True)

        full_name = name
        lower_name = name.lower()
        for pkg in version_info:
            if lower_name == pkg.lower():
            if name.lower() == pkg.lower():
                full_name = pkg

        available_version = version_info[full_name]['available'][0]
        version = version if version else available_version
        installed_version = version_info[full_name]['installed'][0]

        if force:
            ret['changes'] = {name: 'Version {0} will be forcibly installed'
                                    ''.format(version)}
        elif allow_multiple:
            ret['changes'] = {name: 'Version {0} will be installed side by side'
                                    ''.format(version)}
        if version:
            if salt.utils.compare_versions(
                    ver1=installed_version, oper="==", ver2=version):
                if force:
                    ret['changes'] = {
                        name: 'Version {0} will be reinstalled'.format(version)}
                    ret['comment'] = 'Reinstall {0} {1}' \
                                     ''.format(full_name, version)
                else:
                    ret['comment'] = '{0} {1} is already installed' \
                                     ''.format(name, version)
                    if __opts__['test']:
                        ret['result'] = None
                    return ret
            else:
                if allow_multiple:
                    ret['changes'] = {
                        name: 'Version {0} will be installed side by side with '
                              'Version {1} if supported'
                              ''.format(version, installed_version)}
                    ret['comment'] = 'Install {0} {1} side-by-side with {0} {2}' \
                                     ''.format(full_name, version, installed_version)
                else:
                    ret['changes'] = {
                        name: 'Version {0} will be installed over Version {1} '
                              ''.format(version, installed_version)}
                    ret['comment'] = 'Install {0} {1} over {0} {2}' \
                                     ''.format(full_name, version, installed_version)
                    force = True
        else:
            ret['comment'] = 'The Package {0} is already installed'.format(name)
            return ret
            version = installed_version
            if force:
                ret['changes'] = {
                    name: 'Version {0} will be reinstalled'.format(version)}
                ret['comment'] = 'Reinstall {0} {1}' \
                                 ''.format(full_name, version)
            else:
                ret['comment'] = '{0} {1} is already installed' \
                                 ''.format(name, version)
                if __opts__['test']:
                    ret['result'] = None
                return ret

    if __opts__['test']:
        ret['result'] = None

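The reworked state starts by normalizing the package name case-insensitively
against the ``chocolatey.version`` result. A standalone sketch with made-up
data in place of that result:

.. code-block:: python

    # Illustrative chocolatey.version output; keys keep Chocolatey's casing.
    version_info = {'GoogleChrome': {'installed': ['61.0'],
                                     'available': ['62.0']}}
    name = 'googlechrome'

    full_name = name
    for pkg in version_info:
        if name.lower() == pkg.lower():
            full_name = pkg

    print(full_name)                                # GoogleChrome
    print(version_info[full_name]['installed'][0])  # 61.0
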
717
salt/states/dvs.py
Normal file
@ -0,0 +1,717 @@
# -*- coding: utf-8 -*-
'''
Manage VMware distributed virtual switches (DVSs) and their distributed virtual
portgroups (DVportgroups).

:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstaley.com>`

Examples
========

Several settings can be changed for DVSs and DVportgroups. Here are two
examples covering all of the settings; fewer settings can be used as well.

DVS
---

.. code-block:: python

    'name': 'dvs1',
    'max_mtu': 1000,
    'uplink_names': [
        'dvUplink1',
        'dvUplink2',
        'dvUplink3'
    ],
    'capability': {
        'portgroup_operation_supported': false,
        'operation_supported': true,
        'port_operation_supported': false
    },
    'lacp_api_version': 'multipleLag',
    'contact_email': 'foo@email.com',
    'product_info': {
        'version': '6.0.0',
        'vendor': 'VMware, Inc.',
        'name': 'DVS'
    },
    'network_resource_management_enabled': true,
    'contact_name': 'me@email.com',
    'infrastructure_traffic_resource_pools': [
        {
            'reservation': 0,
            'limit': 1000,
            'share_level': 'high',
            'key': 'management',
            'num_shares': 100
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'faultTolerance',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': 32000,
            'share_level': 'normal',
            'key': 'vmotion',
            'num_shares': 50
        },
        {
            'reservation': 10000,
            'limit': -1,
            'share_level': 'normal',
            'key': 'virtualMachine',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'custom',
            'key': 'iSCSI',
            'num_shares': 75
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'nfs',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'hbr',
            'num_shares': 50
        },
        {
            'reservation': 8750,
            'limit': 15000,
            'share_level': 'high',
            'key': 'vsan',
            'num_shares': 100
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'vdp',
            'num_shares': 50
        }
    ],
    'link_discovery_protocol': {
        'operation': 'listen',
        'protocol': 'cdp'
    },
    'network_resource_control_version': 'version3',
    'description': 'Managed by Salt. Random settings.'

Note: The mandatory attribute is: ``name``.

Portgroup
---------

.. code-block:: python

    'security_policy': {
        'allow_promiscuous': true,
        'mac_changes': false,
        'forged_transmits': true
    },
    'name': 'vmotion-v702',
    'out_shaping': {
        'enabled': true,
        'average_bandwidth': 1500,
        'burst_size': 4096,
        'peak_bandwidth': 1500
    },
    'num_ports': 128,
    'teaming': {
        'port_order': {
            'active': [
                'dvUplink2'
            ],
            'standby': [
                'dvUplink1'
            ]
        },
        'notify_switches': false,
        'reverse_policy': true,
        'rolling_order': false,
        'policy': 'failover_explicit',
        'failure_criteria': {
            'check_error_percent': true,
            'full_duplex': false,
            'check_duplex': false,
            'percentage': 50,
            'check_speed': 'minimum',
            'speed': 20,
            'check_beacon': true
        }
    },
    'type': 'earlyBinding',
    'vlan_id': 100,
    'description': 'Managed by Salt. Random settings.'

Note: The mandatory attributes are: ``name``, ``type``.

Dependencies
============


- pyVmomi Python Module


pyVmomi
-------

PyVmomi can be installed via pip:

.. code-block:: bash

    pip install pyVmomi

.. note::

    Version 6.0 of pyVmomi has some problems with SSL error handling on certain
    versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9,
    or newer must be present. This is due to an upstream dependency
    in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
    version of Python is not in the supported range, you will need to install an
    earlier version of pyVmomi. See `Issue #29537`_ for more information.

.. _Issue #29537: https://github.com/saltstack/salt/issues/29537

Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:

.. code-block:: bash

    pip install pyVmomi==5.5.0.2014.1.1

The 5.5.0.2014.1.1 is a known stable version that this original ESXi State
Module was developed against.
'''

# Import Python Libs
from __future__ import absolute_import
import logging
import traceback
import sys

# Import Salt Libs
import salt.exceptions
from salt.ext.six.moves import range

# Import Third Party Libs
try:
    from pyVmomi import VmomiSupport
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False

# Get Logging Started
log = logging.getLogger(__name__)


def __virtual__():
    if not HAS_PYVMOMI:
        return False, 'State module did not load: pyVmomi not found'

    # We check the supported vim versions to infer the pyVmomi version
    if 'vim25/6.0' in VmomiSupport.versionMap and \
            sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):

        return False, ('State module did not load: Incompatible versions '
                       'of Python and pyVmomi present. See Issue #29537.')
    return 'dvs'
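
A standalone sketch of the version gate in ``__virtual__`` above (the helper
name below is illustrative):

.. code-block:: python

    import sys

    def python_ok_for_pyvmomi6():
        # pyVmomi 6.0 (inferred from the 'vim25/6.0' API version being
        # present) needs Python 2.7.9+ for its SSL handling, so the range
        # 2.7.0 - 2.7.8 is rejected.
        return not ((2, 7) < sys.version_info < (2, 7, 9))

    print(python_ok_for_pyvmomi6())  # True outside Python 2.7.0 - 2.7.8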


def mod_init(low):
    '''
    Init function
    '''
    return True


def _get_datacenter_name():
    '''
    Returns the datacenter name configured on the proxy

    Supported proxies: esxcluster, esxdatacenter
    '''

    proxy_type = __salt__['vsphere.get_proxy_type']()
    details = None
    if proxy_type == 'esxcluster':
        details = __salt__['esxcluster.get_details']()
    elif proxy_type == 'esxdatacenter':
        details = __salt__['esxdatacenter.get_details']()
    if not details:
        raise salt.exceptions.CommandExecutionError(
            'details for proxy type \'{0}\' not loaded'.format(proxy_type))
    return details['datacenter']


def dvs_configured(name, dvs):
    '''
    Configures a DVS.

    Creates a new DVS if it doesn't exist in the provided datacenter, or
    reconfigures it if it is configured differently.

    dvs
        DVS dict representation (see module sysdocs)
    '''
    datacenter_name = _get_datacenter_name()
    dvs_name = dvs['name'] if dvs.get('name') else name
    log.info('Running state {0} for DVS \'{1}\' in datacenter '
             '\'{2}\''.format(name, dvs_name, datacenter_name))
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None}
    comments = []
    changes = {}
    changes_required = False
    si = None  # initialized so the except block below can disconnect safely

    try:
        # TODO: dvs validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name],
                                             service_instance=si)
        if not dvss:
            changes_required = True
            if __opts__['test']:
                comments.append('State {0} will create a new DVS '
                                '\'{1}\' in datacenter \'{2}\''
                                ''.format(name, dvs_name, datacenter_name))
                log.info(comments[-1])
            else:
                dvs['name'] = dvs_name
                __salt__['vsphere.create_dvs'](dvs_dict=dvs,
                                               dvs_name=dvs_name,
                                               service_instance=si)
                comments.append('Created a new DVS \'{0}\' in datacenter '
                                '\'{1}\''.format(dvs_name, datacenter_name))
                log.info(comments[-1])
                changes.update({'dvs': {'new': dvs}})
        else:
            # DVS already exists. Checking various aspects of the config
            props = ['description', 'contact_email', 'contact_name',
                     'lacp_api_version', 'link_discovery_protocol',
                     'max_mtu', 'network_resource_control_version',
                     'network_resource_management_enabled']
            log.trace('DVS \'{0}\' found in datacenter \'{1}\'. Checking '
                      'for any updates in '
                      '{2}'.format(dvs_name, datacenter_name, props))
            props_to_original_values = {}
            props_to_updated_values = {}
            current_dvs = dvss[0]
            for prop in props:
                if prop in dvs and dvs[prop] != current_dvs.get(prop):
                    props_to_original_values[prop] = current_dvs.get(prop)
                    props_to_updated_values[prop] = dvs[prop]

            # Simple infrastructure traffic resource control compare doesn't
            # work because num_shares is optional if share_level is not custom
            # We need to do a dedicated compare for this property
            infra_prop = 'infrastructure_traffic_resource_pools'
            original_infra_res_pools = []
            updated_infra_res_pools = []
            if infra_prop in dvs:
                if not current_dvs.get(infra_prop):
                    updated_infra_res_pools = dvs[infra_prop]
                else:
                    for idx in range(len(dvs[infra_prop])):
                        if 'num_shares' not in dvs[infra_prop][idx] and \
                           current_dvs[infra_prop][idx]['share_level'] != \
                           'custom' and \
                           'num_shares' in current_dvs[infra_prop][idx]:

                            del current_dvs[infra_prop][idx]['num_shares']
                        if dvs[infra_prop][idx] != \
                           current_dvs[infra_prop][idx]:

                            original_infra_res_pools.append(
                                current_dvs[infra_prop][idx])
                            updated_infra_res_pools.append(
                                dict(dvs[infra_prop][idx]))
                if updated_infra_res_pools:
                    props_to_original_values[
                        'infrastructure_traffic_resource_pools'] = \
                        original_infra_res_pools
                    props_to_updated_values[
                        'infrastructure_traffic_resource_pools'] = \
                        updated_infra_res_pools
            if props_to_updated_values:
                if __opts__['test']:
                    changes_string = ''
                    for p in props_to_updated_values:
                        if p == 'infrastructure_traffic_resource_pools':
                            changes_string += \
                                '\tinfrastructure_traffic_resource_pools:\n'
                            for idx in range(len(props_to_updated_values[p])):
                                d = props_to_updated_values[p][idx]
                                s = props_to_original_values[p][idx]
                                changes_string += \
                                    ('\t\t{0} from \'{1}\' to \'{2}\'\n'
                                     ''.format(d['key'], s, d))
                        else:
                            changes_string += \
                                ('\t{0} from \'{1}\' to \'{2}\'\n'
                                 ''.format(p, props_to_original_values[p],
                                           props_to_updated_values[p]))
                    comments.append(
                        'State dvs_configured will update DVS \'{0}\' '
                        'in datacenter \'{1}\':\n{2}'
                        ''.format(dvs_name, datacenter_name, changes_string))
                    log.info(comments[-1])
                else:
                    __salt__['vsphere.update_dvs'](
                        dvs_dict=props_to_updated_values,
                        dvs=dvs_name,
                        service_instance=si)
                    comments.append('Updated DVS \'{0}\' in datacenter \'{1}\''
                                    ''.format(dvs_name, datacenter_name))
                    log.info(comments[-1])
                    changes.update({'dvs': {'new': props_to_updated_values,
                                            'old': props_to_original_values}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        ret.update({'comment': str(exc),
                    'result': False if not __opts__['test'] else None})
        return ret
    if not comments:
        # We have no changes
        ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is '
                                'correctly configured. Nothing to be done.'
                                ''.format(dvs_name, datacenter_name)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret

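The dedicated compare for ``infrastructure_traffic_resource_pools`` in
``dvs_configured`` above exists because a naive equality check produces false
positives. A minimal sketch of why, with illustrative pool dicts:

.. code-block:: python

    # The server reports num_shares even when share_level isn't 'custom',
    # while the desired config may legitimately omit it.
    current = {'key': 'vmotion', 'share_level': 'normal', 'num_shares': 50}
    desired = {'key': 'vmotion', 'share_level': 'normal'}

    print(current == desired)  # False, although the configs are equivalent

    # Dropping the irrelevant num_shares first, as the state does, fixes it:
    trimmed = dict(current)
    if 'num_shares' not in desired and trimmed.get('share_level') != 'custom':
        trimmed.pop('num_shares', None)
    print(trimmed == desired)  # True
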
def _get_diff_dict(dict1, dict2):
    '''
    Returns a dictionary with the diffs between two dictionaries

    It will ignore any key that doesn't exist in dict2
    '''
    ret_dict = {}
    for p in dict2.keys():
        if p not in dict1:
            ret_dict.update({p: {'val1': None, 'val2': dict2[p]}})
        elif dict1[p] != dict2[p]:
            if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
                sub_diff_dict = _get_diff_dict(dict1[p], dict2[p])
                if sub_diff_dict:
                    ret_dict.update({p: sub_diff_dict})
            else:
                ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}})
    return ret_dict

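A quick usage sketch of ``_get_diff_dict``, assuming the module above is
importable; the input dicts and the expected output shown in comments are
illustrative:

.. code-block:: python

    from salt.states.dvs import _get_diff_dict  # the module defined above

    current = {'vlan_id': 100, 'teaming': {'policy': 'loadbalance_ip'}}
    desired = {'vlan_id': 200, 'teaming': {'policy': 'failover_explicit'}}

    # Keys only present in the first dict are ignored; nested dicts recurse.
    print(_get_diff_dict(current, desired))
    # {'vlan_id': {'val1': 100, 'val2': 200},
    #  'teaming': {'policy': {'val1': 'loadbalance_ip',
    #                         'val2': 'failover_explicit'}}}
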
def _get_val2_dict_from_diff_dict(diff_dict):
    '''
    Returns a dictionary with the values stored in val2 of a diff dict.
    '''
    ret_dict = {}
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if 'val2' in diff_dict[p].keys():
            ret_dict.update({p: diff_dict[p]['val2']})
        else:
            ret_dict.update(
                {p: _get_val2_dict_from_diff_dict(diff_dict[p])})
    return ret_dict


def _get_val1_dict_from_diff_dict(diff_dict):
    '''
    Returns a dictionary with the values stored in val1 of a diff dict.
    '''
    ret_dict = {}
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if 'val1' in diff_dict[p].keys():
            ret_dict.update({p: diff_dict[p]['val1']})
        else:
            ret_dict.update(
                {p: _get_val1_dict_from_diff_dict(diff_dict[p])})
    return ret_dict


def _get_changes_from_diff_dict(diff_dict):
    '''
    Returns a list of string messages of the differences in a diff dict.

    Each inner message is tabulated one tab deeper
    '''
    changes_strings = []
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if sorted(diff_dict[p].keys()) == ['val1', 'val2']:
            # Some string formatting
            from_str = diff_dict[p]['val1']
            if isinstance(diff_dict[p]['val1'], str):
                from_str = '\'{0}\''.format(diff_dict[p]['val1'])
            elif isinstance(diff_dict[p]['val1'], list):
                from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1']))
            to_str = diff_dict[p]['val2']
            if isinstance(diff_dict[p]['val2'], str):
                to_str = '\'{0}\''.format(diff_dict[p]['val2'])
            elif isinstance(diff_dict[p]['val2'], list):
                to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2']))
            changes_strings.append('{0} from {1} to {2}'.format(
                p, from_str, to_str))
        else:
            sub_changes = _get_changes_from_diff_dict(diff_dict[p])
            if sub_changes:
                changes_strings.append('{0}:'.format(p))
                changes_strings.extend(['\t{0}'.format(c)
                                        for c in sub_changes])
    return changes_strings


def portgroups_configured(name, dvs, portgroups):
    '''
    Configures portgroups on a DVS.

    Creates/updates/removes portgroups in a provided DVS

    dvs
        Name of the DVS

    portgroups
        Portgroup dict representations (see module sysdocs)
    '''
    datacenter = _get_datacenter_name()
    log.info('Running state {0} on DVS \'{1}\', datacenter '
             '\'{2}\''.format(name, dvs, datacenter))
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    comments = []
    changes = {}
    changes_required = False
    si = None  # initialized so the except block below can disconnect safely

    try:
        # TODO: portgroups validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        current_pgs = __salt__['vsphere.list_dvportgroups'](
            dvs=dvs, service_instance=si)
        expected_pg_names = []
        for pg in portgroups:
            pg_name = pg['name']
            expected_pg_names.append(pg_name)
            del pg['name']
            log.info('Checking pg \'{0}\''.format(pg_name))
            filtered_current_pgs = \
                [p for p in current_pgs if p.get('name') == pg_name]
            if not filtered_current_pgs:
                changes_required = True
                if __opts__['test']:
                    comments.append('State {0} will create a new portgroup '
                                    '\'{1}\' in DVS \'{2}\', datacenter '
                                    '\'{3}\''.format(name, pg_name, dvs,
                                                     datacenter))
                else:
                    __salt__['vsphere.create_dvportgroup'](
                        portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
                        service_instance=si)
                    comments.append('Created a new portgroup \'{0}\' in DVS '
                                    '\'{1}\', datacenter \'{2}\''
                                    ''.format(pg_name, dvs, datacenter))
                    log.info(comments[-1])
                    changes.update({pg_name: {'new': pg}})
            else:
                # Portgroup already exists. Checking the config
                log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter '
                          '\'{2}\'. Checking for any updates.'
                          ''.format(pg_name, dvs, datacenter))
                current_pg = filtered_current_pgs[0]
                diff_dict = _get_diff_dict(current_pg, pg)

                if diff_dict:
                    changes_required = True
                    if __opts__['test']:
                        changes_strings = \
                            _get_changes_from_diff_dict(diff_dict)
                        log.trace('changes_strings = '
                                  '{0}'.format(changes_strings))
                        comments.append(
                            'State {0} will update portgroup \'{1}\' in '
                            'DVS \'{2}\', datacenter \'{3}\':\n{4}'
                            ''.format(name, pg_name, dvs, datacenter,
                                      '\n'.join(['\t{0}'.format(c) for c in
                                                 changes_strings])))
                    else:
                        __salt__['vsphere.update_dvportgroup'](
                            portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
                            service_instance=si)
                        comments.append('Updated portgroup \'{0}\' in DVS '
                                        '\'{1}\', datacenter \'{2}\''
                                        ''.format(pg_name, dvs, datacenter))
                        log.info(comments[-1])
                        changes.update(
                            {pg_name:
                             {'new': _get_val2_dict_from_diff_dict(diff_dict),
                              'old': _get_val1_dict_from_diff_dict(diff_dict)}})
        # Add the uplink portgroup to the expected pg names
        uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
            dvs=dvs, service_instance=si)
        expected_pg_names.append(uplink_pg['name'])
        # Remove any extra portgroups
        for current_pg in current_pgs:
            if current_pg['name'] not in expected_pg_names:
                changes_required = True
                if __opts__['test']:
                    comments.append('State {0} will remove '
                                    'the portgroup \'{1}\' from DVS \'{2}\', '
                                    'datacenter \'{3}\''
                                    ''.format(name, current_pg['name'], dvs,
                                              datacenter))
                else:
                    __salt__['vsphere.remove_dvportgroup'](
                        portgroup=current_pg['name'], dvs=dvs,
                        service_instance=si)
                    comments.append('Removed the portgroup \'{0}\' from DVS '
                                    '\'{1}\', datacenter \'{2}\''
                                    ''.format(current_pg['name'], dvs,
                                              datacenter))
                    log.info(comments[-1])
                    changes.update({current_pg['name']:
                                    {'old': current_pg}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret
    if not changes_required:
        # We have no changes
        ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
                                '\'{1}\' exist and are correctly configured. '
                                'Nothing to be done.'.format(dvs, datacenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret


def uplink_portgroup_configured(name, dvs, uplink_portgroup):
    '''
    Configures the uplink portgroup on a DVS. The state assumes there is only
    one uplink portgroup.

    dvs
        Name of the DVS

    uplink_portgroup
        Uplink portgroup dict representation (see module sysdocs)

    '''
    datacenter = _get_datacenter_name()
    log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\''
             ''.format(name, dvs, datacenter))
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    comments = []
    changes = {}
    changes_required = False
    si = None  # initialized so the except block below can disconnect safely

    try:
        # TODO: portgroup validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup'](
            dvs=dvs, service_instance=si)
        log.trace('current_uplink_portgroup = '
                  '{0}'.format(current_uplink_portgroup))
        diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup)
        if diff_dict:
            changes_required = True
            if __opts__['test']:
                changes_strings = \
                    _get_changes_from_diff_dict(diff_dict)
                log.trace('changes_strings = '
                          '{0}'.format(changes_strings))
                comments.append(
                    'State {0} will update the '
                    'uplink portgroup in DVS \'{1}\', datacenter '
                    '\'{2}\':\n{3}'
                    ''.format(name, dvs, datacenter,
                              '\n'.join(['\t{0}'.format(c) for c in
                                         changes_strings])))
            else:
                __salt__['vsphere.update_dvportgroup'](
                    portgroup_dict=uplink_portgroup,
                    portgroup=current_uplink_portgroup['name'],
                    dvs=dvs,
                    service_instance=si)
                comments.append('Updated the uplink portgroup in DVS '
                                '\'{0}\', datacenter \'{1}\''
                                ''.format(dvs, datacenter))
                log.info(comments[-1])
                changes.update(
                    {'uplink_portgroup':
                     {'new': _get_val2_dict_from_diff_dict(diff_dict),
                      'old': _get_val1_dict_from_diff_dict(diff_dict)}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret
    if not changes_required:
        # We have no changes
        ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter '
                                '\'{1}\' is correctly configured. '
                                'Nothing to be done.'.format(dvs, datacenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret
538
salt/states/esxcluster.py
Normal file
@ -0,0 +1,538 @@
# -*- coding: utf-8 -*-
'''
Manage VMware ESXi Clusters.

Dependencies
============

- pyVmomi Python Module


pyVmomi
-------

PyVmomi can be installed via pip:

.. code-block:: bash

    pip install pyVmomi

.. note::

    Version 6.0 of pyVmomi has some problems with SSL error handling on certain
    versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9,
    or newer must be present. This is due to an upstream dependency
    in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
    version of Python is not in the supported range, you will need to install an
    earlier version of pyVmomi. See `Issue #29537`_ for more information.

.. _Issue #29537: https://github.com/saltstack/salt/issues/29537

Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:

.. code-block:: bash

    pip install pyVmomi==5.5.0.2014.1.1

The 5.5.0.2014.1.1 is a known stable version that this original ESXi State
Module was developed against.
'''

# Import Python Libs
from __future__ import absolute_import
import logging
import traceback
import sys

# Import Salt Libs
import salt.exceptions
from salt.utils.dictdiffer import recursive_diff
from salt.utils.listdiffer import list_diff
from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \
    LicenseSchema
from salt.utils import dictupdate

# External libraries
try:
    import jsonschema
    HAS_JSONSCHEMA = True
except ImportError:
    HAS_JSONSCHEMA = False

try:
    from pyVmomi import VmomiSupport
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False

# Get Logging Started
log = logging.getLogger(__name__)


def __virtual__():
    if not HAS_JSONSCHEMA:
        return False, 'State module did not load: jsonschema not found'
    if not HAS_PYVMOMI:
        return False, 'State module did not load: pyVmomi not found'

    # We check the supported vim versions to infer the pyVmomi version
    if 'vim25/6.0' in VmomiSupport.versionMap and \
            sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):

        return False, ('State module did not load: Incompatible versions '
                       'of Python and pyVmomi present. See Issue #29537.')
    return True


def mod_init(low):
    '''
    Retrieves and adapts the login credentials from the proxy connection module
    '''
    return True


def _get_vsan_datastore(si, cluster_name):
    '''Retrieves the vsan_datastore'''

    log.trace('Retrieving vsan datastore')
    vsan_datastores = [ds for ds in
                       __salt__['vsphere.list_datastores_via_proxy'](
                           service_instance=si)
                       if ds['type'] == 'vsan']

    if not vsan_datastores:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vSAN datastores were retrieved for cluster '
            '\'{0}\''.format(cluster_name))
    return vsan_datastores[0]

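A standalone sketch of the datastore filtering in ``_get_vsan_datastore``,
with made-up data in place of the ``vsphere.list_datastores_via_proxy``
output:

.. code-block:: python

    # Illustrative datastore records
    datastores = [
        {'name': 'local-ds1', 'type': 'vmfs'},
        {'name': 'vsanDatastore', 'type': 'vsan'},
    ]

    vsan_datastores = [ds for ds in datastores if ds['type'] == 'vsan']
    if not vsan_datastores:
        raise RuntimeError('No vSAN datastores were retrieved')
    print(vsan_datastores[0]['name'])  # vsanDatastore
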
def cluster_configured(name, cluster_config):
|
||||
'''
|
||||
Configures a cluster. Creates a new cluster, if it doesn't exist on the
|
||||
vCenter or reconfigures it if configured differently
|
||||
|
||||
Supported proxies: esxdatacenter, esxcluster
|
||||
|
||||
name
|
||||
Name of the state. If the state is run in by an ``esxdatacenter``
|
||||
proxy, it will be the name of the cluster.
|
||||
|
||||
cluster_config
|
||||
Configuration applied to the cluster.
|
||||
Complex datastructure following the ESXClusterConfigSchema.
|
||||
Valid example is:
|
||||
|
||||
.. code-block::yaml
|
||||
|
||||
drs:
|
||||
default_vm_behavior: fullyAutomated
|
||||
enabled: true
|
||||
vmotion_rate: 3
|
||||
ha:
|
||||
admission_control
|
||||
_enabled: false
|
||||
default_vm_settings:
|
||||
isolation_response: powerOff
|
||||
restart_priority: medium
|
||||
enabled: true
|
||||
hb_ds_candidate_policy: userSelectedDs
|
||||
host_monitoring: enabled
|
||||
options:
|
||||
- key: das.ignoreinsufficienthbdatastore
|
||||
value: 'true'
|
||||
vm_monitoring: vmMonitoringDisabled
|
||||
vm_swap_placement: vmDirectory
|
||||
vsan:
|
||||
auto_claim_storage: false
|
||||
compression_enabled: true
|
||||
dedup_enabled: true
|
||||
enabled: true
|
||||
|
||||
'''
|
||||
proxy_type = __salt__['vsphere.get_proxy_type']()
|
||||
if proxy_type == 'esxdatacenter':
|
||||
cluster_name, datacenter_name = \
|
||||
name, __salt__['esxdatacenter.get_details']()['datacenter']
|
||||
elif proxy_type == 'esxcluster':
|
||||
cluster_name, datacenter_name = \
|
||||
__salt__['esxcluster.get_details']()['cluster'], \
|
||||
__salt__['esxcluster.get_details']()['datacenter']
|
||||
else:
|
||||
raise salt.exceptions.CommandExecutionError('Unsupported proxy {0}'
|
||||
''.format(proxy_type))
|
||||
log.info('Running {0} for cluster \'{1}\' in datacenter '
|
||||
'\'{2}\''.format(name, cluster_name, datacenter_name))
|
||||
cluster_dict = cluster_config
|
||||
log.trace('cluster_dict = {0}'.format(cluster_dict))
|
||||
changes_required = False
|
||||
ret = {'name': name,
|
||||
'changes': {}, 'result': None, 'comment': 'Default'}
|
||||
comments = []
|
||||
changes = {}
|
||||
changes_required = False
|
||||
|
||||
try:
|
||||
log.trace('Validating cluster_configured state input')
|
||||
schema = ESXClusterConfigSchema.serialize()
|
||||
log.trace('schema = {0}'.format(schema))
|
||||
try:
|
||||
jsonschema.validate(cluster_dict, schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise salt.exceptions.InvalidESXClusterPayloadError(exc)
|
||||
current = None
|
||||
si = __salt__['vsphere.get_service_instance_via_proxy']()
|
||||
try:
|
||||
current = __salt__['vsphere.list_cluster'](datacenter_name,
|
||||
cluster_name,
|
||||
service_instance=si)
|
||||
except salt.exceptions.VMwareObjectRetrievalError:
|
||||
changes_required = True
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will create cluster '
|
||||
'\'{1}\' in datacenter \'{2}\'.'
|
||||
''.format(name, cluster_name, datacenter_name))
|
||||
log.info(comments[-1])
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({'result': None,
|
||||
'comment': '\n'.join(comments)})
|
||||
return ret
|
||||
log.trace('Creating cluster \'{0}\' in datacenter \'{1}\'. '
|
||||
''.format(cluster_name, datacenter_name))
|
||||
__salt__['vsphere.create_cluster'](cluster_dict,
|
||||
datacenter_name,
|
||||
cluster_name,
|
||||
service_instance=si)
|
||||
comments.append('Created cluster \'{0}\' in datacenter \'{1}\''
|
||||
''.format(cluster_name, datacenter_name))
|
||||
log.info(comments[-1])
|
||||
changes.update({'new': cluster_dict})
|
||||
if current:
|
||||
# Cluster already exists
|
||||
# We need to handle lists sepparately
|
||||
ldiff = None
|
||||
if 'ha' in cluster_dict and 'options' in cluster_dict['ha']:
|
||||
ldiff = list_diff(current.get('ha', {}).get('options', []),
|
||||
cluster_dict.get('ha', {}).get('options', []),
|
||||
'key')
|
||||
log.trace('options diffs = {0}'.format(ldiff.diffs))
|
||||
# Remove options if exist
|
||||
del cluster_dict['ha']['options']
|
||||
if 'ha' in current and 'options' in current['ha']:
|
||||
del current['ha']['options']
|
||||
diff = recursive_diff(current, cluster_dict)
|
||||
log.trace('diffs = {0}'.format(diff.diffs))
|
||||
if not (diff.diffs or (ldiff and ldiff.diffs)):
|
||||
# No differences
|
||||
comments.append('Cluster \'{0}\' in datacenter \'{1}\' is up '
|
||||
'to date. Nothing to be done.'
|
||||
''.format(cluster_name, datacenter_name))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
changes_required = True
|
||||
changes_str = ''
|
||||
if diff.diffs:
|
||||
changes_str = '{0}{1}'.format(changes_str,
|
||||
diff.changes_str)
|
||||
if ldiff and ldiff.diffs:
|
||||
changes_str = '{0}\nha:\n options:\n{1}'.format(
|
||||
changes_str,
|
||||
'\n'.join([' {0}'.format(l) for l in
|
||||
ldiff.changes_str2.split('\n')]))
|
||||
# Apply the changes
|
||||
if __opts__['test']:
|
||||
comments.append(
|
||||
'State {0} will update cluster \'{1}\' '
|
||||
'in datacenter \'{2}\':\n{3}'
|
||||
''.format(name, cluster_name,
|
||||
datacenter_name, changes_str))
|
||||
else:
|
||||
new_values = diff.new_values
|
||||
old_values = diff.old_values
|
||||
if ldiff and ldiff.new_values:
|
||||
dictupdate.update(
|
||||
new_values, {'ha': {'options': ldiff.new_values}})
|
||||
if ldiff and ldiff.old_values:
|
||||
dictupdate.update(
|
||||
old_values, {'ha': {'options': ldiff.old_values}})
|
||||
log.trace('new_values = {0}'.format(new_values))
|
||||
__salt__['vsphere.update_cluster'](new_values,
|
||||
datacenter_name,
|
||||
cluster_name,
|
||||
service_instance=si)
|
||||
comments.append('Updated cluster \'{0}\' in datacenter '
|
||||
'\'{1}\''.format(cluster_name,
|
||||
datacenter_name))
|
||||
log.info(comments[-1])
|
||||
changes.update({'new': new_values,
|
||||
'old': old_values})
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret_status = True
|
||||
if __opts__['test'] and changes_required:
|
||||
ret_status = None
|
||||
ret.update({'result': ret_status,
|
||||
'comment': '\n'.join(comments),
|
||||
'changes': changes})
|
||||
return ret
|
||||
except salt.exceptions.CommandExecutionError as exc:
|
||||
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
|
||||
if si:
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({
|
||||
'result': False,
|
||||
'comment': str(exc)})
|
||||
return ret
|
||||
|
||||
|
||||
def vsan_datastore_configured(name, datastore_name):
|
||||
'''
|
||||
Configures the cluster's VSAN datastore
|
||||
|
||||
WARNING: The VSAN datastore is created automatically after the first
|
||||
ESXi host is added to the cluster; the state assumes that the datastore
|
||||
exists and errors if it doesn't.
|
||||
'''
|
||||
|
||||
cluster_name, datacenter_name = \
|
||||
__salt__['esxcluster.get_details']()['cluster'], \
|
||||
__salt__['esxcluster.get_details']()['datacenter']
|
||||
display_name = '{0}/{1}'.format(datacenter_name, cluster_name)
|
||||
log.info('Running vsan_datastore_configured for '
|
||||
'\'{0}\''.format(display_name))
|
||||
ret = {'name': name,
|
||||
'changes': {}, 'result': None,
|
||||
'comment': 'Default'}
|
||||
comments = []
|
||||
changes = {}
|
||||
changes_required = False
|
||||
|
||||
try:
|
||||
si = __salt__['vsphere.get_service_instance_via_proxy']()
|
||||
# Checking if we need to rename the vsan datastore
|
||||
vsan_ds = _get_vsan_datastore(si, cluster_name)
|
||||
if vsan_ds['name'] == datastore_name:
|
||||
comments.append('vSAN datastore is correctly named \'{0}\'. '
|
||||
'Nothing to be done.'.format(vsan_ds['name']))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
# vsan_ds needs to be updated
|
||||
changes_required = True
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will rename the vSAN datastore to '
|
||||
'\'{1}\'.'.format(name, datastore_name))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
log.trace('Renaming vSAN datastore \'{0}\' to \'{1}\''
|
||||
''.format(vsan_ds['name'], datastore_name))
|
||||
__salt__['vsphere.rename_datastore'](
|
||||
datastore_name=vsan_ds['name'],
|
||||
new_datastore_name=datastore_name,
|
||||
service_instance=si)
|
||||
comments.append('Renamed vSAN datastore to \'{0}\'.'
|
||||
''.format(datastore_name))
|
||||
changes = {'vsan_datastore': {'new': {'name': datastore_name},
|
||||
'old': {'name': vsan_ds['name']}}}
|
||||
log.info(comments[-1])
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
|
||||
ret.update({'result': True if (not changes_required) else None if
|
||||
__opts__['test'] else True,
|
||||
'comment': '\n'.join(comments),
|
||||
'changes': changes})
|
||||
return ret
|
||||
except salt.exceptions.CommandExecutionError as exc:
|
||||
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
|
||||
if si:
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({
|
||||
'result': False,
|
||||
'comment': exc.strerror})
|
||||
return ret
|
||||
|
||||
|
||||
def licenses_configured(name, licenses=None):
|
||||
'''
|
||||
Configures licenses on the cluster entity
|
||||
|
||||
Checks if each license exists on the server:
|
||||
- if it doesn't, it creates it
|
||||
Check if license is assigned to the cluster:
|
||||
- if it's not assigned to the cluster:
|
||||
- assign it to the cluster if there is space
|
||||
- error if there's no space
|
||||
- if it's assigned to the cluster nothing needs to be done
|
||||
'''
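    # A minimal SLS usage sketch (hypothetical license name and key):
    #
    #   cluster-licenses:
    #     esxcluster.licenses_configured:
    #       - licenses:
    #           vcenter-license: 00000-00000-00000-00000-00000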
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': None,
|
||||
'comment': 'Default'}
|
||||
if not licenses:
|
||||
raise salt.exceptions.ArgumentValueError('No licenses provided')
|
||||
cluster_name, datacenter_name = \
|
||||
__salt__['esxcluster.get_details']()['cluster'], \
|
||||
__salt__['esxcluster.get_details']()['datacenter']
|
||||
display_name = '{0}/{1}'.format(datacenter_name, cluster_name)
|
||||
log.info('Running licenses configured for \'{0}\''.format(display_name))
|
||||
log.trace('licenses = {0}'.format(licenses))
|
||||
entity = {'type': 'cluster',
|
||||
'datacenter': datacenter_name,
|
||||
'cluster': cluster_name}
|
||||
log.trace('entity = {0}'.format(entity))
|
||||
|
||||
comments = []
|
||||
changes = {}
|
||||
old_licenses = []
|
||||
new_licenses = []
|
||||
has_errors = False
|
||||
needs_changes = False
|
||||
try:
|
||||
# Validate licenses
|
||||
log.trace('Validating licenses')
|
||||
schema = LicenseSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate({'licenses': licenses}, schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise salt.exceptions.InvalidLicenseError(exc)
|
||||
|
||||
si = __salt__['vsphere.get_service_instance_via_proxy']()
|
||||
# Retrieve licenses
|
||||
existing_licenses = __salt__['vsphere.list_licenses'](
|
||||
service_instance=si)
|
||||
remaining_licenses = existing_licenses[:]
|
||||
# Cycle through licenses
|
||||
for license_name, license in licenses.items():
|
||||
# Check if license already exists
|
||||
filtered_licenses = [l for l in existing_licenses
|
||||
if l['key'] == license]
|
||||
# TODO Update license description - not of interest right now
|
||||
if not filtered_licenses:
|
||||
# License doesn't exist - add and assign to cluster
|
||||
needs_changes = True
|
||||
if __opts__['test']:
|
||||
# If it doesn't exist it clearly needs to be assigned as
|
||||
# well so we can stop the check here
|
||||
comments.append('State {0} will add license \'{1}\', '
|
||||
'and assign it to cluster \'{2}\'.'
|
||||
''.format(name, license_name, display_name))
|
||||
log.info(comments[-1])
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
existing_license = __salt__['vsphere.add_license'](
|
||||
key=license, description=license_name,
|
||||
service_instance=si)
|
||||
except salt.exceptions.VMwareApiError as ex:
|
||||
comments.append(ex.err_msg)
|
||||
log.error(comments[-1])
|
||||
has_errors = True
|
||||
continue
|
||||
comments.append('Added license \'{0}\'.'
|
||||
''.format(license_name))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
# License exists let's check if it's assigned to the cluster
|
||||
comments.append('License \'{0}\' already exists. '
|
||||
'Nothing to be done.'.format(license_name))
|
||||
log.info(comments[-1])
|
||||
existing_license = filtered_licenses[0]
|
||||
|
||||
log.trace('Checking licensed entities...')
|
||||
assigned_licenses = __salt__['vsphere.list_assigned_licenses'](
|
||||
entity=entity,
|
||||
entity_display_name=display_name,
|
||||
service_instance=si)
|
||||
|
||||
# Checking if any of the licenses already assigned have the same
|
||||
# name as the new license; the already assigned license would be
|
||||
# replaced by the new license
|
||||
#
|
||||
# Licenses with different names but matching features would be
|
||||
# replaced as well, but searching for those would be very complex
|
||||
#
|
||||
# the name check is good enough for now
|
||||
already_assigned_license = assigned_licenses[0] if \
|
||||
assigned_licenses else None
|
||||
|
||||
if already_assigned_license and \
|
||||
already_assigned_license['key'] == license:
|
||||
|
||||
# License is already assigned to entity
|
||||
comments.append('License \'{0}\' already assigned to '
|
||||
'cluster \'{1}\'. Nothing to be done.'
|
||||
''.format(license_name, display_name))
|
||||
log.info(comments[-1])
|
||||
continue
|
||||
|
||||
needs_changes = True
|
||||
# License needs to be assigned to entity
|
||||
|
||||
if existing_license['capacity'] <= existing_license['used']:
|
||||
# License is already fully used
|
||||
comments.append('Cannot assign license \'{0}\' to cluster '
|
||||
'\'{1}\'. No free capacity available.'
|
||||
''.format(license_name, display_name))
|
||||
log.error(comments[-1])
|
||||
has_errors = True
|
||||
continue
|
||||
|
||||
# Assign license
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will assign license \'{1}\' '
|
||||
'to cluster \'{2}\'.'.format(
|
||||
name, license_name, display_name))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
try:
|
||||
__salt__['vsphere.assign_license'](
|
||||
license_key=license,
|
||||
license_name=license_name,
|
||||
entity=entity,
|
||||
entity_display_name=display_name,
|
||||
service_instance=si)
|
||||
except salt.exceptions.VMwareApiError as ex:
|
||||
comments.append(ex.err_msg)
|
||||
log.error(comments[-1])
|
||||
has_errors = True
|
||||
continue
|
||||
comments.append('Assigned license \'{0}\' to cluster \'{1}\'.'
|
||||
''.format(license_name, display_name))
|
||||
log.info(comments[-1])
|
||||
# Note: Because the already_assigned_license was retrieved
|
||||
# from the assignment license manager it doesn't have a used
|
||||
# value - that's a limitation from VMware. The license would
|
||||
# need to be retrieved again from the license manager to get
|
||||
# the value
|
||||
|
||||
# Hide license keys
|
||||
assigned_license = __salt__['vsphere.list_assigned_licenses'](
|
||||
entity=entity,
|
||||
entity_display_name=display_name,
|
||||
service_instance=si)[0]
|
||||
assigned_license['key'] = '<hidden>'
|
||||
if already_assigned_license:
|
||||
already_assigned_license['key'] = '<hidden>'
|
||||
if already_assigned_license and \
|
||||
already_assigned_license['capacity'] == sys.maxsize:
|
||||
|
||||
already_assigned_license['capacity'] = 'Unlimited'
|
||||
|
||||
changes[license_name] = {'new': assigned_license,
|
||||
'old': already_assigned_license}
|
||||
continue
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
|
||||
ret.update({'result': True if (not needs_changes) else None if
|
||||
__opts__['test'] else False if has_errors else True,
|
||||
'comment': '\n'.join(comments),
|
||||
'changes': changes if not __opts__['test'] else {}})
|
||||
|
||||
return ret
|
||||
except salt.exceptions.CommandExecutionError as exc:
|
||||
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
|
||||
if si:
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({
|
||||
'result': False,
|
||||
'comment': exc.strerror})
|
||||
return ret
|
@@ -299,6 +299,7 @@ if salt.utils.platform.is_windows():
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves import zip_longest
|
||||
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module
|
||||
if salt.utils.platform.is_windows():
|
||||
import pywintypes
|
||||
import win32com.client
|
||||
@@ -1530,6 +1531,7 @@ def managed(name,
|
||||
source=None,
|
||||
source_hash='',
|
||||
source_hash_name=None,
|
||||
keep_source=True,
|
||||
user=None,
|
||||
group=None,
|
||||
mode=None,
|
||||
@@ -1729,6 +1731,15 @@ def managed(name,
|
||||
|
||||
.. versionadded:: 2016.3.5
|
||||
|
||||
keep_source : True
|
||||
Set to ``False`` to discard the cached copy of the source file once the
|
||||
state completes. This can be useful for larger files to keep them from
|
||||
taking up space in minion cache. However, keep in mind that discarding
|
||||
the source file will result in the state needing to re-download the
|
||||
source file if the state is run again.
|
||||
|
||||
.. versionadded:: 2017.7.3
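        For example, to lay down a file and discard the cached copy of its
        source once the run completes (illustrative paths):

        .. code-block:: yaml

            /etc/motd:
              file.managed:
                - source: salt://motd/motd.jinja
                - keep_source: False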
|
||||
|
||||
user
|
||||
The user to own the file, this defaults to the user salt is running as
|
||||
on the minion
|
||||
@@ -2440,8 +2451,9 @@ def managed(name,
|
||||
except Exception as exc:
|
||||
ret['changes'] = {}
|
||||
log.debug(traceback.format_exc())
|
||||
if os.path.isfile(tmp_filename):
|
||||
os.remove(tmp_filename)
|
||||
salt.utils.files.remove(tmp_filename)
|
||||
if not keep_source and sfn:
|
||||
salt.utils.files.remove(sfn)
|
||||
return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
|
||||
|
||||
# file being updated to verify using check_cmd
|
||||
@@ -2459,15 +2471,9 @@ def managed(name,
|
||||
cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
|
||||
if isinstance(cret, dict):
|
||||
ret.update(cret)
|
||||
if os.path.isfile(tmp_filename):
|
||||
os.remove(tmp_filename)
|
||||
if sfn and os.path.isfile(sfn):
|
||||
os.remove(sfn)
|
||||
salt.utils.files.remove(tmp_filename)
|
||||
return ret
|
||||
|
||||
if sfn and os.path.isfile(sfn):
|
||||
os.remove(sfn)
|
||||
|
||||
# Since we generated a new tempfile and we are not returning here
|
||||
# lets change the original sfn to the new tempfile or else we will
|
||||
# get file not found
|
||||
@@ -2516,10 +2522,10 @@ def managed(name,
|
||||
log.debug(traceback.format_exc())
|
||||
return _error(ret, 'Unable to manage file: {0}'.format(exc))
|
||||
finally:
|
||||
if tmp_filename and os.path.isfile(tmp_filename):
|
||||
os.remove(tmp_filename)
|
||||
if sfn and os.path.isfile(sfn):
|
||||
os.remove(sfn)
|
||||
if tmp_filename:
|
||||
salt.utils.files.remove(tmp_filename)
|
||||
if not keep_source and sfn:
|
||||
salt.utils.files.remove(sfn)
|
||||
|
||||
|
||||
_RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs']
|
||||
@@ -3048,6 +3054,7 @@ def directory(name,
|
||||
|
||||
def recurse(name,
|
||||
source,
|
||||
keep_source=True,
|
||||
clean=False,
|
||||
require=None,
|
||||
user=None,
|
||||
@@ -3080,6 +3087,15 @@ def recurse(name,
|
||||
located on the master in the directory named spam, and is called eggs,
|
||||
the source string is salt://spam/eggs
|
||||
|
||||
keep_source : True
|
||||
Set to ``False`` to discard the cached copy of the source file once the
|
||||
state completes. This can be useful for larger files to keep them from
|
||||
taking up space in minion cache. However, keep in mind that discarding
|
||||
the source file will result in the state needing to re-download the
|
||||
source file if the state is run again.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
clean
|
||||
Make sure that only files that are set up by salt and required by this
|
||||
function are kept. If this option is set then everything in this
|
||||
@@ -3360,6 +3376,7 @@ def recurse(name,
|
||||
_ret = managed(
|
||||
path,
|
||||
source=source,
|
||||
keep_source=keep_source,
|
||||
user=user,
|
||||
group=group,
|
||||
mode='keep' if keep_mode else file_mode,
|
||||
@@ -6426,3 +6443,340 @@ def shortcut(
|
||||
ret['comment'] += (', but was unable to set ownership to '
|
||||
'{0}'.format(user))
|
||||
return ret
|
||||
|
||||
|
||||
def cached(name,
|
||||
source_hash='',
|
||||
source_hash_name=None,
|
||||
skip_verify=False,
|
||||
saltenv='base'):
|
||||
'''
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Ensures that a file is saved to the minion's cache. This state is primarily
|
||||
invoked by other states to ensure that we do not re-download a source file
|
||||
if we do not need to.
|
||||
|
||||
name
|
||||
The URL of the file to be cached. To cache a file from an environment
|
||||
other than ``base``, either use the ``saltenv`` argument or include the
|
||||
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
|
||||
|
||||
.. note::
|
||||
A list of URLs is not supported; this must be a single URL. If a
|
||||
local file is passed here, then the state will obviously not try to
|
||||
download anything, but it will compare a hash if one is specified.
|
||||
|
||||
source_hash
|
||||
See the documentation for this same argument in the
|
||||
:py:func:`file.managed <salt.states.file.managed>` state.
|
||||
|
||||
.. note::
|
||||
For remote files not originating from the ``salt://`` fileserver,
|
||||
such as http(s) or ftp servers, this state will not re-download the
|
||||
file if the locally-cached copy matches this hash. This is done to
|
||||
prevent unnecessary downloading on repeated runs of this state. To
|
||||
update the cached copy of a file, it is necessary to update this
|
||||
hash.
|
||||
|
||||
source_hash_name
|
||||
See the documentation for this same argument in the
|
||||
:py:func:`file.managed <salt.states.file.managed>` state.
|
||||
|
||||
skip_verify
|
||||
See the documentation for this same argument in the
|
||||
:py:func:`file.managed <salt.states.file.managed>` state.
|
||||
|
||||
.. note::
|
||||
Setting this to ``True`` will result in a copy of the file being
|
||||
downloaded from a remote (http(s), ftp, etc.) source each time the
|
||||
state is run.
|
||||
|
||||
saltenv
|
||||
Used to specify the environment from which to download a file from the
|
||||
Salt fileserver (i.e. those with ``salt://`` URL).
|
||||
|
||||
|
||||
This state will in most cases not be useful in SLS files, but it is useful
|
||||
when writing a state or remote-execution module that needs to make sure
|
||||
that a file at a given URL has been downloaded to the cachedir. One example
|
||||
of this is in the :py:func:`archive.extracted <salt.states.archive.extracted>`
|
||||
state:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
result = __states__['file.cached'](source_match,
|
||||
source_hash=source_hash,
|
||||
source_hash_name=source_hash_name,
|
||||
skip_verify=skip_verify,
|
||||
saltenv=__env__)
|
||||
|
||||
This will return a dictionary containing the state's return data, including
|
||||
a ``result`` key which will state whether or not the state was successful.
|
||||
Note that this will not catch exceptions, so it is best used within a
|
||||
try/except.
|
||||
|
||||
Once this state has been run from within another state or remote-execution
|
||||
module, the actual location of the cached file can be obtained using
|
||||
:py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cached = __salt__['cp.is_cached'](source_match)
|
||||
|
||||
This function will return the cached path of the file, or an empty string
|
||||
if the file is not present in the minion cache.
|
||||
'''
|
||||
ret = {'changes': {},
|
||||
'comment': '',
|
||||
'name': name,
|
||||
'result': False}
|
||||
|
||||
try:
|
||||
parsed = _urlparse(name)
|
||||
except Exception:
|
||||
ret['comment'] = 'Only URLs or local file paths are valid input'
|
||||
return ret
|
||||
|
||||
# This if statement will keep the state from proceeding if a remote source
|
||||
# is specified and no source_hash is presented (unless we're skipping hash
|
||||
# verification).
|
||||
if not skip_verify \
|
||||
and not source_hash \
|
||||
and parsed.scheme in salt.utils.files.REMOTE_PROTOS:
|
||||
ret['comment'] = (
|
||||
'Unable to verify upstream hash of source file {0}, please set '
|
||||
'source_hash or set skip_verify to True'.format(name)
|
||||
)
|
||||
return ret
|
||||
|
||||
if source_hash:
|
||||
# Get the hash and hash type from the input. This takes care of parsing
|
||||
# the hash out of a file containing checksums, if that is how the
|
||||
# source_hash was specified.
|
||||
try:
|
||||
source_sum = __salt__['file.get_source_sum'](
|
||||
source=name,
|
||||
source_hash=source_hash,
|
||||
source_hash_name=source_hash_name,
|
||||
saltenv=saltenv)
|
||||
except CommandExecutionError as exc:
|
||||
ret['comment'] = exc.strerror
|
||||
return ret
|
||||
else:
|
||||
if not source_sum:
|
||||
# We shouldn't get here, problems in retrieving the hash in
|
||||
# file.get_source_sum should result in a CommandExecutionError
|
||||
# being raised, which we catch above. Nevertheless, we should
|
||||
# provide useful information in the event that
|
||||
# file.get_source_sum regresses.
|
||||
ret['comment'] = (
|
||||
'Failed to get source hash from {0}. This may be a bug. '
|
||||
'If this error persists, please report it and set '
|
||||
'skip_verify to True to work around it.'.format(source_hash)
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
source_sum = {}
|
||||
|
||||
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
|
||||
# Source is a local file path
|
||||
full_path = os.path.realpath(os.path.expanduser(parsed.path))
|
||||
if os.path.exists(full_path):
|
||||
if not skip_verify and source_sum:
|
||||
# Enforce the hash
|
||||
local_hash = __salt__['file.get_hash'](
|
||||
full_path,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
if local_hash == source_sum['hsum']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File {0} is present on the minion and has hash '
|
||||
'{1}'.format(full_path, local_hash)
|
||||
)
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'File {0} is present on the minion, but the hash ({1}) '
|
||||
'does not match the specified hash ({2})'.format(
|
||||
full_path, local_hash, source_sum['hsum']
|
||||
)
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'File {0} is present on the minion'.format(
|
||||
full_path
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
ret['comment'] = 'File {0} is not present on the minion'.format(
|
||||
full_path
|
||||
)
|
||||
return ret
|
||||
|
||||
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
|
||||
|
||||
if local_copy:
|
||||
# File is already cached
|
||||
pre_hash = __salt__['file.get_hash'](
|
||||
local_copy,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
|
||||
if not skip_verify and source_sum:
|
||||
# Get the local copy's hash to compare with the hash that was
|
||||
# specified via source_hash. If it matches, we can exit early from
|
||||
# the state without going any further, because the file is cached
|
||||
# with the correct hash.
|
||||
if pre_hash == source_sum['hsum']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File is already cached to {0} with hash {1}'.format(
|
||||
local_copy, pre_hash
|
||||
)
|
||||
)
|
||||
else:
|
||||
pre_hash = None
|
||||
|
||||
def _try_cache(path, checksum):
|
||||
'''
|
||||
This helper is not needed anymore in develop as the fileclient in the
|
||||
develop branch now has means of skipping a download if the existing
|
||||
hash matches one passed to cp.cache_file. Remove this helper and the
|
||||
code that invokes it, once we have merged forward into develop.
|
||||
'''
|
||||
if not path or not checksum:
|
||||
return True
|
||||
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
|
||||
if form is None:
|
||||
# Shouldn't happen, an invalid checksum length should be caught
|
||||
# before we get here. But in the event this gets through, don't let
|
||||
# it cause any trouble, and just return True.
|
||||
return True
|
||||
try:
|
||||
return salt.utils.get_hash(path, form=form) != checksum
|
||||
except (IOError, OSError, ValueError):
|
||||
# Again, shouldn't happen, but don't let invalid input/permissions
|
||||
# in the call to get_hash blow this up.
|
||||
return True
|
||||
|
||||
# Cache the file. Note that this will not actually download the file if
|
||||
# either of the following is true:
|
||||
# 1. source is a salt:// URL and the fileserver determines that the hash
|
||||
# of the minion's copy matches that of the fileserver.
|
||||
# 2. File is remote (http(s), ftp, etc.) and the specified source_hash
|
||||
# matches the cached copy.
|
||||
# Remote, non salt:// sources _will_ download if a copy of the file was
|
||||
# not already present in the minion cache.
|
||||
if _try_cache(local_copy, source_sum.get('hsum')):
|
||||
# The _try_cache helper is obsolete in the develop branch. Once merged
|
||||
# forward, remove the helper as well as this if statement, and dedent
|
||||
# the below block.
|
||||
try:
|
||||
local_copy = __salt__['cp.cache_file'](
|
||||
name,
|
||||
saltenv=saltenv,
|
||||
source_hash=source_sum.get('hsum'))
|
||||
except Exception as exc:
|
||||
ret['comment'] = exc.__str__()
|
||||
return ret
|
||||
|
||||
if not local_copy:
|
||||
ret['comment'] = (
|
||||
'Failed to cache {0}, check minion log for more '
|
||||
'information'.format(name)
|
||||
)
|
||||
return ret
|
||||
|
||||
post_hash = __salt__['file.get_hash'](
|
||||
local_copy,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
|
||||
if pre_hash != post_hash:
|
||||
ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash}
|
||||
|
||||
# Check the hash, if we're enforcing one. Note that this will be the first
|
||||
# hash check if the file was not previously cached, and the 2nd hash check
|
||||
# if it was cached and the cached copy needed to be updated.
|
||||
if not skip_verify and source_sum:
|
||||
if post_hash == source_sum['hsum']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File is already cached to {0} with hash {1}'.format(
|
||||
local_copy, post_hash
|
||||
)
|
||||
)
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'File is cached to {0}, but the hash ({1}) does not match '
|
||||
'the specified hash ({2})'.format(
|
||||
local_copy, post_hash, source_sum['hsum']
|
||||
)
|
||||
)
|
||||
return ret
|
||||
|
||||
# We're not enforcing a hash, and we already know that the file was
|
||||
# successfully cached, so we know the state was successful.
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'File is cached to {0}'.format(local_copy)
|
||||
return ret
|
||||
|
||||
|
||||
def not_cached(name, saltenv='base'):
|
||||
'''
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Ensures that a file is not present in the minion's cache, deleting it if
found. This state is primarily invoked by other states to force a source
file to be re-downloaded on the next run.
|
||||
|
||||
name
|
||||
The URL of the file to be cached. To cache a file from an environment
|
||||
other than ``base``, either use the ``saltenv`` argument or include the
|
||||
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
|
||||
|
||||
.. note::
|
||||
A list of URLs is not supported; this must be a single URL. If a
|
||||
local file is passed here, the state will take no action.
|
||||
|
||||
saltenv
|
||||
Used to specify the environment from which to download a file from the
|
||||
Salt fileserver (i.e. those with ``salt://`` URL).
|
||||
'''
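    # A minimal sketch of invoking this state from another state module
    # (hypothetical source URL):
    #
    #   result = __states__['file.not_cached']('salt://conf/app.conf',
    #                                          saltenv='base')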
|
||||
ret = {'changes': {},
|
||||
'comment': '',
|
||||
'name': name,
|
||||
'result': False}
|
||||
|
||||
try:
|
||||
parsed = _urlparse(name)
|
||||
except Exception:
|
||||
ret['comment'] = 'Only URLs or local file paths are valid input'
|
||||
return ret
|
||||
else:
|
||||
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
|
||||
full_path = os.path.realpath(os.path.expanduser(parsed.path))
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File {0} is a local path, no action taken'.format(
|
||||
full_path
|
||||
)
|
||||
)
|
||||
return ret
|
||||
|
||||
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
|
||||
|
||||
if local_copy:
|
||||
try:
|
||||
os.remove(local_copy)
|
||||
except Exception as exc:
|
||||
ret['comment'] = 'Failed to delete {0}: {1}'.format(
|
||||
local_copy, exc.__str__()
|
||||
)
|
||||
else:
|
||||
ret['result'] = True
|
||||
ret['changes']['deleted'] = True
|
||||
ret['comment'] = '{0} was deleted'.format(local_copy)
|
||||
else:
|
||||
ret['result'] = True
|
||||
ret['comment'] = '{0} is not cached'.format(name)
|
||||
return ret
|
||||
|
@@ -2,6 +2,8 @@
|
||||
'''
|
||||
Linux File Access Control Lists
|
||||
|
||||
The Linux ACL state module requires the `getfacl` and `setfacl` binaries.
|
||||
|
||||
Ensure a Linux ACL is present
|
||||
|
||||
.. code-block:: yaml
|
||||
@@ -50,7 +52,7 @@ def __virtual__():
|
||||
if salt.utils.path.which('getfacl') and salt.utils.path.which('setfacl'):
|
||||
return __virtualname__
|
||||
|
||||
return False
|
||||
return False, 'The linux_acl state cannot be loaded: the getfacl or setfacl binary is not in the path.'
|
||||
|
||||
|
||||
def present(name, acl_type, acl_name='', perms='', recurse=False):
|
||||
@@ -85,11 +87,12 @@ def present(name, acl_type, acl_name='', perms='', recurse=False):
|
||||
# applied to the user/group that owns the file, e.g.,
|
||||
# default:group::rwx would be listed as default:group:root:rwx
|
||||
# In this case, if acl_name is empty, we really want to search for root
|
||||
# but still uses '' for other
|
||||
|
||||
# We search through the dictionary getfacl returns for the owner of the
|
||||
# file if acl_name is empty.
|
||||
if acl_name == '':
|
||||
_search_name = __current_perms[name].get('comment').get(_acl_type)
|
||||
_search_name = __current_perms[name].get('comment').get(_acl_type, '')
|
||||
else:
|
||||
_search_name = acl_name
|
||||
|
||||
@@ -187,11 +190,12 @@ def absent(name, acl_type, acl_name='', perms='', recurse=False):
|
||||
# applied to the user/group that owns the file, e.g.,
|
||||
# default:group::rwx would be listed as default:group:root:rwx
|
||||
# In this case, if acl_name is empty, we really want to search for root
|
||||
# but still uses '' for other
|
||||
|
||||
# We search through the dictionary getfacl returns for the owner of the
|
||||
# file if acl_name is empty.
|
||||
if acl_name == '':
|
||||
_search_name = __current_perms[name].get('comment').get(_acl_type)
|
||||
_search_name = __current_perms[name].get('comment').get(_acl_type, '')
|
||||
else:
|
||||
_search_name = acl_name
|
||||
|
||||
|
@@ -25,9 +25,6 @@ import logging
|
||||
# Import salt libs
|
||||
import salt.utils.path
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
||||
# Set up logger
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -88,69 +85,127 @@ def present(name,
|
||||
|
||||
# Device exists
|
||||
raids = __salt__['raid.list']()
|
||||
if raids.get(name):
|
||||
ret['comment'] = 'Raid {0} already present'.format(name)
|
||||
return ret
|
||||
present = raids.get(name)
|
||||
|
||||
# Decide whether to create or assemble
|
||||
can_assemble = {}
|
||||
for dev in devices:
|
||||
# mdadm -E exits with 0 iff all devices given are part of an array
|
||||
cmd = 'mdadm -E {0}'.format(dev)
|
||||
can_assemble[dev] = __salt__['cmd.retcode'](cmd) == 0
|
||||
missing = []
|
||||
uuid_dict = {}
|
||||
new_devices = []
|
||||
|
||||
if True in six.itervalues(can_assemble) and False in six.itervalues(can_assemble):
|
||||
in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if x[1]])
|
||||
not_in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if not x[1]])
|
||||
ret['comment'] = 'Devices are a mix of RAID constituents ({0}) and '\
|
||||
'non-RAID-constituents({1}).'.format(in_raid, not_in_raid)
|
||||
for dev in devices:
|
||||
if dev == 'missing' or not __salt__['file.access'](dev, 'f'):
|
||||
missing.append(dev)
|
||||
continue
|
||||
superblock = __salt__['raid.examine'](dev)
|
||||
|
||||
if 'MD_UUID' in superblock:
|
||||
uuid = superblock['MD_UUID']
|
||||
if uuid not in uuid_dict:
|
||||
uuid_dict[uuid] = []
|
||||
uuid_dict[uuid].append(dev)
|
||||
else:
|
||||
new_devices.append(dev)
|
||||
|
||||
if len(uuid_dict) > 1:
|
||||
ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format(
|
||||
sorted(uuid_dict.keys()))
|
||||
ret['result'] = False
|
||||
return ret
|
||||
elif next(six.itervalues(can_assemble)):
|
||||
elif len(uuid_dict) == 1:
|
||||
uuid = list(uuid_dict.keys())[0]
|
||||
if present and present['uuid'] != uuid:
|
||||
ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, present['uuid'])
|
||||
ret['result'] = False
|
||||
return ret
|
||||
|
||||
devices_with_superblock = uuid_dict[uuid]
|
||||
else:
|
||||
devices_with_superblock = []
|
||||
|
||||
if present:
|
||||
do_assemble = False
|
||||
do_create = False
|
||||
elif len(devices_with_superblock) > 0:
|
||||
do_assemble = True
|
||||
do_create = False
|
||||
verb = 'assembled'
|
||||
else:
|
||||
if len(new_devices) == 0:
|
||||
ret['comment'] = 'All devices are missing: {0}.'.format(missing)
|
||||
ret['result'] = False
|
||||
return ret
|
||||
do_assemble = False
|
||||
do_create = True
|
||||
verb = 'created'
|
||||
|
||||
# If running with test use the test_mode with create or assemble
|
||||
if __opts__['test']:
|
||||
if do_assemble:
|
||||
res = __salt__['raid.assemble'](name,
|
||||
devices,
|
||||
devices_with_superblock,
|
||||
test_mode=True,
|
||||
**kwargs)
|
||||
else:
|
||||
elif do_create:
|
||||
res = __salt__['raid.create'](name,
|
||||
level,
|
||||
devices,
|
||||
new_devices + ['missing'] * len(missing),
|
||||
test_mode=True,
|
||||
**kwargs)
|
||||
ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
|
||||
ret['result'] = None
|
||||
|
||||
if present:
|
||||
ret['comment'] = 'Raid {0} already present.'.format(name)
|
||||
|
||||
if do_assemble or do_create:
|
||||
ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
|
||||
ret['result'] = None
|
||||
|
||||
if (do_assemble or present) and len(new_devices) > 0:
|
||||
ret['comment'] += ' New devices will be added: {0}'.format(new_devices)
|
||||
ret['result'] = None
|
||||
|
||||
if len(missing) > 0:
|
||||
ret['comment'] += ' Missing devices: {0}'.format(missing)
|
||||
|
||||
return ret
|
||||
|
||||
# Attempt to create or assemble the array
|
||||
if do_assemble:
|
||||
__salt__['raid.assemble'](name,
|
||||
devices,
|
||||
devices_with_superblock,
|
||||
**kwargs)
|
||||
else:
|
||||
elif do_create:
|
||||
__salt__['raid.create'](name,
|
||||
level,
|
||||
devices,
|
||||
new_devices + ['missing'] * len(missing),
|
||||
**kwargs)
|
||||
|
||||
raids = __salt__['raid.list']()
|
||||
changes = raids.get(name)
|
||||
if changes:
|
||||
ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
|
||||
ret['changes'] = changes
|
||||
# Saving config
|
||||
__salt__['raid.save_config']()
|
||||
if not present:
|
||||
raids = __salt__['raid.list']()
|
||||
changes = raids.get(name)
|
||||
if changes:
|
||||
ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
|
||||
ret['changes'] = changes
|
||||
# Saving config
|
||||
__salt__['raid.save_config']()
|
||||
else:
|
||||
ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
|
||||
ret['result'] = False
|
||||
else:
|
||||
ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Raid {0} already present.'.format(name)
|
||||
|
||||
if (do_assemble or present) and len(new_devices) > 0 and ret['result']:
|
||||
for d in new_devices:
|
||||
res = __salt__['raid.add'](name, d)
|
||||
if not res:
|
||||
ret['comment'] += ' Unable to add {0} to {1}.\n'.format(d, name)
|
||||
ret['result'] = False
|
||||
else:
|
||||
ret['comment'] += ' Added new device {0} to {1}.\n'.format(d, name)
|
||||
if ret['result']:
|
||||
ret['changes']['added'] = new_devices
|
||||
|
||||
if len(missing) > 0:
|
||||
ret['comment'] += ' Missing devices: {0}'.format(missing)
|
||||
|
||||
return ret
|
||||
|
||||
|
@@ -94,6 +94,24 @@ def __virtual__():
|
||||
return 'panos.commit' in __salt__
|
||||
|
||||
|
||||
def _build_members(members, anycheck=False):
|
||||
'''
|
||||
Builds a member formatted string for XML operation.
|
||||
|
||||
'''
|
||||
if isinstance(members, list):
|
||||
|
||||
# This check will strip down members to a single any statement
|
||||
if anycheck and 'any' in members:
|
||||
return "<member>any</member>"
|
||||
response = ""
|
||||
for m in members:
|
||||
response += "<member>{0}</member>".format(m)
|
||||
return response
|
||||
else:
|
||||
return "<member>{0}</member>".format(members)
|
||||
|
||||
|
||||
def _default_ret(name):
|
||||
'''
|
||||
Set the default response values.
|
||||
@@ -109,6 +127,135 @@ def _default_ret(name):
|
||||
return ret
|
||||
|
||||
|
||||
def _edit_config(xpath, element):
|
||||
'''
|
||||
Sends an edit request to the device.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'edit',
|
||||
'xpath': xpath,
|
||||
'element': element}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _get_config(xpath):
|
||||
'''
|
||||
Retrieves an xpath from the device.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': xpath}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def _move_after(xpath, target):
|
||||
'''
|
||||
Moves an xpath to a position after the specified target element in its section.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'after',
|
||||
'dst': target}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _move_before(xpath, target):
|
||||
'''
|
||||
Moves an xpath to a position before the specified target element in its section.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'before',
|
||||
'dst': target}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _move_bottom(xpath):
|
||||
'''
|
||||
Moves an xpath to the bottom of its section.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'bottom'}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _move_top(xpath):
|
||||
'''
|
||||
Moves an xpath to the top of its section.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'top'}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _set_config(xpath, element):
|
||||
'''
|
||||
Sends a set request to the device.
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': xpath,
|
||||
'element': element}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
return _validate_response(response)
|
||||
|
||||
|
||||
def _validate_response(response):
|
||||
'''
|
||||
Validates a response from a Palo Alto device. Used to verify success of commands.
|
||||
|
||||
'''
|
||||
if not response:
|
||||
return False, "Error during move configuration. Verify connectivity to device."
|
||||
elif 'msg' in response:
|
||||
if response['msg'] == 'command succeeded':
|
||||
return True, response['msg']
|
||||
else:
|
||||
return False, response['msg']
|
||||
elif 'line' in response:
|
||||
if response['line'] == 'already at the top':
|
||||
return True, response['line']
|
||||
elif response['line'] == 'already at the bottom':
|
||||
return True, response['line']
|
||||
else:
|
||||
return False, response['line']
|
||||
else:
|
||||
return False, "Error during move configuration. Verify connectivity to device."
|
||||
|
||||
|
||||
def add_config_lock(name):
|
||||
'''
|
||||
Prevent other users from changing configuration until the lock is released.
|
||||
@@ -186,7 +333,7 @@ def clone_config(name, xpath=None, newname=None, commit=False):
|
||||
return ret
|
||||
|
||||
|
||||
def commit(name):
|
||||
def commit_config(name):
|
||||
'''
|
||||
Commits the candidate configuration to the running configuration.
|
||||
|
||||
@@ -197,7 +344,7 @@ def commit(name):
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/commit:
|
||||
panos.commit
|
||||
panos.commit_config
|
||||
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
@@ -338,6 +485,8 @@ def edit_config(name, xpath=None, value=None, commit=False):
|
||||
You can replace an existing object hierarchy at a specified location in the configuration with a new value. Use
|
||||
the xpath parameter to specify the location of the object, including the node to be replaced.
|
||||
|
||||
This is the recommended state to enforce configurations on an xpath.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
xpath(str): The XPATH of the configuration API tree to control.
|
||||
@@ -359,24 +508,17 @@ def edit_config(name, xpath=None, value=None, commit=False):
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not xpath:
|
||||
return ret
|
||||
|
||||
if not value:
|
||||
return ret
|
||||
|
||||
query = {'type': 'config',
|
||||
'action': 'edit',
|
||||
'xpath': xpath,
|
||||
'element': value}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, msg = _edit_config(xpath, value)
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'comment': msg,
|
||||
'result': result
|
||||
})
|
||||
|
||||
# Ensure we do not commit after a failed action
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
@@ -404,7 +546,8 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False):
|
||||
dst(str): Optional. Specifies the destination to utilize for a move action. This is ignored for the top
|
||||
or bottom action.
|
||||
|
||||
commit(bool): If true the firewall will commit the changes, if false do not commit changes.
|
||||
commit(bool): If true the firewall will commit the changes, if false do not commit changes. If the operation is
|
||||
not successful, it will not commit.
|
||||
|
||||
SLS Example:
|
||||
|
||||
@@ -433,35 +576,21 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False):
|
||||
return ret
|
||||
|
||||
if where == 'after':
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'after',
|
||||
'dst': dst}
|
||||
result, msg = _move_after(xpath, dst)
|
||||
elif where == 'before':
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'before',
|
||||
'dst': dst}
|
||||
result, msg = _move_before(xpath, dst)
|
||||
elif where == 'top':
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'top'}
|
||||
result, msg = _move_top(xpath)
|
||||
elif where == 'bottom':
|
||||
query = {'type': 'config',
|
||||
'action': 'move',
|
||||
'xpath': xpath,
|
||||
'where': 'bottom'}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, msg = _move_bottom(xpath)
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'result': result
|
||||
})
|
||||
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
@@ -547,6 +676,350 @@ def rename_config(name, xpath=None, newname=None, commit=False):
|
||||
return ret
|
||||
|
||||
|
||||
def security_rule_exists(name,
|
||||
rulename=None,
|
||||
vsys='1',
|
||||
action=None,
|
||||
disabled=None,
|
||||
sourcezone=None,
|
||||
destinationzone=None,
|
||||
source=None,
|
||||
destination=None,
|
||||
application=None,
|
||||
service=None,
|
||||
description=None,
|
||||
logsetting=None,
|
||||
logstart=None,
|
||||
logend=None,
|
||||
negatesource=None,
|
||||
negatedestination=None,
|
||||
profilegroup=None,
|
||||
datafilter=None,
|
||||
fileblock=None,
|
||||
spyware=None,
|
||||
urlfilter=None,
|
||||
virus=None,
|
||||
vulnerability=None,
|
||||
wildfire=None,
|
||||
move=None,
|
||||
movetarget=None,
|
||||
commit=False):
|
||||
'''
|
||||
Ensures that a security rule exists on the device and that all of its configurations are set appropriately.
|
||||
|
||||
This method will create the rule if it does not exist. If the rule does exist, it will ensure that the
|
||||
configurations are set appropriately.
|
||||
|
||||
If the rule does not exist and is created, any value that is not provided will be set to the system default.
|
||||
The action, to, from, source, destination, application, and service fields are mandatory and must be provided.
|
||||
|
||||
This will enforce the exact match of the rule. For example, if the rule is currently configured with the log-end
|
||||
option, but this option is not specified in the state method, it will be removed and reset to the system default.
|
||||
|
||||
It is strongly recommended to specify all options to ensure proper operation.
|
||||
|
||||
When defining the profile group settings, the device can only support either a profile group or individual settings.
|
||||
If both are specified, the profile group will be preferred and the individual settings are ignored. If neither is
|
||||
specified, the value will be set to the system default of none.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
rulename(str): The name of the security rule. The name is case-sensitive and can have up to 31 characters, which
|
||||
can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on Panorama,
|
||||
unique within its device group and any ancestor or descendant device groups.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
|
||||
|
||||
action(str): The action that the security rule will enforce. Valid options are: allow, deny, drop, reset-client,
|
||||
reset-server, reset-both.
|
||||
|
||||
disabled(bool): Controls if the rule is disabled. Set 'True' to disable and 'False' to enable.
|
||||
|
||||
sourcezone(str, list): The source zone(s). The value 'any' will match all zones.
|
||||
|
||||
destinationzone(str, list): The destination zone(s). The value 'any' will match all zones.
|
||||
|
||||
source(str, list): The source address(es). The value 'any' will match all addresses.
|
||||
|
||||
destination(str, list): The destination address(es). The value 'any' will match all addresses.
|
||||
|
||||
application(str, list): The application(s) matched. The value 'any' will match all applications.
|
||||
|
||||
service(str, list): The service(s) matched. The value 'any' will match all services. The value
|
||||
'application-default' will match based upon the application defined ports.
|
||||
|
||||
description(str): A description for the policy (up to 255 characters).
|
||||
|
||||
logsetting(str): The name of a valid log forwarding profile.
|
||||
|
||||
logstart(bool): Generates a traffic log entry for the start of a session (disabled by default).
|
||||
|
||||
logend(bool): Generates a traffic log entry for the end of a session (enabled by default).
|
||||
|
||||
negatesource(bool): Match all but the specified source addresses.
|
||||
|
||||
negatedestination(bool): Match all but the specified destination addresses.
|
||||
|
||||
profilegroup(str): A valid profile group name.
|
||||
|
||||
datafilter(str): A valid data filter profile name. Ignored with the profilegroup option set.
|
||||
|
||||
fileblock(str): A valid file blocking profile name. Ignored with the profilegroup option set.
|
||||
|
||||
spyware(str): A valid spyware profile name. Ignored with the profilegroup option set.
|
||||
|
||||
urlfilter(str): A valid URL filtering profile name. Ignored with the profilegroup option set.
|
||||
|
||||
virus(str): A valid virus profile name. Ignored with the profilegroup option set.
|
||||
|
||||
vulnerability(str): A valid vulnerability profile name. Ignored with the profilegroup option set.
|
||||
|
||||
wildfire(str): A valid WildFire analysis profile name. Ignored with the profilegroup option set.
|
||||
|
||||
move(str): An optional argument that ensures the rule is moved to a specific location. Valid options are 'top',
|
||||
'bottom', 'before', or 'after'. The 'before' and 'after' options require the use of the 'movetarget' argument
|
||||
to define the location of the move request.
|
||||
|
||||
movetarget(str): An optional argument that defines the target of the move operation if the move argument is
|
||||
set to 'before' or 'after'.
|
||||
|
||||
commit(bool): If true the firewall will commit the changes, if false do not commit changes.
|
||||
|
||||
SLS Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/rulebase/security/rule01:
|
||||
panos.security_rule_exists:
|
||||
- rulename: rule01
|
||||
- vsys: 1
|
||||
- action: allow
|
||||
- disabled: False
|
||||
- sourcezone: untrust
|
||||
- destinationzone: trust
|
||||
- source:
|
||||
- 10.10.10.0/24
|
||||
- 1.1.1.1
|
||||
- destination:
|
||||
- 2.2.2.2-2.2.2.4
|
||||
- application:
|
||||
- any
|
||||
- service:
|
||||
- tcp-25
|
||||
- description: My test security rule
|
||||
- logsetting: logprofile
|
||||
- logstart: False
|
||||
- logend: True
|
||||
- negatesource: False
|
||||
- negatedestination: False
|
||||
- profilegroup: myprofilegroup
|
||||
- move: top
|
||||
- commit: False
|
||||
|
||||
panos/rulebase/security/rule01:
|
||||
panos.security_rule_exists:
|
||||
- rulename: rule01
|
||||
- vsys: 1
|
||||
- action: allow
|
||||
- disabled: False
|
||||
- sourcezone: untrust
|
||||
- destinationzone: trust
|
||||
- source:
|
||||
- 10.10.10.0/24
|
||||
- 1.1.1.1
|
||||
- destination:
|
||||
- 2.2.2.2-2.2.2.4
|
||||
- application:
|
||||
- any
|
||||
- service:
|
||||
- tcp-25
|
||||
- description: My test security rule
|
||||
- logsetting: logprofile
|
||||
- logstart: False
|
||||
- logend: False
|
||||
- datafilter: foobar
|
||||
- fileblock: foobar
|
||||
- spyware: foobar
|
||||
- urlfilter: foobar
|
||||
- virus: foobar
|
||||
- vulnerability: foobar
|
||||
- wildfire: foobar
|
||||
- move: after
|
||||
- movetarget: rule02
|
||||
- commit: False
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not rulename:
|
||||
return ret
|
||||
|
||||
# Check if rule currently exists
|
||||
rule = __salt__['panos.get_security_rule'](rulename, vsys)
|
||||
|
||||
# Build the rule element
|
||||
element = ""
|
||||
if sourcezone:
|
||||
element += "<from>{0}</from>".format(_build_members(sourcezone, True))
|
||||
else:
|
||||
ret.update({'comment': "The sourcezone field must be provided."})
|
||||
return ret
|
||||
|
||||
if destinationzone:
|
||||
element += "<to>{0}</to>".format(_build_members(destinationzone, True))
|
||||
else:
|
||||
ret.update({'comment': "The destinationzone field must be provided."})
|
||||
return ret
|
||||
|
||||
if source:
|
||||
element += "<source>{0}</source>".format(_build_members(source, True))
|
||||
else:
|
||||
ret.update({'comment': "The source field must be provided."})
|
||||
return ret
|
||||
|
||||
if destination:
|
||||
element += "<destination>{0}</destination>".format(_build_members(destination, True))
|
||||
else:
|
||||
ret.update({'comment': "The destination field must be provided."})
|
||||
return ret
|
||||
|
||||
if application:
|
||||
element += "<application>{0}</application>".format(_build_members(application, True))
|
||||
else:
|
||||
ret.update({'comment': "The application field must be provided."})
|
||||
return ret
|
||||
|
||||
if service:
|
||||
element += "<service>{0}</service>".format(_build_members(service, True))
|
||||
else:
|
||||
ret.update({'comment': "The service field must be provided."})
|
||||
return ret
|
||||
|
||||
if action:
|
||||
element += "<action>{0}</action>".format(action)
|
||||
else:
|
||||
ret.update({'comment': "The action field must be provided."})
|
||||
return ret
|
||||
|
||||
if disabled is not None:
|
||||
if disabled:
|
||||
element += "<disabled>yes</disabled>"
|
||||
else:
|
||||
element += "<disabled>no</disabled>"
|
||||
|
||||
if description:
|
||||
element += "<description>{0}</description>".format(description)
|
||||
|
||||
if logsetting:
|
||||
element += "<log-setting>{0}</log-setting>".format(logsetting)
|
||||
|
||||
if logstart is not None:
|
||||
if logstart:
|
||||
element += "<log-start>yes</log-start>"
|
||||
else:
|
||||
element += "<log-start>no</log-start>"
|
||||
|
||||
if logend is not None:
|
||||
if logend:
|
||||
element += "<log-end>yes</log-end>"
|
||||
else:
|
||||
element += "<log-end>no</log-end>"
|
||||
|
||||
if negatesource is not None:
|
||||
if negatesource:
|
||||
element += "<negate-source>yes</negate-source>"
|
||||
else:
|
||||
element += "<negate-source>no</negate-source>"
|
||||
|
||||
if negatedestination is not None:
|
||||
if negatedestination:
|
||||
element += "<negate-destination>yes</negate-destination>"
|
||||
else:
|
||||
element += "<negate-destination>no</negate-destination>"
|
||||
|
||||
# Build the profile settings
|
||||
profile_string = None
|
||||
if profilegroup:
|
||||
profile_string = "<group><member>{0}</member></group>".format(profilegroup)
|
||||
else:
|
||||
member_string = ""
|
||||
if datafilter:
|
||||
member_string += "<data-filtering><member>{0}</member></data-filtering>".format(datafilter)
|
||||
if fileblock:
|
||||
member_string += "<file-blocking><member>{0}</member></file-blocking>".format(fileblock)
|
||||
if spyware:
|
||||
member_string += "<spyware><member>{0}</member></spyware>".format(spyware)
|
||||
if urlfilter:
|
||||
member_string += "<url-filtering><member>{0}</member></url-filtering>".format(urlfilter)
|
||||
if virus:
|
||||
member_string += "<virus><member>{0}</member></virus>".format(virus)
|
||||
if vulnerability:
|
||||
member_string += "<vulnerability><member>{0}</member></vulnerability>".format(vulnerability)
|
||||
if wildfire:
|
||||
member_string += "<wildfire-analysis><member>{0}</member></wildfire-analysis>".format(wildfire)
|
||||
if member_string != "":
|
||||
profile_string = "<profiles>{0}</profiles>".format(member_string)
|
||||
|
||||
if profile_string:
|
||||
element += "<profile-setting>{0}</profile-setting>".format(profile_string)
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(rulename, element)
|
||||
|
||||
create_rule = False
|
||||
|
||||
if 'result' in rule:
|
||||
if rule['result'] == "None":
|
||||
create_rule = True
|
||||
|
||||
if create_rule:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules".format(vsys)
|
||||
|
||||
result, msg = _set_config(xpath, full_element)
|
||||
if not result:
|
||||
ret['changes']['set'] = msg
|
||||
return ret
|
||||
else:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
if not result:
|
||||
ret['changes']['edit'] = msg
|
||||
return ret
|
||||
|
||||
if move:
|
||||
movepath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
|
||||
move_result = False
|
||||
move_msg = ''
|
||||
if move == "before" and movetarget:
|
||||
move_result, move_msg = _move_before(movepath, movetarget)
|
||||
elif move == "after":
|
||||
move_result, move_msg = _move_after(movepath, movetarget)
|
||||
elif move == "top":
|
||||
move_result, move_msg = _move_top(movepath)
|
||||
elif move == "bottom":
|
||||
move_result, move_msg = _move_bottom(movepath)
|
||||
|
||||
if not move_result:
|
||||
ret['changes']['move'] = move_msg
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Security rule verified successfully.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'comment': 'Security rule verified successfully.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def set_config(name, xpath=None, value=None, commit=False):
|
||||
'''
|
||||
Sets a Palo Alto XPATH to a specific value. This will always overwrite the existing value, even if it is not
|
||||
@@ -576,24 +1049,17 @@ def set_config(name, xpath=None, value=None, commit=False):
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not xpath:
|
||||
return ret
|
||||
|
||||
if not value:
|
||||
return ret
|
||||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': xpath,
|
||||
'element': value}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, msg = _set_config(xpath, value)
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'comment': msg,
|
||||
'result': result
|
||||
})
|
||||
|
||||
# Ensure we do not commit after a failed action
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
|
@@ -310,17 +310,27 @@ def module_remove(name):
|
||||
|
||||
def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_level=None):
|
||||
'''
|
||||
Makes sure a SELinux policy for a given filespec (name),
|
||||
filetype and SELinux context type is present.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
sel_type: SELinux context type. There are many.
|
||||
filetype: The SELinux filetype specification.
|
||||
Use one of [a, f, d, c, b, s, l, p].
|
||||
See also `man semanage-fcontext`.
|
||||
Defaults to 'a' (all files)
|
||||
sel_user: The SELinux user.
|
||||
sel_level: The SELinux MLS range
|
||||
Makes sure a SELinux policy for a given filespec (name), filetype
|
||||
and SELinux context type is present.
|
||||
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
sel_type
|
||||
SELinux context type. There are many.
|
||||
|
||||
filetype
|
||||
The SELinux filetype specification. Use one of [a, f, d, c, b,
|
||||
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
|
||||
(all files).
|
||||
|
||||
sel_user
|
||||
The SELinux user.
|
||||
|
||||
sel_level
|
||||
The SELinux MLS range.
|
||||
'''
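    # A minimal SLS sketch (hypothetical filespec and context type):
    #
    #   /srv/www(/.*)?:
    #     selinux.fcontext_policy_present:
    #       - sel_type: httpd_sys_content_t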
|
||||
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
|
||||
new_state = {}
|
||||
@@ -383,17 +393,27 @@ def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_lev
|
||||
|
||||
def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel_level=None):
|
||||
'''
|
||||
Makes sure an SELinux file context policy for a given filespec (name),
|
||||
filetype and SELinux context type is absent.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
name: filespec of the file or directory. Regex syntax is allowed.
|
||||
filetype: The SELinux filetype specification.
|
||||
Use one of [a, f, d, c, b, s, l, p].
|
||||
See also `man semanage-fcontext`.
|
||||
Defaults to 'a' (all files).
|
||||
sel_type: The SELinux context type. There are many.
|
||||
sel_user: The SELinux user.
|
||||
sel_level: The SELinux MLS range
|
||||
Makes sure an SELinux file context policy for a given filespec
|
||||
(name), filetype and SELinux context type is absent.
|
||||
|
||||
name
|
||||
filespec of the file or directory. Regex syntax is allowed.
|
||||
|
||||
filetype
|
||||
The SELinux filetype specification. Use one of [a, f, d, c, b,
|
||||
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
|
||||
(all files).
|
||||
|
||||
sel_type
|
||||
The SELinux context type. There are many.
|
||||
|
||||
sel_user
|
||||
The SELinux user.
|
||||
|
||||
sel_level
|
||||
The SELinux MLS range.
|
||||
'''
|
||||
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
|
||||
new_state = {}
|
||||
@@ -433,7 +453,10 @@ def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel
|
||||
|
||||
def fcontext_policy_applied(name, recursive=False):
|
||||
'''
|
||||
Checks and makes sure the SELinux policies for a given filespec are applied.
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Checks and makes sure the SELinux policies for a given filespec are
|
||||
applied.
|
||||
'''
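    # A minimal SLS sketch (hypothetical filespec):
    #
    #   /srv/www:
    #     selinux.fcontext_policy_applied:
    #       - recursive: True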
|
||||
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
|
||||
|
||||
|