Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 17:09:03 +00:00)

Commit f8c428f034: Merge branch 'develop' into bugs/43003_group_syndic_develop
.github/CODEOWNERS (vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
# SALTSTACK CODE OWNERS

# See https://help.github.com/articles/about-codeowners/
# for more info about CODEOWNERS file

# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

# See https://help.github.com/articles/about-codeowners/
# for more info about the CODEOWNERS file

# Team Boto
salt/**/*boto* @saltstack/team-boto

# Team Core
salt/auth/ @saltstack/team-core
salt/cache/ @saltstack/team-core
salt/cli/ @saltstack/team-core
salt/client/* @saltstack/team-core
salt/config/* @saltstack/team-core
salt/daemons/ @saltstack/team-core
salt/pillar/ @saltstack/team-core
salt/loader.py @saltstack/team-core
salt/payload.py @saltstack/team-core
salt/**/master* @saltstack/team-core
salt/**/minion* @saltstack/team-core

# Team Cloud
salt/cloud/ @saltstack/team-cloud
salt/utils/openstack/ @saltstack/team-cloud
salt/utils/aws.py @saltstack/team-cloud
salt/**/*cloud* @saltstack/team-cloud

# Team NetAPI
salt/cli/api.py @saltstack/team-netapi
salt/client/netapi.py @saltstack/team-netapi
salt/netapi/ @saltstack/team-netapi

# Team Network
salt/proxy/ @saltstack/team-proxy

# Team SPM
salt/cli/spm.py @saltstack/team-spm
salt/spm/ @saltstack/team-spm

# Team SSH
salt/cli/ssh.py @saltstack/team-ssh
salt/client/ssh/ @saltstack/team-ssh
salt/runners/ssh.py @saltstack/team-ssh
salt/**/thin.py @saltstack/team-ssh

# Team State
salt/state.py @saltstack/team-state

# Team Transport
salt/transport/ @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport

# Team Windows
salt/**/*win* @saltstack/team-windows
.github/stale.yml (vendored, 4 lines changed)
@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
# 1115 is approximately 3 years and 1 month
daysUntilStale: 1115
# 1000 is approximately 2 years and 9 months
daysUntilStale: 1000

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
@ -4,9 +4,14 @@
|
||||
"name": "ryan-lane",
|
||||
"files": ["salt/**/*boto*.py"],
|
||||
"skipTeamPrs": false
|
||||
},
|
||||
{
|
||||
"name": "tkwilliams",
|
||||
"files": ["salt/**/*boto*.py"],
|
||||
"skipTeamPrs": false
|
||||
}
|
||||
],
|
||||
"skipTitle": "Merge forward",
|
||||
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"]
|
||||
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh", "basepi"]
|
||||
}
|
||||
|
||||
|
README.rst (12 lines changed)
@@ -67,10 +67,11 @@ Engage SaltStack
|
||||
|
||||
`SaltConf`_, **User Groups and Meetups** - SaltStack has a vibrant and `global
|
||||
community`_ of customers, users, developers and enthusiasts. Connect with other
|
||||
Salted folks in your area of the world, or join `SaltConf16`_, the SaltStack
|
||||
annual user conference, April 19-21 in Salt Lake City. Please let us know if
|
||||
you would like to start a user group or if we should add your existing
|
||||
SaltStack user group to this list by emailing: info@saltstack.com
|
||||
Salted folks in your area of the world, or join `SaltConf`_, the SaltStack
|
||||
annual user conference held in Salt Lake City. Please visit the `SaltConf`_ site
|
||||
for details of our next conference. Also, please let us know if you would like
|
||||
to start a user group or if we should add your existing SaltStack user group to
|
||||
this list by emailing: info@saltstack.com
|
||||
|
||||
**SaltStack Training** - Get access to proprietary `SaltStack education
|
||||
offerings`_ through instructor-led training offered on-site, virtually or at
|
||||
@ -89,9 +90,8 @@ services`_ offerings.
|
||||
* LinkedIn Group - `<https://www.linkedin.com/groups/4877160>`_
|
||||
* Google+ - `<https://plus.google.com/b/112856352920437801867/+SaltStackInc/posts>`_
|
||||
|
||||
.. _SaltConf: http://www.youtube.com/user/saltstack
|
||||
.. _global community: http://www.meetup.com/pro/saltstack/
|
||||
.. _SaltConf16: http://saltconf.com/
|
||||
.. _SaltConf: http://saltconf.com/
|
||||
.. _SaltStack education offerings: http://saltstack.com/training/
|
||||
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
|
||||
.. _SaltStack professional services: http://saltstack.com/services/
|
||||
|
@ -3,7 +3,7 @@
|
||||
# directory is identical.
|
||||
|
||||
#my-digitalocean-config:
|
||||
# driver: digital_ocean
|
||||
# driver: digitalocean
|
||||
# client_key: wFGEwgregeqw3435gDger
|
||||
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
# location: New York 1
|
||||
|
@ -1,5 +1,5 @@
|
||||
#my-digitalocean-config:
|
||||
# driver: digital_ocean
|
||||
# driver: digitalocean
|
||||
# client_key: wFGEwgregeqw3435gDger
|
||||
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
# location: New York 1
|
||||
|
conf/master (12 lines changed)
@@ -59,15 +59,14 @@
|
||||
|
||||
# Directory for custom modules. This directory can contain subdirectories for
|
||||
# each of Salt's module types such as "runners", "output", "wheel", "modules",
|
||||
# "states", "returners", etc.
|
||||
#extension_modules: <no default>
|
||||
# "states", "returners", "engines", "utils", etc.
|
||||
#extension_modules: /var/cache/salt/master/extmods
|
||||
|
||||
# Directory for custom modules. This directory can contain subdirectories for
|
||||
# each of Salt's module types such as "runners", "output", "wheel", "modules",
|
||||
# "states", "returners", "engines", etc.
|
||||
# "states", "returners", "engines", "utils", etc.
|
||||
# Like 'extension_modules' but can take an array of paths
|
||||
#module_dirs: <no default>
|
||||
# - /var/cache/salt/minion/extmods
|
||||
#module_dirs: []
|
||||
|
||||
# Verify and set permissions on configuration directories at startup:
|
||||
#verify_env: True
|
||||
@ -533,6 +532,9 @@
|
||||
# Add any additional locations to look for master runners:
|
||||
#runner_dirs: []
|
||||
|
||||
# Add any additional locations to look for master utils:
|
||||
#utils_dirs: []
|
||||
|
||||
# Enable Cython for master side modules:
|
||||
#cython_enable: False
|
||||
|
||||
|
@ -373,7 +373,7 @@
|
||||
# interface: eth0
|
||||
# cidr: '10.0.0.0/8'
|
||||
|
||||
# The number of seconds a mine update runs.
|
||||
# The number of minutes between mine updates.
|
||||
#mine_interval: 60
|
||||
|
||||
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
|
||||
@ -689,6 +689,12 @@
|
||||
# for a full explanation.
|
||||
#multiprocessing: True
|
||||
|
||||
# Limit the maximum amount of processes or threads created by salt-minion.
|
||||
# This is useful to avoid resource exhaustion in case the minion receives more
|
||||
# publications than it is able to handle, as it limits the number of spawned
|
||||
# processes or threads. -1 is the default and disables the limit.
|
||||
#process_count_max: -1
|
||||
|
||||
|
||||
##### Logging settings #####
|
||||
##########################################
|
||||
|
@ -10795,6 +10795,7 @@ cmd_whitelist_glob:
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.SS Thread Settings
|
||||
.SS \fBmultiprocessing\fP
|
||||
.sp
|
||||
Default: \fBTrue\fP
|
||||
.sp
|
||||
|
@ -22,6 +22,7 @@ beacon modules
|
||||
load
|
||||
log
|
||||
memusage
|
||||
napalm_beacon
|
||||
network_info
|
||||
network_settings
|
||||
pkg
|
||||
|
doc/ref/beacons/all/salt.beacons.napalm_beacon.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
==========================
|
||||
salt.beacons.napalm_beacon
|
||||
==========================
|
||||
|
||||
.. automodule:: salt.beacons.napalm_beacon
|
||||
:members:
|
@ -136,7 +136,7 @@ Query Options
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
Display a list of configured profiles. Pass in a cloud provider to view
|
||||
the provider's associated profiles, such as ``digital_ocean``, or pass in
|
||||
the provider's associated profiles, such as ``digitalocean``, or pass in
|
||||
``all`` to list all the configured profiles.
|
||||
|
||||
|
||||
|
@ -39,6 +39,13 @@ specified target expression.
|
||||
destination will be assumed to be a directory. Finally, recursion is now
|
||||
supported, allowing for entire directories to be copied.
|
||||
|
||||
.. versionchanged:: 2016.11.7,2017.7.2
|
||||
Reverted back to the old copy mode to preserve backward compatibility. The
|
||||
new functionality added in 2016.6.6 and 2017.7.0 is now available using the
|
||||
``-C`` or ``--chunked`` CLI arguments. Note that compression, recursive
|
||||
copying, and support for copying large files is only available in chunked
|
||||
mode.
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
@ -56,9 +63,16 @@ Options
|
||||
.. include:: _includes/target-selection.rst
|
||||
|
||||
|
||||
.. option:: -C, --chunked
|
||||
|
||||
Use new chunked mode to copy files. This mode supports large files, recursive
|
||||
directories copying and compression.
|
||||
|
||||
.. versionadded:: 2016.11.7,2017.7.2
|
||||
|
||||
.. option:: -n, --no-compression
|
||||
|
||||
Disable gzip compression.
|
||||
Disable gzip compression in chunked mode.
|
||||
|
||||
.. versionadded:: 2016.3.7,2016.11.6,2017.7.0
|
||||
|
||||
|
@ -13,7 +13,7 @@ Full list of Salt Cloud modules
|
||||
aliyun
|
||||
azurearm
|
||||
cloudstack
|
||||
digital_ocean
|
||||
digitalocean
|
||||
dimensiondata
|
||||
ec2
|
||||
gce
|
||||
|
@ -1,6 +1,6 @@
|
||||
===============================
|
||||
salt.cloud.clouds.digital_ocean
|
||||
salt.cloud.clouds.digitalocean
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.cloud.clouds.digital_ocean
|
||||
.. automodule:: salt.cloud.clouds.digitalocean
|
||||
:members:
|
@ -183,8 +183,8 @@ The directory to store the pki authentication keys.
|
||||
|
||||
Directory for custom modules. This directory can contain subdirectories for
|
||||
each of Salt's module types such as ``runners``, ``output``, ``wheel``,
|
||||
``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to
|
||||
:conf_master:`root_dir`.
|
||||
``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc.
|
||||
This path is appended to :conf_master:`root_dir`.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -234,6 +234,7 @@ Valid options:
|
||||
- clouds
|
||||
- tops
|
||||
- roster
|
||||
- tokens
|
||||
|
||||
.. conf_master:: module_dirs
|
||||
|
||||
@ -1730,6 +1731,22 @@ Set additional directories to search for runner modules.
|
||||
runner_dirs:
|
||||
- /var/lib/salt/runners
|
||||
|
||||
.. conf_master:: utils_dirs
|
||||
|
||||
``utils_dirs``
|
||||
---------------
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
Set additional directories to search for util modules.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
utils_dirs:
|
||||
- /var/lib/salt/utils
|
||||
|
||||
.. conf_master:: cython_enable
|
||||
|
||||
``cython_enable``
|
||||
@ -3770,7 +3787,7 @@ they were created by a different master.
|
||||
Default: ``True``
|
||||
|
||||
Normally, when processing :ref:`git_pillar remotes
|
||||
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
|
||||
<git-pillar-configuration>`, if more than one repo under the same ``git``
|
||||
section in the ``ext_pillar`` configuration refers to the same pillar
|
||||
environment, then each repo in a given environment will have access to the
|
||||
other repos' files to be referenced in their top files. However, it may be
|
||||
@ -4158,7 +4175,9 @@ information.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor: []
|
||||
reactor:
|
||||
- 'salt/minion/*/start':
|
||||
- salt://reactor/startup_tasks.sls
|
||||
|
||||
.. conf_master:: reactor_refresh_interval
|
||||
|
||||
|
@ -706,7 +706,7 @@ Note these can be defined in the pillar for a minion as well.
|
||||
|
||||
Default: ``60``
|
||||
|
||||
The number of seconds a mine update runs.
|
||||
The number of minutes between mine updates.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -2113,6 +2113,41 @@ It will be interpreted as megabytes.
|
||||
|
||||
file_recv_max_size: 100
|
||||
|
||||
.. conf_minion:: pass_to_ext_pillars
|
||||
|
||||
``pass_to_ext_pillars``
|
||||
-----------------------
|
||||
|
||||
Specify a list of configuration keys whose values are to be passed to
|
||||
external pillar functions.
|
||||
|
||||
Suboptions can be specified using the ':' notation (i.e. ``option:suboption``)
|
||||
|
||||
The values are merged and included in the ``extra_minion_data`` optional
|
||||
parameter of the external pillar function. The ``extra_minion_data`` parameter
|
||||
is passed only to the external pillar functions that have it explicitly
|
||||
specified in their definition.
|
||||
|
||||
If the config contains
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
opt1: value1
|
||||
opt2:
|
||||
subopt1: value2
|
||||
subopt2: value3
|
||||
|
||||
pass_to_ext_pillars:
|
||||
- opt1
|
||||
- opt2: subopt1
|
||||
|
||||
the ``extra_minion_data`` parameter will be
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{'opt1': 'value1',
|
||||
'opt2': {'subopt1': 'value2'}}
|
||||
|
||||
Security Settings
|
||||
=================
|
||||
|
||||
@ -2369,11 +2404,14 @@ Thread Settings
|
||||
|
||||
.. conf_minion:: multiprocessing
|
||||
|
||||
``multiprocessing``
|
||||
-------------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
If `multiprocessing` is enabled when a minion receives a
|
||||
If ``multiprocessing`` is enabled when a minion receives a
|
||||
publication a new process is spawned and the command is executed therein.
|
||||
Conversely, if `multiprocessing` is disabled the new publication will be run
|
||||
Conversely, if ``multiprocessing`` is disabled the new publication will be run
|
||||
executed in a thread.
|
||||
|
||||
|
||||
@ -2381,6 +2419,23 @@ executed in a thread.
|
||||
|
||||
multiprocessing: True
|
||||
|
||||
.. conf_minion:: process_count_max
|
||||
|
||||
``process_count_max``
|
||||
---------------------
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Limit the maximum amount of processes or threads created by ``salt-minion``.
|
||||
This is useful to avoid resource exhaustion in case the minion receives more
|
||||
publications than it is able to handle, as it limits the number of spawned
|
||||
processes or threads. ``-1`` is the default and disables the limit.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
process_count_max: -1
|
||||
|
||||
.. _minion-logging-settings:
|
||||
|
||||
|
@ -118,3 +118,53 @@ has to be closed after every command.
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy_always_alive: False
|
||||
|
||||
``proxy_merge_pillar_in_opts``
|
||||
------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Whether the pillar data is to be merged into the proxy configuration options.
|
||||
As multiple proxies can run on the same server, we may need different
|
||||
configuration options for each, while there's one single configuration file.
|
||||
The solution is merging the pillar data of each proxy minion into the opts.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy_merge_pillar_in_opts: True
|
||||
|
||||
``proxy_deep_merge_pillar_in_opts``
|
||||
-----------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Deep merge of pillar data into configuration opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
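For illustration only (this example is not part of the changeset), enabling the
deep merge together with the option it depends on would look like this in the
proxy configuration:

.. code-block:: yaml

    proxy_merge_pillar_in_opts: True
    proxy_deep_merge_pillar_in_opts: True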
|
||||
|
||||
``proxy_merge_pillar_in_opts_strategy``
|
||||
---------------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``smart``.
|
||||
|
||||
The strategy used when merging pillar configuration into opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
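As a sketch only (assuming the option accepts the merge strategy names used
elsewhere in Salt, such as ``recurse``), overriding the default would look
like:

.. code-block:: yaml

    proxy_merge_pillar_in_opts: True
    proxy_merge_pillar_in_opts_strategy: recurse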
|
||||
|
||||
``proxy_mines_pillar``
|
||||
----------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``True``.
|
||||
|
||||
Allow enabling mine details using pillar data. This evaluates the mine
|
||||
configuration under the pillar, for the following regular minion options that
|
||||
are also equally available on the proxy minion: :conf_minion:`mine_interval`,
|
||||
and :conf_minion:`mine_functions`.
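A minimal sketch of the pillar-driven mine configuration this option enables
(the function and interval below are placeholders, not taken from this
changeset):

.. code-block:: yaml

    # pillar assigned to the proxy minion
    mine_functions:
      test.ping: []
    mine_interval: 30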
|
||||
|
@ -44,6 +44,7 @@ execution modules
|
||||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch
|
||||
boto_cloudwatch_event
|
||||
@ -326,6 +327,7 @@ execution modules
|
||||
ps
|
||||
publish
|
||||
puppet
|
||||
purefa
|
||||
pushbullet
|
||||
pushover_notify
|
||||
pw_group
|
||||
@ -417,6 +419,7 @@ execution modules
|
||||
test
|
||||
testinframod
|
||||
test_virtual
|
||||
textfsm_mod
|
||||
timezone
|
||||
tls
|
||||
tomcat
|
||||
|
doc/ref/modules/all/salt.modules.boto_cloudfront.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
============================
|
||||
salt.modules.boto_cloudfront
|
||||
============================
|
||||
|
||||
.. automodule:: salt.modules.boto_cloudfront
|
||||
:members:
|
@ -13,7 +13,7 @@ salt.modules.kernelpkg
|
||||
Execution Module Used for
|
||||
============================================ ========================================
|
||||
:py:mod:`~salt.modules.kernelpkg_linux_apt` Debian/Ubuntu-based distros which use
|
||||
``apt-get(8)`` for package management
|
||||
``apt-get`` for package management
|
||||
:py:mod:`~salt.modules.kernelpkg_linux_yum` RedHat-based distros and derivatives
|
||||
using ``yum(8)`` or ``dnf(8)``
|
||||
using ``yum`` or ``dnf``
|
||||
============================================ ========================================
|
||||
|
doc/ref/modules/all/salt.modules.purefa.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
===================
|
||||
salt.modules.purefa
|
||||
===================
|
||||
|
||||
.. automodule:: salt.modules.purefa
|
||||
:members:
|
doc/ref/modules/all/salt.modules.textfsm_mod.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
salt.modules.textfsm_mod module
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.modules.textfsm_mod
|
||||
:members:
|
@ -451,7 +451,7 @@ For example:
|
||||
'''
|
||||
Only load if git exists on the system
|
||||
'''
|
||||
if salt.utils.which('git') is None:
|
||||
if salt.utils.path.which('git') is None:
|
||||
return (False,
|
||||
'The git execution module cannot be loaded: git unavailable.')
|
||||
else:
|
||||
|
@ -25,6 +25,9 @@ configuration:
|
||||
- web*:
|
||||
- test.*
|
||||
- pkg.*
|
||||
# Allow managers to use saltutil module functions
|
||||
manager_.*:
|
||||
- saltutil.*
|
||||
|
||||
Permission Issues
|
||||
-----------------
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.auth module
|
||||
========================
|
||||
salt.runners.auth
|
||||
=================
|
||||
|
||||
.. automodule:: salt.runners.auth
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.digicertapi module
|
||||
===============================
|
||||
salt.runners.digicertapi
|
||||
========================
|
||||
|
||||
.. automodule:: salt.runners.digicertapi
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.event module
|
||||
=========================
|
||||
salt.runners.event
|
||||
==================
|
||||
|
||||
.. automodule:: salt.runners.event
|
||||
:members:
|
||||
|
@ -1,5 +1,11 @@
|
||||
salt.runners.mattermost module
|
||||
==============================
|
||||
salt.runners.mattermost
|
||||
=======================
|
||||
|
||||
**Note for 2017.7 releases!**
|
||||
|
||||
Due to the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module not being available in this release series, importing the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module from the develop branch is required to make this module work.
|
||||
|
||||
Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 <https://github.com/saltstack/salt/issues/43479>`_
|
||||
|
||||
.. automodule:: salt.runners.mattermost
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.smartos_vmadm module
|
||||
=================================
|
||||
salt.runners.smartos_vmadm
|
||||
==========================
|
||||
|
||||
.. automodule:: salt.runners.smartos_vmadm
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.vault module
|
||||
=========================
|
||||
salt.runners.vault
|
||||
==================
|
||||
|
||||
.. automodule:: salt.runners.vault
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.venafiapi module
|
||||
=============================
|
||||
salt.runners.venafiapi
|
||||
======================
|
||||
|
||||
.. automodule:: salt.runners.venafiapi
|
||||
:members:
|
||||
|
@ -1,5 +1,5 @@
|
||||
salt.runners.vistara module
|
||||
===========================
|
||||
salt.runners.vistara
|
||||
====================
|
||||
|
||||
.. automodule:: salt.runners.vistara
|
||||
:members:
|
||||
|
@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f
|
||||
for chunk in chunks:
|
||||
# The state runtime uses "tags" to track completed jobs, it may
|
||||
# look familiar with the _|-
|
||||
tag = salt.utils.gen_state_tag(chunk)
|
||||
tag = __utils__['state.gen_tag'](chunk)
|
||||
if tag in running:
|
||||
# Already ran the pkg state, skip aggregation
|
||||
continue
|
||||
|
@ -31,6 +31,7 @@ state modules
|
||||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch_alarm
|
||||
boto_cloudwatch_event
|
||||
@ -179,6 +180,7 @@ state modules
|
||||
netusers
|
||||
network
|
||||
netyang
|
||||
nfs_export
|
||||
nftables
|
||||
npm
|
||||
ntp
|
||||
|
doc/ref/states/all/salt.states.boto_cloudfront.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
===========================
|
||||
salt.states.boto_cloudfront
|
||||
===========================
|
||||
|
||||
.. automodule:: salt.states.boto_cloudfront
|
||||
:members:
|
doc/ref/states/all/salt.states.nfs_export.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
======================
|
||||
salt.states.nfs_export
|
||||
======================
|
||||
|
||||
.. automodule:: salt.states.nfs_export
|
||||
:members:
|
@ -153,7 +153,12 @@ A State Module must return a dict containing the following keys/values:
|
||||
However, if a state is going to fail and this can be determined
|
||||
in test mode without applying the change, ``False`` can be returned.
|
||||
|
||||
- **comment:** A string containing a summary of the result.
|
||||
- **comment:** A list of strings or a single string summarizing the result.
|
||||
Note that support for lists of strings is available as of Salt Oxygen.
|
||||
Lists of strings will be joined with newlines to form the final comment;
|
||||
this is useful to allow multiple comments from subparts of a state.
|
||||
Prefer to keep line lengths short (use multiple lines as needed),
|
||||
and end with punctuation (e.g. a period) to delimit multiple comments.
|
||||
|
||||
The return data can also include the **pchanges** key; this stands for
|
||||
`predictive changes`. The **pchanges** key informs the State system what
|
||||
|
@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``:
|
||||
|
||||
.. note::
|
||||
You can have only one top level ``reactor`` section, so if one already
|
||||
exists, add this code to the existing section. See :ref:`Understanding the
|
||||
Structure of Reactor Formulas <reactor-structure>` to learn more about
|
||||
reactor SLS syntax.
|
||||
exists, add this code to the existing section. See :ref:`here
|
||||
<reactor-sls>` to learn more about reactor SLS syntax.
|
||||
|
||||
|
||||
Start the Salt Master in Debug Mode
|
||||
|
@ -15,9 +15,7 @@ More information about Azure is located at `http://www.windowsazure.com/
|
||||
|
||||
Dependencies
|
||||
============
|
||||
* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc6
|
||||
* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
|
||||
* The python-requests library, for Python < 2.7.9.
|
||||
* Azure Cli ```pip install 'azure-cli>=2.0.12'```
|
||||
* A Microsoft Azure account
|
||||
* `Salt <https://github.com/saltstack/salt>`_
|
||||
|
||||
|
@ -183,7 +183,7 @@ imports should be absent from the Salt Cloud module.
|
||||
|
||||
A good example of a non-libcloud driver is the DigitalOcean driver:
|
||||
|
||||
https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digital_ocean.py
|
||||
https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digitalocean.py
|
||||
|
||||
The ``create()`` Function
|
||||
-------------------------
|
||||
|
@ -444,7 +444,7 @@ under the API Access tab.
|
||||
.. code-block:: yaml
|
||||
|
||||
my-digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
personal_access_token: xxx
|
||||
location: New York 1
|
||||
|
||||
|
@ -19,7 +19,7 @@ under the "SSH Keys" section.
|
||||
# /etc/salt/cloud.providers.d/ directory.
|
||||
|
||||
my-digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
personal_access_token: xxx
|
||||
ssh_key_file: /path/to/ssh/key/file
|
||||
ssh_key_names: my-key-name,my-key-name-2
|
||||
@ -63,7 +63,7 @@ command:
|
||||
# salt-cloud --list-locations my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
Amsterdam 1:
|
||||
----------
|
||||
@ -87,7 +87,7 @@ command:
|
||||
# salt-cloud --list-sizes my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
512MB:
|
||||
----------
|
||||
@ -117,7 +117,7 @@ command:
|
||||
# salt-cloud --list-images my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
10.1:
|
||||
----------
|
||||
@ -142,7 +142,7 @@ Profile Specifics:
|
||||
ssh_username
|
||||
------------
|
||||
|
||||
If using a FreeBSD image from Digital Ocean, you'll need to set the ``ssh_username``
|
||||
If using a FreeBSD image from DigitalOcean, you'll need to set the ``ssh_username``
|
||||
setting to ``freebsd`` in your profile configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
@ -260,6 +260,21 @@ The Salt development team will back-port bug fixes made to ``develop`` to the
|
||||
current release branch if the contributor cannot create the pull request
|
||||
against that branch.
|
||||
|
||||
Release Branches
|
||||
----------------
|
||||
|
||||
For each release, a branch will be created when the SaltStack release team is
|
||||
ready to tag. The release branch is created from the parent branch and will be
|
||||
the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
|
||||
branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
|
||||
release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
|
||||
strategy will allow for more stability when there is a need for a re-tag during
|
||||
the testing phase of the release process.
|
||||
|
||||
Once the release branch is created, the fixes required for a given release, as
|
||||
determined by the SaltStack release team, will be added to this branch. All
|
||||
commits in this branch will be merged forward into the parent branch as well.
|
||||
|
||||
Keeping Salt Forks in Sync
|
||||
==========================
|
||||
|
||||
|
@ -219,7 +219,7 @@ the default cloud provider configuration file for DigitalOcean looks like this:
|
||||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: ''
|
||||
api_key: ''
|
||||
location: New York 1
|
||||
@ -230,7 +230,7 @@ must be provided:
|
||||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: wFGEwgregeqw3435gDger
|
||||
api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
location: New York 1
|
||||
|
@ -541,7 +541,7 @@ provider configuration file in the integration test file directory located at
|
||||
``tests/integration/files/conf/cloud.*.d/``.
|
||||
|
||||
The following is an example of the default profile configuration file for Digital
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf``:
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digitalocean.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -557,12 +557,12 @@ be provided by the user by editing the provider configuration file before runnin
|
||||
tests.
|
||||
|
||||
The following is an example of the default provider configuration file for Digital
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digital_ocean.conf``:
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digitalocean.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: ''
|
||||
api_key: ''
|
||||
location: New York 1
|
||||
|
@ -93,6 +93,26 @@ By user, by minion:
|
||||
<minion compound target>:
|
||||
- <regex to match function>
|
||||
|
||||
By user, by runner/wheel:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
external_auth:
|
||||
<eauth backend>:
|
||||
<user or group%>:
|
||||
<@runner or @wheel>:
|
||||
- <regex to match function>
|
||||
|
||||
By user, by runner+wheel module:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
external_auth:
|
||||
<eauth backend>:
|
||||
<user or group%>:
|
||||
<@module_name>:
|
||||
- <regex to match function without module_name>
|
||||
|
||||
Groups
|
||||
------
|
||||
|
||||
@ -127,6 +147,14 @@ Positional arguments or keyword arguments to functions can also be whitelisted.
|
||||
kwargs:
|
||||
'kwa': 'kwa.*'
|
||||
'kwb': 'kwb'
|
||||
- '@runner':
|
||||
- 'runner_mod.*':
|
||||
args:
|
||||
- 'a.*'
|
||||
- 'b.*'
|
||||
kwargs:
|
||||
'kwa': 'kwa.*'
|
||||
'kwb': 'kwb'
|
||||
|
||||
The rules:
|
||||
|
||||
|
@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
|
||||
port: 5959
|
||||
proto: tcp
|
||||
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines_dirs:
|
||||
- /home/bob/engines
|
||||
|
||||
Writing an Engine
|
||||
=================
|
||||
|
@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described
|
||||
Installation from the Official SaltStack Repository
|
||||
===================================================
|
||||
|
||||
Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the
|
||||
Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the
|
||||
Official SaltStack repository.
|
||||
|
||||
Instructions are at https://repo.saltstack.com/#debian.
|
||||
|
@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and
|
||||
other systems about operations.
|
||||
|
||||
The event system fires events with a very specific criteria. Every event has a
|
||||
:strong:`tag`. Event tags allow for fast top level filtering of events. In
|
||||
addition to the tag, each event has a data structure. This data structure is a
|
||||
dict, which contains information about the event.
|
||||
**tag**. Event tags allow for fast top-level filtering of events. In addition
|
||||
to the tag, each event has a data structure. This data structure is a
|
||||
dictionary, which contains information about the event.
|
||||
|
||||
.. _reactor-mapping-events:
|
||||
|
||||
@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run.
|
||||
the :ref:`querystring syntax <querystring-syntax>` (e.g.
|
||||
``salt://reactor/mycustom.sls?saltenv=reactor``).
|
||||
|
||||
Reactor sls files are similar to state and pillar sls files. They are
|
||||
by default yaml + Jinja templates and are passed familiar context variables.
|
||||
Reactor SLS files are similar to State and Pillar SLS files. They are by
|
||||
default YAML + Jinja templates and are passed familiar context variables.
|
||||
Click :ref:`here <reactor-jinja-context>` for more detailed information on the
|
||||
variables availble in Jinja templating.
|
||||
|
||||
They differ because of the addition of the ``tag`` and ``data`` variables.
|
||||
|
||||
- The ``tag`` variable is just the tag in the fired event.
|
||||
- The ``data`` variable is the event's data dict.
|
||||
|
||||
Here is a simple reactor sls:
|
||||
Here is the SLS for a simple reaction:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor
|
||||
system. The only difference is that the data is matched up to the salt command
|
||||
API and the runner system. In this example, a command is published to the
|
||||
``mysql1`` minion with a function of :py:func:`state.apply
|
||||
<salt.modules.state.apply_>`. Similarly, a runner can be called:
|
||||
<salt.modules.state.apply_>`, which performs a :ref:`highstate
|
||||
<running-highstate>`. Similarly, a runner can be called:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{% if data['data']['custom_var'] == 'runit' %}
|
||||
call_runit_orch:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.runit
|
||||
- args:
|
||||
- mods: orchestrate.runit
|
||||
{% endif %}
|
||||
|
||||
This example will execute the state.orchestrate runner and initiate an execution
|
||||
of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using
|
||||
``_orch/`` is an arbitrary path, but it is recommended to avoid using "orchestrate"
|
||||
as this is most likely to cause confusion.
|
||||
of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
|
||||
|
||||
Writing SLS Files
|
||||
-----------------
|
||||
Types of Reactions
|
||||
==================
|
||||
|
||||
Reactor SLS files are stored in the same location as State SLS files. This means
|
||||
that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are
|
||||
available to the reactor and orchestrator.
|
||||
============================== ==================================================================================
|
||||
Name Description
|
||||
============================== ==================================================================================
|
||||
:ref:`local <reactor-local>` Runs a :ref:`remote-execution function <all-salt.modules>` on targeted minions
|
||||
:ref:`runner <reactor-runner>` Executes a :ref:`runner function <all-salt.runners>`
|
||||
:ref:`wheel <reactor-wheel>` Executes a :ref:`wheel function <all-salt.wheel>` on the master
|
||||
:ref:`caller <reactor-caller>` Runs a :ref:`remote-execution function <all-salt.modules>` on a masterless minion
|
||||
============================== ==================================================================================
|
||||
|
||||
It is recommended to keep reactor and orchestrator SLS files in their own uniquely
|
||||
named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``,
|
||||
``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to
|
||||
read through this a few years down the road.
|
||||
.. note::
|
||||
The ``local`` and ``caller`` reaction types will be renamed for the Oxygen
|
||||
release. These reaction types were named after Salt's internal client
|
||||
interfaces, and are not intuitively named. Both ``local`` and ``caller``
|
||||
will continue to work in Reactor SLS files, but for the Oxygen release the
|
||||
documentation will be updated to reflect the new preferred naming.
|
||||
|
||||
The Goal of Writing Reactor SLS Files
|
||||
=====================================
|
||||
Where to Put Reactor SLS Files
|
||||
==============================
|
||||
|
||||
Reactor SLS files share the familiar syntax from Salt States but there are
|
||||
important differences. The goal of a Reactor file is to process a Salt event as
|
||||
quickly as possible and then to optionally start a **new** process in response.
|
||||
Reactor SLS files can come both from files local to the master, and from any of
|
||||
backends enabled via the :conf_master:`fileserver_backend` config option. Files
|
||||
placed in the Salt fileserver can be referenced using a ``salt://`` URL, just
|
||||
like they can in State SLS files.
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. The event tag is matched against the list of event tags under the
|
||||
``reactor`` section in the Salt Master config.
|
||||
3. The SLS files for any matches are Rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
It is recommended to place reactor and orchestrator SLS files in their own
|
||||
uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``,
|
||||
``reactor/``, etc., to keep them organized.
|
||||
|
||||
.. _reactor-sls:
|
||||
|
||||
Writing Reactor SLS
|
||||
===================
|
||||
|
||||
The different reaction types were developed separately and have historically
|
||||
had different methods for passing arguments. For the 2017.7.2 release a new,
|
||||
unified configuration schema has been introduced, which applies to all reaction
|
||||
types.
|
||||
|
||||
The old config schema will continue to be supported, and there is no plan to
|
||||
deprecate it at this time.
|
||||
|
||||
.. _reactor-local:
|
||||
|
||||
Local Reactions
|
||||
---------------
|
||||
|
||||
A ``local`` reaction runs a :ref:`remote-execution function <all-salt.modules>`
|
||||
on the targeted minions.
|
||||
|
||||
The old config schema required the positional and keyword arguments to be
|
||||
manually separated by the user under ``arg`` and ``kwarg`` parameters. However,
|
||||
this is not very user-friendly, as it forces the user to distinguish which type
|
||||
of argument is which, and make sure that positional arguments are ordered
|
||||
properly. Therefore, the new config schema is recommended if the master is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+-----------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+=============================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| install_zsh: | install_zsh: |
|
||||
| local.state.single: | local.state.single: |
|
||||
| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' |
|
||||
| - tgt_type: grain | - tgt_type: grain |
|
||||
| - args: | - arg: |
|
||||
| - fun: pkg.installed | - pkg.installed |
|
||||
| - name: zsh | - zsh |
|
||||
| - fromrepo: updates | - kwarg: |
|
||||
| | fromrepo: updates |
|
||||
+---------------------------------+-----------------------------+
|
||||
|
||||
This reaction would be equvalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates
|
||||
|
||||
.. note::
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd_async()
|
||||
<salt.client.LocalClient.cmd_async>` method can be passed at the same
|
||||
indentation level as ``tgt``.
|
||||
|
||||
.. note::
|
||||
``tgt_type`` is only required when the target expression defined in ``tgt``
|
||||
uses a :ref:`target type <targeting>` other than a minion ID glob.
|
||||
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0.
|
||||
|
||||
.. _reactor-runner:
|
||||
|
||||
Runner Reactions
|
||||
----------------
|
||||
|
||||
Runner reactions execute :ref:`runner functions <all-salt.runners>` locally on
|
||||
the master.
|
||||
|
||||
The old config schema called for passing arguments to the reaction directly
|
||||
under the name of the runner function. However, this can cause unpredictable
|
||||
interactions with the Reactor system's internal arguments. It is also possible
|
||||
to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above
|
||||
in :ref:`local reactions <reactor-local>`, but as noted above this is not very
|
||||
user-friendly. Therefore, the new config schema is recommended if the master
|
||||
is running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================================+=================================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| deploy_app: | deploy_app: |
|
||||
| runner.state.orchestrate: | runner.state.orchestrate: |
|
||||
| - args: | - mods: orchestrate.deploy_app |
|
||||
| - mods: orchestrate.deploy_app | - kwarg: |
|
||||
| - pillar: | pillar: |
|
||||
| event_tag: {{ tag }} | event_tag: {{ tag }} |
|
||||
| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} |
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
|
||||
Assuming that the event tag is ``foo``, and the data passed to the event is
|
||||
``{'bar': 'baz'}``, then this reaction is equivalent to running the following
|
||||
Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}'
|
||||
|
||||
.. _reactor-wheel:
|
||||
|
||||
Wheel Reactions
|
||||
---------------
|
||||
|
||||
Wheel reactions run :ref:`wheel functions <all-salt.wheel>` locally on the
|
||||
master.
|
||||
|
||||
Like :ref:`runner reactions <reactor-runner>`, the old config schema called for
|
||||
wheel reactions to have arguments passed directly under the name of the
|
||||
:ref:`wheel function <all-salt.wheel>` (or in ``arg`` or ``kwarg`` parameters).
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-----------------------------------+---------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+===================================+=================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| remove_key: | remove_key: |
|
||||
| wheel.key.delete: | wheel.key.delete: |
|
||||
| - args: | - match: {{ data['id'] }} |
|
||||
| - match: {{ data['id'] }} | |
|
||||
+-----------------------------------+---------------------------------+
|
||||
|
||||
.. _reactor-caller:
|
||||
|
||||
Caller Reactions
|
||||
----------------
|
||||
|
||||
Caller reactions run :ref:`remote-execution functions <all-salt.modules>` on a
|
||||
minion daemon's Reactor system. To run a Reactor on the minion, it is necessary
|
||||
to configure the :mod:`Reactor Engine <salt.engines.reactor>` in the minion
|
||||
config file, and then setup your watched events in a ``reactor`` section in the
|
||||
minion config file as well.
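A minimal sketch of that minion-side setup (the file name and event tag are
illustrative; the reactor engine also accepts tuning options not shown here):

.. code-block:: yaml

    # /etc/salt/minion.d/reactor.conf
    engines:
      - reactor: {}

    reactor:
      - 'myco/custom/event':
        - salt://reactor/some_event.sls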
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Both the old and new config schemas involve passing arguments under an ``args``
|
||||
parameter. However, the old config schema only supports positional arguments.
|
||||
Therefore, the new config schema is recommended if the masterless minion is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+---------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+===========================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| touch_file: | touch_file: |
|
||||
| caller.file.touch: | caller.file.touch: |
|
||||
| - args: | - args: |
|
||||
| - name: /tmp/foo | - /tmp/foo |
|
||||
+---------------------------------+---------------------------+
|
||||
|
||||
This reaction is equivalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call file.touch name=/tmp/foo
|
||||
|
||||
Best Practices for Writing Reactor SLS Files
|
||||
============================================
|
||||
|
||||
The Reactor works as follows:
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. Each event's tag is matched against the list of event tags configured under
|
||||
the :conf_master:`reactor` section in the Salt Master config.
|
||||
3. The SLS files for any matches are rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
|
||||
Matching and rendering Reactor SLS files is done sequentially in a single
|
||||
process. Complex Jinja that calls out to slow Execution or Runner modules slows
|
||||
down the rendering and causes other reactions to pile up behind the current
|
||||
one. The worker pool is designed to handle complex and long-running processes
|
||||
such as Salt Orchestrate.
|
||||
process. For that reason, reactor SLS files should contain few individual
|
||||
reactions (one, if at all possible). Also, keep in mind that reactions are
|
||||
fired asynchronously (with the exception of :ref:`caller <reactor-caller>`) and
|
||||
do *not* support :ref:`requisites <requisites>`.
|
||||
|
||||
tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process
|
||||
started by the worker threads can be long-running. Using the reactor to fire
|
||||
an orchestrate runner would be ideal.
|
||||
Complex Jinja templating that calls out to slow :ref:`remote-execution
|
||||
<all-salt.modules>` or :ref:`runner <all-salt.runners>` functions slows down
|
||||
the rendering and causes other reactions to pile up behind the current one. The
|
||||
worker pool is designed to handle complex and long-running processes like
|
||||
:ref:`orchestration <orchestrate-runner>` jobs.
|
||||
|
||||
Therefore, when complex tasks are in order, :ref:`orchestration
|
||||
<orchestrate-runner>` is a natural fit. Orchestration SLS files can be more
|
||||
complex, and use requisites. Performing a complex task using orchestration lets
|
||||
the Reactor system fire off the orchestration job and proceed with processing
|
||||
other reactions.
|
||||
|
||||
.. _reactor-jinja-context:
|
||||
|
||||
Jinja Context
|
||||
-------------
|
||||
=============
|
||||
|
||||
Reactor files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are not available. The ``salt`` object is available for calling
|
||||
Runner and Execution modules but it should be used sparingly and only for quick
|
||||
tasks for the reasons mentioned above.
|
||||
Reactor SLS files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are *not* available. The ``salt`` object is available for calling
|
||||
:ref:`remote-execution <all-salt.modules>` or :ref:`runner <all-salt.runners>`
|
||||
functions, but it should be used sparingly and only for quick tasks for the
|
||||
reasons mentioned above.
|
||||
|
||||
In addition to the ``salt`` object, the following variables are available in
|
||||
the Jinja context:
|
||||
|
||||
- ``tag`` - the tag from the event that triggered execution of the Reactor SLS
|
||||
file
|
||||
- ``data`` - the event's data dictionary
|
||||
|
||||
The ``data`` dict will contain an ``id`` key containing the minion ID, if the
|
||||
event was fired from a minion, and a ``data`` key containing the data passed to
|
||||
the event.
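For example, a hypothetical event sent from minion ``minion01`` with
``salt-call event.send foo '{orchestrate: refresh}'`` would reach the Reactor
SLS roughly as follows (shown as YAML for readability; the real envelope also
carries additional keys such as a timestamp):

.. code-block:: yaml

    tag: foo
    data:
      id: minion01
      data:
        orchestrate: refresh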
|
||||
|
||||
Advanced State System Capabilities
|
||||
----------------------------------
|
||||
==================================
|
||||
|
||||
Reactor SLS files, by design, do not support Requisites, ordering,
|
||||
``onlyif``/``unless`` conditionals and most other powerful constructs from
|
||||
Salt's State system.
|
||||
Reactor SLS files, by design, do not support :ref:`requisites <requisites>`,
|
||||
ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs
|
||||
from Salt's State system.
|
||||
|
||||
Complex Master-side operations are best performed by Salt's Orchestrate system
|
||||
so using the Reactor to kick off an Orchestrate run is a very common pairing.
|
||||
@ -166,7 +370,7 @@ For example:
|
||||
# /etc/salt/master.d/reactor.conf
|
||||
# A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"}
|
||||
reactor:
|
||||
- myco/custom/event:
|
||||
- my/custom/event:
|
||||
- /srv/reactor/some_event.sls
|
||||
|
||||
.. code-block:: jinja
|
||||
@ -174,15 +378,15 @@ For example:
|
||||
# /srv/reactor/some_event.sls
|
||||
invoke_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls
|
||||
- kwarg:
|
||||
pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json() }}
|
||||
- args:
|
||||
- mods: orchestrate.do_complex_thing
|
||||
- pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json }}
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
# /srv/salt/_orch/do_complex_thing.sls
|
||||
# /srv/salt/orchestrate/do_complex_thing.sls
|
||||
{% set tag = salt.pillar.get('event_tag') %}
|
||||
{% set data = salt.pillar.get('event_data') %}
|
||||
|
||||
@ -209,7 +413,7 @@ For example:
|
||||
.. _beacons-and-reactors:
|
||||
|
||||
Beacons and Reactors
|
||||
--------------------
|
||||
====================
|
||||
|
||||
An event initiated by a beacon, when it arrives at the master will be wrapped
|
||||
inside a second event, such that the data object containing the beacon
|
||||
@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file,
|
||||
you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{
|
||||
data['id'] }}`` as for events initiated directly on the event bus.
|
||||
|
||||
Similarly, the data dictionary attached to the event would be located in
|
||||
``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``.
|
||||
|
||||
See the :ref:`beacon documentation <beacon-example>` for examples.
|
||||
|
||||
Fire an event
|
||||
=============
|
||||
Manually Firing an Event
|
||||
========================
|
||||
|
||||
To fire an event from a minion call ``event.send``
|
||||
From the Master
|
||||
---------------
|
||||
|
||||
Use the :py:func:`event.send <salt.runners.event.send>` runner:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.send 'foo' '{orchestrate: refresh}'
|
||||
salt-run event.send foo '{orchestrate: refresh}'
|
||||
|
||||
After this is called, any reactor sls files matching event tag ``foo`` will
|
||||
execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``.
|
||||
From the Minion
|
||||
---------------
|
||||
|
||||
See :py:mod:`salt.modules.event` for more information.
|
||||
To fire an event to the master from a minion, call :py:func:`event.send
|
||||
<salt.modules.event.send>`:
|
||||
|
||||
Knowing what event is being fired
|
||||
=================================
|
||||
.. code-block:: bash
|
||||
|
||||
The best way to see exactly what events are fired and what data is available in
|
||||
each event is to use the :py:func:`state.event runner
|
||||
salt-call event.send foo '{orchestrate: refresh}'
|
||||
|
||||
To fire an event to the minion's local event bus, call :py:func:`event.fire
|
||||
<salt.modules.event.fire>`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.fire '{orchestrate: refresh}' foo
|
||||
|
||||
Referencing Data Passed in Events
|
||||
---------------------------------
|
||||
|
||||
Assuming any of the above examples, any reactor SLS files triggered by watching
|
||||
the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}``
|
||||
equal to ``'refresh'``.
|
||||
|
||||
Getting Information About Events
|
||||
================================
|
||||
|
||||
The best way to see exactly what events have been fired and what data is
|
||||
available in each event is to use the :py:func:`state.event runner
|
||||
<salt.runners.state.event>`.
|
||||
|
||||
.. seealso:: :ref:`Common Salt Events <event-master_events>`
|
||||
@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file).
|
||||
view the result of referencing Jinja variables. If the result is empty then
|
||||
Jinja produced an empty result and the Reactor will ignore it.
|
||||
|
||||
.. _reactor-structure:
|
||||
Passing Event Data to Minions or Orchestration as Pillar
|
||||
--------------------------------------------------------
|
||||
|
||||
Understanding the Structure of Reactor Formulas
|
||||
===============================================
|
||||
|
||||
**I.e., when to use `arg` and `kwarg` and when to specify the function
|
||||
arguments directly.**
|
||||
|
||||
While the reactor system uses the same basic data structure as the state
|
||||
system, the functions that will be called using that data structure are
|
||||
different functions than are called via Salt's state system. The Reactor can
|
||||
call Runner modules using the `runner` prefix, Wheel modules using the `wheel`
|
||||
prefix, and can also cause minions to run Execution modules using the `local`
|
||||
prefix.
|
||||
|
||||
.. versionchanged:: 2014.7.0
|
||||
The ``cmd`` prefix was renamed to ``local`` for consistency with other
|
||||
parts of Salt. A backward-compatible alias was added for ``cmd``.
|
||||
|
||||
The Reactor runs on the master and calls functions that exist on the master. In
|
||||
the case of Runner and Wheel functions the Reactor can just call those
|
||||
functions directly since they exist on the master and are run on the master.
|
||||
|
||||
In the case of functions that exist on minions and are run on minions, the
|
||||
Reactor still needs to call a function on the master in order to send the
|
||||
necessary data to the minion so the minion can execute that function.
|
||||
|
||||
The Reactor calls functions exposed in :ref:`Salt's Python API documentation
|
||||
<client-apis>`, and thus the structure of Reactor files very transparently
|
||||
reflects the function signatures of those functions.
|
||||
|
||||
Calling Execution modules on Minions
|
||||
------------------------------------
|
||||
|
||||
The Reactor sends commands down to minions in the exact same way Salt's CLI
|
||||
interface does. It calls a function locally on the master that sends the name
|
||||
of the function as well as a list of any arguments and a dictionary of any
|
||||
keyword arguments that the minion should use to execute that function.
|
||||
|
||||
Specifically, the Reactor calls the async version of :py:meth:`this function
|
||||
<salt.client.LocalClient.cmd>`. You can see that function has 'arg' and 'kwarg'
|
||||
parameters which are both values that are sent down to the minion.
|
||||
|
||||
Executing remote commands maps to the :strong:`LocalClient` interface which is
|
||||
used by the :strong:`salt` command. This interface more specifically maps to
|
||||
the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This
|
||||
means that the arguments passed are being passed to the :strong:`cmd_async`
|
||||
method, not the remote method. A declaration that starts with :strong:`local`
uses the :strong:`LocalClient` subsystem. As a result, a reactor formula to
execute a remote command would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: '*'
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
The ``arg`` option takes a list of arguments as they would be presented on the
|
||||
command line, so the above declaration is the same as running this salt
|
||||
command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' cmd.run 'rm -rf /tmp/*'
|
||||
|
||||
Use the ``tgt_type`` argument to specify a matcher:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'os:Ubuntu'
|
||||
- tgt_type: grain
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'G@roles:hbase_master'
|
||||
- tgt_type: compound
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
.. note::
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0 (2016.11.x and earlier).
|
||||
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd()
|
||||
<salt.client.LocalClient.cmd>` method can be specified as well.
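
For example, keyword arguments for the remote function go under ``kwarg``, and
other parameters such as ``ret`` can sit alongside ``tgt`` and ``arg``. The
state ID and returner name below are purely illustrative:

.. code-block:: yaml

    clean_tmp_and_report:
      local.cmd.run:
        - tgt: '*'
        - arg:
          - rm -rf /tmp/*
        - kwarg:
            cwd: /
        - ret: my_returner  # hypothetical returner name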
|
||||
|
||||
Executing Reactors from the Minion
|
||||
----------------------------------
|
||||
|
||||
The minion can be set up to use the Reactor via a reactor engine. This simply
sets up a listener on the minion's event bus, instead of on the master's.

The biggest difference is that you have to use the ``caller`` method on the
Reactor, which is the equivalent of salt-call, to run your commands.
|
||||
|
||||
:mod:`Reactor Engine setup <salt.engines.reactor>`
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
caller.cmd.run:
|
||||
- arg:
|
||||
- rm -rf /tmp/*
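
A hedged sketch of the minion configuration that could wire this up follows;
the event tag and SLS path are only illustrative, and the full set of engine
options is covered in the Reactor Engine setup documentation linked above:

.. code-block:: yaml

    engines:
      - reactor: {}

    reactor:
      - 'myapp/clean':
        - /srv/reactor/clean_tmp.sls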
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Calling Runner modules and Wheel modules
|
||||
----------------------------------------
|
||||
|
||||
Calling Runner modules and Wheel modules from the Reactor uses a more direct
|
||||
syntax since the function is being executed locally instead of sending a
|
||||
command to a remote system to be executed there. There are no 'arg' or 'kwarg'
|
||||
parameters (unless the Runner function or Wheel function accepts a parameter
|
||||
with either of those names).
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clear_the_grains_cache_for_all_minions:
|
||||
runner.cache.clear_grains
|
||||
|
||||
If :py:func:`the runner takes arguments <salt.runners.cloud.profile>`, then
|
||||
they must be specified as keyword arguments.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spin_up_more_web_machines:
|
||||
runner.cloud.profile:
|
||||
- prof: centos_6
|
||||
- instances:
|
||||
- web11 # These VM names would be generated via Jinja in a
|
||||
- web12 # real-world example.
|
||||
|
||||
To determine the proper names for the arguments, check the documentation
|
||||
or source code for the runner function you wish to call.
|
||||
|
||||
Passing event data to Minions or Orchestrate as Pillar
|
||||
------------------------------------------------------
|
||||
|
||||
An interesting trick to pass data from the Reactor script to
|
||||
An interesting trick to pass data from the Reactor SLS file to
|
||||
:py:func:`state.apply <salt.modules.state.apply_>` is to pass it as inline
|
||||
Pillar data since both functions take a keyword argument named ``pillar``.
|
||||
|
||||
@ -484,10 +567,9 @@ from the event to the state file via inline Pillar.
|
||||
add_new_minion_to_pool:
|
||||
local.state.apply:
|
||||
- tgt: 'haproxy*'
|
||||
- arg:
|
||||
- haproxy.refresh_pool
|
||||
- kwarg:
|
||||
pillar:
|
||||
- args:
|
||||
- mods: haproxy.refresh_pool
|
||||
- pillar:
|
||||
new_minion: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
@ -503,17 +585,16 @@ This works with Orchestrate files as well:
|
||||
|
||||
call_some_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
- args:
|
||||
- mods: orchestrate.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
|
||||
Which is equivalent to the following command at the CLI:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls.
|
||||
salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
Finally, that data is available in the state file using the normal Pillar
|
||||
lookup syntax. The following example is grabbing web server names and IP
|
||||
@ -564,7 +645,7 @@ includes the minion id, which we can use for matching.
|
||||
- 'salt/minion/ink*/start':
|
||||
- /srv/reactor/auth-complete.sls
|
||||
|
||||
In this sls file, we say that if the key was rejected we will delete the key on
|
||||
In this SLS file, we say that if the key was rejected we will delete the key on
|
||||
the master and then also tell the master to ssh in to the minion and tell it to
|
||||
restart the minion, since a minion process will die if the key is rejected.
|
||||
|
||||
@ -580,19 +661,21 @@ authentication every ten seconds by default.
|
||||
{% if not data['result'] and data['id'].startswith('ink') %}
|
||||
minion_remove:
|
||||
wheel.key.delete:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
minion_rejoin:
|
||||
local.cmd.run:
|
||||
- tgt: salt-master.domain.tld
|
||||
- arg:
|
||||
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
- args:
|
||||
- cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
{% endif %}
|
||||
|
||||
{# Ink server is sending new key -- accept this key #}
|
||||
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %}
|
||||
minion_add:
|
||||
wheel.key.accept:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
No if statements are needed here because we already limited this action to just
|
||||
|
@ -106,7 +106,7 @@ bringing with it the ability to access authenticated repositories.
|
||||
|
||||
Using the new features will require updates to the git ext_pillar
|
||||
configuration, further details can be found in the :ref:`pillar.git_pillar
|
||||
<git-pillar-2015-8-0-and-later>` docs.
|
||||
<git-pillar-configuration>` docs.
|
||||
|
||||
.. _pygit2: https://github.com/libgit2/pygit2
|
||||
|
||||
|
1719
doc/topics/releases/2016.11.8.rst
Normal file
1719
doc/topics/releases/2016.11.8.rst
Normal file
File diff suppressed because it is too large
Load Diff
@ -4,23 +4,12 @@ Salt 2016.3.7 Release Notes
|
||||
|
||||
Version 2016.3.7 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the master.
|
||||
Changes for v2016.3.6..v2016.3.7
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
Security Fix
|
||||
============
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
||||
|
29
doc/topics/releases/2016.3.8.rst
Normal file
29
doc/topics/releases/2016.3.8.rst
Normal file
@ -0,0 +1,29 @@
|
||||
===========================
|
||||
Salt 2016.3.8 Release Notes
|
||||
===========================
|
||||
|
||||
Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
Changes for v2016.3.7..v2016.3.8
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the master.
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
File diff suppressed because it is too large
Load Diff
@ -1110,15 +1110,8 @@ Using Git as an External Pillar Source
|
||||
The git external pillar (a.k.a. git_pillar) has been rewritten for the 2015.8.0
|
||||
release. This rewrite brings with it pygit2_ support (allowing for access to
|
||||
authenticated repositories), as well as more granular support for per-remote
|
||||
configuration.
|
||||
|
||||
To make use of the new features, changes to the git ext_pillar configuration
|
||||
must be made. The new configuration schema is detailed :ref:`here
|
||||
<git-pillar-2015-8-0-and-later>`.
|
||||
|
||||
For Salt releases before 2015.8.0, click :ref:`here <git-pillar-pre-2015-8-0>`
|
||||
for documentation.
|
||||
|
||||
configuration. This configuration schema is detailed :ref:`here
|
||||
<git-pillar-configuration>`.
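
For instance, a minimal sketch of a post-2015.8.0 ``ext_pillar`` entry; the
repository URL is hypothetical:

.. code-block:: yaml

    ext_pillar:
      - git:
        - master https://github.com/example/pillar.git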
|
||||
|
||||
.. _faq-gitfs-bug:
|
||||
|
||||
|
@ -13,7 +13,7 @@ Using Apache Libcloud for declarative and procedural multi-cloud orchestration
|
||||
|
||||
Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows
|
||||
you to manage different cloud resources through a unified and easy to use API. Apache Libcloud supports over
|
||||
60 cloud platforms, including Amazon, Microsoft Azure, Digital Ocean, Google Cloud Platform and OpenStack.
|
||||
60 cloud platforms, including Amazon, Microsoft Azure, DigitalOcean, Google Cloud Platform and OpenStack.
|
||||
|
||||
Execution and state modules are available for Compute, DNS, Storage and Load Balancer drivers from Apache Libcloud in
|
||||
SaltStack.
|
||||
|
@ -81,14 +81,18 @@ the ``foo`` utility module with a ``__virtual__`` function.
|
||||
def bar():
|
||||
return 'baz'
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
Instantiating objects from classes declared in util modules works with
|
||||
Master side modules, such as Runners, Outputters, etc.
|
||||
|
||||
You can also write your utility modules in an object-oriented fashion:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
My utils module
|
||||
---------------
|
||||
My OOP-style utils module
|
||||
-------------------------
|
||||
|
||||
This module contains common functions for use in my other custom types.
|
||||
'''
|
||||
|
@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
|
||||
:param bool allusers: This parameter is specific to `.msi` installations. It
|
||||
tells `msiexec` to install the software for all users. The default is True.
|
||||
|
||||
:param bool cache_dir: If true, the entire directory where the installer resides
|
||||
will be recursively cached. This is useful for installers that depend on
|
||||
other files in the same directory for installation.
|
||||
:param bool cache_dir: If true when installer URL begins with salt://, the
|
||||
entire directory where the installer resides will be recursively cached.
|
||||
This is useful for installers that depend on other files in the same
|
||||
directory for installation.
|
||||
|
||||
.. note:: Only applies to salt: installer URLs.
|
||||
:param str cache_file:
|
||||
When installer URL begins with salt://, this indicates single file to copy
|
||||
down for use with the installer. Copied to the same location as the
|
||||
installer. Use this over ``cache_dir`` if there are many files in the
|
||||
directory and you only need a specific file and don't want to cache
|
||||
additional files that may reside in the installer directory.
|
||||
|
||||
Here's an example for a software package that has dependent files:
|
||||
|
||||
|
@ -15,91 +15,119 @@
|
||||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Post install started on:" > /tmp/postinstall.txt
|
||||
date >> /tmp/postinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Check for existing minion config, copy if it doesn't exist
|
||||
###############################################################################
|
||||
if [ ! -f /etc/salt/minion ]; then
|
||||
echo "Config copy: Started..." >> /tmp/postinstall.txt
|
||||
cp /etc/salt/minion.dist /etc/salt/minion
|
||||
echo "Config copy: Successful" >> /tmp/postinstall.txt
|
||||
if [ ! -f "$CONFIG_DIR/minion" ]; then
|
||||
echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt"
|
||||
cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion"
|
||||
echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Create symlink to salt-config.sh
|
||||
###############################################################################
|
||||
# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/usr/local/sbin" ]; then
|
||||
mkdir /usr/local/sbin
|
||||
if [ ! -d "$SBIN_DIR" ]; then
|
||||
echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir "$SBIN_DIR"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config
|
||||
echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt"
|
||||
ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Add salt to paths.d
|
||||
###############################################################################
|
||||
# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/etc/paths.d" ]; then
|
||||
echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir /etc/paths.d
|
||||
echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt'
|
||||
sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt'
|
||||
echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt"
|
||||
sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt"
|
||||
sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt"
|
||||
echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Register Salt as a service
|
||||
###############################################################################
|
||||
setup_services_maverick() {
|
||||
echo "Using old (< 10.10) launchctl interface" >> /tmp/postinstall.txt
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop running service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi;
|
||||
echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service start: Successful" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
setup_services_yosemite_and_later() {
|
||||
echo "Using new (>= 10.10) launchctl interface" >> /tmp/postinstall.txt
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl enable system/com.saltstack.salt.minion
|
||||
echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service is running" >> /tmp/postinstall.txt
|
||||
echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt"
|
||||
else
|
||||
echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl kickstart -kp system/com.saltstack.salt.minion
|
||||
echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Service start: Successful" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
|
||||
echo "Service start: Enabling service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
setup_services_maverick;
|
||||
@ -108,7 +136,9 @@ case $MINOR in
|
||||
setup_services_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Post install completed successfully" >> /tmp/postinstall.txt
|
||||
echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
@ -6,7 +6,8 @@
|
||||
# Date: December 2015
|
||||
#
|
||||
# Description: This script stops the salt minion service before attempting to
|
||||
# install Salt on macOS
|
||||
# install Salt on macOS. It also removes the /opt/salt/bin
|
||||
# directory, symlink to salt-config, and salt from paths.d.
|
||||
#
|
||||
# Requirements:
|
||||
# - None
|
||||
@ -15,12 +16,29 @@
|
||||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > /tmp/preinstall.txt
|
||||
date >> /tmp/preinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
@ -31,24 +49,58 @@ MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Stop the service
|
||||
###############################################################################
|
||||
stop_service_maverick() {
|
||||
echo "Using old (< 10.10) launchctl interface" >> /tmp/preinstall.txt
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop service: Started..." >> /tmp/preinstall.txt
|
||||
echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Stop service: Successful" >> /tmp/preinstall.txt
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service_yosemite_and_later() {
|
||||
echo "Using new (>= 10.10) launchctl interface" >> /tmp/preinstall.txt
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop service: Started..." >> /tmp/preinstall.txt
|
||||
echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.minion
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Stop service: Successful" >> /tmp/preinstall.txt
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
stop_service_maverick;
|
||||
@ -57,6 +109,36 @@ case $MINOR in
|
||||
stop_service_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Remove the Symlink to salt-config.sh
|
||||
###############################################################################
|
||||
if [ -L "$SBIN_DIR/salt-config" ]; then
|
||||
echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "$SBIN_DIR/salt-config"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Remove the $INSTALL_DIR directory
|
||||
###############################################################################
|
||||
if [ -d "$INSTALL_DIR" ]; then
|
||||
echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm -rf "$INSTALL_DIR"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Remove the salt from the paths.d
|
||||
###############################################################################
|
||||
if [ -f "/etc/paths.d/salt" ]; then
|
||||
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "/etc/paths.d/salt"
|
||||
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
@ -35,8 +35,9 @@ _salt_get_keys(){
|
||||
}
|
||||
|
||||
_salt(){
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:-"$HOME/.cache/salt-comp-cache_functions"}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:-"last hour"}
|
||||
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
|
||||
|
||||
if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then
|
||||
mkdir -p "$(dirname ${_salt_cache_functions})"
|
||||
|
@ -89,7 +89,7 @@ if Defined x (
|
||||
if %Python%==2 (
|
||||
Set "PyDir=C:\Python27"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
)
|
||||
Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts"
|
||||
|
||||
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
|
||||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower())))
|
||||
|
||||
#==============================================================================
|
||||
# Update PIP and SetupTools
|
||||
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
|
||||
@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
|
||||
|
||||
#==============================================================================
|
||||
# Install pypi resources using pip
|
||||
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
|
||||
@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
|
||||
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
|
||||
}
|
||||
|
||||
#==============================================================================
|
||||
# Move PyWin32 DLL's to site-packages\win32
|
||||
#==============================================================================
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
|
||||
|
||||
# Remove pythonwin directory
|
||||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Install PyYAML with CLoader
|
||||
# This has to be a compiled binary to get the CLoader
|
||||
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
|
||||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
|
||||
$p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i
|
||||
|
||||
# Move DLL's to Python Root
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"
|
||||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Fix PyCrypto
|
||||
#==============================================================================
|
||||
|
@ -56,7 +56,7 @@ if %Python%==2 (
|
||||
Set "PyVerMajor=2"
|
||||
Set "PyVerMinor=7"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
Set "PyVerMajor=3"
|
||||
Set "PyVerMinor=5"
|
||||
)
|
||||
|
@ -16,9 +16,10 @@ if %errorLevel%==0 (
|
||||
)
|
||||
echo.
|
||||
|
||||
:CheckPython2
|
||||
if exist "\Python27" goto RemovePython2
|
||||
if exist "\Program Files\Python35" goto RemovePython3
|
||||
goto eof
|
||||
|
||||
goto CheckPython3
|
||||
|
||||
:RemovePython2
|
||||
rem Uninstall Python 2.7
|
||||
@ -47,25 +48,30 @@ goto eof
|
||||
|
||||
goto eof
|
||||
|
||||
:CheckPython3
|
||||
if exist "\Python35" goto RemovePython3
|
||||
|
||||
goto eof
|
||||
|
||||
:RemovePython3
|
||||
echo %0 :: Uninstalling Python 3 ...
|
||||
echo ---------------------------------------------------------------------
|
||||
:: 64 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
|
||||
echo %0 :: - 3.5.3 64bit
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
|
||||
)
|
||||
|
||||
:: 32 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
|
||||
echo %0 :: - 3.5.3 32bit
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
|
||||
)
|
||||
|
||||
rem wipe the Python directory
|
||||
echo %0 :: Removing the C:\Program Files\Python35 Directory ...
|
||||
echo %0 :: Removing the C:\Python35 Directory ...
|
||||
echo ---------------------------------------------------------------------
|
||||
rd /s /q "C:\Program Files\Python35"
|
||||
rd /s /q "C:\Python35"
|
||||
if %errorLevel%==0 (
|
||||
echo Successful
|
||||
) else (
|
||||
|
@ -44,7 +44,7 @@ ${StrStrAdv}
|
||||
!define CPUARCH "x86"
|
||||
!endif
|
||||
|
||||
; Part of the Trim function for Strings
|
||||
# Part of the Trim function for Strings
|
||||
!define Trim "!insertmacro Trim"
|
||||
!macro Trim ResultVar String
|
||||
Push "${String}"
|
||||
@ -61,27 +61,27 @@ ${StrStrAdv}
|
||||
!define MUI_UNICON "salt.ico"
|
||||
!define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp"
|
||||
|
||||
; Welcome page
|
||||
# Welcome page
|
||||
!insertmacro MUI_PAGE_WELCOME
|
||||
|
||||
; License page
|
||||
# License page
|
||||
!insertmacro MUI_PAGE_LICENSE "LICENSE.txt"
|
||||
|
||||
; Configure Minion page
|
||||
# Configure Minion page
|
||||
Page custom pageMinionConfig pageMinionConfig_Leave
|
||||
|
||||
; Instfiles page
|
||||
# Instfiles page
|
||||
!insertmacro MUI_PAGE_INSTFILES
|
||||
|
||||
; Finish page (Customized)
|
||||
# Finish page (Customized)
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave
|
||||
!insertmacro MUI_PAGE_FINISH
|
||||
|
||||
; Uninstaller pages
|
||||
# Uninstaller pages
|
||||
!insertmacro MUI_UNPAGE_INSTFILES
|
||||
|
||||
; Language files
|
||||
# Language files
|
||||
!insertmacro MUI_LANGUAGE "English"
|
||||
|
||||
|
||||
@ -201,8 +201,8 @@ ShowInstDetails show
|
||||
ShowUnInstDetails show
|
||||
|
||||
|
||||
; Check and install Visual C++ redist packages
|
||||
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
# Check and install Visual C++ redist packages
|
||||
# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
Section -Prerequisites
|
||||
|
||||
Var /GLOBAL VcRedistName
|
||||
@ -211,12 +211,12 @@ Section -Prerequisites
|
||||
Var /Global CheckVcRedist
|
||||
StrCpy $CheckVcRedist "False"
|
||||
|
||||
; Visual C++ 2015 redist packages
|
||||
# Visual C++ 2015 redist packages
|
||||
!define PY3_VC_REDIST_NAME "VC_Redist_2015"
|
||||
!define PY3_VC_REDIST_X64_GUID "{50A2BC33-C9CD-3BF1-A8FF-53C10A0B183C}"
|
||||
!define PY3_VC_REDIST_X86_GUID "{BBF2AC74-720C-3CB3-8291-5E34039232FA}"
|
||||
|
||||
; Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
# Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
!define PY2_VC_REDIST_NAME "VC_Redist_2008_SP1_MFC"
|
||||
!define PY2_VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
|
||||
!define PY2_VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
|
||||
@ -239,7 +239,7 @@ Section -Prerequisites
|
||||
StrCpy $VcRedistGuid ${PY2_VC_REDIST_X86_GUID}
|
||||
${EndIf}
|
||||
|
||||
; VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
# VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
${If} ${AtMostWin2008R2}
|
||||
StrCpy $CheckVcRedist "True"
|
||||
${EndIf}
|
||||
@ -255,20 +255,41 @@ Section -Prerequisites
|
||||
"$VcRedistName is currently not installed. Would you like to install?" \
|
||||
/SD IDYES IDNO endVcRedist
|
||||
|
||||
ClearErrors
|
||||
; The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
# The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
SetOutPath "$INSTDIR\"
|
||||
File "..\prereqs\vcredist.exe"
|
||||
; /passive used by 2015 installer
|
||||
; /qb! used by 2008 installer
|
||||
; It just ignores the unrecognized switches...
|
||||
ExecWait "$INSTDIR\vcredist.exe /qb! /passive"
|
||||
IfErrors 0 endVcRedist
|
||||
# If an output variable is specified ($0 in the case below),
|
||||
# ExecWait sets the variable with the exit code (and only sets the
|
||||
# error flag if an error occurs; if an error occurs, the contents
|
||||
# of the user variable are undefined).
|
||||
# http://nsis.sourceforge.net/Reference/ExecWait
|
||||
# /passive used by 2015 installer
|
||||
# /qb! used by 2008 installer
|
||||
# It just ignores the unrecognized switches...
|
||||
ClearErrors
|
||||
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
|
||||
IfErrors 0 CheckVcRedistErrorCode
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed to install. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
Goto endVcRedist
|
||||
|
||||
CheckVcRedistErrorCode:
|
||||
# Check for Reboot Error Code (3010)
|
||||
${If} $0 == 3010
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName installed but requires a restart to complete." \
|
||||
/SD IDOK
|
||||
|
||||
# Check for any other errors
|
||||
${ElseIfNot} $0 == 0
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed with ErrorCode: $0. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
${EndIf}
|
||||
|
||||
endVcRedist:
|
||||
|
||||
${EndIf}
|
||||
|
||||
${EndIf}
|
||||
@ -294,12 +315,12 @@ Function .onInit
|
||||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
; Check for existing installation
|
||||
# Check for existing installation
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" checkOther
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
@ -307,12 +328,12 @@ Function .onInit
|
||||
Abort
|
||||
|
||||
checkOther:
|
||||
; Check for existing installation of full salt
|
||||
# Check for existing installation of full salt
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" skipUninstall
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME_OTHER} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
@ -321,22 +342,22 @@ Function .onInit
|
||||
|
||||
uninst:
|
||||
|
||||
; Get current Silent status
|
||||
# Get current Silent status
|
||||
StrCpy $R0 0
|
||||
${If} ${Silent}
|
||||
StrCpy $R0 1
|
||||
${EndIf}
|
||||
|
||||
; Turn on Silent mode
|
||||
# Turn on Silent mode
|
||||
SetSilent silent
|
||||
|
||||
; Don't remove all directories
|
||||
# Don't remove all directories
|
||||
StrCpy $DeleteInstallDir 0
|
||||
|
||||
; Uninstall silently
|
||||
# Uninstall silently
|
||||
Call uninstallSalt
|
||||
|
||||
; Set it back to Normal mode, if that's what it was before
|
||||
# Set it back to Normal mode, if that's what it was before
|
||||
${If} $R0 == 0
|
||||
SetSilent normal
|
||||
${EndIf}
|
||||
@ -350,7 +371,7 @@ Section -Post
|
||||
|
||||
WriteUninstaller "$INSTDIR\uninst.exe"
|
||||
|
||||
; Uninstall Registry Entries
|
||||
# Uninstall Registry Entries
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"DisplayName" "$(^Name)"
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
@ -366,19 +387,19 @@ Section -Post
|
||||
WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \
|
||||
"DependOnService" "nsi"
|
||||
|
||||
; Set the estimated size
|
||||
# Set the estimated size
|
||||
${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2
|
||||
IntFmt $0 "0x%08X" $0
|
||||
WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"EstimatedSize" "$0"
|
||||
|
||||
; Commandline Registry Entries
|
||||
# Commandline Registry Entries
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
|
||||
; Register the Salt-Minion Service
|
||||
# Register the Salt-Minion Service
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
|
||||
@ -398,12 +419,12 @@ SectionEnd
|
||||
|
||||
Function .onInstSuccess
|
||||
|
||||
; If StartMinionDelayed is 1, then set the service to start delayed
|
||||
# If StartMinionDelayed is 1, then set the service to start delayed
|
||||
${If} $StartMinionDelayed == 1
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
|
||||
${EndIf}
|
||||
|
||||
; If start-minion is 1, then start the service
|
||||
# If start-minion is 1, then start the service
|
||||
${If} $StartMinion == 1
|
||||
nsExec::Exec 'net start salt-minion'
|
||||
${EndIf}
|
||||
@ -413,10 +434,11 @@ FunctionEnd
|
||||
|
||||
Function un.onInit
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Uninstaller: Remove Installation Directory
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/delete-install-dir" $R1
|
||||
IfErrors delete_install_dir_not_found
|
||||
StrCpy $DeleteInstallDir 1
|
||||
@ -434,7 +456,7 @@ Section Uninstall
|
||||
|
||||
Call un.uninstallSalt
|
||||
|
||||
; Remove C:\salt from the Path
|
||||
# Remove C:\salt from the Path
|
||||
Push "C:\salt"
|
||||
Call un.RemoveFromPath
|
||||
|
||||
@ -444,27 +466,27 @@ SectionEnd
|
||||
!macro uninstallSalt un
|
||||
Function ${un}uninstallSalt
|
||||
|
||||
; Make sure we're in the right directory
|
||||
# Make sure we're in the right directory
|
||||
${If} $INSTDIR == "c:\salt\bin\Scripts"
|
||||
StrCpy $INSTDIR "C:\salt"
|
||||
${EndIf}
|
||||
|
||||
; Stop and Remove salt-minion service
|
||||
# Stop and Remove salt-minion service
|
||||
nsExec::Exec 'net stop salt-minion'
|
||||
nsExec::Exec 'sc delete salt-minion'
|
||||
|
||||
; Stop and remove the salt-master service
|
||||
# Stop and remove the salt-master service
|
||||
nsExec::Exec 'net stop salt-master'
|
||||
nsExec::Exec 'sc delete salt-master'
|
||||
|
||||
; Remove files
|
||||
# Remove files
|
||||
Delete "$INSTDIR\uninst.exe"
|
||||
Delete "$INSTDIR\nssm.exe"
|
||||
Delete "$INSTDIR\salt*"
|
||||
Delete "$INSTDIR\vcredist.exe"
|
||||
RMDir /r "$INSTDIR\bin"
|
||||
|
||||
; Remove Registry entries
|
||||
# Remove Registry entries
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}"
|
||||
@ -474,17 +496,17 @@ Function ${un}uninstallSalt
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}"
|
||||
|
||||
; Automatically close when finished
|
||||
# Automatically close when finished
|
||||
SetAutoClose true
|
||||
|
||||
; Prompt to remove the Installation directory
|
||||
# Prompt to remove the Installation directory
|
||||
${IfNot} $DeleteInstallDir == 1
|
||||
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
|
||||
"Would you like to completely remove $INSTDIR and all of its contents?" \
|
||||
/SD IDNO IDNO finished
|
||||
${EndIf}
|
||||
|
||||
; Make sure you're not removing Program Files
|
||||
# Make sure you're not removing Program Files
|
||||
${If} $INSTDIR != 'Program Files'
|
||||
${AndIf} $INSTDIR != 'Program Files (x86)'
|
||||
RMDir /r "$INSTDIR"
|
||||
@ -526,7 +548,7 @@ FunctionEnd
|
||||
|
||||
Function Trim
|
||||
|
||||
Exch $R1 ; Original string
|
||||
Exch $R1 # Original string
|
||||
Push $R2
|
||||
|
||||
Loop:
|
||||
@ -558,36 +580,36 @@ Function Trim
|
||||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; StrStr Function
|
||||
; - find substring in a string
|
||||
;
|
||||
; Usage:
|
||||
; Push "this is some string"
|
||||
; Push "some"
|
||||
; Call StrStr
|
||||
; Pop $0 ; "some string"
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
#
|
||||
# Usage:
|
||||
# Push "this is some string"
|
||||
# Push "some"
|
||||
# Call StrStr
|
||||
# Pop $0 ; "some string"
|
||||
#------------------------------------------------------------------------------
|
||||
!macro StrStr un
|
||||
Function ${un}StrStr
|
||||
|
||||
Exch $R1 ; $R1=substring, stack=[old$R1,string,...]
|
||||
Exch ; stack=[string,old$R1,...]
|
||||
Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 ; $R3=strlen(substring)
|
||||
Push $R4 ; $R4=count
|
||||
Push $R5 ; $R5=tmp
|
||||
StrLen $R3 $R1 ; Get the length of the Search String
|
||||
StrCpy $R4 0 ; Set the counter to 0
|
||||
Exch $R1 # $R1=substring, stack=[old$R1,string,...]
|
||||
Exch # stack=[string,old$R1,...]
|
||||
Exch $R2 # $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 # $R3=strlen(substring)
|
||||
Push $R4 # $R4=count
|
||||
Push $R5 # $R5=tmp
|
||||
StrLen $R3 $R1 # Get the length of the Search String
|
||||
StrCpy $R4 0 # Set the counter to 0
|
||||
|
||||
loop:
|
||||
StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is
|
||||
; the size of the length of the search string
|
||||
StrCmp $R5 $R1 done ; Is the contents of the window the same as
|
||||
; search string, then done
|
||||
StrCmp $R5 "" done ; Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 ; Shift the windows one character
|
||||
Goto loop ; Repeat
|
||||
StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is
|
||||
# the size of the length of the search string
|
||||
StrCmp $R5 $R1 done # Is the contents of the window the same as
|
||||
# search string, then done
|
||||
StrCmp $R5 "" done # Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 # Shift the windows one character
|
||||
Goto loop # Repeat
|
||||
|
||||
done:
|
||||
StrCpy $R1 $R2 "" $R4
|
||||
@ -595,7 +617,7 @@ Function ${un}StrStr
|
||||
Pop $R4
|
||||
Pop $R3
|
||||
Pop $R2
|
||||
Exch $R1 ; $R1=old$R1, stack=[result,...]
|
||||
Exch $R1 # $R1=old$R1, stack=[result,...]
|
||||
|
||||
FunctionEnd
|
||||
!macroend
|
||||
@ -603,74 +625,74 @@ FunctionEnd
|
||||
!insertmacro StrStr "un."
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; AddToPath Function
|
||||
; - Adds item to Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call AddToPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# AddToPath Function
|
||||
# - Adds item to Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call AddToPath
|
||||
#------------------------------------------------------------------------------
|
||||
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
Function AddToPath
|
||||
|
||||
Exch $0 ; Path to add
|
||||
Push $1 ; Current Path
|
||||
Push $2 ; Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 ; Handle to Reg / Length of Path
|
||||
Push $4 ; Result of Registry Call
|
||||
Exch $0 # Path to add
|
||||
Push $1 # Current Path
|
||||
Push $2 # Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 # Handle to Reg / Length of Path
|
||||
Push $4 # Result of Registry Call
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
"You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Check if already in PATH
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0;" ; The string to find
|
||||
# Check if already in PATH
|
||||
Push "$1;" # The string to search
|
||||
Push "$0;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result of the search
|
||||
StrCmp $2 "" 0 done ; String not found, try again with ';' at the end
|
||||
; Otherwise, it's already in the path
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0\;" ; The string to find
|
||||
Pop $2 # The result of the search
|
||||
StrCmp $2 "" 0 done # String not found, try again with ';' at the end
|
||||
# Otherwise, it's already in the path
|
||||
Push "$1;" # The string to search
|
||||
Push "$0\;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result
|
||||
StrCmp $2 "" 0 done ; String not found, continue (add)
|
||||
; Otherwise, it's already in the path
|
||||
Pop $2 # The result
|
||||
StrCmp $2 "" 0 done # String not found, continue (add)
|
||||
# Otherwise, it's already in the path
|
||||
|
||||
; Prevent NSIS string overflow
|
||||
StrLen $2 $0 ; Length of path to add ($2)
|
||||
StrLen $3 $1 ; Length of current path ($3)
|
||||
IntOp $2 $2 + $3 ; Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 ; Account for the additional ';'
|
||||
; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
# Prevent NSIS string overflow
|
||||
StrLen $2 $0 # Length of path to add ($2)
|
||||
StrLen $3 $1 # Length of current path ($3)
|
||||
IntOp $2 $2 + $3 # Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 # Account for the additional ';'
|
||||
# $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
|
||||
; Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
# Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
|
||||
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
@ -678,18 +700,18 @@ Function AddToPath
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; Append dir to PATH
|
||||
# Append dir to PATH
|
||||
DetailPrint "Add to PATH: $0"
|
||||
StrCpy $2 $1 1 -1 ; Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 ; Check for trailing ';'
|
||||
StrCpy $1 $1 -1 ; remove trailing ';'
|
||||
StrCmp $1 "" +2 ; Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" ; Append new path at the end ($0)
|
||||
StrCpy $2 $1 1 -1 # Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 # Check for trailing ';'
|
||||
StrCpy $1 $1 -1 # remove trailing ';'
|
||||
StrCmp $1 "" +2 # Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" # Append new path at the end ($0)
|
||||
|
||||
; We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
# We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
WriteRegExpandStr ${Environ} "PATH" $0
|
||||
|
||||
; Broadcast registry change to open programs
|
||||
# Broadcast registry change to open programs
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
@ -702,16 +724,16 @@ Function AddToPath
|
||||
FunctionEnd
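For readers who want the same behaviour outside NSIS, here is a hedged, Windows-only Python sketch of the technique AddToPath documents above: read the machine PATH straight from the registry (so the 1024-character ReadRegStr limit never applies), append the directory only if it is missing, and broadcast the change. The function name and return value are illustrative, not part of the installer.

.. code-block:: python

    # Windows-only sketch; run with Administrator rights to write HKLM.
    import ctypes
    import winreg

    ENV_KEY = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
    HWND_BROADCAST = 0xFFFF
    WM_SETTINGCHANGE = 0x001A

    def add_to_system_path(directory):
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, ENV_KEY, 0,
                            winreg.KEY_READ | winreg.KEY_WRITE) as key:
            try:
                current, _ = winreg.QueryValueEx(key, 'PATH')
            except FileNotFoundError:
                current = ''
            parts = [p for p in current.split(';') if p]
            if directory in parts:
                return False  # already on the PATH, nothing to do
            parts.append(directory)
            winreg.SetValueEx(key, 'PATH', 0, winreg.REG_EXPAND_SZ,
                              ';'.join(parts))
        # Same broadcast the installer sends so open programs see the change.
        ctypes.windll.user32.SendMessageTimeoutW(
            HWND_BROADCAST, WM_SETTINGCHANGE, 0, 'Environment', 0, 5000, None)
        return True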
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; RemoveFromPath Function
|
||||
; - Removes item from Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call un.RemoveFromPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# RemoveFromPath Function
|
||||
# - Removes item from Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call un.RemoveFromPath
|
||||
#------------------------------------------------------------------------------
|
||||
Function un.RemoveFromPath
|
||||
|
||||
Exch $0
|
||||
@ -722,59 +744,59 @@ Function un.RemoveFromPath
|
||||
Push $5
|
||||
Push $6
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 ; Copy the last character of the path
|
||||
StrCmp $5 ";" +2 ; Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" ; ensure trailing ';'
|
||||
# Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 # Copy the last character of the path
|
||||
StrCmp $5 ";" +2 # Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" # ensure trailing ';'
|
||||
|
||||
; Check for our directory inside the path
|
||||
Push $1 ; String to Search
|
||||
Push "$0;" ; Dir to Find
|
||||
# Check for our directory inside the path
|
||||
Push $1 # String to Search
|
||||
Push "$0;" # Dir to Find
|
||||
Call un.StrStr
|
||||
Pop $2 ; The results of the search
|
||||
StrCmp $2 "" done ; If results are empty, we're done, otherwise continue
|
||||
Pop $2 # The results of the search
|
||||
StrCmp $2 "" done # If results are empty, we're done, otherwise continue
|
||||
|
||||
; Remove our Directory from the Path
|
||||
# Remove our Directory from the Path
|
||||
DetailPrint "Remove from PATH: $0"
|
||||
StrLen $3 "$0;" ; Get the length of our dir ($3)
|
||||
StrLen $4 $2 ; Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" ; Combine $5 and $6
|
||||
StrLen $3 "$0;" # Get the length of our dir ($3)
|
||||
StrLen $4 $2 # Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 # $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" # Combine $5 and $6
|
||||
|
||||
; Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 ; Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 ; Check for ';'
|
||||
StrCpy $3 $3 -1 ; remove trailing ';'
|
||||
# Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 # Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 # Check for ';'
|
||||
StrCpy $3 $3 -1 # remove trailing ';'
|
||||
|
||||
; Write the new path to the registry
|
||||
# Write the new path to the registry
|
||||
WriteRegExpandStr ${Environ} "PATH" $3
|
||||
|
||||
; Broadcast the change to all open applications
|
||||
# Broadcast the change to all open applications
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
@ -808,6 +830,7 @@ Function getMinionConfig
|
||||
confFound:
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
@ -838,68 +861,69 @@ FunctionEnd
|
||||
Function updateMinionConfig
|
||||
|
||||
ClearErrors
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading
|
||||
GetTempFileName $R0 ; get new temp file name
|
||||
FileOpen $1 $R0 "w" ; open temp file for writing
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
|
||||
GetTempFileName $R0 # get new temp file name
|
||||
FileOpen $1 $R0 "w" # open temp file for writing
|
||||
|
||||
loop: ; loop through each line
|
||||
FileRead $0 $2 ; read line from target file
|
||||
IfErrors done ; end if errors are encountered (end of line)
|
||||
loop: # loop through each line
|
||||
FileRead $0 $2 # read line from target file
|
||||
IfErrors done # end if errors are encountered (end of line)
|
||||
|
||||
${If} $MasterHost_State != "" ; if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MasterHost_State != "" # if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" # and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
${If} $MinionName_State != "" ; if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" ; change line
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MinionName_State != "" # if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" # change line
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
FileWrite $1 $2 ; write changed or unchanged line to temp file
|
||||
FileWrite $1 $2 # write changed or unchanged line to temp file
|
||||
Goto loop
|
||||
|
||||
done:
|
||||
FileClose $0 ; close target file
|
||||
FileClose $1 ; close temp file
|
||||
Delete "$INSTDIR\conf\minion" ; delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file
|
||||
Delete $R0 ; delete temp file
|
||||
FileClose $0 # close target file
|
||||
FileClose $1 # close temp file
|
||||
Delete "$INSTDIR\conf\minion" # delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
|
||||
Delete $R0 # delete temp file
|
||||
|
||||
FunctionEnd
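The loop above rewrites conf\minion line by line through a temp file. A rough Python equivalent is sketched below; the helper name, the regular expressions, and the simple read/rewrite strategy are assumptions for illustration, not the installer's actual code.

.. code-block:: python

    import re

    def update_minion_config(path, master=None, minion_id=None):
        # Replace the ``master:`` and ``id:`` lines (possibly commented out)
        # when non-default values were supplied, mirroring the NSIS loop.
        with open(path) as fh:
            lines = fh.readlines()
        out = []
        for line in lines:
            if master and master != 'salt' and re.match(r'^#?\s*master:', line):
                line = 'master: {0}\n'.format(master)
            elif minion_id and minion_id != 'hostname' and re.match(r'^#?\s*id:', line):
                line = 'id: {0}\n'.format(minion_id)
            out.append(line)
        with open(path, 'w') as fh:
            fh.writelines(out)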
|
||||
|
||||
|
||||
Function parseCommandLineSwitches
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
; Check for start-minion switches
|
||||
; /start-service is to be deprecated, so we must check for both
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
${GetOptions} $R0 "/start-minion=" $R2
|
||||
|
||||
# Service: Start Salt Minion
|
||||
${IfNot} $R2 == ""
|
||||
; If start-minion was passed something, then set it
|
||||
# If start-minion was passed something, then set it
|
||||
StrCpy $StartMinion $R2
|
||||
${ElseIfNot} $R1 == ""
|
||||
; If start-service was passed something, then set StartMinion to that
|
||||
# If start-service was passed something, then set StartMinion to that
|
||||
StrCpy $StartMinion $R1
|
||||
${Else}
|
||||
; Otherwise default to 1
|
||||
# Otherwise default to 1
|
||||
StrCpy $StartMinion 1
|
||||
${EndIf}
|
||||
|
||||
# Service: Minion Startup Type Delayed
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/start-minion-delayed" $R1
|
||||
IfErrors start_minion_delayed_not_found
|
||||
StrCpy $StartMinionDelayed 1
|
||||
|
@ -19,9 +19,9 @@ Function Get-Settings {
|
||||
"Python2Dir" = "C:\Python27"
|
||||
"Scripts2Dir" = "C:\Python27\Scripts"
|
||||
"SitePkgs2Dir" = "C:\Python27\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Program Files\Python35"
|
||||
"Scripts3Dir" = "C:\Program Files\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Python35"
|
||||
"Scripts3Dir" = "C:\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Python35\Lib\site-packages"
|
||||
"DownloadDir" = "$env:Temp\DevSalt"
|
||||
}
|
||||
# The script deletes the DownloadDir (above) for each install.
|
||||
|
@ -17,9 +17,7 @@ from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import collections
|
||||
import hashlib
|
||||
import time
|
||||
import logging
|
||||
import random
|
||||
@ -31,8 +29,10 @@ import salt.config
|
||||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.utils.versions
|
||||
import salt.payload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -56,6 +56,7 @@ class LoadAuth(object):
|
||||
self.max_fail = 1.0
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.auth = salt.loader.auth(opts)
|
||||
self.tokens = salt.loader.eauth_tokens(opts)
|
||||
self.ckminions = ckminions or salt.utils.minions.CkMinions(opts)
|
||||
|
||||
def load_name(self, load):
|
||||
@ -69,7 +70,7 @@ class LoadAuth(object):
|
||||
if fstr not in self.auth:
|
||||
return ''
|
||||
try:
|
||||
pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
|
||||
pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0]
|
||||
return load[pname_arg]
|
||||
except IndexError:
|
||||
return ''
|
||||
@ -200,13 +201,6 @@ class LoadAuth(object):
|
||||
'''
|
||||
if not self.authenticate_eauth(load):
|
||||
return {}
|
||||
fstr = '{0}.auth'.format(load['eauth'])
|
||||
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
|
||||
tok = str(hash_type(os.urandom(512)).hexdigest())
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
while os.path.isfile(t_path):
|
||||
tok = str(hash_type(os.urandom(512)).hexdigest())
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
|
||||
if self._allow_custom_expire(load):
|
||||
token_expire = load.pop('token_expire', self.opts['token_expire'])
|
||||
@ -217,39 +211,27 @@ class LoadAuth(object):
|
||||
tdata = {'start': time.time(),
|
||||
'expire': time.time() + token_expire,
|
||||
'name': self.load_name(load),
|
||||
'eauth': load['eauth'],
|
||||
'token': tok}
|
||||
'eauth': load['eauth']}
|
||||
|
||||
if self.opts['keep_acl_in_token']:
|
||||
acl_ret = self.__get_acl(load)
|
||||
tdata['auth_list'] = acl_ret
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
groups = self.get_groups(load)
|
||||
if groups:
|
||||
tdata['groups'] = groups
|
||||
|
||||
try:
|
||||
with salt.utils.files.set_umask(0o177):
|
||||
with salt.utils.files.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
|
||||
return {}
|
||||
return tdata
|
||||
return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)
|
||||
|
||||
def get_tok(self, tok):
|
||||
'''
|
||||
Return the name associated with the token, or False if the token is
|
||||
not valid
|
||||
'''
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
if not os.path.isfile(t_path):
|
||||
return {}
|
||||
try:
|
||||
with salt.utils.files.fopen(t_path, 'rb') as fp_:
|
||||
tdata = self.serial.loads(fp_.read())
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not read token file "{0}".'.format(t_path))
|
||||
tdata = self.tokens["{0}.get_token".format(self.opts['eauth_tokens'])](self.opts, tok)
|
||||
if not tdata:
|
||||
return {}
|
||||
|
||||
rm_tok = False
|
||||
if 'expire' not in tdata:
|
||||
# invalid token, delete it!
|
||||
@ -257,13 +239,22 @@ class LoadAuth(object):
|
||||
if tdata.get('expire', '0') < time.time():
|
||||
rm_tok = True
|
||||
if rm_tok:
|
||||
try:
|
||||
os.remove(t_path)
|
||||
return {}
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
self.rm_token(tok)
|
||||
|
||||
return tdata
|
||||
|
||||
def list_tokens(self):
|
||||
'''
|
||||
List all tokens in eauth_tokn storage.
|
||||
'''
|
||||
return self.tokens["{0}.list_tokens".format(self.opts['eauth_tokens'])](self.opts)
|
||||
|
||||
def rm_token(self, tok):
|
||||
'''
|
||||
Remove the given token from token storage.
|
||||
'''
|
||||
self.tokens["{0}.rm_token".format(self.opts['eauth_tokens'])](self.opts, tok)
|
||||
|
||||
def authenticate_token(self, load):
|
||||
'''
|
||||
Authenticate a user by the token specified in load.
|
||||
@ -303,29 +294,31 @@ class LoadAuth(object):
|
||||
def authenticate_key(self, load, key):
|
||||
'''
|
||||
Authenticate a user by the key passed in load.
|
||||
Return the effective user id (name) if it's differ from the specified one (for sudo).
|
||||
If the effective user id is the same as passed one return True on success or False on
|
||||
Return the effective user id (name) if it's different from the specified one (for sudo).
|
||||
If the effective user id is the same as the passed one, return True on success or False on
|
||||
failure.
|
||||
'''
|
||||
auth_key = load.pop('key')
|
||||
if not auth_key:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
error_msg = 'Authentication failure of type "user" occurred.'
|
||||
auth_key = load.pop('key', None)
|
||||
if auth_key is None:
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
|
||||
if 'user' in load:
|
||||
auth_user = AuthUser(load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return auth_user.sudo_name()
|
||||
elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root':
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_user.is_running_user():
|
||||
if auth_key != key.get(load['user']):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_key == key.get('root'):
|
||||
pass
|
||||
@ -333,19 +326,19 @@ class LoadAuth(object):
|
||||
if load['user'] in key:
|
||||
# User is authorised, check key and check perms
|
||||
if auth_key != key[load['user']]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return load['user']
|
||||
else:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
log.warning('Authentication failure of type "other" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_auth_list(self, load):
|
||||
def get_auth_list(self, load, token=None):
|
||||
'''
|
||||
Retrieve access list for the user specified in load.
|
||||
The list is built by eauth module or from master eauth configuration.
|
||||
@ -353,62 +346,36 @@ class LoadAuth(object):
|
||||
list if the user has no rights to execute anything on this master and returns non-empty list
|
||||
if user is allowed to execute particular functions.
|
||||
'''
|
||||
# Get auth list from token
|
||||
if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
return token['auth_list']
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.__get_acl(load)
|
||||
if auth_list is not None:
|
||||
return auth_list
|
||||
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
eauth = token['eauth'] if token else load['eauth']
|
||||
if eauth not in self.opts['external_auth']:
|
||||
# No matching module is allowed in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][load['eauth']]
|
||||
if groups is None or groups is False:
|
||||
if token:
|
||||
name = token['name']
|
||||
groups = token.get('groups')
|
||||
else:
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][eauth]
|
||||
if not groups:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in groups:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
# If group_auth_match is set it means only that we have a
# user who matches at least one of the groups defined
# in the configuration file.
|
||||
|
||||
external_auth_in_db = False
|
||||
for entry in eauth_config:
|
||||
if entry.startswith('^'):
|
||||
external_auth_in_db = True
|
||||
break
|
||||
|
||||
# If neither a catchall, a named membership, nor a group
# membership is found, there is no need to continue. Simply
# deny the user access.
|
||||
if not ((name in eauth_config) |
|
||||
('*' in eauth_config) |
|
||||
group_auth_match | external_auth_in_db):
|
||||
# Auth successful, but no matching user found in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
# We now have an authenticated session and it is time to determine
|
||||
# what the user has access to.
|
||||
auth_list = []
|
||||
if name in eauth_config:
|
||||
auth_list = eauth_config[name]
|
||||
elif '*' in eauth_config:
|
||||
auth_list = eauth_config['*']
|
||||
if group_auth_match:
|
||||
auth_list = self.ckminions.fill_auth_list_from_groups(
|
||||
eauth_config,
|
||||
groups,
|
||||
auth_list)
|
||||
auth_list = self.ckminions.fill_auth_list(
|
||||
eauth_config,
|
||||
name,
|
||||
groups)
|
||||
|
||||
auth_list = self.__process_acl(load, auth_list)
|
||||
|
||||
@ -416,12 +383,77 @@ class LoadAuth(object):
|
||||
|
||||
return auth_list
|
||||
|
||||
def check_authentication(self, load, auth_type, key=None, show_username=False):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Go through various checks to see if the token/eauth/user can be authenticated.
|
||||
|
||||
Returns a dictionary containing the following keys:
|
||||
|
||||
- auth_list
|
||||
- username
|
||||
- error
|
||||
|
||||
If an error is encountered, return immediately with the relevant error dictionary
|
||||
as authentication has failed. Otherwise, return the username and valid auth_list.
|
||||
'''
|
||||
auth_list = []
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
ret = {'auth_list': auth_list,
|
||||
'username': username,
|
||||
'error': {}}
|
||||
|
||||
# Authenticate
|
||||
if auth_type == 'token':
|
||||
token = self.authenticate_token(load)
|
||||
if not token:
|
||||
ret['error'] = {'name': 'TokenAuthenticationError',
|
||||
'message': 'Authentication failure of type "token" occurred.'}
|
||||
return ret
|
||||
|
||||
# Update username for token
|
||||
username = token['name']
|
||||
ret['username'] = username
|
||||
auth_list = self.get_auth_list(load, token=token)
|
||||
elif auth_type == 'eauth':
|
||||
if not self.authenticate_eauth(load):
|
||||
ret['error'] = {'name': 'EauthAuthenticationError',
|
||||
'message': 'Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.'.format(username)}
|
||||
return ret
|
||||
|
||||
auth_list = self.get_auth_list(load)
|
||||
elif auth_type == 'user':
|
||||
if not self.authenticate_key(load, key):
|
||||
if show_username:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username)
|
||||
else:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
else:
|
||||
ret['error'] = {'name': 'SaltInvocationError',
|
||||
'message': 'Authentication type not supported.'}
|
||||
return ret
|
||||
|
||||
# Authentication checks passed
|
||||
ret['auth_list'] = auth_list
|
||||
return ret
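A hedged example of how a caller (a netapi endpoint, for instance) might drive check_authentication(); the master config path and the contents of ``load`` are illustrative only.

.. code-block:: python

    import salt.auth
    import salt.config

    opts = salt.config.master_config('/etc/salt/master')
    auth = salt.auth.LoadAuth(opts)

    load = {'eauth': 'pam', 'username': 'fred', 'password': 'secret'}
    result = auth.check_authentication(load, 'eauth')
    if result['error']:
        raise RuntimeError(result['error']['message'])
    auth_list = result['auth_list']   # empty list: no rights on this master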
|
||||
|
||||
|
||||
class Authorize(object):
|
||||
'''
|
||||
The authorization engine used by EAUTH
|
||||
'''
|
||||
def __init__(self, opts, load, loadauth=None):
|
||||
salt.utils.versions.warn_until(
|
||||
'Neon',
|
||||
'The \'Authorize\' class has been deprecated. Please use the '
|
||||
'\'LoadAuth\', \'Resolver\', or \'AuthUser\' classes instead. '
'Support for the \'Authorize\' class will be removed in Salt '
|
||||
'{version}.'
|
||||
)
|
||||
self.opts = salt.config.master_config(opts['conf_file'])
|
||||
self.load = load
|
||||
self.ckminions = salt.utils.minions.CkMinions(opts)
|
||||
@ -554,6 +586,15 @@ class Authorize(object):
|
||||
load.get('arg', None),
|
||||
load.get('tgt', None),
|
||||
load.get('tgt_type', 'glob'))
|
||||
|
||||
# Handle possible return of dict data structure from any_auth call to
|
||||
# avoid a stacktrace. As mentioned in PR #43181, this entire class is
|
||||
# dead code and is marked for removal in Salt Neon. But until then, we
|
||||
# should handle the dict return, which is an error and should return
|
||||
# False until this class is removed.
|
||||
if isinstance(good, dict):
|
||||
return False
|
||||
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load.get('fun', '') != 'saltutil.find_job':
|
||||
@ -566,7 +607,7 @@ class Authorize(object):
|
||||
authorization
|
||||
|
||||
Note: this will check that the user has at least one right that will let
|
||||
him execute "load", this does not deal with conflicting rules
|
||||
the user execute "load", this does not deal with conflicting rules
|
||||
'''
|
||||
|
||||
adata = self.auth_data
|
||||
@ -638,7 +679,7 @@ class Resolver(object):
|
||||
'not available').format(eauth))
|
||||
return ret
|
||||
|
||||
args = salt.utils.arg_lookup(self.auth[fstr])
|
||||
args = salt.utils.args.arg_lookup(self.auth[fstr])
|
||||
for arg in args['args']:
|
||||
if arg in self.opts:
|
||||
ret[arg] = self.opts[arg]
|
||||
|
@ -378,7 +378,7 @@ def groups(username, **kwargs):
|
||||
search_results = bind.search_s(search_base,
|
||||
ldap.SCOPE_SUBTREE,
|
||||
search_string,
|
||||
[_config('accountattributename'), 'cn'])
|
||||
[_config('accountattributename'), 'cn', _config('groupattribute')])
|
||||
for _, entry in search_results:
|
||||
if username in entry[_config('accountattributename')]:
|
||||
group_list.append(entry['cn'][0])
|
||||
@ -390,7 +390,7 @@ def groups(username, **kwargs):
|
||||
|
||||
# Only test user auth on first call for job.
|
||||
# 'show_jid' only exists on first payload so we can use that for the conditional.
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs['password'],
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs.get('password'),
|
||||
anonymous=_config('auth_by_group_membership_only', mandatory=False) and
|
||||
_config('anonymous', mandatory=False)):
|
||||
log.error('LDAP username and password do not match')
|
||||
|
@ -59,7 +59,7 @@ class Beacon(object):
|
||||
|
||||
if 'enabled' in current_beacon_config:
|
||||
if not current_beacon_config['enabled']:
|
||||
log.trace('Beacon {0} disabled'.format(mod))
|
||||
log.trace('Beacon %s disabled', mod)
|
||||
continue
|
||||
else:
|
||||
# remove 'enabled' item before processing the beacon
|
||||
@ -68,7 +68,7 @@ class Beacon(object):
|
||||
else:
|
||||
self._remove_list_item(config[mod], 'enabled')
|
||||
|
||||
log.trace('Beacon processing: {0}'.format(mod))
|
||||
log.trace('Beacon processing: %s', mod)
|
||||
fun_str = '{0}.beacon'.format(mod)
|
||||
validate_str = '{0}.validate'.format(mod)
|
||||
if fun_str in self.beacons:
|
||||
@ -77,10 +77,10 @@ class Beacon(object):
|
||||
if interval:
|
||||
b_config = self._trim_config(b_config, mod, 'interval')
|
||||
if not self._process_interval(mod, interval):
|
||||
log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
|
||||
log.trace('Skipping beacon %s. Interval not reached.', mod)
|
||||
continue
|
||||
if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
|
||||
log.trace('Evaluating if beacon {0} should be skipped due to a state run.'.format(mod))
log.trace('Evaluating if beacon %s should be skipped due to a state run.', mod)
|
||||
b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
|
||||
is_running = False
|
||||
running_jobs = salt.utils.minion.running(self.opts)
|
||||
@ -90,10 +90,10 @@ class Beacon(object):
|
||||
if is_running:
|
||||
close_str = '{0}.close'.format(mod)
|
||||
if close_str in self.beacons:
|
||||
log.info('Closing beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Closing beacon %s. State run in progress.', mod)
|
||||
self.beacons[close_str](b_config[mod])
|
||||
else:
|
||||
log.info('Skipping beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Skipping beacon %s. State run in progress.', mod)
|
||||
continue
|
||||
# Update __grains__ on the beacon
|
||||
self.beacons[fun_str].__globals__['__grains__'] = grains
|
||||
@ -120,7 +120,7 @@ class Beacon(object):
|
||||
if runonce:
|
||||
self.disable_beacon(mod)
|
||||
else:
|
||||
log.warning('Unable to process beacon {0}'.format(mod))
|
||||
log.warning('Unable to process beacon %s', mod)
|
||||
return ret
|
||||
|
||||
def _trim_config(self, b_config, mod, key):
|
||||
@ -149,19 +149,19 @@ class Beacon(object):
|
||||
Process beacons with intervals
|
||||
Return True if a beacon should be run on this loop
|
||||
'''
|
||||
log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
|
||||
log.trace('Processing interval %s for beacon mod %s', interval, mod)
|
||||
loop_interval = self.opts['loop_interval']
|
||||
if mod in self.interval_map:
|
||||
log.trace('Processing interval in map')
|
||||
counter = self.interval_map[mod]
|
||||
log.trace('Interval counter: {0}'.format(counter))
|
||||
log.trace('Interval counter: %s', counter)
|
||||
if counter * loop_interval >= interval:
|
||||
self.interval_map[mod] = 1
|
||||
return True
|
||||
else:
|
||||
self.interval_map[mod] += 1
|
||||
else:
|
||||
log.trace('Interval process inserting mod: {0}'.format(mod))
|
||||
log.trace('Interval process inserting mod: %s', mod)
|
||||
self.interval_map[mod] = 1
|
||||
return False
|
||||
|
||||
@ -205,15 +205,50 @@ class Beacon(object):
|
||||
'''
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
b_conf = self.functions['config.merge']('beacons')
|
||||
if not isinstance(self.opts['beacons'], dict):
|
||||
self.opts['beacons'] = {}
|
||||
self.opts['beacons'].update(b_conf)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacons_list_complete')
|
||||
|
||||
return True
|
||||
|
||||
def list_available_beacons(self):
|
||||
'''
|
||||
List the available beacons
|
||||
'''
|
||||
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
|
||||
for _beacon in self.beacons if '.beacon' in _beacon]
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': _beacons},
|
||||
tag='/salt/minion/minion_beacons_list_available_complete')
|
||||
|
||||
return True
|
||||
|
||||
def validate_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Validate the configuration for the named beacon
|
||||
'''
|
||||
validate_str = '{}.validate'.format(name)
|
||||
# Run the validate function if it's available,
|
||||
# otherwise there is a warning about it being missing
|
||||
if validate_str in self.beacons:
|
||||
if 'enabled' in beacon_data:
|
||||
del beacon_data['enabled']
|
||||
valid, vcomment = self.beacons[validate_str](beacon_data)
|
||||
else:
|
||||
log.info('Beacon %s does not have a validate'
|
||||
' function, skipping validation.', name)
|
||||
valid = True
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True,
|
||||
'vcomment': vcomment,
|
||||
'valid': valid},
|
||||
tag='/salt/minion/minion_beacon_validation_complete')
|
||||
|
||||
return True
|
||||
|
||||
def add_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Add a beacon item
|
||||
@ -224,9 +259,9 @@ class Beacon(object):
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
else:
|
||||
log.info('Added new beacon item {0}'.format(name))
|
||||
log.info('Added new beacon item %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
@ -245,7 +280,7 @@ class Beacon(object):
|
||||
data[name] = beacon_data
|
||||
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
@ -261,7 +296,7 @@ class Beacon(object):
|
||||
'''
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Deleting beacon item {0}'.format(name))
|
||||
log.info('Deleting beacon item %s', name)
|
||||
del self.opts['beacons'][name]
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
@ -10,14 +10,19 @@ Beacon to fire events at failed login of users
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'btmp'
|
||||
BTMP = '/var/log/btmp'
|
||||
@ -37,6 +42,15 @@ FIELDS = [
|
||||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'btmp.loc'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(BTMP):
|
||||
@ -44,6 +58,20 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
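As a standalone illustration (assuming dateutil is installed), the check above boils down to the comparison below for an '8am'-'4pm' window; the values are examples only.

.. code-block:: python

    import time
    import dateutil.parser as dateutil_parser

    time_range = {'start': '8am', 'end': '4pm'}
    now = int(time.time())
    # dateutil resolves '8am' / '4pm' relative to today's date.
    start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
    end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
    print(start <= now <= end)   # True only during the configured window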
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
@ -60,6 +88,45 @@ def validate(config):
|
||||
if not isinstance(config, list):
|
||||
return False, ('Configuration for btmp beacon must '
|
||||
'be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
@ -72,8 +139,40 @@ def beacon(config):
|
||||
|
||||
beacons:
|
||||
btmp: []
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(BTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
@ -83,6 +182,7 @@ def beacon(config):
|
||||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
@ -91,7 +191,30 @@ def beacon(config):
|
||||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
|
||||
|
@ -23,7 +23,9 @@ import re
|
||||
|
||||
# Import salt libs
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
|
353
salt/beacons/napalm_beacon.py
Normal file
@ -0,0 +1,353 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
NAPALM functions
|
||||
================
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Watch NAPALM functions and fire events on specific triggers.
|
||||
|
||||
.. note::
|
||||
|
||||
The ``NAPALM`` beacon works only when running under
a regular Minion or a Proxy Minion, managed via NAPALM_.
|
||||
Check the documentation for the
|
||||
:mod:`NAPALM proxy module <salt.proxy.napalm>`.
|
||||
|
||||
.. _NAPALM: http://napalm.readthedocs.io/en/latest/index.html
|
||||
|
||||
The configuration accepts a list of Salt functions to be
|
||||
invoked, and the corresponding output hierarchy that should
|
||||
be matched against. To invoke a function with certain
|
||||
arguments, they can be specified using the ``_args`` key, or
|
||||
``_kwargs`` for more specific key-value arguments.
|
||||
|
||||
The match structure follows the output hierarchy of the NAPALM
|
||||
functions, under the ``out`` key.
|
||||
|
||||
For example, the following is the normal structure returned by the
|
||||
:mod:`ntp.stats <salt.modules.napalm_ntp.stats>` execution function:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"comment": "",
|
||||
"result": true,
|
||||
"out": [
|
||||
{
|
||||
"referenceid": ".GPSs.",
|
||||
"remote": "172.17.17.1",
|
||||
"synchronized": true,
|
||||
"reachability": 377,
|
||||
"offset": 0.461,
|
||||
"when": "860",
|
||||
"delay": 143.606,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 1,
|
||||
"jitter": 0.027,
|
||||
"type": "-"
|
||||
},
|
||||
{
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.2",
|
||||
"synchronized": false,
|
||||
"reachability": 0,
|
||||
"offset": 0.0,
|
||||
"when": "-",
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 16,
|
||||
"jitter": 4000.0,
|
||||
"type": "-"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
In order to fire events when the synchronization is lost with
|
||||
one of the NTP peers, e.g., ``172.17.17.2``, we can match it explicitly as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ntp.stats:
|
||||
remote: 172.17.17.2
|
||||
synchronized: false
|
||||
|
||||
There is a single nesting level, as the output of ``ntp.stats`` is
just a list of dictionaries, and this beacon will compare each dictionary
from the list with the structure exemplified above.
|
||||
|
||||
.. note::
|
||||
|
||||
When we want to match on any element at a certain level, we can
|
||||
configure ``*`` to match anything.
|
||||
|
||||
Considering a more complex structure consisting of multiple nested levels,
|
||||
e.g., the output of the :mod:`bgp.neighbors <salt.modules.napalm_bgp.neighbors>`
|
||||
execution function, to check when any neighbor from the ``global``
|
||||
routing table is down, the match structure would have the format:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
bgp.neighbors:
|
||||
global:
|
||||
'*':
|
||||
up: false
|
||||
|
||||
The match structure above will match any BGP neighbor, with
|
||||
any network (``*`` matches any AS number), under the ``global`` VRF.
|
||||
In other words, this beacon will push an event on the Salt bus
|
||||
when there's a BGP neighbor down.
|
||||
|
||||
The right operand can also accept mathematical operations
|
||||
(i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.) when comparing
|
||||
numerical values.
|
||||
|
||||
Configuration Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
beacons:
|
||||
napalm:
|
||||
- net.interfaces:
|
||||
# fire events when any interfaces is down
|
||||
'*':
|
||||
is_up: false
|
||||
- net.interfaces:
|
||||
# fire events only when the xe-0/0/0 interface is down
|
||||
'xe-0/0/0':
|
||||
is_up: false
|
||||
- ntp.stats:
|
||||
# fire when any NTP peer is unsynchronized
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when the synchronization
|
||||
# with the 172.17.17.2 NTP server is lost
|
||||
_args:
|
||||
- 172.17.17.2
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when there's an NTP peer with
|
||||
# synchronization stratum > 5
|
||||
stratum: '> 5'
|
||||
|
||||
Event structure example:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
salt/beacon/edge01.bjm01/napalm/junos/ntp.stats {
|
||||
"_stamp": "2017-09-05T09:51:09.377202",
|
||||
"args": [],
|
||||
"data": {
|
||||
"comment": "",
|
||||
"out": [
|
||||
{
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"jitter": 4000.0,
|
||||
"offset": 0.0,
|
||||
"reachability": 0,
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.1",
|
||||
"stratum": 16,
|
||||
"synchronized": false,
|
||||
"type": "-",
|
||||
"when": "-"
|
||||
}
|
||||
],
|
||||
"result": true
|
||||
},
|
||||
"fun": "ntp.stats",
|
||||
"id": "edge01.bjm01",
|
||||
"kwargs": {},
|
||||
"match": {
|
||||
"stratum": "> 5"
|
||||
}
|
||||
}
|
||||
|
||||
The event exemplified above was fired when the device
identified by the Minion id ``edge01.bjm01`` was synchronized
with an NTP server at a stratum level greater than 5.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python std lib
|
||||
import re
|
||||
import logging
|
||||
|
||||
# Import Salt modules
|
||||
from salt.ext import six
|
||||
import salt.utils.napalm
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
_numeric_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$')
|
||||
# the numeric regex will match the right operand, e.g '>= 20', '< 100', '!= 20', '< 1000.12' etc.
|
||||
_numeric_operand = {
|
||||
'<': '__lt__',
|
||||
'>': '__gt__',
|
||||
'>=': '__ge__',
|
||||
'<=': '__le__',
|
||||
'==': '__eq__',
|
||||
'!=': '__ne__',
|
||||
} # mathematical operand - private method map
|
||||
|
||||
|
||||
__virtualname__ = 'napalm'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
This beacon can only work when running under a regular or a proxy minion, managed through napalm.
|
||||
'''
|
||||
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
|
||||
|
||||
|
||||
def _compare(cur_cmp, cur_struct):
|
||||
'''
|
||||
Compares two objects and return a boolean value
|
||||
when there's a match.
|
||||
'''
|
||||
if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict):
|
||||
log.debug('Comparing dict to dict')
|
||||
for cmp_key, cmp_value in six.iteritems(cur_cmp):
|
||||
if cmp_key == '*':
|
||||
# matches any key from the source dictionary
|
||||
if isinstance(cmp_value, dict):
|
||||
found = False
|
||||
for _, cur_struct_val in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_struct_val)
|
||||
return found
|
||||
else:
|
||||
found = False
|
||||
if isinstance(cur_struct, (list, tuple)):
|
||||
for cur_ele in cur_struct:
|
||||
found |= _compare(cmp_value, cur_ele)
|
||||
elif isinstance(cur_struct, dict):
|
||||
for _, cur_ele in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_ele)
|
||||
return found
|
||||
else:
|
||||
if isinstance(cmp_value, dict):
|
||||
if cmp_key not in cur_struct:
|
||||
return False
|
||||
return _compare(cmp_value, cur_struct[cmp_key])
|
||||
if isinstance(cmp_value, list):
|
||||
found = False
|
||||
for _, cur_struct_val in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_struct_val)
|
||||
return found
|
||||
else:
|
||||
return _compare(cmp_value, cur_struct[cmp_key])
|
||||
elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)):
|
||||
log.debug('Comparing list to list')
|
||||
found = False
|
||||
for cur_cmp_ele in cur_cmp:
|
||||
for cur_struct_ele in cur_struct:
|
||||
found |= _compare(cur_cmp_ele, cur_struct_ele)
|
||||
return found
|
||||
elif isinstance(cur_cmp, dict) and isinstance(cur_struct, (list, tuple)):
|
||||
log.debug('Comparing dict to list (of dicts?)')
|
||||
found = False
|
||||
for cur_struct_ele in cur_struct:
|
||||
found |= _compare(cur_cmp, cur_struct_ele)
|
||||
return found
|
||||
elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool):
|
||||
log.debug('Comparing booleans: %s ? %s', cur_cmp, cur_struct)
|
||||
return cur_cmp == cur_struct
|
||||
elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \
|
||||
isinstance(cur_struct, (six.string_types, six.text_type)):
|
||||
log.debug('Comparing strings (and regex?): %s ? %s', cur_cmp, cur_struct)
|
||||
# Try a literal (or regex) match
|
||||
matched = re.match(cur_cmp, cur_struct, re.I)
|
||||
if matched:
|
||||
return True
|
||||
return False
|
||||
elif isinstance(cur_cmp, (six.integer_types, float)) and \
|
||||
isinstance(cur_struct, (six.integer_types, float)):
|
||||
log.debug('Comparing numeric values: %d ? %d', cur_cmp, cur_struct)
|
||||
# numeric compare
|
||||
return cur_cmp == cur_struct
|
||||
elif isinstance(cur_struct, (six.integer_types, float)) and \
|
||||
isinstance(cur_cmp, (six.string_types, six.text_type)):
|
||||
# Comparing the numerical value against a presumably mathematical expression
|
||||
log.debug('Comparing a numeric value (%d) with a string (%s)', cur_struct, cur_cmp)
|
||||
numeric_compare = _numeric_regex.match(cur_cmp)
|
||||
# determine if the value to compare against is a mathematical operand
|
||||
if numeric_compare:
|
||||
compare_value = numeric_compare.group(2)
|
||||
return getattr(float(cur_struct), _numeric_operand[numeric_compare.group(1)])(float(compare_value))
|
||||
return False
|
||||
return False
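To make the numeric-operand matching concrete, the short self-contained sketch below reuses the regex and operand map defined at the top of this module; the helper name and the printed examples are illustrative.

.. code-block:: python

    import re

    _numeric_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$')
    _numeric_operand = {'<': '__lt__', '>': '__gt__', '>=': '__ge__',
                        '<=': '__le__', '==': '__eq__', '!=': '__ne__'}

    def matches(expr, value):
        '''Return True when ``value`` satisfies an expression such as '> 5'.'''
        found = _numeric_regex.match(expr)
        if not found:
            return False
        return getattr(float(value), _numeric_operand[found.group(1)])(float(found.group(2)))

    print(matches('> 5', 16))   # True  -> stratum 16 would fire the event
    print(matches('> 5', 1))    # False -> stratum 1 would not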
|
||||
|
||||
|
||||
def validate(config):
|
||||
'''
|
||||
Validate the beacon configuration.
|
||||
'''
|
||||
# Must be a list of dicts.
|
||||
if not isinstance(config, list):
|
||||
return False, 'Configuration for napalm beacon must be a list.'
|
||||
for mod in config:
|
||||
fun = mod.keys()[0]
|
||||
fun_cfg = mod.values()[0]
|
||||
if not isinstance(fun_cfg, dict):
|
||||
return False, 'The match structure for the {} execution function output must be a dictionary'.format(fun)
|
||||
if fun not in __salt__:
|
||||
return False, 'Execution function {} is not available!'.format(fun)
return True, 'Valid configuration for the napalm beacon!'
|
||||
|
||||
|
||||
def beacon(config):
|
||||
'''
|
||||
Watch napalm function and fire events.
|
||||
'''
|
||||
log.debug('Executing napalm beacon with config:')
|
||||
log.debug(config)
|
||||
ret = []
|
||||
for mod in config:
|
||||
if not mod:
|
||||
continue
|
||||
event = {}
|
||||
fun = mod.keys()[0]
|
||||
fun_cfg = mod.values()[0]
|
||||
args = fun_cfg.pop('_args', [])
|
||||
kwargs = fun_cfg.pop('_kwargs', {})
|
||||
log.debug('Executing {fun} with {args} and {kwargs}'.format(
|
||||
fun=fun,
|
||||
args=args,
|
||||
kwargs=kwargs
|
||||
))
|
||||
fun_ret = __salt__[fun](*args, **kwargs)
|
||||
log.debug('Got the reply from the minion:')
|
||||
log.debug(fun_ret)
|
||||
if not fun_ret.get('result', False):
|
||||
log.error('Error whilst executing {}'.format(fun))
|
||||
log.error(fun_ret)
|
||||
continue
|
||||
fun_ret_out = fun_ret['out']
|
||||
log.debug('Comparing to:')
|
||||
log.debug(fun_cfg)
|
||||
try:
|
||||
fun_cmp_result = _compare(fun_cfg, fun_ret_out)
|
||||
except Exception as err:
|
||||
log.error(err, exc_info=True)
|
||||
# catch any exception and continue
|
||||
# to not jeopardise the execution of the next function in the list
|
||||
continue
|
||||
log.debug('Result of comparison: {res}'.format(res=fun_cmp_result))
|
||||
if fun_cmp_result:
|
||||
log.info('Matched {fun} with {cfg}'.format(
|
||||
fun=fun,
|
||||
cfg=fun_cfg
|
||||
))
|
||||
event['tag'] = '{os}/{fun}'.format(os=__grains__['os'], fun=fun)
|
||||
event['fun'] = fun
|
||||
event['args'] = args
|
||||
event['kwargs'] = kwargs
|
||||
event['data'] = fun_ret
|
||||
event['match'] = fun_cfg
|
||||
log.debug('Queueing event:')
|
||||
log.debug(event)
|
||||
ret.append(event)
|
||||
log.debug('NAPALM beacon generated the events:')
|
||||
log.debug(ret)
|
||||
return ret
|
@ -10,14 +10,19 @@ Beacon to fire events at login of users as registered in the wtmp file
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'wtmp'
|
||||
WTMP = '/var/log/wtmp'
|
||||
@ -37,9 +42,15 @@ FIELDS = [
|
||||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'wtmp.loc'
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(WTMP):
|
||||
@ -47,6 +58,20 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
@ -62,6 +87,44 @@ def validate(config):
|
||||
# Configuration for wtmp beacon should be a list of dicts
|
||||
if not isinstance(config, list):
|
||||
return False, ('Configuration for wtmp beacon must be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
@ -74,8 +137,40 @@ def beacon(config):
|
||||
|
||||
beacons:
|
||||
wtmp: []
|
||||
'''
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(WTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
@ -85,6 +180,7 @@ def beacon(config):
|
||||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
@ -93,7 +189,30 @@ def beacon(config):
|
||||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
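For readers following the beacon logic, here is a minimal standalone sketch of the time-range gate that _check_time_range implements, assuming python-dateutil is installed; the helper name in_time_range and the sample values are illustrative only, not part of the Salt code:

import time

import dateutil.parser


def in_time_range(time_range, now=None):
    # Parse the configured wall-clock strings (e.g. '8am') relative to today
    # and check whether `now` (epoch seconds) falls inside the window.
    now = int(time.time()) if now is None else now
    start = int(time.mktime(dateutil.parser.parse(time_range['start']).timetuple()))
    end = int(time.mktime(dateutil.parser.parse(time_range['end']).timetuple()))
    return start <= now <= end


print(in_time_range({'start': '8am', 'end': '4pm'}))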
|
||||
|
8
salt/cache/__init__.py
vendored
@ -73,7 +73,7 @@ class Cache(object):
|
||||
self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
|
||||
else:
|
||||
self.cachedir = cachedir
|
||||
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
|
||||
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
|
||||
self.serial = Serial(opts)
|
||||
self._modules = None
|
||||
self._kwargs = kwargs
|
||||
@ -224,7 +224,7 @@ class Cache(object):
|
||||
fun = '{0}.flush'.format(self.driver)
|
||||
return self.modules[fun](bank, key=key, **self._kwargs)
|
||||
|
||||
def ls(self, bank):
|
||||
def list(self, bank):
|
||||
'''
|
||||
Lists entries stored in the specified bank.
|
||||
|
||||
@ -240,11 +240,9 @@ class Cache(object):
|
||||
Raises an exception if cache driver detected an error accessing data
|
||||
in the cache backend (auth, permissions, etc).
|
||||
'''
|
||||
fun = '{0}.ls'.format(self.driver)
|
||||
fun = '{0}.list'.format(self.driver)
|
||||
return self.modules[fun](bank, **self._kwargs)
|
||||
|
||||
list = ls
|
||||
|
||||
def contains(self, bank, key=None):
|
||||
'''
|
||||
Checks if the specified bank contains the specified key.
|
||||
|
13
salt/cache/consul.py
vendored
@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store.
|
||||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
:depends: python-consul >= 0.2.0
|
||||
|
||||
It is up to the system administrator to set up and configure the Consul
|
||||
infrastructure. All that is needed for this plugin is a working Consul agent
|
||||
with read-write access to the key-value store.
|
||||
@ -61,7 +63,7 @@ api = None
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'consul'
|
||||
|
||||
__func_alias__ = {'list': 'ls'}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
@ -81,8 +83,11 @@ def __virtual__():
|
||||
'verify': __opts__.get('consul.verify', True),
|
||||
}
|
||||
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
try:
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
except AttributeError:
|
||||
return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed")
|
||||
|
||||
return __virtualname__
|
||||
|
||||
@ -139,7 +144,7 @@ def flush(bank, key=None):
|
||||
)
|
||||
|
||||
|
||||
def ls(bank):
|
||||
def list_(bank):
|
||||
'''
|
||||
Return an iterable object containing all entries stored in the specified bank.
|
||||
'''
|
||||
|
4
salt/cache/localfs.py
vendored
@ -24,7 +24,7 @@ import salt.utils.files
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__func_alias__ = {'list': 'ls'}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
|
||||
def __cachedir(kwargs=None):
|
||||
@ -144,7 +144,7 @@ def flush(bank, key=None, cachedir=None):
|
||||
return True
|
||||
|
||||
|
||||
def ls(bank, cachedir):
|
||||
def list_(bank, cachedir):
|
||||
'''
|
||||
Return an iterable object containing all entries stored in the specified bank.
|
||||
'''
|
||||
|
27
salt/cache/redis_cache.py
vendored
@ -161,9 +161,7 @@ from salt.exceptions import SaltCacheError
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
__virtualname__ = 'redis'
|
||||
__func_alias__ = {
|
||||
'ls': 'list'
|
||||
}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
log = logging.getLogger(__file__)
|
||||
|
||||
@ -478,23 +476,22 @@ def flush(bank, key=None):
|
||||
return True
|
||||
|
||||
|
||||
def ls(bank):
|
||||
def list_(bank):
|
||||
'''
|
||||
Lists entries stored in the specified bank.
|
||||
'''
|
||||
redis_server = _get_redis_server()
|
||||
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
|
||||
bank_keys = None
|
||||
bank_redis_key = _get_bank_redis_key(bank)
|
||||
try:
|
||||
bank_keys = redis_server.smembers(bank_keys_redis_key)
|
||||
banks = redis_server.smembers(bank_redis_key)
|
||||
except (RedisConnectionError, RedisResponseError) as rerr:
|
||||
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
|
||||
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
|
||||
rerr=rerr)
|
||||
log.error(mesg)
|
||||
raise SaltCacheError(mesg)
|
||||
if not bank_keys:
|
||||
if not banks:
|
||||
return []
|
||||
return list(bank_keys)
|
||||
return list(banks)
|
||||
|
||||
|
||||
def contains(bank, key):
|
||||
@ -502,15 +499,11 @@ def contains(bank, key):
|
||||
Checks if the specified bank contains the specified key.
|
||||
'''
|
||||
redis_server = _get_redis_server()
|
||||
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
|
||||
bank_keys = None
|
||||
bank_redis_key = _get_bank_redis_key(bank)
|
||||
try:
|
||||
bank_keys = redis_server.smembers(bank_keys_redis_key)
|
||||
return redis_server.sismember(bank_redis_key, key)
|
||||
except (RedisConnectionError, RedisResponseError) as rerr:
|
||||
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
|
||||
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
|
||||
rerr=rerr)
|
||||
log.error(mesg)
|
||||
raise SaltCacheError(mesg)
|
||||
if not bank_keys:
|
||||
return False
|
||||
return key in bank_keys
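The three cache drivers above all switch to the same naming convention: the module function is defined as list_ (to avoid shadowing the builtin list) and published under the public name via __func_alias__. A minimal sketch of that aliasing step, assuming a simplified stand-in for Salt's loader (load_functions here is hypothetical):

__func_alias__ = {'list_': 'list'}


def list_(bank):
    # A driver would return the entries stored in the given bank here.
    return ['minion1', 'minion2']


def load_functions(module_globals):
    # Simplified loader step: publish each public callable under its alias.
    aliases = module_globals.get('__func_alias__', {})
    funcs = {}
    for name, obj in module_globals.items():
        if callable(obj) and not name.startswith('_'):
            funcs[aliases.get(name, name)] = obj
    return funcs


print(sorted(load_functions(globals())))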
|
||||
|
@ -157,7 +157,7 @@ class BaseCaller(object):
|
||||
'''
|
||||
ret = {}
|
||||
fun = self.opts['fun']
|
||||
ret['jid'] = salt.utils.jid.gen_jid()
|
||||
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
|
||||
proc_fn = os.path.join(
|
||||
salt.minion.get_proc_dir(self.opts['cachedir']),
|
||||
ret['jid']
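The gen_jid() calls in this and later hunks now receive the loaded opts, so that options such as the new unique_jid setting (added further down in salt/config) can influence the jid format. A hedged sketch of the behaviour that option describes, not the actual salt.utils.jid implementation:

import datetime
import os


def gen_jid(unique_jid=False):
    # A jid is a sortable timestamp string; with unique_jid enabled an
    # underscore and the process id are appended so concurrent publishers
    # cannot produce colliding jids.
    jid = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
    return '{0}_{1}'.format(jid, os.getpid()) if unique_jid else jid


print(gen_jid())
print(gen_jid(unique_jid=True))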
|
||||
|
@ -18,14 +18,16 @@ import sys
|
||||
|
||||
# Import salt libs
|
||||
import salt.client
|
||||
import salt.output
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.itertools
|
||||
import salt.utils.minions
|
||||
import salt.utils.parsers
|
||||
import salt.utils.platform
|
||||
import salt.utils.stringutils
|
||||
from salt.utils.verify import verify_log
|
||||
import salt.output
|
||||
import salt.utils.verify
|
||||
|
||||
# Import 3rd party libs
|
||||
from salt.ext import six
|
||||
@ -46,7 +48,7 @@ class SaltCPCli(salt.utils.parsers.SaltCPOptionParser):
|
||||
|
||||
# Setup file logging!
|
||||
self.setup_logfile_logger()
|
||||
verify_log(self.config)
|
||||
salt.utils.verify.verify_log(self.config)
|
||||
|
||||
cp_ = SaltCP(self.config)
|
||||
cp_.run()
|
||||
@ -103,10 +105,70 @@ class SaltCP(object):
|
||||
empty_dirs.update(empty_dirs_)
|
||||
return files, sorted(empty_dirs)
|
||||
|
||||
def _file_dict(self, fn_):
|
||||
'''
|
||||
Take a path and return the contents of the file as a string
|
||||
'''
|
||||
if not os.path.isfile(fn_):
|
||||
err = 'The referenced file, {0} is not available.'.format(fn_)
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
with salt.utils.files.fopen(fn_, 'r') as fp_:
|
||||
data = fp_.read()
|
||||
return {fn_: data}
|
||||
|
||||
def _load_files(self):
|
||||
'''
|
||||
Parse the files indicated in opts['src'] and load them into a python
|
||||
object for transport
|
||||
'''
|
||||
files = {}
|
||||
for fn_ in self.opts['src']:
|
||||
if os.path.isfile(fn_):
|
||||
files.update(self._file_dict(fn_))
|
||||
elif os.path.isdir(fn_):
|
||||
salt.utils.print_cli(fn_ + ' is a directory, only files are supported '
|
||||
'in non-chunked mode. Use "--chunked" command '
|
||||
'line argument.')
|
||||
sys.exit(1)
|
||||
return files
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Make the salt client call
|
||||
'''
|
||||
if self.opts['chunked']:
|
||||
ret = self.run_chunked()
|
||||
else:
|
||||
ret = self.run_oldstyle()
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
|
||||
def run_oldstyle(self):
|
||||
'''
|
||||
Make the salt client call using the old-style all-in-one method
|
||||
'''
|
||||
arg = [self._load_files(), self.opts['dest']]
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
args = [self.opts['tgt'],
|
||||
'cp.recv',
|
||||
arg,
|
||||
self.opts['timeout'],
|
||||
]
|
||||
|
||||
selected_target_option = self.opts.get('selected_target_option', None)
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
return local.cmd(*args)
|
||||
|
||||
def run_chunked(self):
|
||||
'''
|
||||
Make the salt client call using the new chunked multi-call method
|
||||
'''
|
||||
files, empty_dirs = self._list_files()
|
||||
dest = self.opts['dest']
|
||||
gzip = self.opts['gzip']
|
||||
@ -122,9 +184,10 @@ class SaltCP(object):
|
||||
if gzip \
|
||||
else salt.utils.itertools.read_file
|
||||
|
||||
minions = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
tgt,
|
||||
tgt_type=selected_target_option or 'glob')
|
||||
minions = _res['minions']
|
||||
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
|
||||
@ -168,7 +231,7 @@ class SaltCP(object):
|
||||
)
|
||||
args = [
|
||||
tgt,
|
||||
'cp.recv',
|
||||
'cp.recv_chunked',
|
||||
[remote_path, chunk, append, gzip, mode],
|
||||
timeout,
|
||||
]
|
||||
@ -214,14 +277,11 @@ class SaltCP(object):
|
||||
else '',
|
||||
tgt,
|
||||
)
|
||||
args = [tgt, 'cp.recv', [remote_path, None], timeout]
|
||||
args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout]
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
|
||||
ret.setdefault(minion_id, {})[remote_path] = minion_ret
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
return ret
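A minimal sketch of the chunked-transfer idea behind run_chunked above: the local file is read in fixed-size pieces and every piece after the first is sent with an append flag so the receiving side can rebuild the file in order. The chunk size and the send() stub are illustrative; the real code optionally gzips each chunk and publishes it to the targeted minions via cp.recv_chunked:

CHUNK_SIZE = 64 * 1024  # illustrative only; the real size comes from configuration


def send(remote_path, chunk, append):
    # Stand-in for the LocalClient publish of cp.recv_chunked.
    print('sending {0} bytes to {1} (append={2})'.format(len(chunk), remote_path, append))


def push_chunked(local_path, remote_path):
    # Stream the file in fixed-size chunks rather than loading it whole,
    # which is what the old-style cp.recv call had to do.
    with open(local_path, 'rb') as fh:
        index = 0
        while True:
            chunk = fh.read(CHUNK_SIZE)
            if not chunk:
                break
            send(remote_path, chunk, append=index > 0)
            index += 1


push_chunked(__file__, '/tmp/example-copy')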
|
||||
|
@ -14,7 +14,7 @@ from __future__ import absolute_import
|
||||
# Import Salt libs
|
||||
import salt.spm
|
||||
import salt.utils.parsers as parsers
|
||||
from salt.utils.verify import verify_log
|
||||
from salt.utils.verify import verify_log, verify_env
|
||||
|
||||
|
||||
class SPM(parsers.SPMParser):
|
||||
@ -29,6 +29,10 @@ class SPM(parsers.SPMParser):
|
||||
ui = salt.spm.SPMCmdlineInterface()
|
||||
self.parse_args()
|
||||
self.setup_logfile_logger()
|
||||
v_dirs = [
|
||||
self.config['cachedir'],
|
||||
]
|
||||
verify_env(v_dirs, self.config['user'],)
|
||||
verify_log(self.config)
|
||||
client = salt.spm.SPMClient(ui, self.config)
|
||||
client.run(self.args)
|
||||
|
@ -189,15 +189,14 @@ class LocalClient(object):
|
||||
key_user = key_user.replace(u'\\', u'_')
|
||||
keyfile = os.path.join(self.opts[u'cachedir'],
|
||||
u'.{0}_key'.format(key_user))
|
||||
# Make sure all key parent directories are accessible
|
||||
salt.utils.verify.check_path_traversal(self.opts[u'cachedir'],
|
||||
key_user,
|
||||
self.skip_perm_errors)
|
||||
|
||||
try:
|
||||
# Make sure all key parent directories are accessible
|
||||
salt.utils.verify.check_path_traversal(self.opts[u'cachedir'],
|
||||
key_user,
|
||||
self.skip_perm_errors)
|
||||
with salt.utils.files.fopen(keyfile, u'r') as key:
|
||||
return key.read()
|
||||
except (OSError, IOError):
|
||||
except (OSError, IOError, SaltClientError):
|
||||
# Fall back to eauth
|
||||
return u''
|
||||
|
||||
@ -348,7 +347,8 @@ class LocalClient(object):
|
||||
return self._check_pub_data(pub_data)
|
||||
|
||||
def gather_minions(self, tgt, expr_form):
|
||||
return salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
return _res['minions']
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def run_job_async(
|
||||
@ -1142,6 +1142,7 @@ class LocalClient(object):
|
||||
minion_timeouts = {}
|
||||
|
||||
found = set()
|
||||
missing = []
|
||||
# Check to see if the jid is real, if not return the empty dict
|
||||
try:
|
||||
if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}:
|
||||
@ -1180,6 +1181,8 @@ class LocalClient(object):
|
||||
break
|
||||
if u'minions' in raw.get(u'data', {}):
|
||||
minions.update(raw[u'data'][u'minions'])
|
||||
if u'missing' in raw.get(u'data', {}):
|
||||
missing.extend(raw[u'data'][u'missing'])
|
||||
continue
|
||||
if u'return' not in raw[u'data']:
|
||||
continue
|
||||
@ -1321,6 +1324,10 @@ class LocalClient(object):
|
||||
for minion in list((minions - found)):
|
||||
yield {minion: {u'failed': True}}
|
||||
|
||||
if missing:
|
||||
for minion in missing:
|
||||
yield {minion: {'failed': True}}
|
||||
|
||||
def get_returns(
|
||||
self,
|
||||
jid,
|
||||
|
@ -14,8 +14,9 @@ client applications.
|
||||
http://docs.saltstack.com/ref/clients/index.html
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
@ -24,9 +25,9 @@ import salt.auth
|
||||
import salt.client
|
||||
import salt.runner
|
||||
import salt.wheel
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.event
|
||||
import salt.syspaths as syspaths
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import EauthAuthenticationError
|
||||
|
||||
|
||||
@ -229,7 +230,7 @@ class APIClient(object):
|
||||
functions = self.wheelClient.functions
|
||||
elif client == u'runner':
|
||||
functions = self.runnerClient.functions
|
||||
result = {u'master': salt.utils.argspec_report(functions, module)}
|
||||
result = {u'master': salt.utils.args.argspec_report(functions, module)}
|
||||
return result
|
||||
|
||||
def create_token(self, creds):
|
||||
@ -322,4 +323,4 @@ class APIClient(object):
|
||||
Need to convert this to a master call with appropriate authentication
|
||||
|
||||
'''
|
||||
return self.event.fire_event(data, tagify(tag, u'wui'))
|
||||
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
|
||||
|
@ -16,7 +16,8 @@ import copy as pycopy
|
||||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
import salt.minion
|
||||
import salt.utils
|
||||
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.doc
|
||||
import salt.utils.error
|
||||
import salt.utils.event
|
||||
@ -25,6 +26,7 @@ import salt.utils.job
|
||||
import salt.utils.lazy
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.state
|
||||
import salt.utils.versions
|
||||
import salt.transport
|
||||
import salt.log.setup
|
||||
@ -297,7 +299,7 @@ class SyncClientMixin(object):
|
||||
# this is not to clutter the output with the module loading
|
||||
# if we have a high debug level.
|
||||
self.mminion # pylint: disable=W0104
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid())
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
|
||||
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
|
||||
|
||||
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
|
||||
@ -362,29 +364,19 @@ class SyncClientMixin(object):
|
||||
# packed into the top level object. The plan is to move away from
|
||||
# that since the caller knows what is an arg vs a kwarg, but while
|
||||
# we make the transition we will load "kwargs" using format_call if
|
||||
# there are no kwargs in the low object passed in
|
||||
f_call = None
|
||||
if u'arg' not in low:
|
||||
# there are no kwargs in the low object passed in.
|
||||
|
||||
if u'arg' in low and u'kwarg' in low:
|
||||
args = low[u'arg']
|
||||
kwargs = low[u'kwarg']
|
||||
else:
|
||||
f_call = salt.utils.format_call(
|
||||
self.functions[fun],
|
||||
low,
|
||||
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
|
||||
)
|
||||
args = f_call.get(u'args', ())
|
||||
else:
|
||||
args = low[u'arg']
|
||||
|
||||
if u'kwarg' not in low:
|
||||
log.critical(
|
||||
u'kwargs must be passed inside the low data within the '
|
||||
u'\'kwarg\' key. See usage of '
|
||||
u'salt.utils.args.parse_input() and '
|
||||
u'salt.minion.load_args_and_kwargs() elsewhere in the '
|
||||
u'codebase.'
|
||||
)
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = low[u'kwarg']
|
||||
kwargs = f_call.get(u'kwargs', {})
|
||||
|
||||
# Update the event data with loaded args and kwargs
|
||||
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
|
||||
@ -396,7 +388,7 @@ class SyncClientMixin(object):
|
||||
data[u'success'] = True
|
||||
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
|
||||
# some functions can return boolean values
|
||||
data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
|
||||
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
|
||||
except (Exception, SystemExit) as ex:
|
||||
if isinstance(ex, salt.exceptions.NotImplemented):
|
||||
data[u'return'] = str(ex)
|
||||
@ -510,7 +502,7 @@ class AsyncClientMixin(object):
|
||||
|
||||
def _gen_async_pub(self, jid=None):
|
||||
if jid is None:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
|
||||
return {u'tag': tag, u'jid': jid}
|
||||
|
||||
|
@ -1049,8 +1049,7 @@ class Single(object):
|
||||
opts_pkg[u'id'],
|
||||
opts_pkg.get(u'environment', u'base')
|
||||
)
|
||||
pillar_dirs = {}
|
||||
pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
pillar_data = pillar.compile_pillar()
|
||||
|
||||
# TODO: cache minion opts in datap in master.py
|
||||
data = {u'opts': opts_pkg,
|
||||
|
@ -5,7 +5,7 @@ correct cloud modules
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import, print_function, generators
|
||||
from __future__ import absolute_import, print_function, generators, unicode_literals
|
||||
import os
|
||||
import copy
|
||||
import glob
|
||||
|
@ -65,6 +65,7 @@ import salt.config as config
|
||||
import salt.utils
|
||||
import salt.utils.cloud
|
||||
import salt.utils.files
|
||||
from salt.utils.versions import LooseVersion
|
||||
from salt.ext import six
|
||||
import salt.version
|
||||
from salt.exceptions import (
|
||||
@ -79,7 +80,6 @@ HAS_LIBS = False
|
||||
try:
|
||||
import salt.utils.msazure
|
||||
from salt.utils.msazure import object_to_dict
|
||||
import azure.storage
|
||||
from azure.common.credentials import (
|
||||
UserPassCredentials,
|
||||
ServicePrincipalCredentials,
|
||||
@ -115,7 +115,9 @@ try:
|
||||
from azure.mgmt.storage import StorageManagementClient
|
||||
from azure.mgmt.web import WebSiteManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
HAS_LIBS = True
|
||||
from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
|
||||
from azure.cli import core
|
||||
HAS_LIBS = LooseVersion(core.__version__) >= LooseVersion("2.0.12")
|
||||
except ImportError:
|
||||
pass
|
||||
# pylint: enable=wrong-import-position,wrong-import-order
|
||||
@ -1728,7 +1730,7 @@ def list_containers(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
if not storconn:
|
||||
storconn = get_conn(StorageManagementClient)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
@ -1769,7 +1771,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
'A container must be specified'
|
||||
)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
@ -1809,7 +1811,7 @@ def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
'A blob must be specified'
|
||||
)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
|
@ -20,7 +20,7 @@ under the "SSH Keys" section.
|
||||
personal_access_token: xxx
|
||||
ssh_key_file: /path/to/ssh/key/file
|
||||
ssh_key_names: my-key-name,my-key-name-2
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
|
||||
:depends: requests
|
||||
'''
|
||||
@ -59,10 +59,11 @@ except ImportError:
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'digital_ocean'
|
||||
__virtualname__ = 'digitalocean'
|
||||
__virtual_aliases__ = ('digital_ocean', 'do')
|
||||
|
||||
|
||||
# Only load in this module if the DIGITAL_OCEAN configurations are in place
|
||||
# Only load in this module if the DIGITALOCEAN configurations are in place
|
||||
def __virtual__():
|
||||
'''
|
||||
Check for DigitalOcean configurations
|
||||
@ -274,7 +275,7 @@ def create(vm_):
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
if vm_['profile'] and config.is_profile_configured(__opts__,
|
||||
__active_provider_name__ or 'digital_ocean',
|
||||
__active_provider_name__ or 'digitalocean',
|
||||
vm_['profile'],
|
||||
vm_=vm_) is False:
|
||||
return False
|
||||
@ -441,7 +442,7 @@ def create(vm_):
|
||||
ret = create_node(kwargs)
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Error creating {0} on DIGITAL_OCEAN\n\n'
|
||||
'Error creating {0} on DIGITALOCEAN\n\n'
|
||||
'The following exception was thrown when trying to '
|
||||
'run the initial deployment: {1}'.format(
|
||||
vm_['name'],
|
||||
@ -716,12 +717,12 @@ def import_keypair(kwargs=None, call=None):
|
||||
with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename:
|
||||
public_key_content = public_key_filename.read()
|
||||
|
||||
digital_ocean_kwargs = {
|
||||
digitalocean_kwargs = {
|
||||
'name': kwargs['keyname'],
|
||||
'public_key': public_key_content
|
||||
}
|
||||
|
||||
created_result = create_key(digital_ocean_kwargs, call=call)
|
||||
created_result = create_key(digitalocean_kwargs, call=call)
|
||||
return created_result
|
||||
|
||||
|
||||
@ -938,11 +939,11 @@ def show_pricing(kwargs=None, call=None):
|
||||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'digital_ocean':
|
||||
return {'Error': 'The requested profile does not belong to Digital Ocean'}
|
||||
if len(comps) < 2 or comps[1] != 'digitalocean':
|
||||
return {'Error': 'The requested profile does not belong to DigitalOcean'}
|
||||
|
||||
raw = {}
|
||||
ret = {}
|
||||
@ -968,7 +969,7 @@ def list_floating_ips(call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f list_floating_ips my-digitalocean-config
|
||||
'''
|
||||
@ -1008,7 +1009,7 @@ def show_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1041,7 +1042,7 @@ def create_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2'
|
||||
|
||||
@ -1083,7 +1084,7 @@ def delete_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f delete_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1118,7 +1119,7 @@ def assign_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f assign_floating_ip my-digitalocean-config droplet_id=1234567 floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1151,7 +1152,7 @@ def unassign_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f unassign_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
@ -3432,34 +3432,7 @@ def list_nodes_full(location=None, call=None):
|
||||
'or --function.'
|
||||
)
|
||||
|
||||
if not location:
|
||||
ret = {}
|
||||
locations = set(
|
||||
get_location(vm_) for vm_ in six.itervalues(__opts__['profiles'])
|
||||
if _vm_provider_driver(vm_)
|
||||
)
|
||||
|
||||
# If there aren't any profiles defined for EC2, check
|
||||
# the provider config file, or use the default location.
|
||||
if not locations:
|
||||
locations = [get_location()]
|
||||
|
||||
for loc in locations:
|
||||
ret.update(_list_nodes_full(loc))
|
||||
return ret
|
||||
|
||||
return _list_nodes_full(location)
|
||||
|
||||
|
||||
def _vm_provider_driver(vm_):
|
||||
alias, driver = vm_['driver'].split(':')
|
||||
if alias not in __opts__['providers']:
|
||||
return None
|
||||
|
||||
if driver not in __opts__['providers'][alias]:
|
||||
return None
|
||||
|
||||
return driver == 'ec2'
|
||||
return _list_nodes_full(location or get_location())
|
||||
|
||||
|
||||
def _extract_name_tag(item):
|
||||
@ -3570,16 +3543,15 @@ def list_nodes_min(location=None, call=None):
|
||||
|
||||
for instance in instances:
|
||||
if isinstance(instance['instancesSet']['item'], list):
|
||||
for item in instance['instancesSet']['item']:
|
||||
state = item['instanceState']['name']
|
||||
name = _extract_name_tag(item)
|
||||
id = item['instanceId']
|
||||
items = instance['instancesSet']['item']
|
||||
else:
|
||||
item = instance['instancesSet']['item']
|
||||
items = [instance['instancesSet']['item']]
|
||||
|
||||
for item in items:
|
||||
state = item['instanceState']['name']
|
||||
name = _extract_name_tag(item)
|
||||
id = item['instanceId']
|
||||
ret[name] = {'state': state, 'id': id}
|
||||
ret[name] = {'state': state, 'id': id}
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -2643,7 +2643,7 @@ def show_pricing(kwargs=None, call=None):
|
||||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'gce':
|
||||
|
@ -41,6 +41,7 @@ Example profile:
|
||||
master_port: 5506
|
||||
|
||||
Tested on:
|
||||
- Fedora 26 (libvirt 3.2.1, qemu 2.9.1)
|
||||
- Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1)
|
||||
- Fedora 23 (libvirt 1.2.18, qemu 2.4.1)
|
||||
- Centos 7 (libvirt 1.2.17, qemu 1.5.3)
|
||||
@ -82,9 +83,6 @@ from salt.exceptions import (
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
VIRT_STATE_NAME_MAP = {0: 'running',
|
||||
1: 'running',
|
||||
2: 'running',
|
||||
@ -99,6 +97,20 @@ IP_LEARNING_XML = """<filterref filter='clean-traffic'>
|
||||
|
||||
__virtualname__ = 'libvirt'
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument
|
||||
'''
|
||||
Redirect stderr prints from libvirt to salt logging.
|
||||
'''
|
||||
log.debug("libvirt error {0}".format(error))
|
||||
|
||||
|
||||
if HAS_LIBVIRT:
|
||||
libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
@ -280,7 +292,7 @@ def create(vm_):
|
||||
|
||||
validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True
|
||||
|
||||
log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
@ -516,7 +528,7 @@ def destroy(name, call=None):
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
@ -527,7 +539,7 @@ def destroy(name, call=None):
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
@ -44,9 +44,6 @@ from salt.exceptions import (
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Import Salt-Cloud Libs
|
||||
import salt.utils.cloud
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -1193,7 +1190,7 @@ def list_nodes_select(call=None):
|
||||
'''
|
||||
Return a list of the VMs that are on the provider, with select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full(), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
@ -1503,7 +1500,7 @@ def _query(action=None,
|
||||
if LASTCALL >= now:
|
||||
time.sleep(ratelimit_sleep)
|
||||
|
||||
result = salt.utils.http.query(
|
||||
result = __utils__['http.query'](
|
||||
url,
|
||||
method,
|
||||
params=args,
|
||||
|
@ -87,7 +87,6 @@ import pprint
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.config as config
|
||||
from salt.exceptions import (
|
||||
SaltCloudConfigError,
|
||||
@ -96,6 +95,7 @@ from salt.exceptions import (
|
||||
SaltCloudExecutionTimeout,
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
import salt.utils.files
|
||||
|
||||
# Import salt.cloud libs
|
||||
import salt.utils.cloud
|
||||
@ -805,7 +805,7 @@ def load_public_key(vm_):
|
||||
)
|
||||
)
|
||||
|
||||
with salt.utils.fopen(public_key_filename, 'r') as public_key:
|
||||
with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
|
||||
key = public_key.read().replace('\n', '')
|
||||
|
||||
return key
|
||||
|
@ -24,7 +24,6 @@ import logging
|
||||
# Import salt libs
|
||||
from salt.exceptions import SaltCloudSystemExit
|
||||
import salt.config as config
|
||||
import salt.utils.cloud as cloud
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
@ -136,7 +135,7 @@ def create(vm_info):
|
||||
)
|
||||
|
||||
log.debug("Going to fire event: starting create")
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'starting create',
|
||||
'salt/cloud/{0}/creating'.format(vm_info['name']),
|
||||
@ -151,7 +150,7 @@ def create(vm_info):
|
||||
'clone_from': vm_info['clonefrom']
|
||||
}
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'requesting instance',
|
||||
'salt/cloud/{0}/requesting'.format(vm_info['name']),
|
||||
@ -174,10 +173,10 @@ def create(vm_info):
|
||||
vm_info['key_filename'] = key_filename
|
||||
vm_info['ssh_host'] = ip
|
||||
|
||||
res = cloud.bootstrap(vm_info, __opts__)
|
||||
res = __utils__['cloud.bootstrap'](vm_info)
|
||||
vm_result.update(res)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'created machine',
|
||||
'salt/cloud/{0}/created'.format(vm_info['name']),
|
||||
@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
|
||||
"private_ips",
|
||||
"public_ips",
|
||||
]
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), attributes, call,
|
||||
)
|
||||
|
||||
@ -278,7 +277,7 @@ def list_nodes_select(call=None):
|
||||
"""
|
||||
Return a list of the VMs that are on the provider, with select fields
|
||||
"""
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
@ -306,7 +305,7 @@ def destroy(name, call=None):
|
||||
if not vb_machine_exists(name):
|
||||
return "{0} doesn't exist and can't be deleted".format(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
@ -317,7 +316,7 @@ def destroy(name, call=None):
|
||||
|
||||
vb_destroy_machine(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
|
@ -7,6 +7,7 @@ XenServer Cloud Driver
|
||||
The XenServer driver is designed to work with a Citrix XenServer.
|
||||
|
||||
Requires XenServer SDK
|
||||
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
|
||||
|
||||
Place a copy of the XenAPI.py in the Python site-packages folder.
|
||||
|
||||
@ -157,13 +158,27 @@ def _get_session():
|
||||
default=False,
|
||||
search_global=False
|
||||
)
|
||||
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
|
||||
log.debug('url: {} user: {} password: {}, originator: {}'.format(
|
||||
url,
|
||||
user,
|
||||
'XXX-pw-redacted-XXX',
|
||||
originator))
|
||||
session.xenapi.login_with_password(user, password, api_version, originator)
|
||||
try:
|
||||
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
|
||||
log.debug('url: {} user: {} password: {}, originator: {}'.format(
|
||||
url,
|
||||
user,
|
||||
'XXX-pw-redacted-XXX',
|
||||
originator))
|
||||
session.xenapi.login_with_password(
|
||||
user, password, api_version, originator)
|
||||
except XenAPI.Failure as ex:
|
||||
pool_master_addr = str(ex.__dict__['details'][1])
|
||||
slash_parts = url.split('/')
|
||||
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
|
||||
session = XenAPI.Session(new_url)
|
||||
log.debug('session is -> url: {} user: {} password: {}, originator:{}'.format(
|
||||
new_url,
|
||||
user,
|
||||
'XXX-pw-redacted-XXX',
|
||||
originator))
|
||||
session.xenapi.login_with_password(
|
||||
user, password, api_version, originator)
|
||||
return session
|
||||
|
||||
|
||||
@ -182,14 +197,19 @@ def list_nodes():
|
||||
for vm in vms:
|
||||
record = session.xenapi.VM.get_record(vm)
|
||||
if not record['is_a_template'] and not record['is_control_domain']:
|
||||
ret[record['name_label']] = {
|
||||
'id': record['uuid'],
|
||||
'image': record['other_config']['base_template_name'],
|
||||
'name': record['name_label'],
|
||||
'size': record['memory_dynamic_max'],
|
||||
'state': record['power_state'],
|
||||
'private_ips': get_vm_ip(record['name_label'], session),
|
||||
'public_ips': None}
|
||||
try:
|
||||
base_template_name = record['other_config']['base_template_name']
|
||||
except Exception:
|
||||
base_template_name = None
|
||||
log.debug('VM {} does not have the base_template_name attribute'.format(
|
||||
record['name_label']))
|
||||
ret[record['name_label']] = {'id': record['uuid'],
|
||||
'image': base_template_name,
|
||||
'name': record['name_label'],
|
||||
'size': record['memory_dynamic_max'],
|
||||
'state': record['power_state'],
|
||||
'private_ips': get_vm_ip(record['name_label'], session),
|
||||
'public_ips': None}
|
||||
return ret
|
||||
|
||||
|
||||
@ -296,10 +316,17 @@ def list_nodes_full(session=None):
|
||||
for vm in vms:
|
||||
record = session.xenapi.VM.get_record(vm)
|
||||
if not record['is_a_template'] and not record['is_control_domain']:
|
||||
# deal with cases where the VM doesn't have 'base_template_name' attribute
|
||||
try:
|
||||
base_template_name = record['other_config']['base_template_name']
|
||||
except Exception:
|
||||
base_template_name = None
|
||||
log.debug('VM {} does not have the base_template_name attribute'.format(
|
||||
record['name_label']))
|
||||
vm_cfg = session.xenapi.VM.get_record(vm)
|
||||
vm_cfg['id'] = record['uuid']
|
||||
vm_cfg['name'] = record['name_label']
|
||||
vm_cfg['image'] = record['other_config']['base_template_name']
|
||||
vm_cfg['image'] = base_template_name
|
||||
vm_cfg['size'] = None
|
||||
vm_cfg['state'] = record['power_state']
|
||||
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
|
||||
@ -455,8 +482,14 @@ def show_instance(name, session=None, call=None):
|
||||
vm = _get_vm(name, session=session)
|
||||
record = session.xenapi.VM.get_record(vm)
|
||||
if not record['is_a_template'] and not record['is_control_domain']:
|
||||
try:
|
||||
base_template_name = record['other_config']['base_template_name']
|
||||
except Exception:
|
||||
base_template_name = None
|
||||
log.debug('VM {} does not have the base_template_name attribute'.format(
|
||||
record['name_label']))
|
||||
ret = {'id': record['uuid'],
|
||||
'image': record['other_config']['base_template_name'],
|
||||
'image': base_template_name,
|
||||
'name': record['name_label'],
|
||||
'size': record['memory_dynamic_max'],
|
||||
'state': record['power_state'],
|
||||
@ -716,7 +749,7 @@ def _copy_vm(template=None, name=None, session=None, sr=None):
|
||||
'''
|
||||
Create VM by copy
|
||||
|
||||
This is faster and should be used if source and target are
|
||||
This is slower and should be used if source and target are
|
||||
NOT in the same storage repository
|
||||
|
||||
template = object reference
|
||||
|
File diff suppressed because it is too large
@ -53,7 +53,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S'
|
||||
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
|
||||
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
|
||||
_DFLT_LOG_FMT_LOGFILE = (
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
|
||||
)
|
||||
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']
|
||||
|
||||
@ -111,9 +111,10 @@ VALID_OPTS = {
|
||||
'master_port': (six.string_types, int),
|
||||
|
||||
# The behaviour of the minion when connecting to a master. Can specify 'failover',
|
||||
# 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an
|
||||
# exec module function to run to determine the master hostname. If 'disable' is specified
|
||||
# the minion will run, but will not try to connect to a master.
|
||||
# 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be
|
||||
# set to an exec module function to run to determine the master hostname. If 'disable' is
|
||||
# specified the minion will run, but will not try to connect to a master. If 'distributed'
|
||||
# is specified the minion will try to deterministically pick a master based on its id.
|
||||
'master_type': str,
|
||||
|
||||
# Specify the format in which the master address will be specified. Can
|
||||
@ -186,6 +187,16 @@ VALID_OPTS = {
|
||||
# A unique identifier for this daemon
|
||||
'id': str,
|
||||
|
||||
# Use a module function to determine the unique identifier. If this is
|
||||
# set and 'id' is not set, it will allow invocation of a module function
|
||||
# to determine the value of 'id'. For simple invocations without function
|
||||
# arguments, this may be a string that is the function name. For
|
||||
# invocations with function arguments, this may be a dictionary with the
|
||||
# key being the function name, and the value being an embedded dictionary
|
||||
# where each key is a function argument name and each value is the
|
||||
# corresponding argument value.
|
||||
'id_function': (dict, str),
|
||||
|
||||
# The directory to store all cache files.
|
||||
'cachedir': str,
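The comment block above accepts two shapes for id_function: a bare 'module.function' string, or a one-key dictionary mapping the function name to its keyword arguments. Here is a small resolver sketch illustrating how the two forms reduce to a name plus kwargs; this is not Salt's loader, and the module/function names in the usage lines are placeholders:

def resolve_id_function(id_function):
    # String form: the value is the module.function name, called without arguments.
    if isinstance(id_function, str):
        return id_function, {}
    # Dict form: a single key (module.function) mapping to its keyword arguments.
    if isinstance(id_function, dict):
        mod_fun, fun_kwargs = next(iter(id_function.items()))
        return mod_fun, fun_kwargs or {}
    raise ValueError("'id_function' must be a string or a dictionary")


print(resolve_id_function('network.get_hostname'))
print(resolve_id_function({'grains.get': {'key': 'fqdn'}}))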
|
||||
|
||||
@ -326,13 +337,16 @@ VALID_OPTS = {
|
||||
# Whether or not processes should be forked when needed. The alternative is to use threading.
|
||||
'multiprocessing': bool,
|
||||
|
||||
# Maximum number of concurrently active processes at any given point in time
|
||||
'process_count_max': int,
|
||||
|
||||
# Whether or not the salt minion should run scheduled mine updates
|
||||
'mine_enabled': bool,
|
||||
|
||||
# Whether or not scheduled mine updates should be accompanied by a job return for the job cache
|
||||
'mine_return_job': bool,
|
||||
|
||||
# Schedule a mine update every n number of seconds
|
||||
# The number of minutes between mine updates.
|
||||
'mine_interval': int,
|
||||
|
||||
# The ipc strategy. (i.e., sockets versus tcp, etc)
|
||||
@ -417,6 +431,12 @@ VALID_OPTS = {
|
||||
# Tell the client to display the jid when a job is published
|
||||
'show_jid': bool,
|
||||
|
||||
# Ensure that a generated jid is always unique. If this is set, the jid
|
||||
# format is different due to an underscore and process id being appended
|
||||
# to the jid. WARNING: A change to the jid format may break external
|
||||
# applications that depend on the original format.
|
||||
'unique_jid': bool,
|
||||
|
||||
# Tells the highstate outputter to show successful states. False will omit successes.
|
||||
'state_verbose': bool,
|
||||
|
||||
@ -573,6 +593,23 @@ VALID_OPTS = {
|
||||
# False in 2016.3.0
|
||||
'add_proxymodule_to_opts': bool,
|
||||
|
||||
# Merge pillar data into configuration opts.
|
||||
# As multiple proxies can run on the same server, we may need different
|
||||
# configuration options for each, while there's one single configuration file.
|
||||
# The solution is merging the pillar data of each proxy minion into the opts.
|
||||
'proxy_merge_pillar_in_opts': bool,
|
||||
|
||||
# Deep merge of pillar data into configuration opts.
|
||||
# Evaluated only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_deep_merge_pillar_in_opts': bool,
|
||||
|
||||
# The strategy used when merging pillar into opts.
|
||||
# Considered only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_merge_pillar_in_opts_strategy': str,
|
||||
|
||||
# Allow enabling mine details using pillar data.
|
||||
'proxy_mines_pillar': bool,
|
||||
|
||||
# In some particular cases, always alive proxies are not beneficial.
|
||||
# This option can be used in those less dynamic environments:
|
||||
# the user can request the connection
|
||||
@ -712,6 +749,10 @@ VALID_OPTS = {
|
||||
'fileserver_limit_traversal': bool,
|
||||
'fileserver_verify_config': bool,
|
||||
|
||||
# Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
|
||||
# applied only if the user was not matched by other matchers.
|
||||
'permissive_acl': bool,
|
||||
|
||||
# Optionally enables keeping the calculated user's auth list in the token file.
|
||||
'keep_acl_in_token': bool,
|
||||
|
||||
@ -719,6 +760,10 @@ VALID_OPTS = {
|
||||
# same module used for external authentication.
|
||||
'eauth_acl_module': str,
|
||||
|
||||
# Subsystem to use to maintain eauth tokens. By default, tokens are stored on the local
|
||||
# filesystem
|
||||
'eauth_tokens': str,
|
||||
|
||||
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
|
||||
# higher than the system default in order to account for the way zeromq consumes file handles.
|
||||
'max_open_files': int,
|
||||
@ -904,6 +949,7 @@ VALID_OPTS = {
|
||||
'ssh_scan_timeout': float,
|
||||
'ssh_identities_only': bool,
|
||||
'ssh_log_file': str,
|
||||
'ssh_config_file': str,
|
||||
|
||||
# Enable ioflo verbose logging. Warning! Very verbose!
|
||||
'ioflo_verbose': int,
|
||||
@ -1075,6 +1121,11 @@ VALID_OPTS = {
|
||||
# (in other words, require that minions have 'minion_sign_messages'
|
||||
# turned on)
|
||||
'require_minion_sign_messages': bool,
|
||||
|
||||
# The list of config entries to be passed to external pillar function as
|
||||
# part of the extra_minion_data param
|
||||
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
|
||||
'pass_to_ext_pillars': (six.string_types, list),
|
||||
}
|
||||
|
||||
# default configurations
|
||||
@ -1098,6 +1149,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'root_dir': salt.syspaths.ROOT_DIR,
|
||||
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
|
||||
'id': '',
|
||||
'id_function': {},
|
||||
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
|
||||
'append_minionid_config_dirs': [],
|
||||
'cache_jobs': False,
|
||||
@ -1193,6 +1245,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'gitfs_ref_types': ['branch', 'tag', 'sha'],
|
||||
'gitfs_refspecs': _DFLT_REFSPECS,
|
||||
'gitfs_disable_saltenv_mapping': False,
|
||||
'unique_jid': False,
|
||||
'hash_type': 'sha256',
|
||||
'disable_modules': [],
|
||||
'disable_returners': [],
|
||||
@ -1212,6 +1265,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'auto_accept': True,
|
||||
'autosign_timeout': 120,
|
||||
'multiprocessing': True,
|
||||
'process_count_max': -1,
|
||||
'mine_enabled': True,
|
||||
'mine_return_job': False,
|
||||
'mine_interval': 60,
|
||||
@ -1437,6 +1491,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'hgfs_saltenv_blacklist': [],
|
||||
'show_timeout': True,
|
||||
'show_jid': False,
|
||||
'unique_jid': False,
|
||||
'svnfs_remotes': [],
|
||||
'svnfs_mountpoint': '',
|
||||
'svnfs_root': '',
|
||||
@ -1469,8 +1524,9 @@ DEFAULT_MASTER_OPTS = {
|
||||
'syndic_forward_all_events': False,
|
||||
'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
|
||||
'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
|
||||
'runner_dirs': [],
|
||||
'outputter_dirs': [],
|
||||
'runner_dirs': [],
|
||||
'utils_dirs': [],
|
||||
'client_acl_verify': True,
|
||||
'publisher_acl': {},
|
||||
'publisher_acl_blacklist': {},
|
||||
@ -1478,8 +1534,10 @@ DEFAULT_MASTER_OPTS = {
|
||||
'external_auth': {},
|
||||
'token_expire': 43200,
|
||||
'token_expire_user_override': False,
|
||||
'permissive_acl': False,
|
||||
'keep_acl_in_token': False,
|
||||
'eauth_acl_module': '',
|
||||
'eauth_tokens': 'localfs',
|
||||
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
|
||||
'file_recv': False,
|
||||
'file_recv_max_size': 100,
|
||||
@ -1601,6 +1659,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'ssh_scan_timeout': 0.01,
|
||||
'ssh_identities_only': False,
|
||||
'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'),
|
||||
'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'),
|
||||
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
|
||||
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
|
||||
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
|
||||
@ -1667,6 +1726,12 @@ DEFAULT_PROXY_MINION_OPTS = {
|
||||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
'proxy_merge_pillar_in_opts': False,
|
||||
'proxy_deep_merge_pillar_in_opts': False,
|
||||
'proxy_merge_pillar_in_opts_strategy': 'smart',
|
||||
|
||||
'proxy_mines_pillar': True,
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
# If this option is set to False,
|
||||
# the connection with the remote dumb device
|
||||
@ -2665,7 +2730,7 @@ def old_to_new(opts):
|
||||
providers = (
|
||||
'AWS',
|
||||
'CLOUDSTACK',
|
||||
'DIGITAL_OCEAN',
|
||||
'DIGITALOCEAN',
|
||||
'EC2',
|
||||
'GOGRID',
|
||||
'IBMSCE',
|
||||
@ -3329,6 +3394,57 @@ def _cache_id(minion_id, cache_file):
|
||||
log.error('Could not cache minion ID: {0}'.format(exc))
|
||||
|
||||
|
||||
def call_id_function(opts):
|
||||
'''
|
||||
Evaluate the function that determines the ID if the 'id_function'
|
||||
option is set and return the result
|
||||
'''
|
||||
if opts.get('id'):
|
||||
return opts['id']
|
||||
|
||||
# Import 'salt.loader' here to avoid a circular dependency
|
||||
import salt.loader as loader
|
||||
|
||||
if isinstance(opts['id_function'], str):
|
||||
mod_fun = opts['id_function']
|
||||
fun_kwargs = {}
|
||||
elif isinstance(opts['id_function'], dict):
|
||||
mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
|
||||
if fun_kwargs is None:
|
||||
fun_kwargs = {}
|
||||
else:
|
||||
log.error('\'id_function\' option is neither a string nor a dictionary')
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
# split module and function and try loading the module
|
||||
mod, fun = mod_fun.split('.')
|
||||
if not opts.get('grains'):
|
||||
# Get grains for use by the module
|
||||
opts['grains'] = loader.grains(opts)
|
||||
|
||||
try:
|
||||
id_mod = loader.raw_mod(opts, mod, fun)
|
||||
if not id_mod:
|
||||
raise KeyError
|
||||
# we take whatever the module returns as the minion ID
|
||||
newid = id_mod[mod_fun](**fun_kwargs)
|
||||
if not isinstance(newid, str) or not newid:
|
||||
log.error('Function {0} returned value "{1}" of type {2} instead of string'.format(
|
||||
mod_fun, newid, type(newid))
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
log.info('Evaluated minion ID from module: {0}'.format(mod_fun))
|
||||
return newid
|
||||
except TypeError:
|
||||
log.error('Function arguments {0} are incorrect for function {1}'.format(
|
||||
fun_kwargs, mod_fun)
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
except KeyError:
|
||||
log.error('Failed to load module {0}'.format(mod_fun))
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
|
||||
def get_id(opts, cache_minion_id=False):
|
||||
'''
|
||||
Guess the id of the minion.
|
||||
@ -3370,13 +3486,21 @@ def get_id(opts, cache_minion_id=False):
|
||||
log.debug('Guessing ID. The id can be explicitly set in {0}'
|
||||
.format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
|
||||
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
if opts.get('id_function'):
|
||||
newid = call_id_function(opts)
|
||||
else:
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
|
||||
if opts.get('minion_id_lowercase'):
|
||||
newid = newid.lower()
|
||||
log.debug('Changed minion id {0} to lowercase.'.format(newid))
|
||||
if '__role' in opts and opts.get('__role') == 'minion':
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(newid))
|
||||
if opts.get('id_function'):
|
||||
log.debug('Found minion id from external function {0}: {1}'.format(
|
||||
opts['id_function'], newid))
|
||||
else:
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(
|
||||
newid))
|
||||
if cache_minion_id and opts.get('minion_id_caching', True):
|
||||
_cache_id(newid, id_cache)
|
||||
is_ipv4 = salt.utils.network.is_ipv4(newid)
|
||||
@ -3596,12 +3720,23 @@ def apply_master_config(overrides=None, defaults=None):
|
||||
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
|
||||
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
|
||||
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Make sure ext_mods gets set if it is an untrue value
|
||||
# (here to catch older bad configs)
|
||||
opts['extension_modules'] = (
|
||||
opts.get('extension_modules') or
|
||||
os.path.join(opts['cachedir'], 'extmods')
|
||||
)
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Set up the utils_dirs location from the extension_modules location
|
||||
opts['utils_dirs'] = (
|
||||
opts.get('utils_dirs') or
|
||||
[os.path.join(opts['extension_modules'], 'utils')]
|
||||
)
|
||||
|
||||
# Insert all 'utils_dirs' directories to the system path
|
||||
insert_system_path(opts, opts['utils_dirs'])
|
||||
|
||||
if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic':
|
||||
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
|
||||
if 'ipc_write_buffer' not in overrides:
|
||||
|
215
salt/config/schemas/esxcluster.py
Normal file
@ -0,0 +1,215 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
|
||||
salt.config.schemas.esxcluster
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
ESX Cluster configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
DefinitionsSchema,
|
||||
ComplexSchemaItem,
|
||||
DictItem,
|
||||
ArrayItem,
|
||||
IntegerItem,
|
||||
BooleanItem,
|
||||
StringItem,
|
||||
AnyOfItem)
|
||||
|
||||
|
||||
class OptionValueItem(ComplexSchemaItem):
|
||||
'''Schema item of the OptionValue'''
|
||||
|
||||
title = 'OptionValue'
|
||||
key = StringItem(title='Key', required=True)
|
||||
value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()])
|
||||
|
||||
|
||||
class AdmissionControlPolicyItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA admission control policy
|
||||
'''
|
||||
|
||||
title = 'Admission Control Policy'
|
||||
|
||||
cpu_failover_percent = IntegerItem(
|
||||
title='CPU Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
memory_failover_percent = IntegerItem(
|
||||
title='Memory Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
|
||||
|
||||
class DefaultVmSettingsItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA default vm settings
|
||||
'''
|
||||
|
||||
title = 'Default VM Settings'
|
||||
|
||||
isolation_response = StringItem(
|
||||
title='Isolation Response',
|
||||
enum=['clusterIsolationResponse', 'none', 'powerOff', 'shutdown'])
|
||||
restart_priority = StringItem(
|
||||
title='Restart Priority',
|
||||
enum=['clusterRestartPriority', 'disabled', 'high', 'low', 'medium'])
|
||||
|
||||
|
||||
class HAConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of ESX cluster high availability
|
||||
'''
|
||||
|
||||
title = 'HA Configuration'
|
||||
description = 'ESX cluster HA configuration json schema item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if HA should be enabled')
|
||||
admission_control_enabled = BooleanItem(
|
||||
title='Admission Control Enabled')
|
||||
admission_control_policy = AdmissionControlPolicyItem()
|
||||
default_vm_settings = DefaultVmSettingsItem()
|
||||
hb_ds_candidate_policy = StringItem(
|
||||
title='Heartbeat Datastore Candidate Policy',
|
||||
enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference',
|
||||
'userSelectedDs'])
|
||||
host_monitoring = StringItem(title='Host Monitoring',
|
||||
choices=['enabled', 'disabled'])
|
||||
options = ArrayItem(min_items=1, items=OptionValueItem())
|
||||
vm_monitoring = StringItem(
|
||||
title='Vm Monitoring',
|
||||
choices=['vmMonitoringDisabled', 'vmAndAppMonitoring',
|
||||
'vmMonitoringOnly'])
|
||||
|
||||
|
||||
class vSANClusterConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster vSAN configuration
|
||||
'''
|
||||
|
||||
title = 'vSAN Configuration'
|
||||
description = 'ESX cluster vSAN configurationi item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if vSAN should be enabled')
|
||||
auto_claim_storage = BooleanItem(
|
||||
title='Auto Claim Storage',
|
||||
description='Specifies whether the storage of member ESXi hosts should '
|
||||
'be automatically claimed for vSAN')
|
||||
dedup_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies dedup should be enabled')
|
||||
compression_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if compression should be enabled')
|
||||
|
||||
|
||||
class DRSConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster DRS configuration
|
||||
'''
|
||||
|
||||
title = 'DRS Configuration'
|
||||
description = 'ESX cluster DRS configuration item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if DRS should be enabled')
|
||||
vmotion_rate = IntegerItem(
|
||||
title='vMotion rate',
|
||||
description='Aggressiveness to do automatic vMotions: '
|
||||
'1 (least aggressive) - 5 (most aggressive)',
|
||||
minimum=1,
|
||||
maximum=5)
|
||||
default_vm_behavior = StringItem(
|
||||
title='Default VM DRS Behavior',
|
||||
description='Specifies the default VM DRS behavior',
|
||||
enum=['fullyAutomated', 'partiallyAutomated', 'manual'])
|
||||
|
||||
|
||||
class ESXClusterConfigSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of the ESX cluster config
|
||||
'''
|
||||
|
||||
title = 'ESX Cluster Configuration Schema'
|
||||
description = 'ESX cluster configuration schema'
|
||||
|
||||
ha = HAConfigItem()
|
||||
vsan = vSANClusterConfigItem()
|
||||
drs = DRSConfigItem()
|
||||
vm_swap_placement = StringItem(title='VM Swap Placement')
|
||||
|
||||
|
||||
class ESXClusterEntitySchema(Schema):
|
||||
'''Schema of the ESX cluster entity'''
|
||||
|
||||
title = 'ESX Cluster Entity Schema'
|
||||
description = 'ESX cluster entity schema'
|
||||
|
||||
type = StringItem(title='Type',
|
||||
description='Specifies the entity type',
|
||||
required=True,
|
||||
enum=['cluster'])
|
||||
|
||||
datacenter = StringItem(title='Datacenter',
|
||||
description='Specifies the cluster datacenter',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
cluster = StringItem(title='Cluster',
|
||||
description='Specifies the cluster name',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
|
||||
class LicenseSchema(Schema):
|
||||
'''
|
||||
Schema item of the ESX cluster vSAN configuration
|
||||
'''
|
||||
|
||||
title = 'Licenses schema'
|
||||
description = 'License configuration schema'
|
||||
|
||||
licenses = DictItem(
|
||||
title='Licenses',
|
||||
description='Dictionary containing the license name to key mapping',
|
||||
required=True,
|
||||
additional_properties=StringItem(
|
||||
title='License Key',
|
||||
description='Specifies the license key',
|
||||
pattern=r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$'))
|
||||
|
||||
|
||||
class EsxclusterProxySchema(Schema):
|
||||
'''
|
||||
Schema of the esxcluster proxy input
|
||||
'''
|
||||
|
||||
title = 'Esxcluster Proxy Schema'
|
||||
description = 'Esxcluster proxy schema'
|
||||
additional_properties = False
|
||||
proxytype = StringItem(required=True,
|
||||
enum=['esxcluster'])
|
||||
vcenter = StringItem(required=True, pattern=r'[^\s]+')
|
||||
datacenter = StringItem(required=True)
|
||||
cluster = StringItem(required=True)
|
||||
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
|
||||
username = StringItem()
|
||||
passwords = ArrayItem(min_items=1,
|
||||
items=StringItem(),
|
||||
unique_items=True)
|
||||
# TODO Should be changed when anyOf is supported for schemas
|
||||
domain = StringItem()
|
||||
principal = StringItem()
|
||||
protocol = StringItem()
|
||||
port = IntegerItem(minimum=1)
|
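A short usage sketch for the new schemas, assuming (as with other salt.utils.schema based classes) that the serialize() classmethod renders a schema class into a plain JSON Schema dict; the sample values below are illustrative only:

# Illustrative only: validate a proxy configuration against the new schema.
import jsonschema

from salt.config.schemas.esxcluster import EsxclusterProxySchema

proxy_config = {
    'proxytype': 'esxcluster',
    'vcenter': 'vcenter.example.com',
    'datacenter': 'dc1',
    'cluster': 'cluster1',
    'mechanism': 'userpass',
    'username': 'admin',
    'passwords': ['hunter2'],
}

# serialize() is assumed to return the JSON Schema as a dict.
schema = EsxclusterProxySchema.serialize()

# Raises jsonschema.exceptions.ValidationError if the config does not conform.
jsonschema.validate(proxy_config, schema)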
Some files were not shown because too many files have changed in this diff.