Merge branch 'develop' into issue36942
This commit is contained in: commit 7091f8ec71

.github/CODEOWNERS (vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
# SALTSTACK CODE OWNERS

# See https://help.github.com/articles/about-codeowners/
# for more info about CODEOWNERS file

# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

# See https://help.github.com/articles/about-codeowners/
# for more info about the CODEOWNERS file

# Team Boto
salt/**/*boto* @saltstack/team-boto

# Team Core
salt/auth/ @saltstack/team-core
salt/cache/ @saltstack/team-core
salt/cli/ @saltstack/team-core
salt/client/* @saltstack/team-core
salt/config/* @saltstack/team-core
salt/daemons/ @saltstack/team-core
salt/pillar/ @saltstack/team-core
salt/loader.py @saltstack/team-core
salt/payload.py @saltstack/team-core
salt/**/master* @saltstack/team-core
salt/**/minion* @saltstack/team-core

# Team Cloud
salt/cloud/ @saltstack/team-cloud
salt/utils/openstack/ @saltstack/team-cloud
salt/utils/aws.py @saltstack/team-cloud
salt/**/*cloud* @saltstack/team-cloud

# Team NetAPI
salt/cli/api.py @saltstack/team-netapi
salt/client/netapi.py @saltstack/team-netapi
salt/netapi/ @saltstack/team-netapi

# Team Network
salt/proxy/ @saltstack/team-proxy

# Team SPM
salt/cli/spm.py @saltstack/team-spm
salt/spm/ @saltstack/team-spm

# Team SSH
salt/cli/ssh.py @saltstack/team-ssh
salt/client/ssh/ @saltstack/team-ssh
salt/runners/ssh.py @saltstack/team-ssh
salt/**/thin.py @saltstack/team-ssh

# Team State
salt/state.py @saltstack/team-state

# Team Transport
salt/transport/ @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport

# Team Windows
salt/**/*win* @saltstack/team-windows
.github/stale.yml (vendored, 4 changes)

@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
# 1100 is approximately 3 years
daysUntilStale: 1100
# 1000 is approximately 2 years and 9 months
daysUntilStale: 1000

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
@@ -4,9 +4,14 @@
    "name": "ryan-lane",
    "files": ["salt/**/*boto*.py"],
    "skipTeamPrs": false
  },
  {
    "name": "tkwilliams",
    "files": ["salt/**/*boto*.py"],
    "skipTeamPrs": false
  }
],
"skipTitle": "Merge forward",
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"]
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh", "basepi"]
}
README.rst (12 changes)

@@ -67,10 +67,11 @@ Engage SaltStack

`SaltConf`_, **User Groups and Meetups** - SaltStack has a vibrant and `global
community`_ of customers, users, developers and enthusiasts. Connect with other
Salted folks in your area of the world, or join `SaltConf16`_, the SaltStack
annual user conference, April 19-21 in Salt Lake City. Please let us know if
you would like to start a user group or if we should add your existing
SaltStack user group to this list by emailing: info@saltstack.com
Salted folks in your area of the world, or join `SaltConf`_, the SaltStack
annual user conference held in Salt Lake City. Please visit the `SaltConf`_ site
for details of our next conference. Also, please let us know if you would like
to start a user group or if we should add your existing SaltStack user group to
this list by emailing: info@saltstack.com

**SaltStack Training** - Get access to proprietary `SaltStack education
offerings`_ through instructor-led training offered on-site, virtually or at

@@ -89,9 +90,8 @@ services`_ offerings.
* LinkedIn Group - `<https://www.linkedin.com/groups/4877160>`_
* Google+ - `<https://plus.google.com/b/112856352920437801867/+SaltStackInc/posts>`_

.. _SaltConf: http://www.youtube.com/user/saltstack
.. _global community: http://www.meetup.com/pro/saltstack/
.. _SaltConf16: http://saltconf.com/
.. _SaltConf: http://saltconf.com/
.. _SaltStack education offerings: http://saltstack.com/training/
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
.. _SaltStack professional services: http://saltstack.com/services/
@@ -3,7 +3,7 @@
# directory is identical.

#my-digitalocean-config:
# driver: digital_ocean
# driver: digitalocean
# client_key: wFGEwgregeqw3435gDger
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
# location: New York 1

@@ -1,5 +1,5 @@
#my-digitalocean-config:
# driver: digital_ocean
# driver: digitalocean
# client_key: wFGEwgregeqw3435gDger
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
# location: New York 1
conf/master (12 changes)

@@ -59,15 +59,14 @@

# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", etc.
#extension_modules: <no default>
# "states", "returners", "engines", "utils", etc.
#extension_modules: /var/cache/salt/master/extmods

# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", "engines", etc.
# "states", "returners", "engines", "utils", etc.
# Like 'extension_modules' but can take an array of paths
#module_dirs: <no default>
#   - /var/cache/salt/minion/extmods
#module_dirs: []

# Verify and set permissions on configuration directories at startup:
#verify_env: True

@@ -533,6 +532,9 @@
# Add any additional locations to look for master runners:
#runner_dirs: []

# Add any additional locations to look for master utils:
#utils_dirs: []

# Enable Cython for master side modules:
#cython_enable: False

@@ -373,7 +373,7 @@
# interface: eth0
# cidr: '10.0.0.0/8'

# The number of seconds a mine update runs.
# The number of minutes between mine updates.
#mine_interval: 60

# Windows platforms lack posix IPC and must rely on slower TCP based inter-
@@ -10795,6 +10795,7 @@ cmd_whitelist_glob:
.UNINDENT
.UNINDENT
.SS Thread Settings
.SS \fBmultiprocessing\fP
.sp
Default: \fBTrue\fP
.sp

@@ -22,6 +22,7 @@ beacon modules
    load
    log
    memusage
    napalm_beacon
    network_info
    network_settings
    pkg
doc/ref/beacons/all/salt.beacons.napalm_beacon.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
==========================
salt.beacons.napalm_beacon
==========================

.. automodule:: salt.beacons.napalm_beacon
    :members:
@@ -136,7 +136,7 @@ Query Options

.. versionadded:: 2014.7.0

Display a list of configured profiles. Pass in a cloud provider to view
the provider's associated profiles, such as ``digital_ocean``, or pass in
the provider's associated profiles, such as ``digitalocean``, or pass in
``all`` to list all the configured profiles.

@@ -13,7 +13,7 @@ Full list of Salt Cloud modules

    aliyun
    azurearm
    cloudstack
    digital_ocean
    digitalocean
    dimensiondata
    ec2
    gce
@@ -1,6 +1,6 @@
===============================
salt.cloud.clouds.digital_ocean
salt.cloud.clouds.digitalocean
===============================

.. automodule:: salt.cloud.clouds.digital_ocean
.. automodule:: salt.cloud.clouds.digitalocean
    :members:
@@ -183,8 +183,8 @@ The directory to store the pki authentication keys.

Directory for custom modules. This directory can contain subdirectories for
each of Salt's module types such as ``runners``, ``output``, ``wheel``,
``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to
:conf_master:`root_dir`.
``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc.
This path is appended to :conf_master:`root_dir`.

.. code-block:: yaml

@@ -1731,6 +1731,22 @@ Set additional directories to search for runner modules.

    runner_dirs:
      - /var/lib/salt/runners

.. conf_master:: utils_dirs

``utils_dirs``
---------------

.. versionadded:: Oxygen

Default: ``[]``

Set additional directories to search for util modules.

.. code-block:: yaml

    utils_dirs:
      - /var/lib/salt/utils
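To make the new ``utils_dirs`` option above more concrete, here is a minimal sketch (not part of this commit) of what a custom util module dropped into such a directory could look like; the path ``/var/lib/salt/utils/myutils.py`` and the function name are illustrative assumptions only.

.. code-block:: python

    # /var/lib/salt/utils/myutils.py -- hypothetical custom util module.
    # Once utils_dirs includes /var/lib/salt/utils, other modules can call
    # __utils__['myutils.get_id'](__opts__).

    def __virtual__():
        # A utils module may gate itself like any other loader module.
        return True


    def get_id(opts):
        # Trivial helper: read the master/minion id out of the opts dict.
        return opts.get('id', 'unknown')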
.. conf_master:: cython_enable

``cython_enable``

@@ -3807,7 +3823,7 @@ they were created by a different master.

Default: ``True``

Normally, when processing :ref:`git_pillar remotes
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
<git-pillar-configuration>`, if more than one repo under the same ``git``
section in the ``ext_pillar`` configuration refers to the same pillar
environment, then each repo in a given environment will have access to the
other repos' files to be referenced in their top files. However, it may be
@@ -706,7 +706,7 @@ Note these can be defined in the pillar for a minion as well.

Default: ``60``

The number of seconds a mine update runs.
The number of minutes between mine updates.

.. code-block:: yaml

@@ -2113,6 +2113,41 @@ It will be interpreted as megabytes.

    file_recv_max_size: 100

.. conf_minion:: pass_to_ext_pillars

``pass_to_ext_pillars``
-----------------------

Specify a list of configuration keys whose values are to be passed to
external pillar functions.

Suboptions can be specified using the ':' notation (i.e. ``option:suboption``)

The values are merged and included in the ``extra_minion_data`` optional
parameter of the external pillar function. The ``extra_minion_data`` parameter
is passed only to the external pillar functions that have it explicitly
specified in their definition.

If the config contains

.. code-block:: yaml

    opt1: value1
    opt2:
      subopt1: value2
      subopt2: value3

    pass_to_ext_pillars:
      - opt1
      - opt2: subopt1

the ``extra_minion_data`` parameter will be

.. code-block:: python

    {'opt1': 'value1',
     'opt2': {'subopt1': 'value2'}}
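For orientation, the external pillar side of the ``pass_to_ext_pillars`` mechanism described above looks roughly like this — a minimal, hypothetical sketch (module and key names are assumptions, not taken from this commit):

.. code-block:: python

    # Hypothetical external pillar module, e.g. salt/pillar/my_ext_pillar.py.
    def ext_pillar(minion_id, pillar, extra_minion_data=None):
        # Because extra_minion_data is declared explicitly, Salt passes in the
        # merged values collected via pass_to_ext_pillars (the dict shown above).
        extra_minion_data = extra_minion_data or {}
        return {'from_minion_config': extra_minion_data.get('opt1')}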
Security Settings
=================

@@ -2369,11 +2404,14 @@ Thread Settings

.. conf_minion:: multiprocessing

``multiprocessing``
-------

Default: ``True``

If `multiprocessing` is enabled when a minion receives a
If ``multiprocessing`` is enabled when a minion receives a
publication a new process is spawned and the command is executed therein.
Conversely, if `multiprocessing` is disabled the new publication will be run
Conversely, if ``multiprocessing`` is disabled the new publication will be run
executed in a thread.
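For reference, the option discussed above is toggled in the minion configuration file; a one-line sketch, not a line from this diff:

.. code-block:: yaml

    multiprocessing: False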
@@ -118,3 +118,53 @@ has to be closed after every command.

.. code-block:: yaml

    proxy_always_alive: False

``proxy_merge_pillar_in_opts``
------------------------------

.. versionadded:: 2017.7.3

Default: ``False``.

Whether the pillar data should be merged into the proxy configuration options.
As multiple proxies can run on the same server, we may need different
configuration options for each, while there's one single configuration file.
The solution is merging the pillar data of each proxy minion into the opts.

.. code-block:: yaml

    proxy_merge_pillar_in_opts: True

``proxy_deep_merge_pillar_in_opts``
-----------------------------------

.. versionadded:: 2017.7.3

Default: ``False``.

Deep merge of pillar data into configuration opts.
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
enabled.

``proxy_merge_pillar_in_opts_strategy``
---------------------------------------

.. versionadded:: 2017.7.3

Default: ``smart``.

The strategy used when merging pillar configuration into opts.
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
enabled.

``proxy_mines_pillar``
----------------------

.. versionadded:: 2017.7.3

Default: ``True``.

Allow enabling mine details using pillar data. This evaluates the mine
configuration under the pillar, for the following regular minion options that
are also equally available on the proxy minion: :conf_minion:`mine_interval`,
and :conf_minion:`mine_functions`.
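To illustrate the merge described above — a rough, hypothetical sketch whose keys and values are assumptions, not content from this commit — with ``proxy_merge_pillar_in_opts: True`` set in the proxy configuration, pillar data like the following for one proxy minion ends up merged into that proxy's opts:

.. code-block:: yaml

    # Hypothetical pillar assigned to a single proxy minion
    mine_interval: 30
    mine_functions:
      net.lldp: []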
@@ -44,6 +44,7 @@ execution modules
    boto_apigateway
    boto_asg
    boto_cfn
    boto_cloudfront
    boto_cloudtrail
    boto_cloudwatch
    boto_cloudwatch_event

@@ -326,6 +327,7 @@ execution modules
    ps
    publish
    puppet
    purefa
    pushbullet
    pushover_notify
    pw_group

@@ -417,6 +419,7 @@ execution modules
    test
    testinframod
    test_virtual
    textfsm_mod
    timezone
    tls
    tomcat
doc/ref/modules/all/salt.modules.boto_cloudfront.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
============================
salt.modules.boto_cloudfront
============================

.. automodule:: salt.modules.boto_cloudfront
    :members:
@@ -13,7 +13,7 @@ salt.modules.kernelpkg

Execution Module                             Used for
============================================ ========================================
:py:mod:`~salt.modules.kernelpkg_linux_apt`  Debian/Ubuntu-based distros which use
                                             ``apt-get(8)`` for package management
                                             ``apt-get`` for package management
:py:mod:`~salt.modules.kernelpkg_linux_yum`  RedHat-based distros and derivatives
                                             using ``yum(8)`` or ``dnf(8)``
                                             using ``yum`` or ``dnf``
============================================ ========================================
doc/ref/modules/all/salt.modules.purefa.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
===================
salt.modules.purefa
===================

.. automodule:: salt.modules.purefa
    :members:

doc/ref/modules/all/salt.modules.textfsm_mod.rst (new file, 5 lines)

@@ -0,0 +1,5 @@
salt.modules.textfsm_mod module
===============================

.. automodule:: salt.modules.textfsm_mod
    :members:
@@ -451,7 +451,7 @@ For example:
    '''
    Only load if git exists on the system
    '''
    if salt.utils.which('git') is None:
    if salt.utils.path.which('git') is None:
        return (False,
                'The git execution module cannot be loaded: git unavailable.')
    else:

@@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate functions
    for chunk in chunks:
        # The state runtime uses "tags" to track completed jobs, it may
        # look familiar with the _|-
        tag = salt.utils.gen_state_tag(chunk)
        tag = __utils__['state.gen_tag'](chunk)
        if tag in running:
            # Already ran the pkg state, skip aggregation
            continue
@@ -31,6 +31,7 @@ state modules
    boto_apigateway
    boto_asg
    boto_cfn
    boto_cloudfront
    boto_cloudtrail
    boto_cloudwatch_alarm
    boto_cloudwatch_event

@@ -179,6 +180,7 @@ state modules
    netusers
    network
    netyang
    nfs_export
    nftables
    npm
    ntp

doc/ref/states/all/salt.states.boto_cloudfront.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
===========================
salt.states.boto_cloudfront
===========================

.. automodule:: salt.states.boto_cloudfront
    :members:

doc/ref/states/all/salt.states.nfs_export.rst (new file, 6 lines)

@@ -0,0 +1,6 @@
======================
salt.states.nfs_export
======================

.. automodule:: salt.states.nfs_export
    :members:
@@ -153,7 +153,12 @@ A State Module must return a dict containing the following keys/values:
  However, if a state is going to fail and this can be determined
  in test mode without applying the change, ``False`` can be returned.

- **comment:** A string containing a summary of the result.
- **comment:** A list of strings or a single string summarizing the result.
  Note that support for lists of strings is available as of Salt Oxygen.
  Lists of strings will be joined with newlines to form the final comment;
  this is useful to allow multiple comments from subparts of a state.
  Prefer to keep line lengths short (use multiple lines as needed),
  and end with punctuation (e.g. a period) to delimit multiple comments.
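As a quick illustration of the list-form ``comment`` described above, here is a minimal, hypothetical state-function sketch (the function name and message text are illustrative, not from this commit):

.. code-block:: python

    def configured(name):
        ret = {'name': name, 'result': True, 'changes': {}}
        # Each entry describes one sub-step; Salt joins the list with newlines.
        ret['comment'] = [
            'Configuration file is up to date.',
            'Service did not need a restart.',
        ]
        return ret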
The return data can also include the **pchanges** key, which stands for
`predictive changes`. The **pchanges** key informs the State system what

@@ -15,9 +15,7 @@ More information about Azure is located at `http://www.windowsazure.com/

Dependencies
============
* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc6
* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
* The python-requests library, for Python < 2.7.9.
* Azure Cli ```pip install 'azure-cli>=2.0.12'```
* A Microsoft Azure account
* `Salt <https://github.com/saltstack/salt>`_
@@ -183,7 +183,7 @@ imports should be absent from the Salt Cloud module.

A good example of a non-libcloud driver is the DigitalOcean driver:

https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digital_ocean.py
https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digitalocean.py

The ``create()`` Function
-------------------------

@@ -444,7 +444,7 @@ under the API Access tab.

.. code-block:: yaml

    my-digitalocean-config:
      driver: digital_ocean
      driver: digitalocean
      personal_access_token: xxx
      location: New York 1
@@ -19,7 +19,7 @@ under the "SSH Keys" section.

    # /etc/salt/cloud.providers.d/ directory.

    my-digitalocean-config:
      driver: digital_ocean
      driver: digitalocean
      personal_access_token: xxx
      ssh_key_file: /path/to/ssh/key/file
      ssh_key_names: my-key-name,my-key-name-2

@@ -63,7 +63,7 @@ command:

    # salt-cloud --list-locations my-digitalocean-config
    my-digitalocean-config:
        ----------
        digital_ocean:
        digitalocean:
            ----------
            Amsterdam 1:
                ----------

@@ -87,7 +87,7 @@ command:

    # salt-cloud --list-sizes my-digitalocean-config
    my-digitalocean-config:
        ----------
        digital_ocean:
        digitalocean:
            ----------
            512MB:
                ----------

@@ -117,7 +117,7 @@ command:

    # salt-cloud --list-images my-digitalocean-config
    my-digitalocean-config:
        ----------
        digital_ocean:
        digitalocean:
            ----------
            10.1:
                ----------

@@ -142,7 +142,7 @@ Profile Specifics:

ssh_username
------------

If using a FreeBSD image from Digital Ocean, you'll need to set the ``ssh_username``
If using a FreeBSD image from DigitalOcean, you'll need to set the ``ssh_username``
setting to ``freebsd`` in your profile configuration.

.. code-block:: yaml
@@ -260,6 +260,21 @@ The Salt development team will back-port bug fixes made to ``develop`` to the
current release branch if the contributor cannot create the pull request
against that branch.

Release Branches
----------------

For each release, a branch will be created when the SaltStack release team is
ready to tag. The release branch is created from the parent branch and will be
the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
strategy will allow for more stability when there is a need for a re-tag during
the testing phase of the release process.

Once the release branch is created, the fixes required for a given release, as
determined by the SaltStack release team, will be added to this branch. All
commits in this branch will be merged forward into the parent branch as well.

Keeping Salt Forks in Sync
==========================
@@ -219,7 +219,7 @@ the default cloud provider configuration file for DigitalOcean looks like this:

.. code-block:: yaml

    digitalocean-config:
      driver: digital_ocean
      driver: digitalocean
      client_key: ''
      api_key: ''
      location: New York 1

@@ -230,7 +230,7 @@ must be provided:

.. code-block:: yaml

    digitalocean-config:
      driver: digital_ocean
      driver: digitalocean
      client_key: wFGEwgregeqw3435gDger
      api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
      location: New York 1

@@ -541,7 +541,7 @@ provider configuration file in the integration test file directory located at
``tests/integration/files/conf/cloud.*.d/``.

The following is an example of the default profile configuration file for Digital
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf``:
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digitalocean.conf``:

.. code-block:: yaml

@@ -557,12 +557,12 @@ be provided by the user by editing the provider configuration file before runnin
tests.

The following is an example of the default provider configuration file for Digital
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digital_ocean.conf``:
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digitalocean.conf``:

.. code-block:: yaml

    digitalocean-config:
      driver: digital_ocean
      driver: digitalocean
      client_key: ''
      api_key: ''
      location: New York 1
@@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described

Installation from the Official SaltStack Repository
===================================================

Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the
Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the
Official SaltStack repository.

Instructions are at https://repo.saltstack.com/#debian.

@@ -106,7 +106,7 @@ bringing with it the ability to access authenticated repositories.

Using the new features will require updates to the git ext_pillar
configuration, further details can be found in the :ref:`pillar.git_pillar
<git-pillar-2015-8-0-and-later>` docs.
<git-pillar-configuration>` docs.

.. _pygit2: https://github.com/libgit2/pygit2
@@ -51,6 +51,19 @@ New NaCl Renderer

A new renderer has been added for encrypted data.

New support for Cisco UCS Chassis
---------------------------------

The salt proxy minion now allows for control of Cisco UCS chassis. See
the `cimc` modules for details.

New salt-ssh roster
-------------------

A new roster has been added that allows users to pull in a list of hosts
for salt-ssh targeting from a ~/.ssh configuration. For full details,
please see the `sshconfig` roster.

New GitFS Features
------------------

@@ -103,6 +116,14 @@ Newer PyWinRM Versions
Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self
signed certificates. :ref:`Here<new-pywinrm>` for more information.

DigitalOcean
------------

The DigitalOcean driver has been renamed to conform to the company's name. The
new driver name is ``digitalocean``. The old name ``digital_ocean`` and a
short one ``do`` will still be supported through virtual aliases; this is mostly
cosmetic.

Solaris Logical Domains In Virtual Grain
----------------------------------------

@@ -110,9 +131,15 @@ Support has been added to the ``virtual`` grain for detecting Solaris LDOMs
running on T-Series SPARC hardware. The ``virtual_subtype`` grain is
populated as a list of domain roles.

Lists of comments in state returns
----------------------------------

State functions can now return a list of strings for the ``comment`` field,
as opposed to only a single string.
This is meant to ease writing states with multiple or multi-part comments.

Beacon configuration changes
----------------------------------------
----------------------------

In order to remain consistent and to align with other Salt components such as states,
support for configuring beacons using dictionary based configuration has been deprecated

@@ -617,6 +644,11 @@ Profitbricks Cloud Updated Dependency
The minimum version of the ``profitbrick`` python package for the ``profitbricks``
cloud driver has changed from 3.0.0 to 3.1.0.

Azure Cloud Updated Dependency
------------------------------

The azure sdk used for the ``azurearm`` cloud driver now depends on ``azure-cli>=2.0.12``

Module Deprecations
===================

@@ -708,6 +740,13 @@ during blackout. This release adds support for using this feature in the grains
as well, by using special grains keys ``minion_blackout`` and
``minion_blackout_whitelist``.

Pillar Deprecations
-------------------

The legacy configuration for ``git_pillar`` has been removed. Please use the new
configuration for ``git_pillar``, which is documented in the external pillar module
for :mod:`git_pillar <salt.pillar.git_pillar>`.

Utils Deprecations
==================

@@ -722,3 +761,7 @@ Other Miscellaneous Deprecations
The ``version.py`` file had the following changes:

- The ``rc_info`` function was removed. Please use ``pre_info`` instead.

Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be
used instead. The removal of these warnings does not have a behavior change. Only
the warning text was removed.
@@ -1110,15 +1110,8 @@ Using Git as an External Pillar Source

The git external pillar (a.k.a. git_pillar) has been rewritten for the 2015.8.0
release. This rewrite brings with it pygit2_ support (allowing for access to
authenticated repositories), as well as more granular support for per-remote
configuration.

To make use of the new features, changes to the git ext_pillar configuration
must be made. The new configuration schema is detailed :ref:`here
<git-pillar-2015-8-0-and-later>`.

For Salt releases before 2015.8.0, click :ref:`here <git-pillar-pre-2015-8-0>`
for documentation.

configuration. This configuration schema is detailed :ref:`here
<git-pillar-configuration>`.
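For orientation, the new-style ``git_pillar`` configuration referenced above takes roughly the following shape; this is a minimal sketch with a placeholder repository URL, not text from this commit.

.. code-block:: yaml

    # Master config: one git_pillar remote serving the base environment
    ext_pillar:
      - git:
        - master https://github.com/example/pillar.git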
.. _faq-gitfs-bug:

@@ -13,7 +13,7 @@ Using Apache Libcloud for declarative and procedural multi-cloud orchestration

Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows
you to manage different cloud resources through a unified and easy to use API. Apache Libcloud supports over
60 cloud platforms, including Amazon, Microsoft Azure, Digital Ocean, Google Cloud Platform and OpenStack.
60 cloud platforms, including Amazon, Microsoft Azure, DigitalOcean, Google Cloud Platform and OpenStack.

Execution and state modules are available for Compute, DNS, Storage and Load Balancer drivers from Apache Libcloud in
SaltStack.
@@ -81,14 +81,18 @@ the ``foo`` utility module with a ``__virtual__`` function.

    def bar():
        return 'baz'

.. versionadded:: Oxygen
    Instantiating objects from classes declared in util modules works with
    Master side modules, such as Runners, Outputters, etc.

You can also write your utility modules in an object-oriented fashion:

.. code-block:: python

    # -*- coding: utf-8 -*-
    '''
    My utils module
    ---------------
    My OOP-style utils module
    -------------------------

    This module contains common functions for use in my other custom types.
    '''
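As a rough, hypothetical sketch of the object-oriented pattern described above — an OOP-style util plus a small factory function that other modules (including master-side runners) can reach through ``__utils__`` — assuming a module saved as ``foo_oop.py`` under a configured ``utils_dirs`` path:

.. code-block:: python

    # -*- coding: utf-8 -*-
    '''
    Hypothetical OOP-style utils module (foo_oop.py); all names are illustrative.
    '''


    class Foo(object):
        def __init__(self, prefix):
            self.prefix = prefix

        def bar(self, value):
            return '{0}:{1}'.format(self.prefix, value)


    def make_foo(prefix='foo'):
        # Exposed by the loader, so a runner or execution module can do:
        #     __utils__['foo_oop.make_foo']('x').bar('y')
        return Foo(prefix)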
@@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file

:param bool allusers: This parameter is specific to `.msi` installations. It
    tells `msiexec` to install the software for all users. The default is True.

:param bool cache_dir: If true, the entire directory where the installer resides
    will be recursively cached. This is useful for installers that depend on
    other files in the same directory for installation.
:param bool cache_dir: If true when installer URL begins with salt://, the
    entire directory where the installer resides will be recursively cached.
    This is useful for installers that depend on other files in the same
    directory for installation.

    .. note:: Only applies to salt: installer URLs.

:param str cache_file:
    When installer URL begins with salt://, this indicates single file to copy
    down for use with the installer. Copied to the same location as the
    installer. Use this over ``cache_dir`` if there are many files in the
    directory and you only need a specific file and don't want to cache
    additional files that may reside in the installer directory.

Here's an example for a software package that has dependent files:
@@ -15,91 +15,119 @@
# This script is run as a part of the macOS Salt Installation
#
###############################################################################
echo "Post install started on:" > /tmp/postinstall.txt
date >> /tmp/postinstall.txt

###############################################################################
# Define Variables
###############################################################################
# Get Minor Version
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
# Path Variables
INSTALL_DIR="/opt/salt"
BIN_DIR="$INSTALL_DIR/bin"
CONFIG_DIR="/etc/salt"
TEMP_DIR="/tmp"
SBIN_DIR="/usr/local/sbin"

###############################################################################
# Set up logging and error handling
###############################################################################
echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR

quit_on_error() {
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt"
exit -1
}

###############################################################################
# Check for existing minion config, copy if it doesn't exist
###############################################################################
if [ ! -f /etc/salt/minion ]; then
echo "Config copy: Started..." >> /tmp/postinstall.txt
cp /etc/salt/minion.dist /etc/salt/minion
echo "Config copy: Successful" >> /tmp/postinstall.txt
if [ ! -f "$CONFIG_DIR/minion" ]; then
echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt"
cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion"
echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt"
fi

###############################################################################
# Create symlink to salt-config.sh
###############################################################################
# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt
if [ ! -d "/usr/local/sbin" ]; then
mkdir /usr/local/sbin
if [ ! -d "$SBIN_DIR" ]; then
echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt"
mkdir "$SBIN_DIR"
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
fi
ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config
echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt"
ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config"
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"

###############################################################################
# Add salt to paths.d
###############################################################################
# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt
if [ ! -d "/etc/paths.d" ]; then
echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt"
mkdir /etc/paths.d
echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
fi
sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt'
sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt'
echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt"
sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt"
sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt"
echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt"

###############################################################################
# Register Salt as a service
###############################################################################
setup_services_maverick() {
echo "Using old (< 10.10) launchctl interface" >> /tmp/postinstall.txt
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Stop running service..." >> /tmp/postinstall.txt
echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt"
fi;
echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"

echo "Service start: Successful" >> /tmp/postinstall.txt

echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt

echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"

return 0
}

setup_services_yosemite_and_later() {
echo "Using new (>= 10.10) launchctl interface" >> /tmp/postinstall.txt
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl enable system/com.saltstack.salt.minion
echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt
echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt"

echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt"

if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Service is running" >> /tmp/postinstall.txt
echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt"
else
echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt
echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt"
launchctl kickstart -kp system/com.saltstack.salt.minion
echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt"
fi

echo "Service start: Successful" >> /tmp/postinstall.txt

echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"

echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt"
launchctl disable system/com.saltstack.salt.master
launchctl disable system/com.saltstack.salt.syndic
launchctl disable system/com.saltstack.salt.api
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"

return 0
}

OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)

echo "Service start: Enabling service..." >> /tmp/postinstall.txt
echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt"
case $MINOR in
9 )
setup_services_maverick;

@@ -108,7 +136,9 @@ case $MINOR in
setup_services_yosemite_and_later;
;;
esac
echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt"

echo "Post install completed successfully" >> /tmp/postinstall.txt
echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"

exit 0
@@ -6,7 +6,8 @@
# Date: December 2015
#
# Description: This script stops the salt minion service before attempting to
# install Salt on macOS
# install Salt on macOS. It also removes the /opt/salt/bin
# directory, symlink to salt-config, and salt from paths.d.
#
# Requirements:
# - None

@@ -15,12 +16,29 @@
# This script is run as a part of the macOS Salt Installation
#
###############################################################################
echo "Preinstall started on:" > /tmp/preinstall.txt
date >> /tmp/preinstall.txt

###############################################################################
# Define Variables
###############################################################################
# Get Minor Version
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
# Path Variables
INSTALL_DIR="/opt/salt"
BIN_DIR="$INSTALL_DIR/bin"
CONFIG_DIR="/etc/salt"
TEMP_DIR="/tmp"
SBIN_DIR="/usr/local/sbin"

###############################################################################
# Set up logging and error handling
###############################################################################
echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR

quit_on_error() {
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt"
exit -1
}

@@ -31,24 +49,58 @@ MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
# Stop the service
###############################################################################
stop_service_maverick() {
echo "Using old (< 10.10) launchctl interface" >> /tmp/preinstall.txt
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Stop service: Started..." >> /tmp/preinstall.txt
echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Stop service: Successful" >> /tmp/preinstall.txt
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
}

stop_service_yosemite_and_later() {
echo "Using new (>= 10.10) launchctl interface" >> /tmp/preinstall.txt
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Stop service: Started..." >> /tmp/preinstall.txt
echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.minion
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Stop service: Successful" >> /tmp/preinstall.txt
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.master
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.syndic
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.api
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
}

echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt"
case $MINOR in
9 )
stop_service_maverick;

@@ -57,6 +109,36 @@ case $MINOR in
stop_service_yosemite_and_later;
;;
esac
echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt
echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt"

###############################################################################
# Remove the Symlink to salt-config.sh
###############################################################################
if [ -L "$SBIN_DIR/salt-config" ]; then
echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt"
rm "$SBIN_DIR/salt-config"
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi

###############################################################################
# Remove the $INSTALL_DIR directory
###############################################################################
if [ -d "$INSTALL_DIR" ]; then
echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt"
rm -rf "$INSTALL_DIR"
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi

###############################################################################
# Remove the salt from the paths.d
###############################################################################
if [ -f "/etc/paths.d/salt" ]; then
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
rm "/etc/paths.d/salt"
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi

echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"

exit 0
@@ -89,7 +89,7 @@ if Defined x (
if %Python%==2 (
Set "PyDir=C:\Python27"
) else (
Set "PyDir=C:\Program Files\Python35"
Set "PyDir=C:\Python35"
)
Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts"

@@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
DownloadFileWithProgress $url $file

Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
}

#------------------------------------------------------------------------------

@@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower())))

#==============================================================================
# Update PIP and SetupTools
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."

@@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {

#==============================================================================
# Install pypi resources using pip
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."

@@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
}

#==============================================================================
# Move PyWin32 DLL's to site-packages\win32
#==============================================================================
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force

# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"

# Remove pythonwin directory
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse

# Remove PyWin32 PostInstall and testall Scripts
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse

#==============================================================================
# Install PyYAML with CLoader
# This has to be a compiled binary to get the CLoader

@@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
DownloadFileWithProgress $url $file

Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
$p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
}

#------------------------------------------------------------------------------

@@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i

# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force

# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."

@@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse

# Remove PyWin32 PostInstall and testall Scripts
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse

#==============================================================================
# Fix PyCrypto
#==============================================================================

@@ -56,7 +56,7 @@ if %Python%==2 (
Set "PyVerMajor=2"
Set "PyVerMinor=7"
) else (
Set "PyDir=C:\Program Files\Python35"
Set "PyDir=C:\Python35"
Set "PyVerMajor=3"
Set "PyVerMinor=5"
)

@@ -16,9 +16,10 @@ if %errorLevel%==0 (
)
echo.

:CheckPython2
if exist "\Python27" goto RemovePython2
if exist "\Program Files\Python35" goto RemovePython3
goto eof

goto CheckPython3

:RemovePython2
rem Uninstall Python 2.7

@@ -47,25 +48,30 @@ goto eof

goto eof

:CheckPython3
if exist "\Python35" goto RemovePython3

goto eof

:RemovePython3
echo %0 :: Uninstalling Python 3 ...
echo ---------------------------------------------------------------------
:: 64 bit
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
echo %0 :: - 3.5.3 64bit
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
)

:: 32 bit
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
echo %0 :: - 3.5.3 32bit
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
)

rem wipe the Python directory
echo %0 :: Removing the C:\Program Files\Python35 Directory ...
echo %0 :: Removing the C:\Python35 Directory ...
echo ---------------------------------------------------------------------
rd /s /q "C:\Program Files\Python35"
rd /s /q "C:\Python35"
if %errorLevel%==0 (
echo Successful
) else (

@@ -44,7 +44,7 @@ ${StrStrAdv}
!define CPUARCH "x86"
!endif

; Part of the Trim function for Strings
# Part of the Trim function for Strings
!define Trim "!insertmacro Trim"
!macro Trim ResultVar String
Push "${String}"

@@ -61,27 +61,27 @@ ${StrStrAdv}
!define MUI_UNICON "salt.ico"
!define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp"

; Welcome page
# Welcome page
!insertmacro MUI_PAGE_WELCOME

; License page
# License page
!insertmacro MUI_PAGE_LICENSE "LICENSE.txt"

; Configure Minion page
# Configure Minion page
Page custom pageMinionConfig pageMinionConfig_Leave

; Instfiles page
# Instfiles page
!insertmacro MUI_PAGE_INSTFILES

; Finish page (Customized)
# Finish page (Customized)
!define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave
!insertmacro MUI_PAGE_FINISH

; Uninstaller pages
# Uninstaller pages
!insertmacro MUI_UNPAGE_INSTFILES

; Language files
# Language files
!insertmacro MUI_LANGUAGE "English"

@@ -201,8 +201,8 @@ ShowInstDetails show
ShowUnInstDetails show

; Check and install Visual C++ redist packages
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
# Check and install Visual C++ redist packages
# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
Section -Prerequisites

Var /GLOBAL VcRedistName

@@ -211,12 +211,12 @@ Section -Prerequisites
Var /Global CheckVcRedist
StrCpy $CheckVcRedist "False"

; Visual C++ 2015 redist packages
# Visual C++ 2015 redist packages
!define PY3_VC_REDIST_NAME "VC_Redist_2015"
!define PY3_VC_REDIST_X64_GUID "{50A2BC33-C9CD-3BF1-A8FF-53C10A0B183C}"
!define PY3_VC_REDIST_X86_GUID "{BBF2AC74-720C-3CB3-8291-5E34039232FA}"

; Visual C++ 2008 SP1 MFC Security Update redist packages
# Visual C++ 2008 SP1 MFC Security Update redist packages
!define PY2_VC_REDIST_NAME "VC_Redist_2008_SP1_MFC"
!define PY2_VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
!define PY2_VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"

@@ -239,7 +239,7 @@ Section -Prerequisites
StrCpy $VcRedistGuid ${PY2_VC_REDIST_X86_GUID}
${EndIf}

; VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
# VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
${If} ${AtMostWin2008R2}
StrCpy $CheckVcRedist "True"
${EndIf}

@@ -255,20 +255,41 @@ Section -Prerequisites
"$VcRedistName is currently not installed. Would you like to install?" \
/SD IDYES IDNO endVcRedist

ClearErrors
; The Correct version of VCRedist is copied over by "build_pkg.bat"
# The Correct version of VCRedist is copied over by "build_pkg.bat"
SetOutPath "$INSTDIR\"
File "..\prereqs\vcredist.exe"
; /passive used by 2015 installer
; /qb! used by 2008 installer
; It just ignores the unrecognized switches...
ExecWait "$INSTDIR\vcredist.exe /qb! /passive"
IfErrors 0 endVcRedist
# If an output variable is specified ($0 in the case below),
# ExecWait sets the variable with the exit code (and only sets the
# error flag if an error occurs; if an error occurs, the contents
# of the user variable are undefined).
# http://nsis.sourceforge.net/Reference/ExecWait
# /passive used by 2015 installer
# /qb! used by 2008 installer
# It just ignores the unrecognized switches...
ClearErrors
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
IfErrors 0 CheckVcRedistErrorCode
MessageBox MB_OK \
"$VcRedistName failed to install. Try installing the package manually." \
/SD IDOK
Goto endVcRedist

CheckVcRedistErrorCode:
# Check for Reboot Error Code (3010)
${If} $0 == 3010
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName installed but requires a restart to complete." \
|
||||
/SD IDOK
|
||||
|
||||
# Check for any other errors
|
||||
${ElseIfNot} $0 == 0
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed with ErrorCode: $0. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
${EndIf}
|
||||
|
||||
endVcRedist:
|
||||
|
||||
${EndIf}
|
||||
|
||||
${EndIf}
|
||||
@ -294,12 +315,12 @@ Function .onInit
|
||||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
; Check for existing installation
|
||||
# Check for existing installation
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" checkOther
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
@ -307,12 +328,12 @@ Function .onInit
|
||||
Abort
|
||||
|
||||
checkOther:
|
||||
; Check for existing installation of full salt
|
||||
# Check for existing installation of full salt
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" skipUninstall
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME_OTHER} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
@ -321,22 +342,22 @@ Function .onInit
|
||||
|
||||
uninst:
|
||||
|
||||
; Get current Silent status
|
||||
# Get current Silent status
|
||||
StrCpy $R0 0
|
||||
${If} ${Silent}
|
||||
StrCpy $R0 1
|
||||
${EndIf}
|
||||
|
||||
; Turn on Silent mode
|
||||
# Turn on Silent mode
|
||||
SetSilent silent
|
||||
|
||||
; Don't remove all directories
|
||||
# Don't remove all directories
|
||||
StrCpy $DeleteInstallDir 0
|
||||
|
||||
; Uninstall silently
|
||||
# Uninstall silently
|
||||
Call uninstallSalt
|
||||
|
||||
; Set it back to Normal mode, if that's what it was before
|
||||
# Set it back to Normal mode, if that's what it was before
|
||||
${If} $R0 == 0
|
||||
SetSilent normal
|
||||
${EndIf}
|
||||
@ -350,7 +371,7 @@ Section -Post
|
||||
|
||||
WriteUninstaller "$INSTDIR\uninst.exe"
|
||||
|
||||
; Uninstall Registry Entries
|
||||
# Uninstall Registry Entries
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"DisplayName" "$(^Name)"
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
@ -366,19 +387,19 @@ Section -Post
|
||||
WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \
|
||||
"DependOnService" "nsi"
|
||||
|
||||
; Set the estimated size
|
||||
# Set the estimated size
|
||||
${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2
|
||||
IntFmt $0 "0x%08X" $0
|
||||
WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"EstimatedSize" "$0"
|
||||
|
||||
; Commandline Registry Entries
|
||||
# Commandline Registry Entries
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
|
||||
; Register the Salt-Minion Service
|
||||
# Register the Salt-Minion Service
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
|
||||
@ -398,12 +419,12 @@ SectionEnd
|
||||
|
||||
Function .onInstSuccess
|
||||
|
||||
; If StartMinionDelayed is 1, then set the service to start delayed
|
||||
# If StartMinionDelayed is 1, then set the service to start delayed
|
||||
${If} $StartMinionDelayed == 1
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
|
||||
${EndIf}
|
||||
|
||||
; If start-minion is 1, then start the service
|
||||
# If start-minion is 1, then start the service
|
||||
${If} $StartMinion == 1
|
||||
nsExec::Exec 'net start salt-minion'
|
||||
${EndIf}
|
||||
@ -413,10 +434,11 @@ FunctionEnd
|
||||
|
||||
Function un.onInit
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Uninstaller: Remove Installation Directory
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/delete-install-dir" $R1
|
||||
IfErrors delete_install_dir_not_found
|
||||
StrCpy $DeleteInstallDir 1
|
||||
@ -434,7 +456,7 @@ Section Uninstall
|
||||
|
||||
Call un.uninstallSalt
|
||||
|
||||
; Remove C:\salt from the Path
|
||||
# Remove C:\salt from the Path
|
||||
Push "C:\salt"
|
||||
Call un.RemoveFromPath
|
||||
|
||||
@ -444,27 +466,27 @@ SectionEnd
|
||||
!macro uninstallSalt un
|
||||
Function ${un}uninstallSalt
|
||||
|
||||
; Make sure we're in the right directory
|
||||
# Make sure we're in the right directory
|
||||
${If} $INSTDIR == "c:\salt\bin\Scripts"
|
||||
StrCpy $INSTDIR "C:\salt"
|
||||
${EndIf}
|
||||
|
||||
; Stop and Remove salt-minion service
|
||||
# Stop and Remove salt-minion service
|
||||
nsExec::Exec 'net stop salt-minion'
|
||||
nsExec::Exec 'sc delete salt-minion'
|
||||
|
||||
; Stop and remove the salt-master service
|
||||
# Stop and remove the salt-master service
|
||||
nsExec::Exec 'net stop salt-master'
|
||||
nsExec::Exec 'sc delete salt-master'
|
||||
|
||||
; Remove files
|
||||
# Remove files
|
||||
Delete "$INSTDIR\uninst.exe"
|
||||
Delete "$INSTDIR\nssm.exe"
|
||||
Delete "$INSTDIR\salt*"
|
||||
Delete "$INSTDIR\vcredist.exe"
|
||||
RMDir /r "$INSTDIR\bin"
|
||||
|
||||
; Remove Registry entries
|
||||
# Remove Registry entries
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}"
|
||||
@ -474,17 +496,17 @@ Function ${un}uninstallSalt
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}"
|
||||
|
||||
; Automatically close when finished
|
||||
# Automatically close when finished
|
||||
SetAutoClose true
|
||||
|
||||
; Prompt to remove the Installation directory
|
||||
# Prompt to remove the Installation directory
|
||||
${IfNot} $DeleteInstallDir == 1
|
||||
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
|
||||
"Would you like to completely remove $INSTDIR and all of its contents?" \
|
||||
/SD IDNO IDNO finished
|
||||
${EndIf}
|
||||
|
||||
; Make sure you're not removing Program Files
|
||||
# Make sure you're not removing Program Files
|
||||
${If} $INSTDIR != 'Program Files'
|
||||
${AndIf} $INSTDIR != 'Program Files (x86)'
|
||||
RMDir /r "$INSTDIR"
|
||||
@ -526,7 +548,7 @@ FunctionEnd
|
||||
|
||||
Function Trim
|
||||
|
||||
Exch $R1 ; Original string
|
||||
Exch $R1 # Original string
|
||||
Push $R2
|
||||
|
||||
Loop:
|
||||
@ -558,36 +580,36 @@ Function Trim
|
||||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; StrStr Function
|
||||
; - find substring in a string
|
||||
;
|
||||
; Usage:
|
||||
; Push "this is some string"
|
||||
; Push "some"
|
||||
; Call StrStr
|
||||
; Pop $0 ; "some string"
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
#
|
||||
# Usage:
|
||||
# Push "this is some string"
|
||||
# Push "some"
|
||||
# Call StrStr
|
||||
# Pop $0 ; "some string"
|
||||
#------------------------------------------------------------------------------
|
||||
!macro StrStr un
|
||||
Function ${un}StrStr
|
||||
|
||||
Exch $R1 ; $R1=substring, stack=[old$R1,string,...]
|
||||
Exch ; stack=[string,old$R1,...]
|
||||
Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 ; $R3=strlen(substring)
|
||||
Push $R4 ; $R4=count
|
||||
Push $R5 ; $R5=tmp
|
||||
StrLen $R3 $R1 ; Get the length of the Search String
|
||||
StrCpy $R4 0 ; Set the counter to 0
|
||||
Exch $R1 # $R1=substring, stack=[old$R1,string,...]
|
||||
Exch # stack=[string,old$R1,...]
|
||||
Exch $R2 # $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 # $R3=strlen(substring)
|
||||
Push $R4 # $R4=count
|
||||
Push $R5 # $R5=tmp
|
||||
StrLen $R3 $R1 # Get the length of the Search String
|
||||
StrCpy $R4 0 # Set the counter to 0
|
||||
|
||||
loop:
|
||||
StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is
|
||||
; the size of the length of the search string
|
||||
StrCmp $R5 $R1 done ; Is the contents of the window the same as
|
||||
; search string, then done
|
||||
StrCmp $R5 "" done ; Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 ; Shift the windows one character
|
||||
Goto loop ; Repeat
|
||||
StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is
|
||||
# the size of the length of the search string
|
||||
StrCmp $R5 $R1 done # Is the contents of the window the same as
|
||||
# search string, then done
|
||||
StrCmp $R5 "" done # Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 # Shift the windows one character
|
||||
Goto loop # Repeat
|
||||
|
||||
done:
|
||||
StrCpy $R1 $R2 "" $R4
|
||||
@ -595,7 +617,7 @@ Function ${un}StrStr
|
||||
Pop $R4
|
||||
Pop $R3
|
||||
Pop $R2
|
||||
Exch $R1 ; $R1=old$R1, stack=[result,...]
|
||||
Exch $R1 # $R1=old$R1, stack=[result,...]
|
||||
|
||||
FunctionEnd
|
||||
!macroend
|
||||
@ -603,74 +625,74 @@ FunctionEnd
|
||||
!insertmacro StrStr "un."
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; AddToPath Function
|
||||
; - Adds item to Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call AddToPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# AddToPath Function
|
||||
# - Adds item to Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call AddToPath
|
||||
#------------------------------------------------------------------------------
|
||||
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
Function AddToPath
|
||||
|
||||
Exch $0 ; Path to add
|
||||
Push $1 ; Current Path
|
||||
Push $2 ; Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 ; Handle to Reg / Length of Path
|
||||
Push $4 ; Result of Registry Call
|
||||
Exch $0 # Path to add
|
||||
Push $1 # Current Path
|
||||
Push $2 # Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 # Handle to Reg / Length of Path
|
||||
Push $4 # Result of Registry Call
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
"You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Check if already in PATH
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0;" ; The string to find
|
||||
# Check if already in PATH
|
||||
Push "$1;" # The string to search
|
||||
Push "$0;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result of the search
|
||||
StrCmp $2 "" 0 done ; String not found, try again with ';' at the end
|
||||
; Otherwise, it's already in the path
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0\;" ; The string to find
|
||||
Pop $2 # The result of the search
|
||||
StrCmp $2 "" 0 done # String not found, try again with ';' at the end
|
||||
# Otherwise, it's already in the path
|
||||
Push "$1;" # The string to search
|
||||
Push "$0\;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result
|
||||
StrCmp $2 "" 0 done ; String not found, continue (add)
|
||||
; Otherwise, it's already in the path
|
||||
Pop $2 # The result
|
||||
StrCmp $2 "" 0 done # String not found, continue (add)
|
||||
# Otherwise, it's already in the path
|
||||
|
||||
; Prevent NSIS string overflow
|
||||
StrLen $2 $0 ; Length of path to add ($2)
|
||||
StrLen $3 $1 ; Length of current path ($3)
|
||||
IntOp $2 $2 + $3 ; Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 ; Account for the additional ';'
|
||||
; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
# Prevent NSIS string overflow
|
||||
StrLen $2 $0 # Length of path to add ($2)
|
||||
StrLen $3 $1 # Length of current path ($3)
|
||||
IntOp $2 $2 + $3 # Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 # Account for the additional ';'
|
||||
# $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
|
||||
; Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
# Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
|
||||
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
@ -678,18 +700,18 @@ Function AddToPath
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; Append dir to PATH
|
||||
# Append dir to PATH
|
||||
DetailPrint "Add to PATH: $0"
|
||||
StrCpy $2 $1 1 -1 ; Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 ; Check for trailing ';'
|
||||
StrCpy $1 $1 -1 ; remove trailing ';'
|
||||
StrCmp $1 "" +2 ; Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" ; Append new path at the end ($0)
|
||||
StrCpy $2 $1 1 -1 # Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 # Check for trailing ';'
|
||||
StrCpy $1 $1 -1 # remove trailing ';'
|
||||
StrCmp $1 "" +2 # Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" # Append new path at the end ($0)
|
||||
|
||||
; We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
# We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
WriteRegExpandStr ${Environ} "PATH" $0
|
||||
|
||||
; Broadcast registry change to open programs
|
||||
# Broadcast registry change to open programs
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
@ -702,16 +724,16 @@ Function AddToPath
|
||||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; RemoveFromPath Function
|
||||
; - Removes item from Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call un.RemoveFromPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# RemoveFromPath Function
|
||||
# - Removes item from Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call un.RemoveFromPath
|
||||
#------------------------------------------------------------------------------
|
||||
Function un.RemoveFromPath
|
||||
|
||||
Exch $0
|
||||
@ -722,59 +744,59 @@ Function un.RemoveFromPath
|
||||
Push $5
|
||||
Push $6
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 ; Copy the last character of the path
|
||||
StrCmp $5 ";" +2 ; Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" ; ensure trailing ';'
|
||||
# Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 # Copy the last character of the path
|
||||
StrCmp $5 ";" +2 # Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" # ensure trailing ';'
|
||||
|
||||
; Check for our directory inside the path
|
||||
Push $1 ; String to Search
|
||||
Push "$0;" ; Dir to Find
|
||||
# Check for our directory inside the path
|
||||
Push $1 # String to Search
|
||||
Push "$0;" # Dir to Find
|
||||
Call un.StrStr
|
||||
Pop $2 ; The results of the search
|
||||
StrCmp $2 "" done ; If results are empty, we're done, otherwise continue
|
||||
Pop $2 # The results of the search
|
||||
StrCmp $2 "" done # If results are empty, we're done, otherwise continue
|
||||
|
||||
; Remove our Directory from the Path
|
||||
# Remove our Directory from the Path
|
||||
DetailPrint "Remove from PATH: $0"
|
||||
StrLen $3 "$0;" ; Get the length of our dir ($3)
|
||||
StrLen $4 $2 ; Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" ; Combine $5 and $6
|
||||
StrLen $3 "$0;" # Get the length of our dir ($3)
|
||||
StrLen $4 $2 # Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 # $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" # Combine $5 and $6
|
||||
|
||||
; Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 ; Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 ; Check for ';'
|
||||
StrCpy $3 $3 -1 ; remove trailing ';'
|
||||
# Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 # Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 # Check for ';'
|
||||
StrCpy $3 $3 -1 # remove trailing ';'
|
||||
|
||||
; Write the new path to the registry
|
||||
# Write the new path to the registry
|
||||
WriteRegExpandStr ${Environ} "PATH" $3
|
||||
|
||||
; Broadcast the change to all open applications
|
||||
# Broadcast the change to all open applications
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
@ -808,6 +830,7 @@ Function getMinionConfig
|
||||
confFound:
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
@ -838,68 +861,69 @@ FunctionEnd
|
||||
Function updateMinionConfig
|
||||
|
||||
ClearErrors
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading
|
||||
GetTempFileName $R0 ; get new temp file name
|
||||
FileOpen $1 $R0 "w" ; open temp file for writing
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
|
||||
GetTempFileName $R0 # get new temp file name
|
||||
FileOpen $1 $R0 "w" # open temp file for writing
|
||||
|
||||
loop: ; loop through each line
|
||||
FileRead $0 $2 ; read line from target file
|
||||
IfErrors done ; end if errors are encountered (end of line)
|
||||
loop: # loop through each line
|
||||
FileRead $0 $2 # read line from target file
|
||||
IfErrors done # end if errors are encountered (end of line)
|
||||
|
||||
${If} $MasterHost_State != "" ; if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MasterHost_State != "" # if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" # and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
${If} $MinionName_State != "" ; if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" ; change line
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MinionName_State != "" # if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" # change line
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
FileWrite $1 $2 ; write changed or unchanged line to temp file
|
||||
FileWrite $1 $2 # write changed or unchanged line to temp file
|
||||
Goto loop
|
||||
|
||||
done:
|
||||
FileClose $0 ; close target file
|
||||
FileClose $1 ; close temp file
|
||||
Delete "$INSTDIR\conf\minion" ; delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file
|
||||
Delete $R0 ; delete temp file
|
||||
FileClose $0 # close target file
|
||||
FileClose $1 # close temp file
|
||||
Delete "$INSTDIR\conf\minion" # delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
|
||||
Delete $R0 # delete temp file
|
||||
|
||||
FunctionEnd
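For readers who do not write NSIS, the rewrite-through-a-temp-file technique used by updateMinionConfig above can be sketched in Python. This is a minimal illustration, not part of the installer; the path, keys, and helper name are assumptions.

import os
import tempfile

def update_minion_config(path, master=None, minion_id=None):
    # Read the existing config line by line, swap in new 'master:' and 'id:'
    # values when provided, and pass every other line through unchanged.
    fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as tmp, open(path) as conf:
        for line in conf:
            key = line.lstrip('#').lstrip()
            if master and master != 'salt' and key.startswith('master:'):
                line = 'master: {0}\n'.format(master)
            elif minion_id and minion_id != 'hostname' and key.startswith('id:'):
                line = 'id: {0}\n'.format(minion_id)
            tmp.write(line)
    os.replace(tmp_path, path)  # the temp file replaces the original config

# Example (hypothetical values):
# update_minion_config(r'C:\salt\conf\minion', master='salt.example.com')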
|
||||
|
||||
|
||||
Function parseCommandLineSwitches
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
; Check for start-minion switches
|
||||
; /start-service is to be deprecated, so we must check for both
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
${GetOptions} $R0 "/start-minion=" $R2
|
||||
|
||||
# Service: Start Salt Minion
|
||||
${IfNot} $R2 == ""
|
||||
; If start-minion was passed something, then set it
|
||||
# If start-minion was passed something, then set it
|
||||
StrCpy $StartMinion $R2
|
||||
${ElseIfNot} $R1 == ""
|
||||
; If start-service was passed something, then set StartMinion to that
|
||||
# If start-service was passed something, then set StartMinion to that
|
||||
StrCpy $StartMinion $R1
|
||||
${Else}
|
||||
; Otherwise default to 1
|
||||
# Otherwise default to 1
|
||||
StrCpy $StartMinion 1
|
||||
${EndIf}
|
||||
|
||||
# Service: Minion Startup Type Delayed
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/start-minion-delayed" $R1
|
||||
IfErrors start_minion_delayed_not_found
|
||||
StrCpy $StartMinionDelayed 1
|
||||
|
@ -19,9 +19,9 @@ Function Get-Settings {
|
||||
"Python2Dir" = "C:\Python27"
|
||||
"Scripts2Dir" = "C:\Python27\Scripts"
|
||||
"SitePkgs2Dir" = "C:\Python27\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Program Files\Python35"
|
||||
"Scripts3Dir" = "C:\Program Files\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Python35"
|
||||
"Scripts3Dir" = "C:\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Python35\Lib\site-packages"
|
||||
"DownloadDir" = "$env:Temp\DevSalt"
|
||||
}
|
||||
# The script deletes the DownLoadDir (above) for each install.
|
||||
|
@ -29,8 +29,10 @@ import salt.config
|
||||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.utils.versions
|
||||
import salt.payload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -68,7 +70,7 @@ class LoadAuth(object):
|
||||
if fstr not in self.auth:
|
||||
return ''
|
||||
try:
|
||||
pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
|
||||
pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0]
|
||||
return load[pname_arg]
|
||||
except IndexError:
|
||||
return ''
|
||||
@ -215,8 +217,9 @@ class LoadAuth(object):
|
||||
acl_ret = self.__get_acl(load)
|
||||
tdata['auth_list'] = acl_ret
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
groups = self.get_groups(load)
|
||||
if groups:
|
||||
tdata['groups'] = groups
|
||||
|
||||
return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)
|
||||
|
||||
@ -291,29 +294,31 @@ class LoadAuth(object):
|
||||
def authenticate_key(self, load, key):
|
||||
'''
|
||||
Authenticate a user by the key passed in load.
|
||||
Return the effective user id (name) if it's differ from the specified one (for sudo).
|
||||
If the effective user id is the same as passed one return True on success or False on
|
||||
Return the effective user id (name) if it's different from the specified one (for sudo).
|
||||
If the effective user id is the same as the passed one, return True on success or False on
|
||||
failure.
|
||||
'''
|
||||
auth_key = load.pop('key')
|
||||
if not auth_key:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
error_msg = 'Authentication failure of type "user" occurred.'
|
||||
auth_key = load.pop('key', None)
|
||||
if auth_key is None:
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
|
||||
if 'user' in load:
|
||||
auth_user = AuthUser(load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return auth_user.sudo_name()
|
||||
elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root':
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_user.is_running_user():
|
||||
if auth_key != key.get(load['user']):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_key == key.get('root'):
|
||||
pass
|
||||
@ -321,19 +326,19 @@ class LoadAuth(object):
|
||||
if load['user'] in key:
|
||||
# User is authorised, check key and check perms
|
||||
if auth_key != key[load['user']]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return load['user']
|
||||
else:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
log.warning('Authentication failure of type "other" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return True
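A brief, hedged sketch of how a caller might interpret the tri-state return value of authenticate_key described in the docstring above; the loadauth, load, and key objects are assumed to exist and are not taken from this patch.

# Illustrative only: authenticate_key returns False on failure, True when the
# passed user checks out, or an effective username string in the sudo case.
result = loadauth.authenticate_key(load, key)
if result is False:
    raise Exception('authentication failed')
elif result is True:
    effective_user = load.get('user')   # act as the user named in the load
else:
    effective_user = result             # sudo: act as the effective user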
|
||||
|
||||
def get_auth_list(self, load):
|
||||
def get_auth_list(self, load, token=None):
|
||||
'''
|
||||
Retrieve access list for the user specified in load.
|
||||
The list is built by eauth module or from master eauth configuration.
|
||||
@ -341,30 +346,37 @@ class LoadAuth(object):
|
||||
list if the user has no rights to execute anything on this master and returns non-empty list
|
||||
if user is allowed to execute particular functions.
|
||||
'''
|
||||
# Get auth list from token
|
||||
if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
return token['auth_list']
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.__get_acl(load)
|
||||
if auth_list is not None:
|
||||
return auth_list
|
||||
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
eauth = token['eauth'] if token else load['eauth']
|
||||
if eauth not in self.opts['external_auth']:
|
||||
# No matching module is allowed in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][load['eauth']]
|
||||
if groups is None or groups is False:
|
||||
if token:
|
||||
name = token['name']
|
||||
groups = token.get('groups')
|
||||
else:
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][eauth]
|
||||
if not groups:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in groups:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
if group_config.rstrip('%') in groups:
|
||||
group_auth_match = True
|
||||
break
|
||||
# If a group_auth_match is set it means only that we have a
|
||||
# user which matches at least one or more of the groups defined
|
||||
# in the configuration file.
|
||||
@ -404,12 +416,77 @@ class LoadAuth(object):
|
||||
|
||||
return auth_list
|
||||
|
||||
def check_authentication(self, load, auth_type, key=None, show_username=False):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Go through various checks to see if the token/eauth/user can be authenticated.
|
||||
|
||||
Returns a dictionary containing the following keys:
|
||||
|
||||
- auth_list
|
||||
- username
|
||||
- error
|
||||
|
||||
If an error is encountered, return immediately with the relevant error dictionary
|
||||
as authentication has failed. Otherwise, return the username and valid auth_list.
|
||||
'''
|
||||
auth_list = []
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
ret = {'auth_list': auth_list,
|
||||
'username': username,
|
||||
'error': {}}
|
||||
|
||||
# Authenticate
|
||||
if auth_type == 'token':
|
||||
token = self.authenticate_token(load)
|
||||
if not token:
|
||||
ret['error'] = {'name': 'TokenAuthenticationError',
|
||||
'message': 'Authentication failure of type "token" occurred.'}
|
||||
return ret
|
||||
|
||||
# Update username for token
|
||||
username = token['name']
|
||||
ret['username'] = username
|
||||
auth_list = self.get_auth_list(load, token=token)
|
||||
elif auth_type == 'eauth':
|
||||
if not self.authenticate_eauth(load):
|
||||
ret['error'] = {'name': 'EauthAuthenticationError',
|
||||
'message': 'Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.'.format(username)}
|
||||
return ret
|
||||
|
||||
auth_list = self.get_auth_list(load)
|
||||
elif auth_type == 'user':
|
||||
if not self.authenticate_key(load, key):
|
||||
if show_username:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username)
|
||||
else:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
else:
|
||||
ret['error'] = {'name': 'SaltInvocationError',
|
||||
'message': 'Authentication type not supported.'}
|
||||
return ret
|
||||
|
||||
# Authentication checks passed
|
||||
ret['auth_list'] = auth_list
|
||||
return ret
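A hedged usage sketch for the check_authentication helper added above; the master config path, eauth backend, and credentials are placeholder assumptions, not values taken from this patch.

import salt.auth
import salt.config

opts = salt.config.master_config('/etc/salt/master')   # placeholder path
loadauth = salt.auth.LoadAuth(opts)

# 'eauth' auth_type shown here; 'token' and 'user' follow the same pattern.
load = {'eauth': 'pam', 'username': 'saltdev', 'password': 'hunter2'}
auth_check = loadauth.check_authentication(load, 'eauth')

if auth_check['error']:
    # Authentication failed: the error dict carries the exception name and message.
    print(auth_check['error']['name'], auth_check['error']['message'])
else:
    # Authentication passed: auth_list is the effective ACL for this user.
    print(auth_check['username'], auth_check['auth_list'])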
|
||||
|
||||
|
||||
class Authorize(object):
|
||||
'''
|
||||
The authorization engine used by EAUTH
|
||||
'''
|
||||
def __init__(self, opts, load, loadauth=None):
|
||||
salt.utils.versions.warn_until(
|
||||
'Neon',
|
||||
'The \'Authorize\' class has been deprecated. Please use the '
|
||||
'\'LoadAuth\', \'Resolver\', or \'AuthUser\' classes instead. '
|
||||
'Support for the \'Authorize\' class will be removed in Salt '
|
||||
'{version}.'
|
||||
)
|
||||
self.opts = salt.config.master_config(opts['conf_file'])
|
||||
self.load = load
|
||||
self.ckminions = salt.utils.minions.CkMinions(opts)
|
||||
@ -542,6 +619,15 @@ class Authorize(object):
|
||||
load.get('arg', None),
|
||||
load.get('tgt', None),
|
||||
load.get('tgt_type', 'glob'))
|
||||
|
||||
# Handle possible return of dict data structure from any_auth call to
|
||||
# avoid a stacktrace. As mentioned in PR #43181, this entire class is
|
||||
# dead code and is marked for removal in Salt Neon. But until then, we
|
||||
# should handle the dict return, which is an error and should return
|
||||
# False until this class is removed.
|
||||
if isinstance(good, dict):
|
||||
return False
|
||||
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load.get('fun', '') != 'saltutil.find_job':
|
||||
@ -554,7 +640,7 @@ class Authorize(object):
|
||||
authorization
|
||||
|
||||
Note: this will check that the user has at least one right that will let
|
||||
him execute "load", this does not deal with conflicting rules
|
||||
the user execute "load", this does not deal with conflicting rules
|
||||
'''
|
||||
|
||||
adata = self.auth_data
|
||||
@ -626,7 +712,7 @@ class Resolver(object):
|
||||
'not available').format(eauth))
|
||||
return ret
|
||||
|
||||
args = salt.utils.arg_lookup(self.auth[fstr])
|
||||
args = salt.utils.args.arg_lookup(self.auth[fstr])
|
||||
for arg in args['args']:
|
||||
if arg in self.opts:
|
||||
ret[arg] = self.opts[arg]
|
||||
|
@ -378,7 +378,7 @@ def groups(username, **kwargs):
|
||||
search_results = bind.search_s(search_base,
|
||||
ldap.SCOPE_SUBTREE,
|
||||
search_string,
|
||||
[_config('accountattributename'), 'cn'])
|
||||
[_config('accountattributename'), 'cn', _config('groupattribute')])
|
||||
for _, entry in search_results:
|
||||
if username in entry[_config('accountattributename')]:
|
||||
group_list.append(entry['cn'][0])
|
||||
@ -390,7 +390,7 @@ def groups(username, **kwargs):
|
||||
|
||||
# Only test user auth on first call for job.
|
||||
# 'show_jid' only exists on first payload so we can use that for the conditional.
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs['password'],
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs.get('password'),
|
||||
anonymous=_config('auth_by_group_membership_only', mandatory=False) and
|
||||
_config('anonymous', mandatory=False)):
|
||||
log.error('LDAP username and password do not match')
|
||||
|
@ -59,7 +59,7 @@ class Beacon(object):
|
||||
|
||||
if 'enabled' in current_beacon_config:
|
||||
if not current_beacon_config['enabled']:
|
||||
log.trace('Beacon {0} disabled'.format(mod))
|
||||
log.trace('Beacon %s disabled', mod)
|
||||
continue
|
||||
else:
|
||||
# remove 'enabled' item before processing the beacon
|
||||
@ -68,7 +68,7 @@ class Beacon(object):
|
||||
else:
|
||||
self._remove_list_item(config[mod], 'enabled')
|
||||
|
||||
log.trace('Beacon processing: {0}'.format(mod))
|
||||
log.trace('Beacon processing: %s', mod)
|
||||
fun_str = '{0}.beacon'.format(mod)
|
||||
validate_str = '{0}.validate'.format(mod)
|
||||
if fun_str in self.beacons:
|
||||
@ -77,10 +77,10 @@ class Beacon(object):
|
||||
if interval:
|
||||
b_config = self._trim_config(b_config, mod, 'interval')
|
||||
if not self._process_interval(mod, interval):
|
||||
log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
|
||||
log.trace('Skipping beacon %s. Interval not reached.', mod)
|
||||
continue
|
||||
if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
|
||||
log.trace('Evaluting if beacon {0} should be skipped due to a state run.'.format(mod))
|
||||
log.trace('Evaluating if beacon %s should be skipped due to a state run.', mod)
|
||||
b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
|
||||
is_running = False
|
||||
running_jobs = salt.utils.minion.running(self.opts)
|
||||
@ -90,10 +90,10 @@ class Beacon(object):
|
||||
if is_running:
|
||||
close_str = '{0}.close'.format(mod)
|
||||
if close_str in self.beacons:
|
||||
log.info('Closing beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Closing beacon %s. State run in progress.', mod)
|
||||
self.beacons[close_str](b_config[mod])
|
||||
else:
|
||||
log.info('Skipping beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Skipping beacon %s. State run in progress.', mod)
|
||||
continue
|
||||
# Update __grains__ on the beacon
|
||||
self.beacons[fun_str].__globals__['__grains__'] = grains
|
||||
@ -120,7 +120,7 @@ class Beacon(object):
|
||||
if runonce:
|
||||
self.disable_beacon(mod)
|
||||
else:
|
||||
log.warning('Unable to process beacon {0}'.format(mod))
|
||||
log.warning('Unable to process beacon %s', mod)
|
||||
return ret
|
||||
|
||||
def _trim_config(self, b_config, mod, key):
|
||||
@ -149,19 +149,19 @@ class Beacon(object):
|
||||
Process beacons with intervals
|
||||
Return True if a beacon should be run on this loop
|
||||
'''
|
||||
log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
|
||||
log.trace('Processing interval %s for beacon mod %s', interval, mod)
|
||||
loop_interval = self.opts['loop_interval']
|
||||
if mod in self.interval_map:
|
||||
log.trace('Processing interval in map')
|
||||
counter = self.interval_map[mod]
|
||||
log.trace('Interval counter: {0}'.format(counter))
|
||||
log.trace('Interval counter: %s', counter)
|
||||
if counter * loop_interval >= interval:
|
||||
self.interval_map[mod] = 1
|
||||
return True
|
||||
else:
|
||||
self.interval_map[mod] += 1
|
||||
else:
|
||||
log.trace('Interval process inserting mod: {0}'.format(mod))
|
||||
log.trace('Interval process inserting mod: %s', mod)
|
||||
self.interval_map[mod] = 1
|
||||
return False
|
||||
|
||||
@ -205,15 +205,50 @@ class Beacon(object):
|
||||
'''
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
b_conf = self.functions['config.merge']('beacons')
|
||||
if not isinstance(self.opts['beacons'], dict):
|
||||
self.opts['beacons'] = {}
|
||||
self.opts['beacons'].update(b_conf)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacons_list_complete')
|
||||
|
||||
return True
|
||||
|
||||
def list_available_beacons(self):
|
||||
'''
|
||||
List the available beacons
|
||||
'''
|
||||
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
|
||||
for _beacon in self.beacons if '.beacon' in _beacon]
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': _beacons},
|
||||
tag='/salt/minion/minion_beacons_list_available_complete')
|
||||
|
||||
return True
|
||||
|
||||
def validate_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Validate the configuration of a beacon
|
||||
'''
|
||||
validate_str = '{}.validate'.format(name)
|
||||
# Run the validate function if it's available,
|
||||
# otherwise there is a warning about it being missing
|
||||
if validate_str in self.beacons:
|
||||
if 'enabled' in beacon_data:
|
||||
del beacon_data['enabled']
|
||||
valid, vcomment = self.beacons[validate_str](beacon_data)
|
||||
else:
|
||||
log.info('Beacon %s does not have a validate'
|
||||
' function, skipping validation.', name)
|
||||
valid = True
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True,
|
||||
'vcomment': vcomment,
|
||||
'valid': valid},
|
||||
tag='/salt/minion/minion_beacon_validation_complete')
|
||||
|
||||
return True
|
||||
|
||||
def add_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Add a beacon item
|
||||
@ -224,9 +259,9 @@ class Beacon(object):
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
else:
|
||||
log.info('Added new beacon item {0}'.format(name))
|
||||
log.info('Added new beacon item %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
@ -245,7 +280,7 @@ class Beacon(object):
|
||||
data[name] = beacon_data
|
||||
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
@ -261,7 +296,7 @@ class Beacon(object):
|
||||
'''
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Deleting beacon item {0}'.format(name))
|
||||
log.info('Deleting beacon item %s', name)
|
||||
del self.opts['beacons'][name]
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
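The logging changes in the beacon hunks above replace eager str.format calls with lazy percent-style arguments, so the message is only interpolated when a handler actually emits the record. A minimal illustration of the difference, not part of the patch:

import logging
log = logging.getLogger(__name__)

mod = 'ntp.stats'
# Eager: the string is built even when DEBUG/TRACE output is disabled.
log.debug('Beacon processing: {0}'.format(mod))
# Lazy: interpolation is deferred to the logging framework.
log.debug('Beacon processing: %s', mod)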
353
salt/beacons/napalm_beacon.py
Normal file
@ -0,0 +1,353 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
NAPALM functions
|
||||
================
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Watch NAPALM functions and fire events on specific triggers.
|
||||
|
||||
.. note::
|
||||
|
||||
The ``NAPALM`` beacon works only when running under
|
||||
a regular Minion or a Proxy Minion, managed via NAPALM_.
|
||||
Check the documentation for the
|
||||
:mod:`NAPALM proxy module <salt.proxy.napalm>`.
|
||||
|
||||
.. _NAPALM: http://napalm.readthedocs.io/en/latest/index.html
|
||||
|
||||
The configuration accepts a list of Salt functions to be
|
||||
invoked, and the corresponding output hierarchy that should
|
||||
be matched against. To invoke a function with certain
|
||||
arguments, they can be specified using the ``_args`` key, or
|
||||
``_kwargs`` for more specific key-value arguments.
|
||||
|
||||
The match structure follows the output hierarchy of the NAPALM
|
||||
functions, under the ``out`` key.
|
||||
|
||||
For example, the following is the normal structure returned by the
|
||||
:mod:`ntp.stats <salt.modules.napalm_ntp.stats>` execution function:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"comment": "",
|
||||
"result": true,
|
||||
"out": [
|
||||
{
|
||||
"referenceid": ".GPSs.",
|
||||
"remote": "172.17.17.1",
|
||||
"synchronized": true,
|
||||
"reachability": 377,
|
||||
"offset": 0.461,
|
||||
"when": "860",
|
||||
"delay": 143.606,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 1,
|
||||
"jitter": 0.027,
|
||||
"type": "-"
|
||||
},
|
||||
{
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.2",
|
||||
"synchronized": false,
|
||||
"reachability": 0,
|
||||
"offset": 0.0,
|
||||
"when": "-",
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 16,
|
||||
"jitter": 4000.0,
|
||||
"type": "-"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
In order to fire events when the synchronization is lost with
|
||||
one of the NTP peers, e.g., ``172.17.17.2``, we can match it explicitly as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ntp.stats:
|
||||
remote: 172.17.17.2
|
||||
synchronized: false
|
||||
|
||||
There is one single nesting level, as the output of ``ntp.stats`` is
|
||||
just a list of dictionaries, and this beacon will compare each dictionary
|
||||
from the list with the structure exemplified above.
|
||||
|
||||
.. note::
|
||||
|
||||
When we want to match on any element at a certain level, we can
|
||||
configure ``*`` to match anything.
|
||||
|
||||
Considering a more complex structure consisting of multiple nested levels,
|
||||
e.g., the output of the :mod:`bgp.neighbors <salt.modules.napalm_bgp.neighbors>`
|
||||
execution function, to check when any neighbor from the ``global``
|
||||
routing table is down, the match structure would have the format:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
bgp.neighbors:
|
||||
global:
|
||||
'*':
|
||||
up: false
|
||||
|
||||
The match structure above will match any BGP neighbor, with
|
||||
any network (``*`` matches any AS number), under the ``global`` VRF.
|
||||
In other words, this beacon will push an event on the Salt bus
|
||||
when there's a BGP neighbor down.
|
||||
|
||||
The right operand can also accept mathematical operations
|
||||
(i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.) when comparing
|
||||
numerical values.
|
||||
|
||||
Configuration Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
beacons:
|
||||
napalm:
|
||||
- net.interfaces:
|
||||
# fire events when any interfaces is down
|
||||
'*':
|
||||
is_up: false
|
||||
- net.interfaces:
|
||||
# fire events only when the xe-0/0/0 interface is down
|
||||
'xe-0/0/0':
|
||||
is_up: false
|
||||
- ntp.stats:
|
||||
# fire when there's any NTP peer unsynchronized
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when the synchronization
|
||||
# with the 172.17.17.2 NTP server is lost
|
||||
_args:
|
||||
- 172.17.17.2
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when there's a NTP peer with
|
||||
# synchronization stratum > 5
|
||||
stratum: '> 5'
|
||||
|
||||
Event structure example:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
salt/beacon/edge01.bjm01/napalm/junos/ntp.stats {
|
||||
"_stamp": "2017-09-05T09:51:09.377202",
|
||||
"args": [],
|
||||
"data": {
|
||||
"comment": "",
|
||||
"out": [
|
||||
{
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"jitter": 4000.0,
|
||||
"offset": 0.0,
|
||||
"reachability": 0,
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.1",
|
||||
"stratum": 16,
|
||||
"synchronized": false,
|
||||
"type": "-",
|
||||
"when": "-"
|
||||
}
|
||||
],
|
||||
"result": true
|
||||
},
|
||||
"fun": "ntp.stats",
|
||||
"id": "edge01.bjm01",
|
||||
"kwargs": {},
|
||||
"match": {
|
||||
"stratum": "> 5"
|
||||
}
|
||||
}
|
||||
|
||||
The event exemplified above has been fired when the device
|
||||
identified by the Minion id ``edge01.bjm01`` has been synchronized
|
||||
with an NTP server at a stratum level greater than 5.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python std lib
|
||||
import re
|
||||
import logging
|
||||
|
||||
# Import Salt modules
|
||||
from salt.ext import six
|
||||
import salt.utils.napalm
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
_numeric_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$')
|
||||
# the numeric regex will match the right operand, e.g '>= 20', '< 100', '!= 20', '< 1000.12' etc.
|
||||
_numeric_operand = {
|
||||
'<': '__lt__',
|
||||
'>': '__gt__',
|
||||
'>=': '__ge__',
|
||||
'<=': '__le__',
|
||||
'==': '__eq__',
|
||||
'!=': '__ne__',
|
||||
} # mathematical operand - private method map
|
||||
|
||||
|
||||
__virtualname__ = 'napalm'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
This beacon can only work when running under a regular or a proxy minion, managed through napalm.
|
||||
'''
|
||||
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
|
||||
|
||||
|
||||
def _compare(cur_cmp, cur_struct):
    '''
    Compare two objects and return a boolean value
    when there's a match.
    '''
    if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict):
        log.debug('Comparing dict to dict')
        for cmp_key, cmp_value in six.iteritems(cur_cmp):
            if cmp_key == '*':
                # matches any key from the source dictionary
                if isinstance(cmp_value, dict):
                    found = False
                    for _, cur_struct_val in six.iteritems(cur_struct):
                        found |= _compare(cmp_value, cur_struct_val)
                    return found
                else:
                    found = False
                    if isinstance(cur_struct, (list, tuple)):
                        for cur_ele in cur_struct:
                            found |= _compare(cmp_value, cur_ele)
                    elif isinstance(cur_struct, dict):
                        for _, cur_ele in six.iteritems(cur_struct):
                            found |= _compare(cmp_value, cur_ele)
                    return found
            else:
                if isinstance(cmp_value, dict):
                    if cmp_key not in cur_struct:
                        return False
                    return _compare(cmp_value, cur_struct[cmp_key])
                if isinstance(cmp_value, list):
                    found = False
                    for _, cur_struct_val in six.iteritems(cur_struct):
                        found |= _compare(cmp_value, cur_struct_val)
                    return found
                else:
                    return _compare(cmp_value, cur_struct[cmp_key])
    elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)):
        log.debug('Comparing list to list')
        found = False
        for cur_cmp_ele in cur_cmp:
            for cur_struct_ele in cur_struct:
                found |= _compare(cur_cmp_ele, cur_struct_ele)
        return found
    elif isinstance(cur_cmp, dict) and isinstance(cur_struct, (list, tuple)):
        log.debug('Comparing dict to list (of dicts?)')
        found = False
        for cur_struct_ele in cur_struct:
            found |= _compare(cur_cmp, cur_struct_ele)
        return found
    elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool):
        log.debug('Comparing booleans: %s ? %s', cur_cmp, cur_struct)
        return cur_cmp == cur_struct
    elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \
            isinstance(cur_struct, (six.string_types, six.text_type)):
        log.debug('Comparing strings (and regex?): %s ? %s', cur_cmp, cur_struct)
        # Try a case-insensitive regex match
        matched = re.match(cur_cmp, cur_struct, re.I)
        if matched:
            return True
        return False
    elif isinstance(cur_cmp, (six.integer_types, float)) and \
            isinstance(cur_struct, (six.integer_types, float)):
        log.debug('Comparing numeric values: %d ? %d', cur_cmp, cur_struct)
        # numeric compare
        return cur_cmp == cur_struct
    elif isinstance(cur_struct, (six.integer_types, float)) and \
            isinstance(cur_cmp, (six.string_types, six.text_type)):
        # Comparing the numerical value against a presumed mathematical
        # expression such as '>= 20' or '< 1000.12'
        log.debug('Comparing a numeric value (%d) with a string (%s)', cur_struct, cur_cmp)
        numeric_compare = _numeric_regex.match(cur_cmp)
        # determine if the value to compare against is a mathematical expression
        if numeric_compare:
            compare_value = numeric_compare.group(2)
            return getattr(float(cur_struct), _numeric_operand[numeric_compare.group(1)])(float(compare_value))
        return False
    return False
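To make the last branch above concrete, here is a minimal, standalone sketch (the names are local to this example, not part of the module) of how an expression such as '>= 20' is split into an operator and a number and then evaluated through float's comparison methods:

import re

_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$')
_operand = {'<': '__lt__', '>': '__gt__', '>=': '__ge__',
            '<=': '__le__', '==': '__eq__', '!=': '__ne__'}


def numeric_match(expression, value):
    # '>= 20' against 42 becomes float(42).__ge__(20.0) -> True
    match = _regex.match(expression)
    if not match:
        return False
    return getattr(float(value), _operand[match.group(1)])(float(match.group(2)))


assert numeric_match('>= 20', 42) is True
assert numeric_match('< 1000.12', 2000) is False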
def validate(config):
    '''
    Validate the beacon configuration.
    '''
    # Must be a list of dicts.
    if not isinstance(config, list):
        return False, 'Configuration for napalm beacon must be a list.'
    for mod in config:
        # next(iter(...)) rather than .keys()[0] so this also works on Python 3
        fun = next(iter(mod))
        fun_cfg = mod[fun]
        if not isinstance(fun_cfg, dict):
            return False, 'The match structure for the {} execution function output must be a dictionary'.format(fun)
        if fun not in __salt__:
            return False, 'Execution function {} is not available!'.format(fun)
    return True, 'Valid configuration for the napalm beacon!'
def beacon(config):
    '''
    Watch napalm function and fire events.
    '''
    log.debug('Executing napalm beacon with config:')
    log.debug(config)
    ret = []
    for mod in config:
        if not mod:
            continue
        event = {}
        # next(iter(...)) rather than .keys()[0] so this also works on Python 3
        fun = next(iter(mod))
        fun_cfg = mod[fun]
        args = fun_cfg.pop('_args', [])
        kwargs = fun_cfg.pop('_kwargs', {})
        log.debug('Executing {fun} with {args} and {kwargs}'.format(
            fun=fun,
            args=args,
            kwargs=kwargs
        ))
        fun_ret = __salt__[fun](*args, **kwargs)
        log.debug('Got the reply from the minion:')
        log.debug(fun_ret)
        if not fun_ret.get('result', False):
            log.error('Error whilst executing {}'.format(fun))
            log.error(fun_ret)
            continue
        fun_ret_out = fun_ret['out']
        log.debug('Comparing to:')
        log.debug(fun_cfg)
        try:
            fun_cmp_result = _compare(fun_cfg, fun_ret_out)
        except Exception as err:
            log.error(err, exc_info=True)
            # catch any exception and continue,
            # to not jeopardise the execution of the next function in the list
            continue
        log.debug('Result of comparison: {res}'.format(res=fun_cmp_result))
        if fun_cmp_result:
            log.info('Matched {fun} with {cfg}'.format(
                fun=fun,
                cfg=fun_cfg
            ))
            event['tag'] = '{os}/{fun}'.format(os=__grains__['os'], fun=fun)
            event['fun'] = fun
            event['args'] = args
            event['kwargs'] = kwargs
            event['data'] = fun_ret
            event['match'] = fun_cfg
            log.debug('Queueing event:')
            log.debug(event)
            ret.append(event)
    log.debug('NAPALM beacon generated the events:')
    log.debug(ret)
    return ret
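For orientation, this is the shape of configuration that beacon() above receives, written out as a Python structure. The execution function name 'ntp.stats' and the 'synchronized' match key are illustrative assumptions, not part of this diff:

config = [
    {
        'ntp.stats': {              # assumed napalm execution function to poll
            '_args': [],            # positional arguments for that function
            '_kwargs': {},          # keyword arguments for that function
            'synchronized': False,  # match structure compared against the output
        }
    }
]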
salt/cache/consul.py (vendored, 9 changed lines)
@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store.
|
||||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
:depends: python-consul >= 0.2.0
|
||||
|
||||
It is up to the system administrator to set up and configure the Consul
|
||||
infrastructure. All that is needed for this plugin is a working Consul agent
|
||||
with read-write access to the key-value store.
|
||||
@ -81,8 +83,11 @@ def __virtual__():
|
||||
'verify': __opts__.get('consul.verify', True),
|
||||
}
|
||||
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
try:
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
except AttributeError:
|
||||
return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed")
|
||||
|
||||
return __virtualname__
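The hunk above moves the consul.Consul() call inside a try/except so that an older python-consul without that attribute disables the cache plugin with a reason instead of crashing the loader. A minimal sketch of the same pattern, with a hypothetical helper name:

def _make_client(consul_module, **kwargs):
    # Returns (client, None) on success, or (None, reason) when the library
    # is too old to expose consul.Consul.
    try:
        return consul_module.Consul(**kwargs), None
    except AttributeError:
        return None, 'python-consul >= 0.2.0 is required'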
|
||||
|
||||
|
@ -157,7 +157,7 @@ class BaseCaller(object):
|
||||
'''
|
||||
ret = {}
|
||||
fun = self.opts['fun']
|
||||
ret['jid'] = salt.utils.jid.gen_jid()
|
||||
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
|
||||
proc_fn = os.path.join(
|
||||
salt.minion.get_proc_dir(self.opts['cachedir']),
|
||||
ret['jid']
|
||||
|
@ -20,6 +20,7 @@ import sys
|
||||
import salt.client
|
||||
import salt.output
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.itertools
|
||||
import salt.utils.minions
|
||||
@ -112,7 +113,7 @@ class SaltCP(object):
|
||||
err = 'The referenced file, {0} is not available.'.format(fn_)
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
with salt.utils.fopen(fn_, 'r') as fp_:
|
||||
with salt.utils.files.fopen(fn_, 'r') as fp_:
|
||||
data = fp_.read()
|
||||
return {fn_: data}
|
||||
|
||||
@ -183,9 +184,10 @@ class SaltCP(object):
|
||||
if gzip \
|
||||
else salt.utils.itertools.read_file
|
||||
|
||||
minions = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
tgt,
|
||||
tgt_type=selected_target_option or 'glob')
|
||||
minions = _res['minions']
|
||||
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
|
||||
|
@ -14,7 +14,7 @@ from __future__ import absolute_import
|
||||
# Import Salt libs
|
||||
import salt.spm
|
||||
import salt.utils.parsers as parsers
|
||||
from salt.utils.verify import verify_log
|
||||
from salt.utils.verify import verify_log, verify_env
|
||||
|
||||
|
||||
class SPM(parsers.SPMParser):
|
||||
@ -29,6 +29,10 @@ class SPM(parsers.SPMParser):
|
||||
ui = salt.spm.SPMCmdlineInterface()
|
||||
self.parse_args()
|
||||
self.setup_logfile_logger()
|
||||
v_dirs = [
|
||||
self.config['cachedir'],
|
||||
]
|
||||
verify_env(v_dirs, self.config['user'],)
|
||||
verify_log(self.config)
|
||||
client = salt.spm.SPMClient(ui, self.config)
|
||||
client.run(self.args)
|
||||
|
@ -347,7 +347,8 @@ class LocalClient(object):
|
||||
return self._check_pub_data(pub_data)
|
||||
|
||||
def gather_minions(self, tgt, expr_form):
|
||||
return salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
return _res['minions']
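This gather_minions change reflects a return-shape change applied throughout the commit: CkMinions.check_minions() now returns a dict rather than a bare list, so callers unpack the 'minions' key (and, where they care about it, 'missing'). A hedged sketch of the new calling convention:

def matched_minions(ckminions, tgt, tgt_type='glob'):
    # check_minions() now returns a dict such as {'minions': [...], 'missing': [...]}
    _res = ckminions.check_minions(tgt, tgt_type=tgt_type)
    return _res['minions'], _res.get('missing', [])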
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def run_job_async(
|
||||
@ -1141,6 +1142,7 @@ class LocalClient(object):
|
||||
minion_timeouts = {}
|
||||
|
||||
found = set()
|
||||
missing = []
|
||||
# Check to see if the jid is real, if not return the empty dict
|
||||
try:
|
||||
if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}:
|
||||
@ -1179,6 +1181,8 @@ class LocalClient(object):
|
||||
break
|
||||
if u'minions' in raw.get(u'data', {}):
|
||||
minions.update(raw[u'data'][u'minions'])
|
||||
if u'missing' in raw.get(u'data', {}):
|
||||
missing.extend(raw[u'data'][u'missing'])
|
||||
continue
|
||||
if u'return' not in raw[u'data']:
|
||||
continue
|
||||
@ -1320,6 +1324,10 @@ class LocalClient(object):
|
||||
for minion in list((minions - found)):
|
||||
yield {minion: {u'failed': True}}
|
||||
|
||||
if missing:
|
||||
for minion in missing:
|
||||
yield {minion: {'failed': True}}
|
||||
|
||||
def get_returns(
|
||||
self,
|
||||
jid,
|
||||
|
@ -14,8 +14,9 @@ client applications.
|
||||
http://docs.saltstack.com/ref/clients/index.html
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
@ -24,9 +25,9 @@ import salt.auth
|
||||
import salt.client
|
||||
import salt.runner
|
||||
import salt.wheel
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.event
|
||||
import salt.syspaths as syspaths
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import EauthAuthenticationError
|
||||
|
||||
|
||||
@ -229,7 +230,7 @@ class APIClient(object):
|
||||
functions = self.wheelClient.functions
|
||||
elif client == u'runner':
|
||||
functions = self.runnerClient.functions
|
||||
result = {u'master': salt.utils.argspec_report(functions, module)}
|
||||
result = {u'master': salt.utils.args.argspec_report(functions, module)}
|
||||
return result
|
||||
|
||||
def create_token(self, creds):
|
||||
@ -322,4 +323,4 @@ class APIClient(object):
|
||||
Need to convert this to a master call with appropriate authentication
|
||||
|
||||
'''
|
||||
return self.event.fire_event(data, tagify(tag, u'wui'))
|
||||
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
|
||||
|
@ -16,7 +16,8 @@ import copy as pycopy
|
||||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
import salt.minion
|
||||
import salt.utils
|
||||
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.doc
|
||||
import salt.utils.error
|
||||
import salt.utils.event
|
||||
@ -25,6 +26,7 @@ import salt.utils.job
|
||||
import salt.utils.lazy
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.state
|
||||
import salt.utils.versions
|
||||
import salt.transport
|
||||
import salt.log.setup
|
||||
@ -297,7 +299,7 @@ class SyncClientMixin(object):
|
||||
# this is not to clutter the output with the module loading
|
||||
# if we have a high debug level.
|
||||
self.mminion # pylint: disable=W0104
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid())
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
|
||||
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
|
||||
|
||||
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
|
||||
@ -396,7 +398,7 @@ class SyncClientMixin(object):
|
||||
data[u'success'] = True
|
||||
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
|
||||
# some functions can return boolean values
|
||||
data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
|
||||
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
|
||||
except (Exception, SystemExit) as ex:
|
||||
if isinstance(ex, salt.exceptions.NotImplemented):
|
||||
data[u'return'] = str(ex)
|
||||
@ -510,7 +512,7 @@ class AsyncClientMixin(object):
|
||||
|
||||
def _gen_async_pub(self, jid=None):
|
||||
if jid is None:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
|
||||
return {u'tag': tag, u'jid': jid}
|
||||
|
||||
|
@ -1049,8 +1049,7 @@ class Single(object):
|
||||
opts_pkg[u'id'],
|
||||
opts_pkg.get(u'environment', u'base')
|
||||
)
|
||||
pillar_dirs = {}
|
||||
pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
pillar_data = pillar.compile_pillar()
|
||||
|
||||
# TODO: cache minion opts in datap in master.py
|
||||
data = {u'opts': opts_pkg,
|
||||
|
@ -5,7 +5,7 @@ correct cloud modules
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import, print_function, generators
|
||||
from __future__ import absolute_import, print_function, generators, unicode_literals
|
||||
import os
|
||||
import copy
|
||||
import glob
|
||||
|
@ -65,6 +65,7 @@ import salt.config as config
|
||||
import salt.utils
|
||||
import salt.utils.cloud
|
||||
import salt.utils.files
|
||||
from salt.utils.versions import LooseVersion
|
||||
from salt.ext import six
|
||||
import salt.version
|
||||
from salt.exceptions import (
|
||||
@ -79,7 +80,6 @@ HAS_LIBS = False
|
||||
try:
|
||||
import salt.utils.msazure
|
||||
from salt.utils.msazure import object_to_dict
|
||||
import azure.storage
|
||||
from azure.common.credentials import (
|
||||
UserPassCredentials,
|
||||
ServicePrincipalCredentials,
|
||||
@ -115,7 +115,9 @@ try:
|
||||
from azure.mgmt.storage import StorageManagementClient
|
||||
from azure.mgmt.web import WebSiteManagementClient
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
HAS_LIBS = True
|
||||
from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
|
||||
from azure.cli import core
|
||||
HAS_LIBS = LooseVersion(core.__version__) >= LooseVersion("2.0.12")
|
||||
except ImportError:
|
||||
pass
|
||||
# pylint: enable=wrong-import-position,wrong-import-order
|
||||
@ -1728,7 +1730,7 @@ def list_containers(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
if not storconn:
|
||||
storconn = get_conn(StorageManagementClient)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
@ -1769,7 +1771,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
'A container must be specified'
|
||||
)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
@ -1809,7 +1811,7 @@ def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
'A blob must be specified'
|
||||
)
|
||||
|
||||
storageaccount = azure.storage.CloudStorageAccount(
|
||||
storageaccount = CloudStorageAccount(
|
||||
config.get_cloud_config_value(
|
||||
'storage_account',
|
||||
get_configured_provider(), __opts__, search_global=False
|
||||
|
@ -20,7 +20,7 @@ under the "SSH Keys" section.
|
||||
personal_access_token: xxx
|
||||
ssh_key_file: /path/to/ssh/key/file
|
||||
ssh_key_names: my-key-name,my-key-name-2
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
|
||||
:depends: requests
|
||||
'''
|
||||
@ -59,10 +59,11 @@ except ImportError:
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'digital_ocean'
|
||||
__virtualname__ = 'digitalocean'
|
||||
__virtual_aliases__ = ('digital_ocean', 'do')
|
||||
|
||||
|
||||
# Only load in this module if the DIGITAL_OCEAN configurations are in place
|
||||
# Only load in this module if the DIGITALOCEAN configurations are in place
|
||||
def __virtual__():
|
||||
'''
|
||||
Check for DigitalOcean configurations
|
||||
@ -274,7 +275,7 @@ def create(vm_):
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
if vm_['profile'] and config.is_profile_configured(__opts__,
|
||||
__active_provider_name__ or 'digital_ocean',
|
||||
__active_provider_name__ or 'digitalocean',
|
||||
vm_['profile'],
|
||||
vm_=vm_) is False:
|
||||
return False
|
||||
@ -441,7 +442,7 @@ def create(vm_):
|
||||
ret = create_node(kwargs)
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Error creating {0} on DIGITAL_OCEAN\n\n'
|
||||
'Error creating {0} on DIGITALOCEAN\n\n'
|
||||
'The following exception was thrown when trying to '
|
||||
'run the initial deployment: {1}'.format(
|
||||
vm_['name'],
|
||||
@ -716,12 +717,12 @@ def import_keypair(kwargs=None, call=None):
|
||||
with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename:
|
||||
public_key_content = public_key_filename.read()
|
||||
|
||||
digital_ocean_kwargs = {
|
||||
digitalocean_kwargs = {
|
||||
'name': kwargs['keyname'],
|
||||
'public_key': public_key_content
|
||||
}
|
||||
|
||||
created_result = create_key(digital_ocean_kwargs, call=call)
|
||||
created_result = create_key(digitalocean_kwargs, call=call)
|
||||
return created_result
|
||||
|
||||
|
||||
@ -938,11 +939,11 @@ def show_pricing(kwargs=None, call=None):
|
||||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'digital_ocean':
|
||||
return {'Error': 'The requested profile does not belong to Digital Ocean'}
|
||||
if len(comps) < 2 or comps[1] != 'digitalocean':
|
||||
return {'Error': 'The requested profile does not belong to DigitalOcean'}
|
||||
|
||||
raw = {}
|
||||
ret = {}
|
||||
@ -968,7 +969,7 @@ def list_floating_ips(call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f list_floating_ips my-digitalocean-config
|
||||
'''
|
||||
@ -1008,7 +1009,7 @@ def show_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1041,7 +1042,7 @@ def create_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2'
|
||||
|
||||
@ -1083,7 +1084,7 @@ def delete_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f delete_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1118,7 +1119,7 @@ def assign_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f assign_floating_ip my-digitalocean-config droplet_id=1234567 floating_ip='45.55.96.47'
|
||||
'''
|
||||
@ -1151,7 +1152,7 @@ def unassign_floating_ip(kwargs=None, call=None):
|
||||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f unassign_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
@ -2643,7 +2643,7 @@ def show_pricing(kwargs=None, call=None):
|
||||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'gce':
|
||||
|
@ -41,6 +41,7 @@ Example profile:
|
||||
master_port: 5506
|
||||
|
||||
Tested on:
|
||||
- Fedora 26 (libvirt 3.2.1, qemu 2.9.1)
|
||||
- Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1)
|
||||
- Fedora 23 (libvirt 1.2.18, qemu 2.4.1)
|
||||
- Centos 7 (libvirt 1.2.17, qemu 1.5.3)
|
||||
@ -82,9 +83,6 @@ from salt.exceptions import (
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
VIRT_STATE_NAME_MAP = {0: 'running',
|
||||
1: 'running',
|
||||
2: 'running',
|
||||
@ -99,6 +97,20 @@ IP_LEARNING_XML = """<filterref filter='clean-traffic'>
|
||||
|
||||
__virtualname__ = 'libvirt'
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def libvirt_error_handler(ctx, error):
|
||||
'''
|
||||
Redirect stderr prints from libvirt to salt logging.
|
||||
'''
|
||||
log.debug("libvirt error {0}".format(error))
|
||||
|
||||
|
||||
if HAS_LIBVIRT:
|
||||
libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
@ -280,7 +292,7 @@ def create(vm_):
|
||||
|
||||
validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True
|
||||
|
||||
log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
@ -516,7 +528,7 @@ def destroy(name, call=None):
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
@ -527,7 +539,7 @@ def destroy(name, call=None):
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
@ -44,9 +44,6 @@ from salt.exceptions import (
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Import Salt-Cloud Libs
|
||||
import salt.utils.cloud
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -1193,7 +1190,7 @@ def list_nodes_select(call=None):
|
||||
'''
|
||||
Return a list of the VMs that are on the provider, with select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full(), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
@ -1503,7 +1500,7 @@ def _query(action=None,
|
||||
if LASTCALL >= now:
|
||||
time.sleep(ratelimit_sleep)
|
||||
|
||||
result = salt.utils.http.query(
|
||||
result = __utils__['http.query'](
|
||||
url,
|
||||
method,
|
||||
params=args,
|
||||
|
@ -87,7 +87,6 @@ import pprint
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.config as config
|
||||
from salt.exceptions import (
|
||||
SaltCloudConfigError,
|
||||
@ -96,6 +95,7 @@ from salt.exceptions import (
|
||||
SaltCloudExecutionTimeout,
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
import salt.utils.files
|
||||
|
||||
# Import salt.cloud libs
|
||||
import salt.utils.cloud
|
||||
@ -805,7 +805,7 @@ def load_public_key(vm_):
|
||||
)
|
||||
)
|
||||
|
||||
with salt.utils.fopen(public_key_filename, 'r') as public_key:
|
||||
with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
|
||||
key = public_key.read().replace('\n', '')
|
||||
|
||||
return key
|
||||
|
@ -24,7 +24,6 @@ import logging
|
||||
# Import salt libs
|
||||
from salt.exceptions import SaltCloudSystemExit
|
||||
import salt.config as config
|
||||
import salt.utils.cloud as cloud
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
@ -136,7 +135,7 @@ def create(vm_info):
|
||||
)
|
||||
|
||||
log.debug("Going to fire event: starting create")
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'starting create',
|
||||
'salt/cloud/{0}/creating'.format(vm_info['name']),
|
||||
@ -151,7 +150,7 @@ def create(vm_info):
|
||||
'clone_from': vm_info['clonefrom']
|
||||
}
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'requesting instance',
|
||||
'salt/cloud/{0}/requesting'.format(vm_info['name']),
|
||||
@ -174,10 +173,10 @@ def create(vm_info):
|
||||
vm_info['key_filename'] = key_filename
|
||||
vm_info['ssh_host'] = ip
|
||||
|
||||
res = cloud.bootstrap(vm_info, __opts__)
|
||||
res = __utils__['cloud.bootstrap'](vm_info)
|
||||
vm_result.update(res)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'created machine',
|
||||
'salt/cloud/{0}/created'.format(vm_info['name']),
|
||||
@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
|
||||
"private_ips",
|
||||
"public_ips",
|
||||
]
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), attributes, call,
|
||||
)
|
||||
|
||||
@ -278,7 +277,7 @@ def list_nodes_select(call=None):
|
||||
"""
|
||||
Return a list of the VMs that are on the provider, with select fields
|
||||
"""
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
@ -306,7 +305,7 @@ def destroy(name, call=None):
|
||||
if not vb_machine_exists(name):
|
||||
return "{0} doesn't exist and can't be deleted".format(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
@ -317,7 +316,7 @@ def destroy(name, call=None):
|
||||
|
||||
vb_destroy_machine(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
|
@ -53,7 +53,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S'
|
||||
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
|
||||
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
|
||||
_DFLT_LOG_FMT_LOGFILE = (
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
|
||||
)
|
||||
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']
|
||||
|
||||
@ -111,9 +111,10 @@ VALID_OPTS = {
|
||||
'master_port': (six.string_types, int),
|
||||
|
||||
# The behaviour of the minion when connecting to a master. Can specify 'failover',
|
||||
# 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an
|
||||
# exec module function to run to determine the master hostname. If 'disable' is specified
|
||||
# the minion will run, but will not try to connect to a master.
|
||||
# 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be
|
||||
# set to an exec module function to run to determine the master hostname. If 'disable' is
|
||||
# specified the minion will run, but will not try to connect to a master. If 'distributed'
|
||||
# is specified the minion will try to deterministically pick a master based on its id.
|
||||
'master_type': str,
|
||||
|
||||
# Specify the format in which the master address will be specified. Can
|
||||
@ -186,6 +187,16 @@ VALID_OPTS = {
|
||||
# A unique identifier for this daemon
|
||||
'id': str,
|
||||
|
||||
# Use a module function to determine the unique identifier. If this is
|
||||
# set and 'id' is not set, it will allow invocation of a module function
|
||||
# to determine the value of 'id'. For simple invocations without function
|
||||
# arguments, this may be a string that is the function name. For
|
||||
# invocations with function arguments, this may be a dictionary with the
|
||||
# key being the function name, and the value being an embedded dictionary
|
||||
# where each key is a function argument name and each value is the
|
||||
# corresponding argument value.
|
||||
'id_function': (dict, str),
|
||||
|
||||
# The directory to store all cache files.
|
||||
'cachedir': str,
|
||||
|
||||
@ -332,7 +343,7 @@ VALID_OPTS = {
|
||||
# Whether or not scheduled mine updates should be accompanied by a job return for the job cache
|
||||
'mine_return_job': bool,
|
||||
|
||||
# Schedule a mine update every n number of seconds
|
||||
# The number of minutes between mine updates.
|
||||
'mine_interval': int,
|
||||
|
||||
# The ipc strategy. (i.e., sockets versus tcp, etc)
|
||||
@ -417,6 +428,12 @@ VALID_OPTS = {
|
||||
# Tell the client to display the jid when a job is published
|
||||
'show_jid': bool,
|
||||
|
||||
# Ensure that a generated jid is always unique. If this is set, the jid
|
||||
# format is different due to an underscore and process id being appended
|
||||
# to the jid. WARNING: A change to the jid format may break external
|
||||
# applications that depend on the original format.
|
||||
'unique_jid': bool,
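A hedged sketch of the behaviour this option describes (not Salt's exact implementation): a jid is a timestamp string, and 'unique_jid' appends an underscore plus the process id so concurrent publishers cannot collide.

import datetime
import os


def gen_jid(opts):
    # Timestamp-style jid, with an optional '_<pid>' suffix for uniqueness
    jid = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.utcnow())
    if opts.get('unique_jid'):
        return jid + '_{0}'.format(os.getpid())
    return jid


print(gen_jid({'unique_jid': True}))   # e.g. 20170901123045123456_4242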
|
||||
|
||||
# Tells the highstate outputter to show successful states. False will omit successes.
|
||||
'state_verbose': bool,
|
||||
|
||||
@ -573,6 +590,23 @@ VALID_OPTS = {
|
||||
# False in 2016.3.0
|
||||
'add_proxymodule_to_opts': bool,
|
||||
|
||||
# Merge pillar data into configuration opts.
|
||||
# As multiple proxies can run on the same server, we may need different
|
||||
# configuration options for each, while there's one single configuration file.
|
||||
# The solution is merging the pillar data of each proxy minion into the opts.
|
||||
'proxy_merge_pillar_in_opts': bool,
|
||||
|
||||
# Deep merge of pillar data into configuration opts.
|
||||
# Evaluated only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_deep_merge_pillar_in_opts': bool,
|
||||
|
||||
# The strategy used when merging pillar into opts.
|
||||
# Considered only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_merge_pillar_in_opts_strategy': str,
|
||||
|
||||
# Allow enabling mine details using pillar data.
|
||||
'proxy_mines_pillar': bool,
|
||||
|
||||
# In some particular cases, always alive proxies are not beneficial.
|
||||
# This option can be used in those less dynamic environments:
|
||||
# the user can request the connection
|
||||
@ -916,6 +950,7 @@ VALID_OPTS = {
|
||||
'ssh_scan_timeout': float,
|
||||
'ssh_identities_only': bool,
|
||||
'ssh_log_file': str,
|
||||
'ssh_config_file': str,
|
||||
|
||||
# Enable ioflo verbose logging. Warning! Very verbose!
|
||||
'ioflo_verbose': int,
|
||||
@ -1087,6 +1122,11 @@ VALID_OPTS = {
|
||||
# (in other words, require that minions have 'minion_sign_messages'
|
||||
# turned on)
|
||||
'require_minion_sign_messages': bool,
|
||||
|
||||
# The list of config entries to be passed to external pillar function as
|
||||
# part of the extra_minion_data param
|
||||
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
|
||||
'pass_to_ext_pillars': (six.string_types, list),
|
||||
}
|
||||
|
||||
# default configurations
|
||||
@ -1110,6 +1150,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'root_dir': salt.syspaths.ROOT_DIR,
|
||||
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
|
||||
'id': '',
|
||||
'id_function': {},
|
||||
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
|
||||
'append_minionid_config_dirs': [],
|
||||
'cache_jobs': False,
|
||||
@ -1205,6 +1246,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'gitfs_ref_types': ['branch', 'tag', 'sha'],
|
||||
'gitfs_refspecs': _DFLT_REFSPECS,
|
||||
'gitfs_disable_saltenv_mapping': False,
|
||||
'unique_jid': False,
|
||||
'hash_type': 'sha256',
|
||||
'disable_modules': [],
|
||||
'disable_returners': [],
|
||||
@ -1449,6 +1491,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'hgfs_saltenv_blacklist': [],
|
||||
'show_timeout': True,
|
||||
'show_jid': False,
|
||||
'unique_jid': False,
|
||||
'svnfs_remotes': [],
|
||||
'svnfs_mountpoint': '',
|
||||
'svnfs_root': '',
|
||||
@ -1481,8 +1524,9 @@ DEFAULT_MASTER_OPTS = {
|
||||
'syndic_forward_all_events': False,
|
||||
'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
|
||||
'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
|
||||
'runner_dirs': [],
|
||||
'outputter_dirs': [],
|
||||
'runner_dirs': [],
|
||||
'utils_dirs': [],
|
||||
'client_acl_verify': True,
|
||||
'publisher_acl': {},
|
||||
'publisher_acl_blacklist': {},
|
||||
@ -1616,6 +1660,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'ssh_scan_timeout': 0.01,
|
||||
'ssh_identities_only': False,
|
||||
'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'),
|
||||
'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'),
|
||||
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
|
||||
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
|
||||
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
|
||||
@ -1682,6 +1727,12 @@ DEFAULT_PROXY_MINION_OPTS = {
|
||||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
'proxy_merge_pillar_in_opts': False,
|
||||
'proxy_deep_merge_pillar_in_opts': False,
|
||||
'proxy_merge_pillar_in_opts_strategy': 'smart',
|
||||
|
||||
'proxy_mines_pillar': True,
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
# If this option is set to False,
|
||||
# the connection with the remote dumb device
|
||||
@ -2680,7 +2731,7 @@ def old_to_new(opts):
|
||||
providers = (
|
||||
'AWS',
|
||||
'CLOUDSTACK',
|
||||
'DIGITAL_OCEAN',
|
||||
'DIGITALOCEAN',
|
||||
'EC2',
|
||||
'GOGRID',
|
||||
'IBMSCE',
|
||||
@ -3344,6 +3395,57 @@ def _cache_id(minion_id, cache_file):
|
||||
log.error('Could not cache minion ID: {0}'.format(exc))
|
||||
|
||||
|
||||
def call_id_function(opts):
|
||||
'''
|
||||
Evaluate the function that determines the ID if the 'id_function'
|
||||
option is set and return the result
|
||||
'''
|
||||
if opts.get('id'):
|
||||
return opts['id']
|
||||
|
||||
# Import 'salt.loader' here to avoid a circular dependency
|
||||
import salt.loader as loader
|
||||
|
||||
if isinstance(opts['id_function'], str):
|
||||
mod_fun = opts['id_function']
|
||||
fun_kwargs = {}
|
||||
elif isinstance(opts['id_function'], dict):
|
||||
mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
|
||||
if fun_kwargs is None:
|
||||
fun_kwargs = {}
|
||||
else:
|
||||
log.error('\'id_function\' option is neither a string nor a dictionary')
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
# split module and function and try loading the module
|
||||
mod, fun = mod_fun.split('.')
|
||||
if not opts.get('grains'):
|
||||
# Get grains for use by the module
|
||||
opts['grains'] = loader.grains(opts)
|
||||
|
||||
try:
|
||||
id_mod = loader.raw_mod(opts, mod, fun)
|
||||
if not id_mod:
|
||||
raise KeyError
|
||||
# we take whatever the module returns as the minion ID
|
||||
newid = id_mod[mod_fun](**fun_kwargs)
|
||||
if not isinstance(newid, str) or not newid:
|
||||
log.error('Function {0} returned value "{1}" of type {2} instead of string'.format(
|
||||
mod_fun, newid, type(newid))
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
log.info('Evaluated minion ID from module: {0}'.format(mod_fun))
|
||||
return newid
|
||||
except TypeError:
|
||||
log.error('Function arguments {0} are incorrect for function {1}'.format(
|
||||
fun_kwargs, mod_fun)
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
except KeyError:
|
||||
log.error('Failed to load module {0}'.format(mod_fun))
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
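A standalone sketch of the normalisation performed at the top of call_id_function(), covering both accepted shapes of 'id_function'; the execution function names in the asserts are illustrative:

def _parse_id_function(id_function):
    # A plain string means "call this module.function with no arguments";
    # a one-key dict maps the function name to its keyword arguments.
    if isinstance(id_function, str):
        return id_function, {}
    if isinstance(id_function, dict):
        mod_fun, fun_kwargs = next(iter(id_function.items()))
        return mod_fun, fun_kwargs or {}
    raise ValueError("'id_function' must be a string or a dictionary")


assert _parse_id_function('network.get_hostname') == ('network.get_hostname', {})
assert _parse_id_function({'grains.get': {'key': 'fqdn'}}) == ('grains.get', {'key': 'fqdn'})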
|
||||
|
||||
|
||||
def get_id(opts, cache_minion_id=False):
|
||||
'''
|
||||
Guess the id of the minion.
|
||||
@ -3385,13 +3487,21 @@ def get_id(opts, cache_minion_id=False):
|
||||
log.debug('Guessing ID. The id can be explicitly set in {0}'
|
||||
.format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
|
||||
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
if opts.get('id_function'):
|
||||
newid = call_id_function(opts)
|
||||
else:
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
|
||||
if opts.get('minion_id_lowercase'):
|
||||
newid = newid.lower()
|
||||
log.debug('Changed minion id {0} to lowercase.'.format(newid))
|
||||
if '__role' in opts and opts.get('__role') == 'minion':
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(newid))
|
||||
if opts.get('id_function'):
|
||||
log.debug('Found minion id from external function {0}: {1}'.format(
|
||||
opts['id_function'], newid))
|
||||
else:
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(
|
||||
newid))
|
||||
if cache_minion_id and opts.get('minion_id_caching', True):
|
||||
_cache_id(newid, id_cache)
|
||||
is_ipv4 = salt.utils.network.is_ipv4(newid)
|
||||
@ -3611,12 +3721,23 @@ def apply_master_config(overrides=None, defaults=None):
|
||||
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
|
||||
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
|
||||
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Make sure ext_mods gets set if it is an untrue value
|
||||
# (here to catch older bad configs)
|
||||
opts['extension_modules'] = (
|
||||
opts.get('extension_modules') or
|
||||
os.path.join(opts['cachedir'], 'extmods')
|
||||
)
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Set up the utils_dirs location from the extension_modules location
|
||||
opts['utils_dirs'] = (
|
||||
opts.get('utils_dirs') or
|
||||
[os.path.join(opts['extension_modules'], 'utils')]
|
||||
)
|
||||
|
||||
# Insert all 'utils_dirs' directories to the system path
|
||||
insert_system_path(opts, opts['utils_dirs'])
|
||||
|
||||
if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic':
|
||||
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
|
||||
if 'ipc_write_buffer' not in overrides:
|
||||
|
salt/config/schemas/esxcluster.py (new file, 215 lines)
@ -0,0 +1,215 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
|
||||
salt.config.schemas.esxcluster
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
ESX Cluster configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
DefinitionsSchema,
|
||||
ComplexSchemaItem,
|
||||
DictItem,
|
||||
ArrayItem,
|
||||
IntegerItem,
|
||||
BooleanItem,
|
||||
StringItem,
|
||||
AnyOfItem)
|
||||
|
||||
|
||||
class OptionValueItem(ComplexSchemaItem):
|
||||
'''Schema item of the OptionValue'''
|
||||
|
||||
title = 'OptionValue'
|
||||
key = StringItem(title='Key', required=True)
|
||||
value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()])
|
||||
|
||||
|
||||
class AdmissionControlPolicyItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA admission control policy
|
||||
'''
|
||||
|
||||
title = 'Admission Control Policy'
|
||||
|
||||
cpu_failover_percent = IntegerItem(
|
||||
title='CPU Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
memory_failover_percent = IntegerItem(
|
||||
title='Memory Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
|
||||
|
||||
class DefaultVmSettingsItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA default vm settings
|
||||
'''
|
||||
|
||||
title = 'Default VM Settings'
|
||||
|
||||
isolation_response = StringItem(
|
||||
title='Isolation Response',
|
||||
enum=['clusterIsolationResponse', 'none', 'powerOff', 'shutdown'])
|
||||
restart_priority = StringItem(
|
||||
title='Restart Priority',
|
||||
enum=['clusterRestartPriority', 'disabled', 'high', 'low', 'medium'])
|
||||
|
||||
|
||||
class HAConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of ESX cluster high availability
|
||||
'''
|
||||
|
||||
title = 'HA Configuration'
|
||||
description = 'ESX cluster HA configuration json schema item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if HA should be enabled')
|
||||
admission_control_enabled = BooleanItem(
|
||||
title='Admission Control Enabled')
|
||||
admission_control_policy = AdmissionControlPolicyItem()
|
||||
default_vm_settings = DefaultVmSettingsItem()
|
||||
hb_ds_candidate_policy = StringItem(
|
||||
title='Heartbeat Datastore Candidate Policy',
|
||||
enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference',
|
||||
'userSelectedDs'])
|
||||
host_monitoring = StringItem(title='Host Monitoring',
|
||||
choices=['enabled', 'disabled'])
|
||||
options = ArrayItem(min_items=1, items=OptionValueItem())
|
||||
vm_monitoring = StringItem(
|
||||
title='Vm Monitoring',
|
||||
choices=['vmMonitoringDisabled', 'vmAndAppMonitoring',
|
||||
'vmMonitoringOnly'])
|
||||
|
||||
|
||||
class vSANClusterConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster vSAN configuration
|
||||
'''
|
||||
|
||||
title = 'vSAN Configuration'
|
||||
description = 'ESX cluster vSAN configuration item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if vSAN should be enabled')
|
||||
auto_claim_storage = BooleanItem(
|
||||
title='Auto Claim Storage',
|
||||
description='Specifies whether the storage of member ESXi hosts should '
|
||||
'be automatically claimed for vSAN')
|
||||
dedup_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if dedup should be enabled')
|
||||
compression_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if compression should be enabled')
|
||||
|
||||
|
||||
class DRSConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster DRS configuration
|
||||
'''
|
||||
|
||||
title = 'DRS Configuration'
|
||||
description = 'ESX cluster DRS configuration item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if DRS should be enabled')
|
||||
vmotion_rate = IntegerItem(
|
||||
title='vMotion rate',
|
||||
description='Aggressiveness to do automatic vMotions: '
|
||||
'1 (least aggressive) - 5 (most aggressive)',
|
||||
minimum=1,
|
||||
maximum=5)
|
||||
default_vm_behavior = StringItem(
|
||||
title='Default VM DRS Behavior',
|
||||
description='Specifies the default VM DRS behavior',
|
||||
enum=['fullyAutomated', 'partiallyAutomated', 'manual'])
|
||||
|
||||
|
||||
class ESXClusterConfigSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of the ESX cluster config
|
||||
'''
|
||||
|
||||
title = 'ESX Cluster Configuration Schema'
|
||||
description = 'ESX cluster configuration schema'
|
||||
|
||||
ha = HAConfigItem()
|
||||
vsan = vSANClusterConfigItem()
|
||||
drs = DRSConfigItem()
|
||||
vm_swap_placement = StringItem(title='VM Swap Placement')
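A hypothetical cluster configuration matching the shape of ESXClusterConfigSchema above; the keys mirror the schema item names, while the concrete values and the das.* advanced option are illustrative assumptions:

esx_cluster_config = {
    'ha': {
        'enabled': True,
        'admission_control_enabled': True,
        'admission_control_policy': {
            'cpu_failover_percent': 25,
            'memory_failover_percent': 25,
        },
        # OptionValueItem entries: key/value pairs
        'options': [{'key': 'das.ignoreInsufficientHbDatastore', 'value': True}],
    },
    'drs': {
        'enabled': True,
        'vmotion_rate': 3,
        'default_vm_behavior': 'fullyAutomated',
    },
    'vsan': {
        'enabled': True,
        'auto_claim_storage': False,
    },
    'vm_swap_placement': 'vmDirectory',
}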
|
||||
|
||||
|
||||
class ESXClusterEntitySchema(Schema):
|
||||
'''Schema of the ESX cluster entity'''
|
||||
|
||||
title = 'ESX Cluster Entity Schema'
|
||||
description = 'ESX cluster entity schema'
|
||||
|
||||
type = StringItem(title='Type',
|
||||
description='Specifies the entity type',
|
||||
required=True,
|
||||
enum=['cluster'])
|
||||
|
||||
datacenter = StringItem(title='Datacenter',
|
||||
description='Specifies the cluster datacenter',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
cluster = StringItem(title='Cluster',
|
||||
description='Specifies the cluster name',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
|
||||
class LicenseSchema(Schema):
|
||||
'''
|
||||
Schema of the ESX cluster license configuration
|
||||
'''
|
||||
|
||||
title = 'Licenses schema'
|
||||
description = 'License configuration schema'
|
||||
|
||||
licenses = DictItem(
|
||||
title='Licenses',
|
||||
description='Dictionary containing the license name to key mapping',
|
||||
required=True,
|
||||
additional_properties=StringItem(
|
||||
title='License Key',
|
||||
description='Specifies the license key',
|
||||
pattern=r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$'))
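The license-key pattern used by LicenseSchema above can be checked directly; the key below is a made-up value with five groups of five word characters:

import re

pattern = r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$'
assert re.match(pattern, 'ABC12-DEF34-GHI56-JKL78-MNO90')
assert re.match(pattern, 'too-short') is None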
|
||||
|
||||
|
||||
class EsxclusterProxySchema(Schema):
|
||||
'''
|
||||
Schema of the esxcluster proxy input
|
||||
'''
|
||||
|
||||
title = 'Esxcluster Proxy Schema'
|
||||
description = 'Esxcluster proxy schema'
|
||||
additional_properties = False
|
||||
proxytype = StringItem(required=True,
|
||||
enum=['esxcluster'])
|
||||
vcenter = StringItem(required=True, pattern=r'[^\s]+')
|
||||
datacenter = StringItem(required=True)
|
||||
cluster = StringItem(required=True)
|
||||
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
|
||||
username = StringItem()
|
||||
passwords = ArrayItem(min_items=1,
|
||||
items=StringItem(),
|
||||
unique_items=True)
|
||||
# TODO Should be changed when anyOf is supported for schemas
|
||||
domain = StringItem()
|
||||
principal = StringItem()
|
||||
protocol = StringItem()
|
||||
port = IntegerItem(minimum=1)
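A hypothetical proxy configuration matching EsxclusterProxySchema above; all values are illustrative, and 'userpass' is one of the two allowed mechanisms:

esxcluster_proxy = {
    'proxytype': 'esxcluster',
    'vcenter': 'vcenter01.example.com',   # assumed hostname
    'datacenter': 'dc1',
    'cluster': 'cluster1',
    'mechanism': 'userpass',
    'username': 'svc-salt',
    'passwords': ['hunter2'],              # candidate passwords, tried in order
}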
|
salt/config/schemas/vcenter.py (new file, 33 lines)
@ -0,0 +1,33 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)`
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
salt.config.schemas.vcenter
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
VCenter configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
StringItem)
|
||||
|
||||
|
||||
class VCenterEntitySchema(Schema):
|
||||
'''
|
||||
Entity Schema for a VCenter.
|
||||
'''
|
||||
title = 'VCenter Entity Schema'
|
||||
description = 'VCenter entity schema'
|
||||
type = StringItem(title='Type',
|
||||
description='Specifies the entity type',
|
||||
required=True,
|
||||
enum=['vcenter'])
|
||||
|
||||
vcenter = StringItem(title='vCenter',
|
||||
description='Specifies the vcenter hostname',
|
||||
required=True)
|
@ -400,7 +400,7 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
|
||||
kind=kinds.applKinds.master))
|
||||
except gaierror as ex:
|
||||
log.warning("Unable to connect to master {0}: {1}".format(mha, ex))
|
||||
if self.opts.value.get('master_type') != 'failover':
|
||||
if self.opts.value.get(u'master_type') not in (u'failover', u'distributed'):
|
||||
raise ex
|
||||
if not stack.remotes:
|
||||
raise ex
|
||||
|
@ -32,6 +32,8 @@ import salt.utils.atomicfile
|
||||
import salt.utils.event
|
||||
import salt.utils.files
|
||||
import salt.utils.gitfs
|
||||
import salt.utils.verify
|
||||
import salt.utils.minions
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.jid
|
||||
import salt.utils.minions
|
||||
@ -64,44 +66,19 @@ def init_git_pillar(opts):
|
||||
ret = []
|
||||
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
|
||||
if 'git' in opts_dict:
|
||||
if isinstance(opts_dict['git'], six.string_types):
|
||||
# Legacy git pillar code
|
||||
try:
|
||||
import git
|
||||
except ImportError:
|
||||
return ret
|
||||
parts = opts_dict['git'].strip().split()
|
||||
try:
|
||||
br = parts[0]
|
||||
loc = parts[1]
|
||||
except IndexError:
|
||||
log.critical(
|
||||
'Unable to extract external pillar data: {0}'
|
||||
.format(opts_dict['git'])
|
||||
)
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
raise
|
||||
else:
|
||||
ret.append(
|
||||
git_pillar._LegacyGitPillar(
|
||||
br,
|
||||
loc,
|
||||
opts
|
||||
)
|
||||
)
|
||||
else:
|
||||
# New git_pillar code
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
raise
|
||||
else:
|
||||
log.critical('Could not initialize git_pillar')
|
||||
log.critical('Could not initialize git_pillar')
|
||||
return ret
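A hedged sketch of the simplified loop shown above: every 'git' ext_pillar entry now goes through GitPillar/init_remotes, and configuration errors are fatal only when git_pillar_verify_config is enabled (the real code logs a critical message and continues otherwise):

def init_git_pillar_entries(opts, GitPillar, overrides, per_remote_only):
    pillars = []
    for entry in opts.get('ext_pillar', []):
        if 'git' not in entry:
            continue
        try:
            pillar = GitPillar(opts)
            pillar.init_remotes(entry['git'], overrides, per_remote_only)
            pillars.append(pillar)
        except Exception:  # FileserverConfigError in the real code
            if opts.get('git_pillar_verify_config', True):
                raise
            # otherwise: log the failure and move on to the next entry
    return pillars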
|
||||
|
||||
|
||||
@ -575,11 +552,12 @@ class RemoteFuncs(object):
|
||||
if match_type.lower() == 'compound':
|
||||
match_type = 'compound_pillar_exact'
|
||||
checker = salt.utils.minions.CkMinions(self.opts)
|
||||
minions = checker.check_minions(
|
||||
_res = checker.check_minions(
|
||||
load['tgt'],
|
||||
match_type,
|
||||
greedy=False
|
||||
)
|
||||
minions = _res['minions']
|
||||
for minion in minions:
|
||||
fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine')
|
||||
if isinstance(fdata, dict):
|
||||
@ -707,8 +685,7 @@ class RemoteFuncs(object):
|
||||
load.get('ext'),
|
||||
self.mminion.functions,
|
||||
pillar_override=load.get('pillar_override', {}))
|
||||
pillar_dirs = {}
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
data = pillar.compile_pillar()
|
||||
if self.opts.get('minion_data_cache', False):
|
||||
self.cache.store('minions/{0}'.format(load['id']),
|
||||
'data',
|
||||
@ -744,7 +721,7 @@ class RemoteFuncs(object):
|
||||
Handle the return data sent from the minions
|
||||
'''
|
||||
# Generate EndTime
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts))
|
||||
# If the return data is invalid, just ignore it
|
||||
if any(key not in load for key in ('return', 'jid', 'id')):
|
||||
return False
|
||||
@ -898,9 +875,10 @@ class RemoteFuncs(object):
|
||||
pub_load['tgt_type'] = load['tgt_type']
|
||||
ret = {}
|
||||
ret['jid'] = self.local.cmd_async(**pub_load)
|
||||
ret['minions'] = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
pub_load['tgt_type'])
|
||||
ret['minions'] = _res['minions']
|
||||
auth_cache = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'publish_auth')
|
||||
@ -1037,35 +1015,33 @@ class LocalFuncs(object):
|
||||
'''
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred '
|
||||
'for user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
# All runner opts pass through eauth
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.').format(auth_type, username)))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(load, auth_type)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
username = auth_check.get('username')
|
||||
if not runner_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and 'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
@ -1074,56 +1050,49 @@ class LocalFuncs(object):
|
||||
username)
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return {'error': {'name': exc.__class__.__name__,
|
||||
'args': exc.args,
|
||||
'message': str(exc)}}
|
||||
|
||||
def wheel(self, load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_key(load, self.key):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "user" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(
|
||||
load,
|
||||
auth_type,
|
||||
key=key,
|
||||
show_username=True
|
||||
)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
username = auth_check.get('username')
|
||||
if auth_type != 'user':
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(auth_type, username)))
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
if not wheel_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# Authenticated. Do the job.
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = load.pop('fun')
|
||||
tag = salt.utils.event.tagify(jid, prefix='wheel')
|
||||
data = {'fun': "wheel.{0}".format(fun),
|
||||
@ -1140,7 +1109,7 @@ class LocalFuncs(object):
|
||||
'data': data}
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
|
||||
fun,
|
||||
exc.__class__.__name__,
|
||||
@ -1193,11 +1162,12 @@ class LocalFuncs(object):
|
||||
|
||||
# Retrieve the minions list
|
||||
delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res['minions']
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
@ -1207,12 +1177,7 @@ class LocalFuncs(object):
|
||||
return ''
|
||||
|
||||
# Get acl from eauth module.
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
extra['eauth'] = token['eauth']
|
||||
extra['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
@ -1409,3 +1374,18 @@ class LocalFuncs(object):
|
||||
},
|
||||
'pub': pub_load
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, load):
|
||||
key = None
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key
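A sketch of how the (auth_type, err_name, key) triple returned by _prep_auth_info() is consumed in the runner and wheel paths shown above; the helper name here is hypothetical:

def _authenticate(funcs, load):
    # Classify the request, then run the matching authentication check.
    auth_type, err_name, key = funcs._prep_auth_info(load)
    auth_check = funcs.loadauth.check_authentication(
        load, auth_type, key=key, show_username=True)
    error = auth_check.get('error')
    if error:
        # Authentication failed: report the error and stop.
        return {'error': error}
    # Authorization (runner_check / wheel_check) happens next in the real code.
    return auth_check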
|
||||
|
File diff suppressed because it is too large
@ -393,7 +393,19 @@ class TemplateError(SaltException):
# Validation related exceptions
class InvalidConfigError(CommandExecutionError):
'''
Used when the input is invalid
Used when the config is invalid
'''


class ArgumentValueError(CommandExecutionError):
'''
Used when an invalid argument was passed to a command execution
'''


class InvalidEntityError(CommandExecutionError):
'''
Used when an entity fails validation
'''
7
salt/ext/vsan/__init__.py
Normal file
@ -0,0 +1,7 @@
# coding: utf-8 -*-
'''
This directory contains the object model and utils for the vsan VMware SDK
extension.

They are governed under their respective licenses.
'''
165
salt/ext/vsan/vsanapiutils.py
Normal file
@ -0,0 +1,165 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Copyright 2016 VMware, Inc. All rights reserved.
|
||||
|
||||
This module defines basic helper functions used in the sampe codes
|
||||
"""
|
||||
|
||||
# pylint: skip-file
|
||||
__author__ = 'VMware, Inc'
|
||||
|
||||
from pyVmomi import vim, vmodl, SoapStubAdapter
|
||||
#import the VSAN API python bindings
|
||||
import vsanmgmtObjects
|
||||
|
||||
VSAN_API_VC_SERVICE_ENDPOINT = '/vsanHealth'
|
||||
VSAN_API_ESXI_SERVICE_ENDPOINT = '/vsan'
|
||||
|
||||
#Constuct a stub for VSAN API access using VC or ESXi sessions from existing
|
||||
#stubs. Correspoding VC or ESXi service endpoint is required. VC service
|
||||
#endpoint is used as default
|
||||
def _GetVsanStub(
|
||||
stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
|
||||
context=None, version='vim.version.version10'
|
||||
):
|
||||
|
||||
hostname = stub.host.split(':')[0]
|
||||
vsanStub = SoapStubAdapter(
|
||||
host=hostname,
|
||||
path=endpoint,
|
||||
version=version,
|
||||
sslContext=context
|
||||
)
|
||||
vsanStub.cookie = stub.cookie
|
||||
return vsanStub
|
||||
|
||||
#Construct a stub for access VC side VSAN APIs
|
||||
def GetVsanVcStub(stub, context=None):
|
||||
return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
|
||||
context=context)
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanEsxStub(stub, context=None):
|
||||
return _GetVsanStub(stub, endpoint=VSAN_API_ESXI_SERVICE_ENDPOINT,
|
||||
context=context)
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanVcMos(vcStub, context=None):
|
||||
vsanStub = GetVsanVcStub(vcStub, context)
|
||||
vcMos = {
|
||||
'vsan-disk-management-system' : vim.cluster.VsanVcDiskManagementSystem(
|
||||
'vsan-disk-management-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-stretched-cluster-system' : vim.cluster.VsanVcStretchedClusterSystem(
|
||||
'vsan-stretched-cluster-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-config-system' : vim.cluster.VsanVcClusterConfigSystem(
|
||||
'vsan-cluster-config-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-performance-manager' : vim.cluster.VsanPerformanceManager(
|
||||
'vsan-performance-manager',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-health-system' : vim.cluster.VsanVcClusterHealthSystem(
|
||||
'vsan-cluster-health-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-upgrade-systemex' : vim.VsanUpgradeSystemEx(
|
||||
'vsan-upgrade-systemex',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-space-report-system' : vim.cluster.VsanSpaceReportSystem(
|
||||
'vsan-cluster-space-report-system',
|
||||
vsanStub
|
||||
),
|
||||
|
||||
'vsan-cluster-object-system' : vim.cluster.VsanObjectSystem(
|
||||
'vsan-cluster-object-system',
|
||||
vsanStub
|
||||
),
|
||||
}
|
||||
|
||||
return vcMos
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanEsxMos(esxStub, context=None):
|
||||
vsanStub = GetVsanEsxStub(esxStub, context)
|
||||
esxMos = {
|
||||
'vsan-performance-manager' : vim.cluster.VsanPerformanceManager(
|
||||
'vsan-performance-manager',
|
||||
vsanStub
|
||||
),
|
||||
'ha-vsan-health-system' : vim.host.VsanHealthSystem(
|
||||
'ha-vsan-health-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-object-system' : vim.cluster.VsanObjectSystem(
|
||||
'vsan-object-system',
|
||||
vsanStub
|
||||
),
|
||||
}
|
||||
|
||||
return esxMos
|
||||
|
||||
#Convert a VSAN Task to a Task MO binding to VC service
|
||||
#@param vsanTask the VSAN Task MO
|
||||
#@param stub the stub for the VC API
|
||||
def ConvertVsanTaskToVcTask(vsanTask, vcStub):
|
||||
vcTask = vim.Task(vsanTask._moId, vcStub)
|
||||
return vcTask
|
||||
|
||||
def WaitForTasks(tasks, si):
|
||||
"""
|
||||
Given the service instance si and tasks, it returns after all the
|
||||
tasks are complete
|
||||
"""
|
||||
|
||||
pc = si.content.propertyCollector
|
||||
|
||||
taskList = [str(task) for task in tasks]
|
||||
|
||||
# Create filter
|
||||
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
|
||||
for task in tasks]
|
||||
propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
|
||||
pathSet=[], all=True)
|
||||
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
|
||||
filterSpec.objectSet = objSpecs
|
||||
filterSpec.propSet = [propSpec]
|
||||
filter = pc.CreateFilter(filterSpec, True)
|
||||
|
||||
try:
|
||||
version, state = None, None
|
||||
|
||||
# Loop looking for updates till the state moves to a completed state.
|
||||
while len(taskList):
|
||||
update = pc.WaitForUpdates(version)
|
||||
for filterSet in update.filterSet:
|
||||
for objSet in filterSet.objectSet:
|
||||
task = objSet.obj
|
||||
for change in objSet.changeSet:
|
||||
if change.name == 'info':
|
||||
state = change.val.state
|
||||
elif change.name == 'info.state':
|
||||
state = change.val
|
||||
else:
|
||||
continue
|
||||
|
||||
if not str(task) in taskList:
|
||||
continue
|
||||
|
||||
if state == vim.TaskInfo.State.success:
|
||||
# Remove task from taskList
|
||||
taskList.remove(str(task))
|
||||
elif state == vim.TaskInfo.State.error:
|
||||
raise task.info.error
|
||||
# Move to next version
|
||||
version = update.version
|
||||
finally:
|
||||
if filter:
|
||||
filter.Destroy()
|
143
salt/ext/vsan/vsanmgmtObjects.py
Normal file
File diff suppressed because one or more lines are too long
@ -185,12 +185,13 @@ class Client(object):
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def cache_file(self, path, saltenv=u'base', cachedir=None):
|
||||
def cache_file(self, path, saltenv=u'base', cachedir=None, source_hash=None):
|
||||
'''
|
||||
Pull a file down from the file server and store it in the minion
|
||||
file cache
|
||||
'''
|
||||
return self.get_url(path, u'', True, saltenv, cachedir=cachedir)
|
||||
return self.get_url(
|
||||
path, u'', True, saltenv, cachedir=cachedir, source_hash=source_hash)
|
||||
|
||||
def cache_files(self, paths, saltenv=u'base', cachedir=None):
|
||||
'''
|
||||
@ -470,7 +471,7 @@ class Client(object):
|
||||
return ret
|
||||
|
||||
def get_url(self, url, dest, makedirs=False, saltenv=u'base',
|
||||
no_cache=False, cachedir=None):
|
||||
no_cache=False, cachedir=None, source_hash=None):
|
||||
'''
|
||||
Get a single file from a URL.
|
||||
'''
|
||||
@ -525,6 +526,18 @@ class Client(object):
|
||||
return u''
|
||||
elif not no_cache:
|
||||
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
|
||||
if source_hash is not None:
|
||||
try:
|
||||
source_hash = source_hash.split('=')[-1]
|
||||
form = salt.utils.files.HASHES_REVMAP[len(source_hash)]
|
||||
if salt.utils.get_hash(dest, form) == source_hash:
|
||||
log.debug(
|
||||
'Cached copy of %s (%s) matches source_hash %s, '
|
||||
'skipping download', url, dest, source_hash
|
||||
)
|
||||
return dest
|
||||
except (AttributeError, KeyError, IOError, OSError):
|
||||
pass
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
os.makedirs(destdir)
|
||||
@ -532,7 +545,9 @@ class Client(object):
|
||||
if url_data.scheme == u's3':
|
||||
try:
|
||||
def s3_opt(key, default=None):
|
||||
u'''Get value of s3.<key> from Minion config or from Pillar'''
|
||||
'''
|
||||
Get value of s3.<key> from Minion config or from Pillar
|
||||
'''
|
||||
if u's3.' + key in self.opts:
|
||||
return self.opts[u's3.' + key]
|
||||
try:
|
||||
@ -744,12 +759,7 @@ class Client(object):
|
||||
Cache a file then process it as a template
|
||||
'''
|
||||
if u'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
u'Oxygen',
|
||||
u'Parameter \'env\' has been detected in the argument list. This '
|
||||
u'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop(u'env')
|
||||
|
||||
kwargs[u'saltenv'] = saltenv
|
||||
@ -790,7 +800,7 @@ class Client(object):
|
||||
|
||||
def _extrn_path(self, url, saltenv, cachedir=None):
|
||||
'''
|
||||
Return the extn_filepath for a given url
|
||||
Return the extrn_filepath for a given url
|
||||
'''
|
||||
url_data = urlparse(url)
|
||||
if salt.utils.platform.is_windows():
|
||||
@ -1300,10 +1310,10 @@ class RemoteClient(Client):
|
||||
hash_type = self.opts.get(u'hash_type', u'md5')
|
||||
ret[u'hsum'] = salt.utils.get_hash(path, form=hash_type)
|
||||
ret[u'hash_type'] = hash_type
|
||||
return ret, list(os.stat(path))
|
||||
return ret
|
||||
load = {u'path': path,
|
||||
u'saltenv': saltenv,
|
||||
u'cmd': u'_file_hash_and_stat'}
|
||||
u'cmd': u'_file_hash'}
|
||||
return self.channel.send(load)
|
||||
|
||||
def hash_file(self, path, saltenv=u'base'):
|
||||
@ -1312,14 +1322,33 @@ class RemoteClient(Client):
|
||||
master file server prepend the path with salt://<file on server>
|
||||
otherwise, prepend the file with / for a local file.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)[0]
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
|
||||
def hash_and_stat_file(self, path, saltenv=u'base'):
|
||||
'''
|
||||
The same as hash_file, but also return the file's mode, or None if no
|
||||
mode data is present.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
hash_result = self.hash_file(path, saltenv)
|
||||
try:
|
||||
path = self._check_proto(path)
|
||||
except MinionError as err:
|
||||
if not os.path.isfile(path):
|
||||
return hash_result, None
|
||||
else:
|
||||
try:
|
||||
return hash_result, list(os.stat(path))
|
||||
except Exception:
|
||||
return hash_result, None
|
||||
load = {'path': path,
|
||||
'saltenv': saltenv,
|
||||
'cmd': '_file_find'}
|
||||
fnd = self.channel.send(load)
|
||||
try:
|
||||
stat_result = fnd.get('stat')
|
||||
except AttributeError:
|
||||
stat_result = None
|
||||
return hash_result, stat_result
|
||||
|
||||
def list_env(self, saltenv=u'base'):
|
||||
'''
|
||||
|
@ -553,12 +553,7 @@ class Fileserver(object):
|
||||
kwargs[args[0]] = args[1]
|
||||
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
if 'saltenv' in kwargs:
|
||||
saltenv = kwargs.pop('saltenv')
|
||||
@ -583,12 +578,7 @@ class Fileserver(object):
|
||||
'dest': ''}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
|
||||
@ -609,13 +599,7 @@ class Fileserver(object):
|
||||
Common code for hashing and stating files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. '
|
||||
'This parameter is no longer used and has been replaced by '
|
||||
'\'saltenv\' as of Salt 2016.11.0. This warning will be removed '
|
||||
'in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
@ -656,12 +640,7 @@ class Fileserver(object):
|
||||
Deletes the file_lists cache files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
saltenv = load.get('saltenv', [])
|
||||
@ -738,12 +717,7 @@ class Fileserver(object):
|
||||
Return a list of files from the dominant environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
@ -769,12 +743,7 @@ class Fileserver(object):
|
||||
List all emptydirs in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
@ -800,12 +769,7 @@ class Fileserver(object):
|
||||
List all directories in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
@ -831,12 +795,7 @@ class Fileserver(object):
|
||||
Return a list of symlinked files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
@ -736,12 +736,7 @@ def serve_file(load, fnd):
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
@ -770,12 +765,7 @@ def file_hash(load, fnd):
|
||||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
@ -804,12 +794,7 @@ def _file_lists(load, form):
|
||||
Return a dict containing the file lists for files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
|
||||
@ -852,12 +837,7 @@ def _get_file_list(load):
|
||||
Get a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
@ -897,12 +877,7 @@ def _get_dir_list(load):
|
||||
Get a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
@ -165,12 +165,7 @@ def file_hash(load, fnd):
|
||||
ret = {}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
@ -235,12 +230,7 @@ def file_list(load):
|
||||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
@ -319,12 +309,7 @@ def dir_list(load):
|
||||
- source-minion/absolute/path
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
@ -40,12 +40,7 @@ def find_file(path, saltenv='base', **kwargs):
|
||||
Search the environment for the relative path.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
path = os.path.normpath(path)
|
||||
@ -117,12 +112,7 @@ def serve_file(load, fnd):
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
@ -218,12 +208,7 @@ def file_hash(load, fnd):
|
||||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
@ -298,12 +283,7 @@ def _file_lists(load, form):
|
||||
Return a dict containing the file lists for files, dirs, emtydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in __opts__['file_roots']:
|
||||
@ -444,12 +424,7 @@ def symlink_list(load):
|
||||
Return a dict of all symlinks based on a given path on the Master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
@ -126,12 +126,7 @@ def find_file(path, saltenv='base', **kwargs):
|
||||
is missing, or if the MD5 does not match.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
fnd = {'bucket': None,
|
||||
@ -168,12 +163,7 @@ def file_hash(load, fnd):
|
||||
Return an MD5 file hash
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
@ -201,12 +191,7 @@ def serve_file(load, fnd):
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
@ -245,12 +230,7 @@ def file_list(load):
|
||||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
@ -286,12 +266,7 @@ def dir_list(load):
|
||||
Return a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
|
@ -631,12 +631,7 @@ def serve_file(load, fnd):
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
@ -665,12 +660,7 @@ def file_hash(load, fnd):
|
||||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
@ -723,12 +713,7 @@ def _file_lists(load, form):
|
||||
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
38
salt/grains/cimc.py
Normal file
@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
'''
Generate baseline proxy minion grains for cimc hosts.

'''

# Import Python Libs
from __future__ import absolute_import
import logging

# Import Salt Libs
import salt.utils.platform
import salt.proxy.cimc

__proxyenabled__ = ['cimc']
__virtualname__ = 'cimc'

log = logging.getLogger(__file__)

GRAINS_CACHE = {'os_family': 'Cisco UCS'}


def __virtual__():
try:
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'cimc':
return __virtualname__
except KeyError:
pass

return False


def cimc(proxy=None):
if not proxy:
return {}
if proxy['cimc.initialized']() is False:
return {}
return {'cimc': proxy['cimc.grains']()}
@ -543,7 +543,7 @@ def _virtual(osdata):
command = 'system_profiler'
args = ['SPDisplaysDataType']
elif osdata['kernel'] == 'SunOS':
virtinfo = salt.utils.which('virtinfo')
virtinfo = salt.utils.path.which('virtinfo')
if virtinfo:
try:
ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo))
@ -805,6 +805,8 @@ def _virtual(osdata):
grains['virtual_subtype'] = 'ovirt'
elif 'Google' in output:
grains['virtual'] = 'gce'
elif 'BHYVE' in output:
grains['virtual'] = 'bhyve'
except IOError:
pass
elif osdata['kernel'] == 'FreeBSD':
@ -2423,4 +2425,46 @@ def get_master():
# master
return {'master': __opts__.get('master', '')}

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

def default_gateway():
'''
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
`default via <ip>` format for default gateways, it will also parse out the
ip address of the default gateway, and put it in ip4_gw or ip6_gw.

If the `ip` command is unavailable, no grains will be populated.

Currently does not support multiple default gateways. The grains will be
set to the first default gateway found.

List of grains:

ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
'''
grains = {}
if not salt.utils.path.which('ip'):
return {}
grains['ip_gw'] = False
grains['ip4_gw'] = False
grains['ip6_gw'] = False
if __salt__['cmd.run']('ip -4 route show | grep "^default"', python_shell=True):
grains['ip_gw'] = True
grains['ip4_gw'] = True
try:
gateway_ip = __salt__['cmd.run']('ip -4 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
grains['ip4_gw'] = gateway_ip if gateway_ip else True
except Exception as exc:
pass
if __salt__['cmd.run']('ip -6 route show | grep "^default"', python_shell=True):
grains['ip_gw'] = True
grains['ip6_gw'] = True
try:
gateway_ip = __salt__['cmd.run']('ip -6 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
grains['ip6_gw'] = gateway_ip if gateway_ip else True
except Exception as exc:
pass
return grains
@ -148,7 +148,7 @@ def _linux_disks():


def _windows_disks():
wmic = salt.utils.which('wmic')
wmic = salt.utils.path.which('wmic')

namespace = r'\\root\microsoft\windows\storage'
path = 'MSFT_PhysicalDisk'
@ -11,6 +11,7 @@ import logging

# Import salt libs
import salt.utils.files
import salt.utils.platform

log = logging.getLogger(__name__)

@ -21,7 +22,14 @@ def shell():
'''
# Provides:
# shell
return {'shell': os.environ.get('SHELL', '/bin/sh')}
if salt.utils.platform.is_windows():
env_var = 'COMSPEC'
default = r'C:\Windows\system32\cmd.exe'
else:
env_var = 'SHELL'
default = '/bin/sh'

return {'shell': os.environ.get(env_var, default)}


def config():
@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`.
from __future__ import absolute_import

# Import python libs
import json
import os
import socket

@ -47,16 +48,30 @@ def _search(prefix="latest/"):
Recursively look up all grains in the metadata server
'''
ret = {}
for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'):
linedata = http.query(os.path.join(HOST, prefix))
if 'body' not in linedata:
return ret
for line in linedata['body'].split('\n'):
if line.endswith('/'):
ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
elif prefix == 'latest/':
# (gtmanfred) The first level should have a forward slash since
# they have stuff underneath. This will not be doubled up though,
# because lines ending with a slash are checked first.
ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
elif line.endswith(('dynamic', 'meta-data')):
ret[line] = _search(prefix=os.path.join(prefix, line))
elif '=' in line:
key, value = line.split('=')
ret[value] = _search(prefix=os.path.join(prefix, key))
else:
ret[line] = http.query(os.path.join(HOST, prefix, line))['body']
retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
# (gtmanfred) This try except block is slightly faster than
# checking if the string starts with a curly brace
try:
ret[line] = json.loads(retdata)
except ValueError:
ret[line] = retdata
return ret
@ -447,8 +447,8 @@ def optional_args(proxy=None):
device2:
True
'''
opt_args = _get_device_grain('optional_args', proxy=proxy)
if _FORBIDDEN_OPT_ARGS:
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args}
37
salt/key.py
@ -496,7 +496,7 @@ class Key(object):
|
||||
minions = []
|
||||
for key, val in six.iteritems(keys):
|
||||
minions.extend(val)
|
||||
if not self.opts.get(u'preserve_minion_cache', False) or not preserve_minions:
|
||||
if not self.opts.get(u'preserve_minion_cache', False):
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], self.ACC)
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
@ -743,7 +743,7 @@ class Key(object):
|
||||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
@ -781,11 +781,10 @@ class Key(object):
|
||||
salt.utils.event.tagify(prefix=u'key'))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if preserve_minions:
|
||||
preserve_minions_list = matches.get(u'minions', [])
|
||||
if self.opts.get(u'preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
preserve_minions_list = []
|
||||
self.check_minion_cache(preserve_minions=preserve_minions_list)
|
||||
self.check_minion_cache()
|
||||
if self.opts.get(u'rotate_aes_key'):
|
||||
salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user'])
|
||||
return (
|
||||
@ -976,16 +975,17 @@ class RaetKey(Key):
|
||||
minions.extend(val)
|
||||
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], u'minions')
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions:
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if not self.opts.get('preserve_minion_cache', False):
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
|
||||
kind = self.opts.get(u'__role', u'') # application kind
|
||||
if kind not in kinds.APPL_KINDS:
|
||||
@ -1227,7 +1227,7 @@ class RaetKey(Key):
|
||||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
@ -1258,7 +1258,10 @@ class RaetKey(Key):
|
||||
os.remove(os.path.join(self.opts[u'pki_dir'], status, key))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
if self.opts.get('preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
self.check_minion_cache()
|
||||
return (
|
||||
self.name_match(match) if match is not None
|
||||
else self.dict_match(matches)
|
||||
|
@ -270,7 +270,7 @@ def raw_mod(opts, name, functions, mod=u'modules'):
testmod['test.ping']()
'''
loader = LazyLoader(
_module_dirs(opts, mod, u'rawmodule'),
_module_dirs(opts, mod, u'module'),
opts,
tag=u'rawmodule',
virtual_enable=False,
186
salt/master.py
@ -315,7 +315,7 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
|
||||
'''
|
||||
try:
|
||||
for pillar in self.git_pillar:
|
||||
pillar.update()
|
||||
pillar.fetch_remotes()
|
||||
except Exception as exc:
|
||||
log.error(u'Exception caught while updating git_pillar',
|
||||
exc_info=True)
|
||||
@ -471,18 +471,18 @@ class Master(SMaster):
|
||||
pass
|
||||
|
||||
if self.opts.get(u'git_pillar_verify_config', True):
|
||||
non_legacy_git_pillars = [
|
||||
git_pillars = [
|
||||
x for x in self.opts.get(u'ext_pillar', [])
|
||||
if u'git' in x
|
||||
and not isinstance(x[u'git'], six.string_types)
|
||||
]
|
||||
if non_legacy_git_pillars:
|
||||
if git_pillars:
|
||||
try:
|
||||
new_opts = copy.deepcopy(self.opts)
|
||||
from salt.pillar.git_pillar \
|
||||
import PER_REMOTE_OVERRIDES as per_remote_overrides, \
|
||||
PER_REMOTE_ONLY as per_remote_only
|
||||
for repo in non_legacy_git_pillars:
|
||||
for repo in git_pillars:
|
||||
new_opts[u'ext_pillar'] = [repo]
|
||||
try:
|
||||
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
|
||||
@ -1306,7 +1306,6 @@ class AESFuncs(object):
|
||||
return False
|
||||
load[u'grains'][u'id'] = load[u'id']
|
||||
|
||||
pillar_dirs = {}
|
||||
pillar = salt.pillar.get_pillar(
|
||||
self.opts,
|
||||
load[u'grains'],
|
||||
@ -1314,8 +1313,9 @@ class AESFuncs(object):
|
||||
load.get(u'saltenv', load.get(u'env')),
|
||||
ext=load.get(u'ext'),
|
||||
pillar_override=load.get(u'pillar_override', {}),
|
||||
pillarenv=load.get(u'pillarenv'))
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
pillarenv=load.get(u'pillarenv'),
|
||||
extra_minion_data=load.get(u'extra_minion_data'))
|
||||
data = pillar.compile_pillar()
|
||||
self.fs_.update_opts()
|
||||
if self.opts.get(u'minion_data_cache', False):
|
||||
self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']),
|
||||
@ -1670,49 +1670,36 @@ class ClearFuncs(object):
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
# All runner ops pass through eauth
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not runner_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and u'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
clear_load.pop(u'password', None)
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
@ -1729,57 +1716,45 @@ class ClearFuncs(object):
|
||||
username)
|
||||
except Exception as exc:
|
||||
log.error(u'Exception occurred while introspecting %s: %s', fun, exc)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
return {u'error': {u'name': exc.__class__.__name__,
|
||||
u'args': exc.args,
|
||||
u'message': str(exc)}}
|
||||
|
||||
def wheel(self, clear_load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
username = None
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
clear_load.pop(u'password', None)
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not wheel_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and u'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
@ -1789,7 +1764,7 @@ class ClearFuncs(object):
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = clear_load.pop(u'fun')
|
||||
tag = tagify(jid, prefix=u'wheel')
|
||||
data = {u'fun': u"wheel.{0}".format(fun),
|
||||
@ -1855,11 +1830,13 @@ class ClearFuncs(object):
|
||||
|
||||
# Retrieve the minions list
|
||||
delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res.get('minions', list())
|
||||
missing = _res.get('missing', list())
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get(u'token', False):
|
||||
@ -1869,12 +1846,7 @@ class ClearFuncs(object):
|
||||
return u''
|
||||
|
||||
# Get acl
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
extra[u'eauth'] = token[u'eauth']
|
||||
extra[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
@ -1964,7 +1936,7 @@ class ClearFuncs(object):
|
||||
if jid is None:
|
||||
return {u'enc': u'clear',
|
||||
u'load': {u'error': u'Master failed to assign jid'}}
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra)
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
|
||||
|
||||
# Send it!
|
||||
self._send_pub(payload)
|
||||
@ -1973,10 +1945,29 @@ class ClearFuncs(object):
|
||||
u'enc': u'clear',
|
||||
u'load': {
|
||||
u'jid': clear_load[u'jid'],
|
||||
u'minions': minions
|
||||
u'minions': minions,
|
||||
u'missing': missing
|
||||
}
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, clear_load):
|
||||
sensitive_load_keys = []
|
||||
key = None
|
||||
if u'token' in clear_load:
|
||||
auth_type = u'token'
|
||||
err_name = u'TokenAuthenticationError'
|
||||
sensitive_load_keys = [u'token']
|
||||
elif u'eauth' in clear_load:
|
||||
auth_type = u'eauth'
|
||||
err_name = u'EauthAuthenticationError'
|
||||
sensitive_load_keys = [u'username', u'password']
|
||||
else:
|
||||
auth_type = u'user'
|
||||
err_name = u'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key, sensitive_load_keys
|
||||
|
||||
def _prep_jid(self, clear_load, extra):
|
||||
'''
|
||||
Return a jid for this publication
|
||||
@ -2010,7 +2001,7 @@ class ClearFuncs(object):
|
||||
chan = salt.transport.server.PubServerChannel.factory(opts)
|
||||
chan.publish(load)
|
||||
|
||||
def _prep_pub(self, minions, jid, clear_load, extra):
|
||||
def _prep_pub(self, minions, jid, clear_load, extra, missing):
|
||||
'''
|
||||
Take a given load and perform the necessary steps
|
||||
to prepare a publication.
|
||||
@ -2031,6 +2022,7 @@ class ClearFuncs(object):
|
||||
u'fun': clear_load[u'fun'],
|
||||
u'arg': clear_load[u'arg'],
|
||||
u'minions': minions,
|
||||
u'missing': missing,
|
||||
}
|
||||
|
||||
# Announce the job on the event bus
|
||||
|
@ -21,6 +21,7 @@ import multiprocessing
|
||||
from random import randint, shuffle
|
||||
from stat import S_IMODE
|
||||
import salt.serializers.msgpack
|
||||
from binascii import crc32
|
||||
|
||||
# Import Salt Libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
@ -102,6 +103,7 @@ import salt.defaults.exitcodes
|
||||
import salt.cli.daemons
|
||||
import salt.log.setup
|
||||
|
||||
import salt.utils.dictupdate
|
||||
from salt.config import DEFAULT_MINION_OPTS
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.utils.debug import enable_sigusr1_handler
|
||||
@ -443,13 +445,30 @@ class MinionBase(object):
|
||||
if opts[u'master_type'] == u'func':
|
||||
eval_master_func(opts)
|
||||
|
||||
# if failover is set, master has to be of type list
|
||||
elif opts[u'master_type'] == u'failover':
|
||||
# if failover or distributed is set, master has to be of type list
|
||||
elif opts[u'master_type'] in (u'failover', u'distributed'):
|
||||
if isinstance(opts[u'master'], list):
|
||||
log.info(
|
||||
u'Got list of available master addresses: %s',
|
||||
opts[u'master']
|
||||
)
|
||||
|
||||
if opts[u'master_type'] == u'distributed':
|
||||
master_len = len(opts[u'master'])
|
||||
if master_len > 1:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
master_idx = crc32(opts[u'id']) % master_len
|
||||
try:
|
||||
preferred_masters = opts[u'master']
|
||||
preferred_masters[0] = opts[u'master'][master_idx]
|
||||
preferred_masters[1:] = [m for m in opts[u'master'] if m != preferred_masters[0]]
|
||||
opts[u'master'] = preferred_masters
|
||||
log.info(u'Distributed to the master at \'{0}\'.'.format(opts[u'master'][0]))
|
||||
except (KeyError, AttributeError, TypeError):
|
||||
log.warning(u'Failed to distribute to a specific master.')
|
||||
else:
|
||||
log.warning(u'master_type = distributed needs more than 1 master.')
|
||||
|
||||
if opts[u'master_shuffle']:
|
||||
if opts[u'master_failback']:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
@ -497,7 +516,7 @@ class MinionBase(object):
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
# If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
|
||||
# See issue 21082 for details
|
||||
if opts[u'retry_dns']:
|
||||
if opts[u'retry_dns'] and opts[u'master_type'] == u'failover':
|
||||
msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
|
||||
u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
|
||||
log.critical(msg)
|
||||
@ -845,7 +864,7 @@ class MinionManager(MinionBase):
|
||||
Spawn all the coroutines which will sign in to masters
|
||||
'''
|
||||
masters = self.opts[u'master']
|
||||
if self.opts[u'master_type'] == u'failover' or not isinstance(self.opts[u'master'], list):
|
||||
if (self.opts[u'master_type'] in (u'failover', u'distributed')) or not isinstance(self.opts[u'master'], list):
|
||||
masters = [masters]
|
||||
|
||||
for master in masters:
|
||||
@ -1930,6 +1949,10 @@ class Minion(MinionBase):
|
||||
self.beacons.disable_beacon(name)
|
||||
elif func == u'list':
|
||||
self.beacons.list_beacons()
|
||||
elif func == u'list_available':
|
||||
self.beacons.list_available_beacons()
|
||||
elif func == u'validate_beacon':
|
||||
self.beacons.validate_beacon(name, beacon_data)
|
||||
|
||||
def environ_setenv(self, tag, data):
|
||||
'''
|
||||
@ -3191,6 +3214,26 @@ class ProxyMinion(Minion):
|
||||
if u'proxy' not in self.opts:
|
||||
self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy']
|
||||
|
||||
if self.opts.get(u'proxy_merge_pillar_in_opts'):
|
||||
# Override proxy opts with pillar data when the user required.
|
||||
self.opts = salt.utils.dictupdate.merge(self.opts,
|
||||
self.opts[u'pillar'],
|
||||
strategy=self.opts.get(u'proxy_merge_pillar_in_opts_strategy'),
|
||||
merge_lists=self.opts.get(u'proxy_deep_merge_pillar_in_opts', False))
|
||||
elif self.opts.get(u'proxy_mines_pillar'):
|
||||
# Even when not required, some details such as mine configuration
|
||||
# should be merged anyway whenever possible.
|
||||
if u'mine_interval' in self.opts[u'pillar']:
|
||||
self.opts[u'mine_interval'] = self.opts[u'pillar'][u'mine_interval']
|
||||
if u'mine_functions' in self.opts[u'pillar']:
|
||||
general_proxy_mines = self.opts.get(u'mine_functions', [])
|
||||
specific_proxy_mines = self.opts[u'pillar'][u'mine_functions']
|
||||
try:
|
||||
self.opts[u'mine_functions'] = general_proxy_mines + specific_proxy_mines
|
||||
except TypeError as terr:
|
||||
log.error(u'Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
|
||||
self.opts[u'id']))
|
||||
|
||||
fq_proxyname = self.opts[u'proxy'][u'proxytype']
|
||||
|
||||
# Need to load the modules so they get all the dunder variables
|
||||
|
@ -12,6 +12,7 @@ import logging

# Import Salt libs
import salt.utils.files
import salt.utils.path

# Import 3rd-party libs
from salt.ext import six
@ -241,4 +242,4 @@ def _read_link(name):
Throws an OSError if the link does not exist
'''
alt_link_path = '/etc/alternatives/{0}'.format(name)
return os.readlink(alt_link_path)
return salt.utils.path.readlink(alt_link_path)
@ -447,11 +447,15 @@ def config(name, config, edit=True):
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
'''

configs = []
for entry in config:
key = next(six.iterkeys(entry))
configs = _parse_config(entry[key], key)
if edit:
with salt.utils.files.fopen(name, 'w') as configfile:
configfile.write('# This file is managed by Salt.\n')
configfile.write(configs)
return configs
configs.append(_parse_config(entry[key], key))

# Python auto-correct line endings
configstext = "\n".join(configs)
if edit:
with salt.utils.files.fopen(name, 'w') as configfile:
configfile.write('# This file is managed by Salt.\n')
configfile.write(configstext)
return configstext
Some files were not shown because too many files have changed in this diff