Merge branch 'develop' into ssh_known_hosts_improvements

commit f0175623ed
@@ -1,5 +1,13 @@
languages:
Ruby: false
JavaScript: false
Python: true
PHP: false
Ruby: false
JavaScript: false
Python: true
PHP: false

engines:
radon:
enabled: true
exclude_paths:
- "templates/"
config:
threshold: "D"
4 .github/stale.yml (vendored)

@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
# 1000 is approximately 2 years and 9 months
daysUntilStale: 1000
# 950 is approximately 2 years and 7 months
daysUntilStale: 950

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
3 .gitignore (vendored)

@@ -88,3 +88,6 @@ tests/integration/cloud/providers/logs

# Private keys from the integration tests
tests/integration/cloud/providers/pki/minions

# Ignore tox virtualenvs
/.tox/
@@ -258,8 +258,8 @@ ignore-imports=no


[BASIC]
# Required attributes for module, separated by a comma
required-attributes=
# Required attributes for module, separated by a comma (will be removed in Pylint 2.0)
#required-attributes=

# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input

@@ -365,7 +365,8 @@ spelling-store-unknown-words=no

[CLASSES]
# List of interface methods to ignore, separated by a comma. This is used for
# instance to not check methods defines in Zope's Interface base class.
ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
# Will be removed in Pylint 2.0
#ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
17 conf/master

@@ -464,11 +464,13 @@

##### Salt-SSH Configuration #####
##########################################
# Define the default salt-ssh roster module to use
#roster: flat

# Pass in an alternative location for the salt-ssh roster file
# Pass in an alternative location for the salt-ssh `flat` roster file
#roster_file: /etc/salt/roster

# Define locations for roster files so they can be chosen when using Salt API.
# Define locations for `flat` roster files so they can be chosen when using Salt API.
# An administrator can place roster files into these locations. Then when
# calling Salt API, parameter 'roster_file' should contain a relative path to
# these locations. That is, "roster_file=/foo/roster" will be resolved as

@@ -589,11 +591,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True

# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line. If set to 'mixed', the output
# will be terse unless a state failed, in which case that output will be full.
# If set to 'changes', the output will be full unless the state didn't change.
# The state_output setting controls which results will be output full multi line
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full

# The state_output_diff setting changes whether or not the output from
15 conf/minion

@@ -635,9 +635,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True

# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line.
# The state_output setting controls which results will be output full multi line
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full

# The state_output_diff setting changes whether or not the output from

@@ -689,6 +692,12 @@
# for a full explanation.
#multiprocessing: True

# Limit the maximum amount of processes or threads created by salt-minion.
# This is useful to avoid resource exhaustion in case the minion receives more
# publications than it is able to handle, as it limits the number of spawned
# processes or threads. -1 is the default and disables the limit.
#process_count_max: -1


##### Logging settings #####
##########################################
@@ -498,9 +498,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True

# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line.
# The state_output setting controls which results will be output full multi line
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full

# The state_output_diff setting changes whether or not the output from
@@ -438,11 +438,13 @@ syndic_user: salt

##### Salt-SSH Configuration #####
##########################################
# Define the default salt-ssh roster module to use
#roster: flat

# Pass in an alternative location for the salt-ssh roster file
# Pass in an alternative location for the salt-ssh `flat` roster file
#roster_file: /etc/salt/roster

# Define locations for roster files so they can be chosen when using Salt API.
# Define locations for `flat` roster files so they can be chosen when using Salt API.
# An administrator can place roster files into these locations. Then when
# calling Salt API, parameter 'roster_file' should contain a relative path to
# these locations. That is, "roster_file=/foo/roster" will be resolved as

@@ -560,11 +562,12 @@ syndic_user: salt
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True

# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line. If set to 'mixed', the output
# will be terse unless a state failed, in which case that output will be full.
# If set to 'changes', the output will be full unless the state didn't change.
# The state_output setting controls which results will be output full multi line
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full

# The state_output_diff setting changes whether or not the output from
BIN doc/_themes/saltstack2/static/images/DOCBANNER.jpg (vendored)
Binary file not shown. Before size: 790 KiB; after size: 438 KiB.
@@ -245,8 +245,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'

version = salt.version.__version__
latest_release = '2017.7.1' # latest release
previous_release = '2016.11.7' # latest release from previous branch
latest_release = '2017.7.2' # latest release
previous_release = '2016.11.8' # latest release from previous branch
previous_release_dir = '2016.11' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
@@ -325,7 +325,7 @@ The following example works on UNIX-like operating systems:

.. code-block:: jinja

    {%- if grains['os'] != 'Windows' %
    {%- if grains['os'] != 'Windows' %}
    Restart Salt Minion:
      cmd.run:
        - name: 'salt-call --local service.restart salt-minion'
@@ -29818,7 +29818,7 @@ If the master seems to be unresponsive, a SIGUSR1 can be passed to the
salt\-master threads to display what piece of code is executing. This debug
information can be invaluable in tracking down bugs.
.sp
To pass a SIGUSR1 to the master, first make sure the minion is running in the
To pass a SIGUSR1 to the master, first make sure the master is running in the
foreground. Stop the service if it is running as a daemon, and start it in the
foreground like so:
.INDENT 0.0
@@ -34,6 +34,7 @@ Full list of Salt Cloud modules
    scaleway
    softlayer
    softlayer_hw
    vagrant
    virtualbox
    vmware
    vultrpy
@@ -1,6 +1,6 @@
===============================
==============================
salt.cloud.clouds.digitalocean
===============================
==============================

.. automodule:: salt.cloud.clouds.digitalocean
    :members:
6 doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst (Normal file)

@@ -0,0 +1,6 @@
=========================
salt.cloud.clouds.vagrant
=========================

.. automodule:: salt.cloud.clouds.vagrant
    :members:
@@ -963,6 +963,19 @@ The TCP port for ``mworkers`` to connect to on the master.
Salt-SSH Configuration
======================

.. conf_master:: roster

``roster``
---------------

Default: ``flat``

Define the default salt-ssh roster module to use

.. code-block:: yaml

    roster: cache

.. conf_master:: roster_file

``roster_file``

@@ -970,12 +983,31 @@ Salt-SSH Configuration

Default: ``/etc/salt/roster``

Pass in an alternative location for the salt-ssh roster file.
Pass in an alternative location for the salt-ssh `flat` roster file.

.. code-block:: yaml

    roster_file: /root/roster

.. conf_master:: roster_file

``rosters``
---------------

Default: None

Define locations for `flat` roster files so they can be chosen when using Salt API.
An administrator can place roster files into these locations.
Then when calling Salt API, parameter 'roster_file' should contain a relative path to these locations.
That is, "roster_file=/foo/roster" will be resolved as "/etc/salt/roster.d/foo/roster" etc.
This feature prevents passing insecure custom rosters through the Salt API.

.. code-block:: yaml

    rosters:
      - /etc/salt/roster.d
      - /opt/salt/some/more/rosters
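To make the resolution rule concrete, a small sketch (the directory comes from the example above; the ``/foo/roster`` value is purely illustrative):

.. code-block:: yaml

    # with the rosters setting above, a Salt API call passing
    # roster_file=/foo/roster is resolved beneath the configured
    # directories, e.g. /etc/salt/roster.d/foo/roster
    rosters:
      - /etc/salt/roster.d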
.. conf_master:: ssh_passwd

``ssh_passwd``
@@ -2011,11 +2043,14 @@ output for states that failed or states that have changes.

Default: ``full``

The state_output setting changes if the output is the full multi line
output for each changed state if set to 'full', but if set to 'terse'
the output will be shortened to a single line. If set to 'mixed', the output
will be terse unless a state failed, in which case that output will be full.
If set to 'changes', the output will be full unless the state didn't change.
The state_output setting controls which results will be output full multi line:

* ``full``, ``terse`` - each state will be full/terse
* ``mixed`` - only states with errors will be full
* ``changes`` - states with changes and errors will be full

``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed;
when set, the state ID will be used as name in the output.

.. code-block:: yaml
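The body of that code-block lies beyond the hunk shown here; a minimal sketch of the setting (the value ``mixed`` is chosen only for illustration):

.. code-block:: yaml

    # show full multi-line output only for states with errors
    state_output: mixed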
@@ -4175,7 +4210,9 @@ information.

.. code-block:: yaml

    reactor: []
    reactor:
      - 'salt/minion/*/start':
        - salt://reactor/startup_tasks.sls

.. conf_master:: reactor_refresh_interval
|
@ -1196,7 +1196,7 @@ be able to execute a certain module. The ``sys`` module is built into the minion
|
||||
and cannot be disabled.
|
||||
|
||||
This setting can also tune the minion. Because all modules are loaded into system
|
||||
memory, disabling modules will lover the minion's memory footprint.
|
||||
memory, disabling modules will lower the minion's memory footprint.
|
||||
|
||||
Modules should be specified according to their file name on the system and not by
|
||||
their virtual name. For example, to disable ``cmd``, use the string ``cmdmod`` which
|
||||
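A minimal sketch of what this section describes, assuming the standard ``disable_modules`` minion option; note that the file name ``cmdmod`` is listed rather than the virtual name ``cmd``:

.. code-block:: yaml

    # disable the cmd module by its file name
    disable_modules:
      - cmdmod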
@@ -1664,15 +1664,19 @@ output for states that failed or states that have changes.

Default: ``full``

The state_output setting changes if the output is the full multi line
output for each changed state if set to 'full', but if set to 'terse'
the output will be shortened to a single line.
The state_output setting controls which results will be output full multi line:

* ``full``, ``terse`` - each state will be full/terse
* ``mixed`` - only states with errors will be full
* ``changes`` - states with changes and errors will be full

``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed;
when set, the state ID will be used as name in the output.

.. code-block:: yaml

    state_output: full


.. conf_minion:: state_output_diff

``state_output_diff``
@@ -2419,6 +2423,23 @@ executed in a thread.

    multiprocessing: True

.. conf_minion:: process_count_max

``process_count_max``
-------

.. versionadded:: Oxygen

Default: ``-1``

Limit the maximum amount of processes or threads created by ``salt-minion``.
This is useful to avoid resource exhaustion in case the minion receives more
publications than it is able to handle, as it limits the number of spawned
processes or threads. ``-1`` is the default and disables the limit.

.. code-block:: yaml

    process_count_max: -1

.. _minion-logging-settings:
@@ -432,6 +432,7 @@ execution modules
    uptime
    useradd
    uwsgi
    vagrant
    varnish
    vault
    vbox_guest

6 doc/ref/modules/all/salt.modules.vagrant.rst (Normal file)

@@ -0,0 +1,6 @@
====================
salt.modules.vagrant
====================

.. automodule:: salt.modules.vagrant
    :members:
@@ -25,6 +25,9 @@ configuration:
    - web*:
      - test.*
      - pkg.*
  # Allow managers to use saltutil module functions
  manager_.*:
    - saltutil.*

Permission Issues
-----------------
@@ -1,5 +1,5 @@
salt.runners.auth module
========================
salt.runners.auth
=================

.. automodule:: salt.runners.auth
    :members:

@@ -1,5 +1,5 @@
salt.runners.digicertapi module
===============================
salt.runners.digicertapi
========================

.. automodule:: salt.runners.digicertapi
    :members:

@@ -1,5 +1,5 @@
salt.runners.event module
=========================
salt.runners.event
==================

.. automodule:: salt.runners.event
    :members:

@@ -1,5 +1,11 @@
salt.runners.mattermost module
==============================
salt.runners.mattermost
=======================

**Note for 2017.7 releases!**

Due to the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module not being available in this release series, importing the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module from the develop branch is required to make this module work.

Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 <https://github.com/saltstack/salt/issues/43479>`_

.. automodule:: salt.runners.mattermost
    :members:

@@ -1,5 +1,5 @@
salt.runners.smartos_vmadm module
=================================
salt.runners.smartos_vmadm
==========================

.. automodule:: salt.runners.smartos_vmadm
    :members:

@@ -1,5 +1,5 @@
salt.runners.vault module
=========================
salt.runners.vault
==================

.. automodule:: salt.runners.vault
    :members:

@@ -1,5 +1,5 @@
salt.runners.venafiapi module
=============================
salt.runners.venafiapi
======================

.. automodule:: salt.runners.venafiapi
    :members:

@@ -1,5 +1,5 @@
salt.runners.vistara module
===========================
salt.runners.vistara
====================

.. automodule:: salt.runners.vistara
    :members:
@@ -267,6 +267,7 @@ state modules
    tuned
    uptime
    user
    vagrant
    vault
    vbox_guest
    victorops

6 doc/ref/states/all/salt.states.vagrant.rst (Normal file)

@@ -0,0 +1,6 @@
===================
salt.states.vagrant
===================

.. automodule:: salt.states.vagrant
    :members:
@@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states
in parallel. This is accomplished very easily by adding the ``parallel: True``
option to your state declaration:

.. code_block:: yaml
.. code-block:: yaml

    nginx:
      service.running:
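The snippet above is cut off at the hunk boundary; a minimal sketch of the complete declaration being described (the ``nginx`` service name comes from the example above):

.. code-block:: yaml

    nginx:
      service.running:
        - parallel: True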
@@ -24,7 +24,7 @@ state to finish.

Given this example:

.. code_block:: yaml
.. code-block:: yaml

    sleep 10:
      cmd.run:

@@ -74,16 +74,16 @@ also complete.
Things to be Careful of
=======================

Parallel States does not prevent you from creating parallel conflicts on your
Parallel States do not prevent you from creating parallel conflicts on your
system. This means that if you start multiple package installs using Salt then
the package manager will block or fail. If you attempt to manage the same file
with multiple states in parallel then the result can produce an unexpected
file.

Make sure that the states you choose to run in parallel do not conflict, or
else, like in and parallel programming environment, the outcome may not be
else, like in any parallel programming environment, the outcome may not be
what you expect. Doing things like just making all states run in parallel
will almost certinly result in unexpected behavior.
will almost certainly result in unexpected behavior.

With that said, running states in parallel should be safe the vast majority
of the time and the most likely culprit for unexpected behavior is running
@@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``:

.. note::
    You can have only one top level ``reactor`` section, so if one already
    exists, add this code to the existing section. See :ref:`Understanding the
    Structure of Reactor Formulas <reactor-structure>` to learn more about
    reactor SLS syntax.
    exists, add this code to the existing section. See :ref:`here
    <reactor-sls>` to learn more about reactor SLS syntax.


Start the Salt Master in Debug Mode
@@ -146,24 +146,24 @@ library. The following two lines set up the imports:

.. code-block:: python

    from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
    import salt.utils
    import salt.utils.functools

And then a series of declarations will make the necessary functions available
within the cloud module.

.. code-block:: python

    get_size = salt.utils.namespaced_function(get_size, globals())
    get_image = salt.utils.namespaced_function(get_image, globals())
    avail_locations = salt.utils.namespaced_function(avail_locations, globals())
    avail_images = salt.utils.namespaced_function(avail_images, globals())
    avail_sizes = salt.utils.namespaced_function(avail_sizes, globals())
    script = salt.utils.namespaced_function(script, globals())
    destroy = salt.utils.namespaced_function(destroy, globals())
    list_nodes = salt.utils.namespaced_function(list_nodes, globals())
    list_nodes_full = salt.utils.namespaced_function(list_nodes_full, globals())
    list_nodes_select = salt.utils.namespaced_function(list_nodes_select, globals())
    show_instance = salt.utils.namespaced_function(show_instance, globals())
    get_size = salt.utils.functools.namespaced_function(get_size, globals())
    get_image = salt.utils.functools.namespaced_function(get_image, globals())
    avail_locations = salt.utils.functools.namespaced_function(avail_locations, globals())
    avail_images = salt.utils.functools.namespaced_function(avail_images, globals())
    avail_sizes = salt.utils.functools.namespaced_function(avail_sizes, globals())
    script = salt.utils.functools.namespaced_function(script, globals())
    destroy = salt.utils.functools.namespaced_function(destroy, globals())
    list_nodes = salt.utils.functools.namespaced_function(list_nodes, globals())
    list_nodes_full = salt.utils.functools.namespaced_function(list_nodes_full, globals())
    list_nodes_select = salt.utils.functools.namespaced_function(list_nodes_select, globals())
    show_instance = salt.utils.functools.namespaced_function(show_instance, globals())

If necessary, these functions may be replaced by removing the appropriate
declaration line, and then adding the function as normal.
@@ -540,6 +540,17 @@ machines which are already installed, but not Salted. For more information about
this driver and for configuration examples, please see the
:ref:`Gettting Started with Saltify <getting-started-with-saltify>` documentation.

.. _config_vagrant:

Vagrant
-------

The Vagrant driver is a new, experimental driver for controlling a VagrantBox
virtual machine, and installing Salt on it. The target host machine must be a
working salt minion, which is controlled via the salt master using salt-api.
For more information, see
:ref:`Getting Started With Vagrant <getting-started-with-vagrant>`.


Extending Profiles and Cloud Providers Configuration
====================================================
@@ -54,6 +54,10 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the
    ipv6: True
    create_dns_record: True
    userdata_file: /etc/salt/cloud.userdata.d/setup
    tags:
      - tag1
      - tag2
      - tag3

Locations can be obtained using the ``--list-locations`` option for the ``salt-cloud``
command:
@ -38,26 +38,30 @@ These are features that are available for almost every cloud host.
|
||||
|
||||
.. container:: scrollable
|
||||
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
|
||||
|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+
|
||||
|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|
||||
|
||||
[1] Yes, if salt-api is enabled.
|
||||
|
||||
[2] Always returns `{}`.
|
||||
|
||||
Actions
|
||||
=======
|
||||
@ -70,46 +74,46 @@ instance name to be passed in. For example:
|
||||
|
||||
.. container:: scrollable
|
||||
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
|
||||
|attach_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|del_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|delvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|detach_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_keypairs | | |Yes | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|rename |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|set_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_term_protect | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|take_action | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
|
||||
|attach_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|del_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|delvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|detach_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_keypairs | | |Yes | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|rename |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|set_tags |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_term_protect | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|take_action | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|
||||
Functions
|
||||
=========
|
||||
@ -122,81 +126,83 @@ require the name of the provider to be passed in. For example:
|
||||
|
||||
.. container:: scrollable
|
||||
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
|
||||
|block_device_mappings |Yes | | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|create_keypair | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|create_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|delete_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|delete_keypair | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|delete_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_ip | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_key | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_keyid | | |Yes | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_keypair | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_networkid | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_node | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_password | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_size | | |Yes | | |Yes | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_spot_config | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|get_subnetid | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|import_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|key_list | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|keyname |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_availability_zones| | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_custom_images | | | | | | | | | | | |Yes | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_keys | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|list_vlans | | | | | | | | | | | |Yes |Yes | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|rackconnect | | | | | | | |Yes | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|reboot | | | |Yes| |Yes | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|reformat_node | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|securitygroup |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|securitygroupid | | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_image | | | |Yes| | | | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_keypair | | |Yes |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
|show_volume | | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
|
||||
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
|
||||
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
|
||||
|block_device_mappings |Yes | | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|create_keypair | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|create_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|delete_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|delete_keypair | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|delete_volume | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_ip | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_key | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_keyid | | |Yes | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_keypair | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_networkid | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_node | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_password | |Yes | | | | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_size | | |Yes | | |Yes | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_spot_config | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|get_subnetid | | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|import_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|key_list | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|keyname |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_availability_zones| | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_custom_images | | | | | | | | | | | |Yes | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_keys | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|list_vlans | | | | | | | | | | | |Yes |Yes | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|rackconnect | | | | | | | |Yes | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|reformat_node | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|securitygroup |Yes | | |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|securitygroupid | | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_image | | | |Yes| | | | |Yes | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_key | | | | | |Yes | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_keypair | | |Yes |Yes| | | | | | | | | | |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|show_volume | | | |Yes| | | | | | | | | |Yes |
|
||||
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|
||||
|
||||
[1] Yes, if salt-api is enabled.
|
||||
|
@@ -129,6 +129,7 @@ Cloud Provider Specifics
    Getting Started With Scaleway <scaleway>
    Getting Started With Saltify <saltify>
    Getting Started With SoftLayer <softlayer>
    Getting Started With Vagrant <vagrant>
    Getting Started With Vexxhost <vexxhost>
    Getting Started With Virtualbox <virtualbox>
    Getting Started With VMware <vmware>
@@ -183,6 +183,8 @@ simple installation.
      ssh_username: vagrant # a user name which has passwordless sudo
      password: vagrant # on your target machine
      provider: my_saltify_provider
      shutdown_on_destroy: true # halt the target on "salt-cloud -d" command


.. code-block:: yaml
@ -40,7 +40,7 @@ Set up an initial profile at /etc/salt/cloud.profiles or in the /etc/salt/cloud.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
scalewa-ubuntu:
|
||||
scaleway-ubuntu:
|
||||
provider: my-scaleway-config
|
||||
image: Ubuntu Trusty (14.04 LTS)
|
||||
|
||||
|
268
doc/topics/cloud/vagrant.rst
Normal file
@ -0,0 +1,268 @@
|
||||
.. _getting-started-with-vagrant:
|
||||
|
||||
============================
|
||||
Getting Started With Vagrant
|
||||
============================
|
||||
|
||||
The Vagrant driver is a new, experimental driver for spinning up a VagrantBox
|
||||
virtual machine, and installing Salt on it.
|
||||
|
||||
Dependencies
|
||||
============
|
||||
The Vagrant driver itself has no external dependencies.
|
||||
|
||||
The machine which will host the VagrantBox must be an already existing minion
|
||||
of the cloud server's Salt master.
|
||||
It must have Vagrant_ installed, and a Vagrant-compatible virtual machine engine,
|
||||
such as VirtualBox_.
|
||||
(Note: The Vagrant driver does not depend on the salt-cloud VirtualBox driver in any way.)
|
||||
|
||||
.. _Vagrant: https://www.vagrantup.com/
|
||||
.. _VirtualBox: https://www.virtualbox.org/
|
||||
|
||||
\[Caution: The version of Vagrant packaged for ``apt install`` in Ubuntu 16.04 will not connect a bridged
|
||||
network adapter correctly. Use a version downloaded directly from the web site.\]
|
||||
|
||||
Include the Vagrant guest editions plugin:
|
||||
``vagrant plugin install vagrant-vbguest``.
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
Configuration of the client virtual machine (using VirtualBox, VMware, etc)
|
||||
will be done by Vagrant as specified in the Vagrantfile on the host machine.
|
||||
|
||||
Salt-cloud will push the commands to install and provision a salt minion on
|
||||
the virtual machine, so you need not (perhaps **should** not) provision salt
|
||||
in your Vagrantfile, in most cases.
|
||||
|
||||
If, however, your cloud master cannot open an SSH connection to the child VM,
|
||||
you may **need** to let Vagrant provision the VM with Salt, and use some other
|
||||
method (such as passing a pillar dictionary to the VM) to pass the master's
|
||||
IP address to the VM. The VM can then attempt to reach the salt master in the
|
||||
usual way for non-cloud minions. Specify the profile configuration argument
|
||||
as ``deploy: False`` to prevent the cloud master from trying.
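For illustration only, a minimal profile sketch along those lines (the profile
name, host, path, and run-as user below are assumptions, not values taken from
this guide):

.. code-block:: yaml

    # /etc/salt/cloud.profiles.d/vagrant_no_deploy.conf -- illustrative sketch
    vagrant-no-deploy:
      host: my-vhost                 # Salt id of the machine hosting the VagrantBox
      provider: my-vagrant-config
      cwd: /srv/machines             # directory containing the Vagrantfile
      vagrant_runas: vagrant_owner   # user owning the Vagrant box files (assumed)
      deploy: False                  # do not push a minion over SSH; let Vagrant provision Salt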
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# Note: This example is for /etc/salt/cloud.providers file or any file in
|
||||
# the /etc/salt/cloud.providers.d/ directory.
|
||||
|
||||
my-vagrant-config:
|
||||
minion:
|
||||
master: 111.222.333.444
|
||||
provider: vagrant
|
||||
|
||||
|
||||
Because the Vagrant driver needs a place to store the mapping between the
|
||||
node name you use for Salt commands and the Vagrantfile which controls the VM,
|
||||
you must configure your salt minion as a Salt sdb server.
|
||||
(See `host provisioning example`_ below.)
|
||||
|
||||
Profiles
|
||||
========
|
||||
|
||||
Vagrant requires a profile to be configured for each machine that needs Salt
|
||||
installed. The initial profile can be set up at ``/etc/salt/cloud.profiles``
|
||||
or in the ``/etc/salt/cloud.profiles.d/`` directory.
|
||||
|
||||
Each profile requires a ``vagrantfile`` parameter. If the Vagrantfile has
|
||||
definitions for `multiple machines`_ then you also need a ``machine`` parameter naming which machine to use.
|
||||
|
||||
.. _`multiple machines`: https://www.vagrantup.com/docs/multi-machine/
|
||||
|
||||
Salt-cloud uses SSH to provision the minion. There must be a routable path
|
||||
from the cloud master to the VM. Usually, you will want to use
|
||||
a bridged network adapter for SSH. The address may not be known until
|
||||
DHCP assigns it. If ``ssh_host`` is not defined, and ``target_network``
|
||||
is defined, the driver will attempt to read the address from the output
|
||||
of an ``ifconfig`` command. Lacking either setting,
|
||||
the driver will try to use the value Vagrant returns as its ``ssh_host``,
|
||||
which will work only if the cloud master is running somewhere on the same host.
|
||||
|
||||
The ``target_network`` setting should be used
|
||||
to identify the IP network your bridged adapter is expected to appear on.
|
||||
Use CIDR notation, like ``target_network: '2001:DB8::/32'``
|
||||
or ``target_network: '192.0.2.0/24'``.
|
||||
|
||||
Profile configuration example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# /etc/salt/cloud.profiles.d/vagrant.conf
|
||||
|
||||
vagrant-machine:
|
||||
host: my-vhost # the Salt id of the virtual machine's host computer.
|
||||
provider: my-vagrant-config
|
||||
cwd: /srv/machines # the path to the directory containing your Vagrantfile.
|
||||
vagrant_runas: my-username # the user who owns the Vagrant box files on the host
|
||||
# vagrant_up_timeout: 300 # (seconds) timeout for cmd.run of the "vagrant up" command
|
||||
# vagrant_provider: '' # option for "vagrant up" like: "--provider vmware_fusion"
|
||||
# ssh_host: None # "None" means try to find the routable IP address from "ifconfig"
|
||||
# target_network: None # Expected CIDR address of your bridged network
|
||||
# force_minion_config: false # Set "true" to re-purpose an existing VM
|
||||
|
||||
The machine can now be created and configured with the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -p vagrant-machine my-id
|
||||
|
||||
This will create the machine specified by the cloud profile
|
||||
``vagrant-machine``, and will give the machine the minion id of
|
||||
``my-id``. If the cloud master is also the salt-master, its Salt
|
||||
key will automatically be accepted on the master.
|
||||
|
||||
Once a salt-minion has been successfully installed on the instance, connectivity
|
||||
to it can be verified with Salt:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt my-id test.ping
|
||||
|
||||
.. _host provisioning example:
|
||||
|
||||
Provisioning a Vagrant cloud host (example)
|
||||
===========================================
|
||||
|
||||
In order to query or control minions it created, each host
|
||||
minion needs to track the Salt node names associated with
|
||||
any guest virtual machines on it.
|
||||
It does that using a Salt sdb database.
|
||||
|
||||
The Salt sdb is not configured by default. The following example shows a
|
||||
simple installation.
|
||||
|
||||
This example assumes:
|
||||
|
||||
- you are on a large network using the 10.x.x.x IP address space
|
||||
- your Salt master's Salt id is "bevymaster"
|
||||
- it will also be your salt-cloud controller
|
||||
- it is at IP address 10.124.30.7
|
||||
- it is running a recent Debian family Linux (raspbian)
|
||||
- your workstation is a Salt minion of bevymaster
|
||||
- your workstation's minion id is "my_laptop"
|
||||
- VirtualBox has been installed on "my_laptop" (apt install is okay)
|
||||
- Vagrant was installed from vagrantup.com. (not the 16.04 Ubuntu apt)
|
||||
- "my_laptop" has done "vagrant plugin install vagrant-vbguest"
|
||||
- the VM you want to start is on "my_laptop" at "/home/my_username/Vagrantfile"
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/minion.d/vagrant_sdb.conf on host computer "my_laptop"
|
||||
# -- this sdb database is required by the Vagrant module --
|
||||
vagrant_sdb_data: # The sdb database must have this name.
|
||||
driver: sqlite3 # Let's use SQLite to store the data ...
|
||||
database: /var/cache/salt/vagrant.sqlite # ... in this file ...
|
||||
table: sdb # ... using this table name.
|
||||
create_table: True # if not present
|
||||
|
||||
Remember to re-start your minion after changing its configuration files...
|
||||
|
||||
``sudo systemctl restart salt-minion``
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
# -*- mode: ruby -*-
|
||||
# file /home/my_username/Vagrantfile on host computer "my_laptop"
|
||||
BEVY = "bevy1"
|
||||
DOMAIN = BEVY + ".test" # .test is an ICANN reserved non-public TLD
|
||||
|
||||
# must supply a list of names to avoid Vagrant asking for interactive input
|
||||
def get_good_ifc() # try to find a working Ubuntu network adapter name
|
||||
addr_infos = Socket.getifaddrs
|
||||
addr_infos.each do |info|
|
||||
a = info.addr
|
||||
if a and a.ip? and not a.ip_address.start_with?("127.")
|
||||
return info.name
|
||||
end
|
||||
end
|
||||
return "eth0" # fall back to an old reliable name
|
||||
end
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
config.ssh.forward_agent = true # so you can use git ssh://...
|
||||
|
||||
# add a bridged network interface. (try to detect name, then guess MacOS names, too)
|
||||
interface_guesses = [get_good_ifc(), 'en0: Ethernet', 'en1: Wi-Fi (AirPort)']
|
||||
config.vm.network "public_network", bridge: interface_guesses
|
||||
if ARGV[0] == "up"
|
||||
puts "Trying bridge network using interfaces: #{interface_guesses}"
|
||||
end
|
||||
config.vm.provision "shell", inline: "ip address", run: "always" # make user feel good
|
||||
|
||||
# . . . . . . . . . . . . Define machine QUAIL1 . . . . . . . . . . . . . .
|
||||
config.vm.define "quail1", primary: true do |quail_config|
|
||||
quail_config.vm.box = "boxesio/xenial64-standard" # a public VMware & Virtualbox box
|
||||
quail_config.vm.hostname = "quail1." + DOMAIN # supply a name in our bevy
|
||||
quail_config.vm.provider "virtualbox" do |v|
|
||||
v.memory = 1024 # limit memory for the virtual box
|
||||
v.cpus = 1
|
||||
v.linked_clone = true # make a soft copy of the base Vagrant box
|
||||
v.customize ["modifyvm", :id, "--natnet1", "192.168.128.0/24"] # do not use 10.x network for NAT
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/cloud.profiles.d/my_vagrant_profiles.conf on bevymaster
|
||||
q1:
|
||||
host: my_laptop # the Salt id of your virtual machine host
|
||||
machine: quail1 # a machine name in the Vagrantfile (if not primary)
|
||||
vagrant_runas: my_username # owner of Vagrant box files on "my_laptop"
|
||||
cwd: '/home/my_username' # the path (on "my_laptop") of the Vagrantfile
|
||||
provider: my_vagrant_provider # name of entry in provider.conf file
|
||||
target_network: '10.0.0.0/8' # VM external address will be somewhere here
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/cloud.providers.d/vagrant_provider.conf on bevymaster
|
||||
my_vagrant_provider:
|
||||
driver: vagrant
|
||||
minion:
|
||||
master: 10.124.30.7 # the hard address of the master
|
||||
|
||||
|
||||
Create and use your new Salt minion
|
||||
-----------------------------------
|
||||
|
||||
- Typing on the Salt master computer ``bevymaster``, tell it to create a new minion named ``v1`` using profile ``q1``...
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo salt-cloud -p q1 v1
|
||||
sudo salt v1 network.ip_addrs
|
||||
[ you get a list of IP addresses, including the bridged one ]
|
||||
|
||||
- logged in to your laptop (or some other computer known to GitHub)...
|
||||
|
||||
\[NOTE:\] if you are using MacOS, you need to type ``ssh-add -K`` after each boot,
|
||||
unless you use one of the methods in `this gist`_.
|
||||
|
||||
.. _this gist: https://github.com/jirsbek/SSH-keys-in-macOS-Sierra-keychain
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
ssh -A vagrant@< the bridged network address >
|
||||
# [ or, if you are at /home/my_username/ on my_laptop ]
|
||||
vagrant ssh quail1
|
||||
|
||||
- then typing on your new node "v1" (a.k.a. quail1.bevy1.test)...
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
password: vagrant
|
||||
# [ stuff types out ... ]
|
||||
|
||||
ls -al /vagrant
|
||||
# [ should be shared /home/my_username from my_laptop ]
|
||||
|
||||
# you can access other network facilities using the ssh authorization
|
||||
# as recorded in your ~/.ssh/ directory on my_laptop ...
|
||||
|
||||
sudo apt update
|
||||
sudo apt install git
|
||||
git clone ssh://git@github.com/yourID/your_project
|
||||
# etc...
|
||||
|
@ -283,6 +283,10 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
|
||||
thin_provision
|
||||
Specifies whether the disk should be thin provisioned or not. Default is ``thin_provision: False``.
|
||||
.. versionadded:: 2016.3.0
|
||||
eagerly_scrub
|
||||
Specifies whether the disk should be rewritten with zeros during thick provisioning or not.
|
||||
Default is ``eagerly_scrub: False``.
|
||||
.. versionadded:: Oxygen
|
||||
controller
|
||||
Specify the SCSI controller label to which this disk should be attached.
|
||||
This should be specified only when creating both the specified SCSI
|
||||
|
@ -31,7 +31,7 @@ documentation for more information.
|
||||
.. _github-pull-request:
|
||||
|
||||
Sending a GitHub pull request
|
||||
=============================
|
||||
-----------------------------
|
||||
|
||||
Sending pull requests on GitHub is the preferred method for receiving
|
||||
contributions. The workflow advice below mirrors `GitHub's own guide <GitHub
|
||||
@ -66,7 +66,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
|
||||
.. code-block:: bash
|
||||
|
||||
git fetch upstream
|
||||
git checkout -b fix-broken-thing upstream/2016.3
|
||||
git checkout -b fix-broken-thing upstream/2016.11
|
||||
|
||||
If you're working on a feature, create your branch from the develop branch.
|
||||
|
||||
@ -130,7 +130,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
|
||||
.. code-block:: bash
|
||||
|
||||
git fetch upstream
|
||||
git rebase upstream/2016.3 fix-broken-thing
|
||||
git rebase upstream/2016.11 fix-broken-thing
|
||||
git push -u origin fix-broken-thing
|
||||
|
||||
or
|
||||
@ -170,9 +170,9 @@ Fork a Repo Guide_>`_ and is well worth reading.
|
||||
https://github.com/my-account/salt/pull/new/fix-broken-thing
|
||||
|
||||
#. If your branch is a fix for a release branch, choose that as the base
|
||||
branch (e.g. ``2016.3``),
|
||||
branch (e.g. ``2016.11``),
|
||||
|
||||
https://github.com/my-account/salt/compare/saltstack:2016.3...fix-broken-thing
|
||||
https://github.com/my-account/salt/compare/saltstack:2016.11...fix-broken-thing
|
||||
|
||||
If your branch is a feature, choose ``develop`` as the base branch,
|
||||
|
||||
@ -205,80 +205,206 @@ Fork a Repo Guide_>`_ and is well worth reading.
|
||||
|
||||
.. _which-salt-branch:
|
||||
|
||||
Which Salt branch?
|
||||
==================
|
||||
|
||||
GitHub will open pull requests against Salt's main branch, ``develop``, by
|
||||
default. Ideally, features should go into ``develop`` and bug fixes and
|
||||
documentation changes should go into the oldest supported release branch
|
||||
affected by the bug or documentation update. See
|
||||
:ref:`Sending a GitHub pull request <github-pull-request>`.
|
||||
|
||||
If you have a bug fix or doc change and have already forked your working
|
||||
branch from ``develop`` and do not know how to rebase your commits against
|
||||
another branch, then submit it to ``develop`` anyway and we'll be sure to
|
||||
back-port it to the correct place.
|
||||
|
||||
The current release branch
|
||||
--------------------------
|
||||
|
||||
The current release branch is the most recent stable release. Pull requests
|
||||
containing bug fixes should be made against the release branch.
|
||||
|
||||
The branch name will be a date-based name such as ``2016.3``.
|
||||
|
||||
Bug fixes are made on this branch so that minor releases can be cut from this
|
||||
branch without introducing surprises and new features. This approach maximizes
|
||||
stability.
|
||||
|
||||
The Salt development team will "merge-forward" any fixes made on the release
|
||||
branch to the ``develop`` branch once the pull request has been accepted. This
|
||||
keeps the fix in isolation on the release branch and also keeps the ``develop``
|
||||
branch up-to-date.
|
||||
|
||||
.. note:: Closing GitHub issues from commits
|
||||
|
||||
This "merge-forward" strategy requires that `the magic keywords to close a
|
||||
GitHub issue <Closing issues via commit message_>`_ appear in the commit
|
||||
message text directly. Only including the text in a pull request will not
|
||||
close the issue.
|
||||
|
||||
GitHub will close the referenced issue once the *commit* containing the
|
||||
magic text is merged into the default branch (``develop``). Any magic text
|
||||
input only into the pull request description will not be seen at the
|
||||
Git-level when those commits are merged-forward. In other words, only the
|
||||
commits are merged-forward and not the pull request.
|
||||
|
||||
The ``develop`` branch
|
||||
Salt's Branch Topology
|
||||
----------------------
|
||||
|
||||
There are three different kinds of branches in use: develop, main release
|
||||
branches, and dot release branches.
|
||||
|
||||
- All feature work should go into the ``develop`` branch.
|
||||
- Bug fixes and documentation changes should go into the oldest supported
|
||||
**main** release branch affected by the bug or documentation change.
|
||||
Main release branches are named after a year and month, such as
|
||||
``2016.11`` and ``2017.7``.
|
||||
- Hot fixes, as determined by SaltStack's release team, should be submitted
|
||||
against **dot** release branches. Dot release branches are named after a
|
||||
year, month, and version. Examples include ``2016.11.8`` and ``2017.7.2``.
|
||||
|
||||
.. note::
|
||||
|
||||
GitHub will open pull requests against Salt's main branch, ``develop``,
|
||||
by default. Be sure to check which branch is selected when creating the
|
||||
pull request.
|
||||
|
||||
The Develop Branch
|
||||
==================
|
||||
|
||||
The ``develop`` branch is unstable and bleeding-edge. Pull requests containing
|
||||
feature additions or non-bug-fix changes should be made against the ``develop``
|
||||
branch.
|
||||
|
||||
The Salt development team will back-port bug fixes made to ``develop`` to the
|
||||
current release branch if the contributor cannot create the pull request
|
||||
against that branch.
|
||||
.. note::
|
||||
|
||||
Release Branches
|
||||
----------------
|
||||
If you have a bug fix or documentation change and have already forked your
|
||||
working branch from ``develop`` and do not know how to rebase your commits
|
||||
against another branch, then submit it to ``develop`` anyway. SaltStack's
|
||||
development team will be happy to back-port it to the correct branch.
|
||||
|
||||
For each release, a branch will be created when the SaltStack release team is
|
||||
ready to tag. The release branch is created from the parent branch and will be
|
||||
the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
|
||||
branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
|
||||
release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
|
||||
strategy will allow for more stability when there is a need for a re-tag during
|
||||
the testing phase of the release process.
|
||||
**Please make sure you let the maintainers know that the pull request needs
|
||||
to be back-ported.**
|
||||
|
||||
Once the release branch is created, the fixes required for a given release, as
|
||||
determined by the SaltStack release team, will be added to this branch. All
|
||||
commits in this branch will be merged forward into the parent branch as well.
|
||||
Main Release Branches
|
||||
=====================
|
||||
|
||||
The current release branch is the most recent stable release. Pull requests
|
||||
containing bug fixes or documentation changes should be made against the main
|
||||
release branch that is affected.
|
||||
|
||||
The branch name will be a date-based name such as ``2016.11``.
|
||||
|
||||
Bug fixes are made on this branch so that dot release branches can be cut from
|
||||
the main release branch without introducing surprises and new features. This
|
||||
approach maximizes stability.
|
||||
|
||||
Dot Release Branches
|
||||
====================
|
||||
|
||||
Prior to tagging an official release, a branch will be created when the SaltStack
|
||||
release team is ready to tag. The dot release branch is created from a main release
|
||||
branch. The dot release branch will be the same name as the tag minus the ``v``.
|
||||
For example, the ``2017.7.1`` dot release branch was created from the ``2017.7``
|
||||
main release branch. The ``v2017.7.1`` release was tagged at the ``HEAD`` of the
|
||||
``2017.7.1`` branch.
|
||||
|
||||
This branching strategy will allow for more stability when there is a need for
|
||||
a re-tag during the testing phase of the release process and further increases
|
||||
stability.
|
||||
|
||||
Once the dot release branch is created, the fixes required for a given release,
|
||||
as determined by the SaltStack release team, will be added to this branch. All
|
||||
commits in this branch will be merged forward into the main release branch as
|
||||
well.
|
||||
|
||||
Merge Forward Process
|
||||
=====================
|
||||
|
||||
The Salt repository follows a "Merge Forward" policy. The merge-forward
|
||||
behavior means that changes submitted to older main release branches will
|
||||
automatically be "merged-forward" into the newer branches.
|
||||
|
||||
For example, a pull request is merged into ``2016.11``. Then, the entire
|
||||
``2016.11`` branch is merged-forward into the ``2017.7`` branch, and the
|
||||
``2017.7`` branch is merged-forward into the ``develop`` branch.
|
||||
|
||||
This process makes it easy for contributors to make only one pull request
|
||||
against an older branch, but allows the change to propagate to all **main**
|
||||
release branches.
|
||||
|
||||
The merge-forward work-flow applies to all main release branches and the
|
||||
operation runs continuously.
|
||||
|
||||
Merge-Forwards for Dot Release Branches
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The merge-forward policy applies to dot release branches as well, but has a
|
||||
slightly different behavior. If a change is submitted to a **dot** release
|
||||
branch, the dot release branch will be merged into its parent **main**
|
||||
release branch.
|
||||
|
||||
For example, a pull request is merged into the ``2017.7.2`` release branch.
|
||||
Then, the entire ``2017.7.2`` branch is merged-forward into the ``2017.7``
|
||||
branch. From there, the merge forward process continues as normal.
|
||||
|
||||
The only way in which dot release branches differ from main release branches
|
||||
in regard to merge-forwards is that once a dot release branch is created
|
||||
from the main release branch, the dot release branch does not receive merge
|
||||
forwards.
|
||||
|
||||
.. note::
|
||||
|
||||
The merge forward process for dot release branches is one-way:
|
||||
dot release branch --> main release branch.
|
||||
|
||||
Closing GitHub issues from commits
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This "merge-forward" strategy requires that `the magic keywords to close a
|
||||
GitHub issue <Closing issues via commit message_>`_ appear in the commit
|
||||
message text directly. Only including the text in a pull request will not
|
||||
close the issue.
|
||||
|
||||
GitHub will close the referenced issue once the *commit* containing the
|
||||
magic text is merged into the default branch (``develop``). Any magic text
|
||||
input only into the pull request description will not be seen at the
|
||||
Git-level when those commits are merged-forward. In other words, only the
|
||||
commits are merged-forward and not the pull request text.
|
||||
|
||||
.. _backporting-pull-requests:
|
||||
|
||||
Backporting Pull Requests
|
||||
=========================
|
||||
|
||||
If a bug is fixed on ``develop`` and the bug is also present on a
|
||||
currently-supported release branch, it will need to be back-ported to an
|
||||
applicable branch.
|
||||
|
||||
.. note:: Most Salt contributors can skip these instructions
|
||||
|
||||
These instructions do not need to be read in order to contribute to the
|
||||
Salt project! The SaltStack team will back-port fixes on behalf of
|
||||
contributors in order to keep the contribution process easy.
|
||||
|
||||
These instructions are intended for frequent Salt contributors, advanced
|
||||
Git users, SaltStack employees, or independent souls who wish to back-port
|
||||
changes themselves.
|
||||
|
||||
It is often easiest to fix a bug on the oldest supported release branch and
|
||||
then merge that branch forward into ``develop`` (as described earlier in this
|
||||
document). When that is not possible the fix must be back-ported, or copied,
|
||||
into any other affected branches.
|
||||
|
||||
These steps assume a pull request ``#1234`` has been merged into ``develop``.
|
||||
And ``upstream`` is the name of the remote pointing to the main Salt repo.
|
||||
|
||||
#. Identify the oldest supported release branch that is affected by the bug.
|
||||
|
||||
#. Create a new branch for the back-port by reusing the same branch from the
|
||||
original pull request.
|
||||
|
||||
Name the branch ``bp-<NNNN>`` and use the number of the original pull
|
||||
request.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git fetch upstream refs/pull/1234/head:bp-1234
|
||||
git checkout bp-1234
|
||||
|
||||
#. Find the parent commit of the original pull request.
|
||||
|
||||
The parent commit of the original pull request must be known in order to
|
||||
rebase onto a release branch. The easiest way to find this is on GitHub.
|
||||
|
||||
Open the original pull request on GitHub and find the first commit in the
|
||||
list of commits. Select and copy the SHA for that commit. The parent of
|
||||
that commit can be specified by appending ``~1`` to the end.
|
||||
|
||||
#. Rebase the new branch on top of the release branch.
|
||||
|
||||
* ``<release-branch>`` is the branch identified in step #1.
|
||||
|
||||
* ``<orig-base>`` is the SHA identified in step #3 -- don't forget to add
|
||||
``~1`` to the end!
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git rebase --onto <release-branch> <orig-base> bp-1234
|
||||
|
||||
Note, release branches prior to ``2016.11`` will not be able to make use of
|
||||
rebase and must use cherry-picking instead.
|
||||
|
||||
#. Push the back-port branch to GitHub and open a new pull request.
|
||||
|
||||
Opening a pull request for the back-port allows for the test suite and
|
||||
normal code-review process.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git push -u origin bp-1234
|
||||
|
||||
Keeping Salt Forks in Sync
|
||||
==========================
|
||||
--------------------------
|
||||
|
||||
Salt is advancing quickly. It is therefore critical to pull upstream changes
|
||||
Salt advances quickly. It is therefore critical to pull upstream changes
|
||||
from upstream into your fork on a regular basis. Nothing is worse than putting
|
||||
hard work into a pull request only to see bunches of merge conflicts because it
|
||||
has diverged too far from upstream.
|
||||
@ -340,92 +466,53 @@ the name of the main `saltstack/salt`_ repository.
|
||||
the current release branch.
|
||||
|
||||
Posting patches to the mailing list
|
||||
===================================
|
||||
-----------------------------------
|
||||
|
||||
Patches will also be accepted by email. Format patches using `git
|
||||
format-patch`_ and send them to the `salt-users`_ mailing list. The contributor
|
||||
will then get credit for the patch, and the Salt community will have an archive
|
||||
of the patch and a place for discussion.
|
||||
|
||||
.. _backporting-pull-requests:
|
||||
|
||||
Backporting Pull Requests
|
||||
=========================
|
||||
|
||||
If a bug is fixed on ``develop`` and the bug is also present on a
|
||||
currently-supported release branch it will need to be back-ported to all
|
||||
applicable branches.
|
||||
|
||||
.. note:: Most Salt contributors can skip these instructions
|
||||
|
||||
These instructions do not need to be read in order to contribute to the
|
||||
Salt project! The SaltStack team will back-port fixes on behalf of
|
||||
contributors in order to keep the contribution process easy.
|
||||
|
||||
These instructions are intended for frequent Salt contributors, advanced
|
||||
Git users, SaltStack employees, or independent souls who wish to back-port
|
||||
changes themselves.
|
||||
|
||||
It is often easiest to fix a bug on the oldest supported release branch and
|
||||
then merge that branch forward into ``develop`` (as described earlier in this
|
||||
document). When that is not possible the fix must be back-ported, or copied,
|
||||
into any other affected branches.
|
||||
|
||||
These steps assume a pull request ``#1234`` has been merged into ``develop``.
|
||||
And ``upstream`` is the name of the remote pointing to the main Salt repo.
|
||||
|
||||
1. Identify the oldest supported release branch that is affected by the bug.
|
||||
|
||||
2. Create a new branch for the back-port by reusing the same branch from the
|
||||
original pull request.
|
||||
|
||||
Name the branch ``bp-<NNNN>`` and use the number of the original pull
|
||||
request.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git fetch upstream refs/pull/1234/head:bp-1234
|
||||
git checkout bp-1234
|
||||
|
||||
3. Find the parent commit of the original pull request.
|
||||
|
||||
The parent commit of the original pull request must be known in order to
|
||||
rebase onto a release branch. The easiest way to find this is on GitHub.
|
||||
|
||||
Open the original pull request on GitHub and find the first commit in the
|
||||
list of commits. Select and copy the SHA for that commit. The parent of
|
||||
that commit can be specified by appending ``~1`` to the end.
|
||||
|
||||
4. Rebase the new branch on top of the release branch.
|
||||
|
||||
* ``<release-branch>`` is the branch identified in step #1.
|
||||
|
||||
* ``<orig-base>`` is the SHA identified in step #3 -- don't forget to add
|
||||
``~1`` to the end!
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git rebase --onto <release-branch> <orig-base> bp-1234
|
||||
|
||||
Note, release branches prior to ``2016.3`` will not be able to make use of
|
||||
rebase and must use cherry-picking instead.
|
||||
|
||||
5. Push the back-port branch to GitHub and open a new pull request.
|
||||
|
||||
Opening a pull request for the back-port allows for the test suite and
|
||||
normal code-review process.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git push -u origin bp-1234
|
||||
|
||||
Issue and Pull Request Labeling System
|
||||
======================================
|
||||
--------------------------------------
|
||||
|
||||
SaltStack uses several labeling schemes to help facilitate code contributions
|
||||
and bug resolution. See the :ref:`Labels and Milestones
|
||||
<labels-and-milestones>` documentation for more information.
|
||||
|
||||
Mentionbot
|
||||
----------
|
||||
|
||||
SaltStack runs a mention-bot which notifies contributors who might be able
|
||||
to help review incoming pull-requests based on their past contribution to
|
||||
files which are being changed.
|
||||
|
||||
If you do not wish to receive these notifications, please add your GitHub
|
||||
handle to the blacklist line in the ``.mention-bot`` file located in the
|
||||
root of the Salt repository.
|
||||
|
||||
.. _probot-gpg-verification:
|
||||
|
||||
GPG Verification
|
||||
----------------
|
||||
|
||||
SaltStack has enabled `GPG Probot`_ to enforce GPG signatures for all
|
||||
commits included in a Pull Request.
|
||||
|
||||
In order for the GPG verification status check to pass, *every* contributor in
|
||||
the pull request must:
|
||||
|
||||
- Set up a GPG key on their local machine
|
||||
- Sign all commits in the pull request with that key
|
||||
- Link the key with their GitHub account
|
||||
|
||||
This applies to all commits in the pull request.
|
||||
|
||||
GitHub hosts a number of `help articles`_ for creating a GPG key, using the
|
||||
GPG key with ``git`` locally, and linking the GPG key to your GitHub account.
|
||||
Once these steps are completed, the commit signing verification will look like
|
||||
the example in GitHub's `GPG Signature Verification feature announcement`_.
|
||||
|
||||
.. _`saltstack/salt`: https://github.com/saltstack/salt
|
||||
.. _`GitHub Fork a Repo Guide`: https://help.github.com/articles/fork-a-repo
|
||||
.. _`GitHub issue tracker`: https://github.com/saltstack/salt/issues
|
||||
@ -434,14 +521,6 @@ and bug resolution. See the :ref:`Labels and Milestones
|
||||
.. _`Closing issues via commit message`: https://help.github.com/articles/closing-issues-via-commit-messages
|
||||
.. _`git format-patch`: https://www.kernel.org/pub/software/scm/git/docs/git-format-patch.html
|
||||
.. _salt-users: https://groups.google.com/forum/#!forum/salt-users
|
||||
|
||||
Mentionbot
|
||||
==========
|
||||
|
||||
SaltStack runs a mention-bot which notifies contributors who might be able
|
||||
to help review incoming pull-requests based on their past contribution to
|
||||
files which are being changed.
|
||||
|
||||
If you do not wish to receive these notifications, please add your GitHub
|
||||
handle to the blacklist line in the `.mention-bot` file located in the
|
||||
root of the Salt repository.
|
||||
.. _GPG Probot: https://probot.github.io/apps/gpg/
|
||||
.. _help articles: https://help.github.com/articles/signing-commits-with-gpg/
|
||||
.. _GPG Signature Verification feature announcement: https://github.com/blog/2144-gpg-signature-verification
|
||||
|
@ -137,11 +137,11 @@ The second return value will be a string with two possible values:
|
||||
|
||||
Both before and after installing the target(s), you should run
|
||||
:strong:`list_pkgs` to obtain a list of the installed packages. You should then
|
||||
return the output of ``salt.utils.compare_dicts()``
|
||||
return the output of ``salt.utils.data.compare_dicts()``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
return salt.utils.compare_dicts(old, new)
|
||||
return salt.utils.data.compare_dicts(old, new)
|
||||
|
||||
|
||||
remove
|
||||
|
@ -123,7 +123,7 @@ to the module being tests one should do:
|
||||
}
|
||||
|
||||
Consider this more extensive example from
|
||||
``tests/unit/modules/test_libcloud_dns.py``::
|
||||
``tests/unit/modules/test_libcloud_dns.py``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
@ -246,6 +246,7 @@ Server configuration values and their defaults:
|
||||
|
||||
# Bind to LDAP anonymously to determine group membership
|
||||
# Active Directory does not allow anonymous binds without special configuration
|
||||
# In addition, if auth.ldap.anonymous is False, empty bind passwords are not permitted.
|
||||
auth.ldap.anonymous: False
|
||||
|
||||
# FOR TESTING ONLY, this is a VERY insecure setting.
|
||||
@ -285,7 +286,11 @@ and groups, it re-authenticates as the user running the Salt commands.
|
||||
|
||||
If you are already aware of the structure of your DNs and permissions in your LDAP store are set such that
|
||||
users can look up their own group memberships, then the first and second users can be the same. To tell Salt this is
|
||||
the case, omit the ``auth.ldap.bindpw`` parameter. You can template the ``binddn`` like this:
|
||||
the case, omit the ``auth.ldap.bindpw`` parameter. Note this is not the same thing as using an anonymous bind.
|
||||
Most LDAP servers will not permit anonymous bind, and as mentioned above, if `auth.ldap.anonymous` is False you
|
||||
cannot use an empty password.
|
||||
|
||||
You can template the ``binddn`` like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
|
||||
port: 5959
|
||||
proto: tcp
|
||||
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines_dirs:
|
||||
- /home/bob/engines
|
||||
|
||||
Writing an Engine
|
||||
=================
|
||||
|
@ -45,11 +45,27 @@ but leave any existing config, cache, and PKI information.
|
||||
Salt Minion Installation
|
||||
========================
|
||||
|
||||
If the system is missing the appropriate version of the Visual C++
|
||||
Redistributable (vcredist) the user will be prompted to install it. Click ``OK``
|
||||
to install the vcredist. Click ``Cancel`` to abort the installation without
|
||||
making modifications to the system.
|
||||
|
||||
If Salt is already installed on the system, the user will be prompted to remove
|
||||
the previous installation. Click ``OK`` to uninstall Salt without removing the
|
||||
configuration, PKI information, or cached files. Click ``Cancel`` to abort the
|
||||
installation before making any modifications to the system.
|
||||
|
||||
After the Welcome and the License Agreement, the installer asks for two bits of
|
||||
information to configure the minion: the master hostname and the minion name.
|
||||
The installer will update the minion config with these options. If the installer
|
||||
finds an existing minion config file, these fields will be populated with values
|
||||
from the existing config.
|
||||
The installer will update the minion config with these options.
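As a rough sketch, those two fields correspond to the ``master`` and ``id``
entries of the minion config file (the values shown here are placeholders):

.. code-block:: yaml

    # C:\salt\conf\minion -- the two values the installer fills in (placeholders)
    master: salt.example.com
    id: my-windows-minion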
|
||||
|
||||
If the installer finds an existing minion config file, these fields will be
|
||||
populated with values from the existing config, but they will be grayed out.
|
||||
There will also be a checkbox to use the existing config. If you continue, the
|
||||
existing config will be used. If the checkbox is unchecked, default values are
|
||||
displayed and can be changed. If you continue, the existing config file in
|
||||
``c:\salt\conf`` will be removed along with the ``c:\salt\conf\minion.d``
|
||||
directory. The values entered will be used with the default config.
|
||||
|
||||
The final page allows you to start the minion service and optionally change its
|
||||
startup type. By default, the minion is set to ``Automatic``. You can change the
|
||||
@ -71,11 +87,6 @@ be managed there or from the command line like any other Windows service.
|
||||
sc start salt-minion
|
||||
net start salt-minion
|
||||
|
||||
.. note::
|
||||
If the minion won't start, you may need to install the Microsoft Visual C++
|
||||
2008 x64 SP1 redistributable. Allow all Windows updates to run salt-minion
|
||||
smoothly.
|
||||
|
||||
Installation Prerequisites
|
||||
--------------------------
|
||||
|
||||
@ -96,15 +107,29 @@ Minion silently:
|
||||
========================= =====================================================
|
||||
Option Description
|
||||
========================= =====================================================
|
||||
``/minion-name=`` A string value to set the minion name. Default is
|
||||
'hostname'
|
||||
``/master=`` A string value to set the IP address or host name of
|
||||
the master. Default value is 'salt'
|
||||
the master. Default value is 'salt'. You can pass a
|
||||
single master or a comma-separated list of masters.
|
||||
Setting the master will replace existing config with
|
||||
the default config. Cannot be used in conjunction
|
||||
with ``/use-existing-config``
|
||||
``/minion-name=`` A string value to set the minion name. Default is
|
||||
'hostname'. Setting the minion name will replace
|
||||
existing config with the default config. Cannot be
|
||||
used in conjunction with ``/use-existing-config``
|
||||
``/start-minion=`` Either a 1 or 0. '1' will start the salt-minion
|
||||
service, '0' will not. Default is to start the
|
||||
service after installation.
|
||||
service after installation
|
||||
``/start-minion-delayed`` Set the minion start type to
|
||||
``Automatic (Delayed Start)``
|
||||
``/use-existing-config`` Either a 1 or 0. '1' will use the existing config if
|
||||
present. '0' will replace existing config with the
|
||||
default config. Default is '1'. If this is set to '1'
|
||||
values passed in ``/master`` and ``/minion-name``
|
||||
will be ignored
|
||||
``/S`` Runs the installation silently. Uses the above
|
||||
settings or the defaults
|
||||
``/?`` Displays command line help
|
||||
========================= =====================================================
|
||||
|
||||
.. note::
|
||||
|
@ -1727,7 +1727,7 @@ in the current Jinja context.
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
Context is: {{ show_full_context() }}
|
||||
Context is: {{ show_full_context()|yaml(False) }}
|
||||
|
||||
.. jinja_ref:: logs
|
||||
|
||||
|
@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and
|
||||
other systems about operations.
|
||||
|
||||
The event system fires events with very specific criteria. Every event has a
|
||||
:strong:`tag`. Event tags allow for fast top level filtering of events. In
|
||||
addition to the tag, each event has a data structure. This data structure is a
|
||||
dict, which contains information about the event.
|
||||
**tag**. Event tags allow for fast top-level filtering of events. In addition
|
||||
to the tag, each event has a data structure. This data structure is a
|
||||
dictionary, which contains information about the event.
|
||||
|
||||
.. _reactor-mapping-events:
|
||||
|
||||
@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run.
|
||||
the :ref:`querystring syntax <querystring-syntax>` (e.g.
|
||||
``salt://reactor/mycustom.sls?saltenv=reactor``).
|
||||
|
||||
Reactor sls files are similar to state and pillar sls files. They are
|
||||
by default yaml + Jinja templates and are passed familiar context variables.
|
||||
Reactor SLS files are similar to State and Pillar SLS files. They are by
|
||||
default YAML + Jinja templates and are passed familiar context variables.
|
||||
Click :ref:`here <reactor-jinja-context>` for more detailed information on the
|
||||
variables available in Jinja templating.
|
||||
|
||||
They differ because of the addition of the ``tag`` and ``data`` variables.
|
||||
|
||||
- The ``tag`` variable is just the tag in the fired event.
|
||||
- The ``data`` variable is the event's data dict.
|
||||
|
||||
Here is a simple reactor sls:
|
||||
Here is the SLS for a simple reaction:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor
|
||||
system. The only difference is that the data is matched up to the salt command
|
||||
API and the runner system. In this example, a command is published to the
|
||||
``mysql1`` minion with a function of :py:func:`state.apply
|
||||
<salt.modules.state.apply_>`. Similarly, a runner can be called:
|
||||
<salt.modules.state.apply_>`, which performs a :ref:`highstate
|
||||
<running-highstate>`. Similarly, a runner can be called:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{% if data['data']['custom_var'] == 'runit' %}
|
||||
call_runit_orch:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.runit
|
||||
- args:
|
||||
- mods: orchestrate.runit
|
||||
{% endif %}
|
||||
|
||||
This example will execute the state.orchestrate runner and initiate an execution
|
||||
of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using
|
||||
``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate"
|
||||
as this is most likely to cause confusion.
|
||||
of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
|
||||
|
||||
Writing SLS Files
|
||||
-----------------
|
||||
Types of Reactions
|
||||
==================
|
||||
|
||||
Reactor SLS files are stored in the same location as State SLS files. This means
|
||||
that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are
|
||||
available to the reactor and orchestrator.
|
||||
============================== ==================================================================================
|
||||
Name Description
|
||||
============================== ==================================================================================
|
||||
:ref:`local <reactor-local>` Runs a :ref:`remote-execution function <all-salt.modules>` on targeted minions
|
||||
:ref:`runner <reactor-runner>` Executes a :ref:`runner function <all-salt.runners>`
|
||||
:ref:`wheel <reactor-wheel>` Executes a :ref:`wheel function <all-salt.wheel>` on the master
|
||||
:ref:`caller <reactor-caller>` Runs a :ref:`remote-execution function <all-salt.modules>` on a masterless minion
|
||||
============================== ==================================================================================
|
||||
|
||||
It is recommended to keep reactor and orchestrator SLS files in their own uniquely
|
||||
named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``,
|
||||
``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to
|
||||
read through this a few years down the road.
|
||||
.. note::
|
||||
The ``local`` and ``caller`` reaction types will be renamed for the Oxygen
|
||||
release. These reaction types were named after Salt's internal client
|
||||
interfaces, and are not intuitively named. Both ``local`` and ``caller``
|
||||
will continue to work in Reactor SLS files, but for the Oxygen release the
|
||||
documentation will be updated to reflect the new preferred naming.
|
||||
|
||||
The Goal of Writing Reactor SLS Files
|
||||
=====================================
|
||||
Where to Put Reactor SLS Files
|
||||
==============================
|
||||
|
||||
Reactor SLS files share the familiar syntax from Salt States but there are
|
||||
important differences. The goal of a Reactor file is to process a Salt event as
|
||||
quickly as possible and then to optionally start a **new** process in response.
|
||||
Reactor SLS files can come both from files local to the master, and from any of
|
||||
the backends enabled via the :conf_master:`fileserver_backend` config option. Files
|
||||
placed in the Salt fileserver can be referenced using a ``salt://`` URL, just
|
||||
like they can in State SLS files.
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. The event tag is matched against the list of event tags under the
|
||||
``reactor`` section in the Salt Master config.
|
||||
3. The SLS files for any matches are Rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
It is recommended to place reactor and orchestrator SLS files in their own
|
||||
uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``,
|
||||
``reactor/``, etc., to keep them organized.
|
||||
|
||||
.. _reactor-sls:
|
||||
|
||||
Writing Reactor SLS
|
||||
===================
|
||||
|
||||
The different reaction types were developed separately and have historically
|
||||
had different methods for passing arguments. For the 2017.7.2 release a new,
|
||||
unified configuration schema has been introduced, which applies to all reaction
|
||||
types.
|
||||
|
||||
The old config schema will continue to be supported, and there is no plan to
|
||||
deprecate it at this time.
|
||||
|
||||
.. _reactor-local:
|
||||
|
||||
Local Reactions
|
||||
---------------
|
||||
|
||||
A ``local`` reaction runs a :ref:`remote-execution function <all-salt.modules>`
|
||||
on the targeted minions.
|
||||
|
||||
The old config schema required the positional and keyword arguments to be
|
||||
manually separated by the user under ``arg`` and ``kwarg`` parameters. However,
|
||||
this is not very user-friendly, as it forces the user to distinguish which type
|
||||
of argument is which, and make sure that positional arguments are ordered
|
||||
properly. Therefore, the new config schema is recommended if the master is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+-----------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+=============================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| install_zsh: | install_zsh: |
|
||||
| local.state.single: | local.state.single: |
|
||||
| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' |
|
||||
| - tgt_type: grain | - tgt_type: grain |
|
||||
| - args: | - arg: |
|
||||
| - fun: pkg.installed | - pkg.installed |
|
||||
| - name: zsh | - zsh |
|
||||
| - fromrepo: updates | - kwarg: |
|
||||
| | fromrepo: updates |
|
||||
+---------------------------------+-----------------------------+
|
||||
|
||||
This reaction would be equivalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates
|
||||
|
||||
.. note::
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd_async()
|
||||
<salt.client.LocalClient.cmd_async>` method can be passed at the same
|
||||
indentation level as ``tgt``.
|
||||
|
||||
.. note::
|
||||
``tgt_type`` is only required when the target expression defined in ``tgt``
|
||||
uses a :ref:`target type <targeting>` other than a minion ID glob.
|
||||
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0.
|
||||
|
||||
.. _reactor-runner:
|
||||
|
||||
Runner Reactions
|
||||
----------------
|
||||
|
||||
Runner reactions execute :ref:`runner functions <all-salt.runners>` locally on
|
||||
the master.
|
||||
|
||||
The old config schema called for passing arguments to the reaction directly
|
||||
under the name of the runner function. However, this can cause unpredictable
|
||||
interactions with the Reactor system's internal arguments. It is also possible
|
||||
to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above
|
||||
in :ref:`local reactions <reactor-local>`, but as noted above this is not very
|
||||
user-friendly. Therefore, the new config schema is recommended if the master
|
||||
is running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================================+=================================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| deploy_app: | deploy_app: |
|
||||
| runner.state.orchestrate: | runner.state.orchestrate: |
|
||||
| - args: | - mods: orchestrate.deploy_app |
|
||||
| - mods: orchestrate.deploy_app | - kwarg: |
|
||||
| - pillar: | pillar: |
|
||||
| event_tag: {{ tag }} | event_tag: {{ tag }} |
|
||||
| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} |
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
|
||||
Assuming that the event tag is ``foo``, and the data passed to the event is
|
||||
``{'bar': 'baz'}``, then this reaction is equivalent to running the following
|
||||
Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}'
|
||||
|
||||
.. _reactor-wheel:
|
||||
|
||||
Wheel Reactions
|
||||
---------------
|
||||
|
||||
Wheel reactions run :ref:`wheel functions <all-salt.wheel>` locally on the
|
||||
master.
|
||||
|
||||
Like :ref:`runner reactions <reactor-runner>`, the old config schema called for
|
||||
wheel reactions to have arguments passed directly under the name of the
|
||||
:ref:`wheel function <all-salt.wheel>` (or in ``arg`` or ``kwarg`` parameters).
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-----------------------------------+---------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+===================================+=================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| remove_key: | remove_key: |
|
||||
| wheel.key.delete: | wheel.key.delete: |
|
||||
| - args: | - match: {{ data['id'] }} |
|
||||
| - match: {{ data['id'] }} | |
|
||||
+-----------------------------------+---------------------------------+
|
||||
|
||||
.. _reactor-caller:
|
||||
|
||||
Caller Reactions
|
||||
----------------
|
||||
|
||||
Caller reactions run :ref:`remote-execution functions <all-salt.modules>` on a
|
||||
minion daemon's Reactor system. To run a Reactor on the minion, it is necessary
|
||||
to configure the :mod:`Reactor Engine <salt.engines.reactor>` in the minion
|
||||
config file, and then set up your watched events in a ``reactor`` section in the
|
||||
minion config file as well.
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Both the old and new config schemas involve passing arguments under an ``args``
|
||||
parameter. However, the old config schema only supports positional arguments.
|
||||
Therefore, the new config schema is recommended if the masterless minion is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+---------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+===========================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| touch_file: | touch_file: |
|
||||
| caller.file.touch: | caller.file.touch: |
|
||||
| - args: | - args: |
|
||||
| - name: /tmp/foo | - /tmp/foo |
|
||||
+---------------------------------+---------------------------+
|
||||
|
||||
This reaction is equivalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call file.touch name=/tmp/foo
|
||||
|
||||
Best Practices for Writing Reactor SLS Files
|
||||
============================================
|
||||
|
||||
The Reactor works as follows:
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. Each event's tag is matched against the list of event tags configured under
|
||||
the :conf_master:`reactor` section in the Salt Master config.
|
||||
3. The SLS files for any matches are rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
|
||||
Matching and rendering Reactor SLS files is done sequentially in a single
|
||||
process. Complex Jinja that calls out to slow Execution or Runner modules slows
|
||||
down the rendering and causes other reactions to pile up behind the current
|
||||
one. The worker pool is designed to handle complex and long-running processes
|
||||
such as Salt Orchestrate.
|
||||
process. For that reason, reactor SLS files should contain few individual
|
||||
reactions (one, if at all possible). Also, keep in mind that reactions are
|
||||
fired asynchronously (with the exception of :ref:`caller <reactor-caller>`) and
|
||||
do *not* support :ref:`requisites <requisites>`.
|
||||
|
||||
tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process
|
||||
started by the worker threads can be long-running. Using the reactor to fire
|
||||
an orchestrate runner would be ideal.
|
||||
Complex Jinja templating that calls out to slow :ref:`remote-execution
|
||||
<all-salt.modules>` or :ref:`runner <all-salt.runners>` functions slows down
|
||||
the rendering and causes other reactions to pile up behind the current one. The
|
||||
worker pool is designed to handle complex and long-running processes like
|
||||
:ref:`orchestration <orchestrate-runner>` jobs.
|
||||
|
||||
Therefore, when complex tasks are in order, :ref:`orchestration
|
||||
<orchestrate-runner>` is a natural fit. Orchestration SLS files can be more
|
||||
complex, and use requisites. Performing a complex task using orchestration lets
|
||||
the Reactor system fire off the orchestration job and proceed with processing
|
||||
other reactions.
|
||||
|
||||
.. _reactor-jinja-context:
|
||||
|
||||
Jinja Context
|
||||
-------------
|
||||
=============
|
||||
|
||||
Reactor files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are not available. The ``salt`` object is available for calling
|
||||
Runner and Execution modules but it should be used sparingly and only for quick
|
||||
tasks for the reasons mentioned above.
|
||||
Reactor SLS files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are *not* available. The ``salt`` object is available for calling
|
||||
:ref:`remote-execution <all-salt.modules>` or :ref:`runner <all-salt.runners>`
|
||||
functions, but it should be used sparingly and only for quick tasks for the
|
||||
reasons mentioned above.
|
||||
|
||||
In addition to the ``salt`` object, the following variables are available in
|
||||
the Jinja context:
|
||||
|
||||
- ``tag`` - the tag from the event that triggered execution of the Reactor SLS
|
||||
file
|
||||
- ``data`` - the event's data dictionary
|
||||
|
||||
The ``data`` dict will contain an ``id`` key containing the minion ID, if the
|
||||
event was fired from a minion, and a ``data`` key containing the data passed to
|
||||
the event.
|
||||
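As a quick illustration, a reactor SLS file might simply re-fire a namespaced
event that embeds both variables (a minimal sketch; the file path and the
``myco/seen/`` tag prefix are hypothetical):

.. code-block:: jinja

    # /srv/reactor/log_event.sls (hypothetical path)
    re_fire_event:
      runner.event.send:
        - args:
          - tag: myco/seen/{{ tag }}
          - data:
              original_event: {{ data|json }}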
|
||||
Advanced State System Capabilities
|
||||
----------------------------------
|
||||
==================================
|
||||
|
||||
Reactor SLS files, by design, do not support Requisites, ordering,
|
||||
``onlyif``/``unless`` conditionals and most other powerful constructs from
|
||||
Salt's State system.
|
||||
Reactor SLS files, by design, do not support :ref:`requisites <requisites>`,
|
||||
ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs
|
||||
from Salt's State system.
|
||||
|
||||
Complex Master-side operations are best performed by Salt's Orchestrate system
|
||||
so using the Reactor to kick off an Orchestrate run is a very common pairing.
|
||||
@ -166,7 +370,7 @@ For example:
|
||||
# /etc/salt/master.d/reactor.conf
|
||||
# A custom event containing: {"foo": "Foo!", "bar": "bar*", "baz": "Baz!"}
|
||||
reactor:
|
||||
- myco/custom/event:
|
||||
- my/custom/event:
|
||||
- /srv/reactor/some_event.sls
|
||||
|
||||
.. code-block:: jinja
|
||||
@ -174,15 +378,15 @@ For example:
|
||||
# /srv/reactor/some_event.sls
|
||||
invoke_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls
|
||||
- kwarg:
|
||||
pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json() }}
|
||||
- args:
|
||||
- mods: orchestrate.do_complex_thing
|
||||
- pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json }}
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
# /srv/salt/_orch/do_complex_thing.sls
|
||||
# /srv/salt/orchestrate/do_complex_thing.sls
|
||||
{% set tag = salt.pillar.get('event_tag') %}
|
||||
{% set data = salt.pillar.get('event_data') %}
|
||||
|
||||
@ -209,7 +413,7 @@ For example:
|
||||
.. _beacons-and-reactors:
|
||||
|
||||
Beacons and Reactors
|
||||
--------------------
|
||||
====================
|
||||
|
||||
An event initiated by a beacon, when it arrives at the master, will be wrapped
|
||||
inside a second event, such that the data object containing the beacon
|
||||
@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file,
|
||||
you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{
|
||||
data['id'] }}`` as for events initiated directly on the event bus.
|
||||
|
||||
Similarly, the data dictionary attached to the event would be located in
|
||||
``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``.
|
||||
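For instance, a reactor SLS responding to a beacon event might use the wrapped
fields like this (a sketch; the ``web`` minion-name prefix and the ``nginx``
service name are illustrative):

.. code-block:: jinja

    {# A sketch: restart a service on the minion that fired the beacon #}
    {% if data['data']['id'].startswith('web') %}
    restart_web_service:
      local.service.restart:
        - tgt: {{ data['data']['id'] }}
        - args:
          - name: nginx
    {% endif %}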
|
||||
See the :ref:`beacon documentation <beacon-example>` for examples.
|
||||
|
||||
Fire an event
|
||||
=============
|
||||
Manually Firing an Event
|
||||
========================
|
||||
|
||||
To fire an event from a minion call ``event.send``
|
||||
From the Master
|
||||
---------------
|
||||
|
||||
Use the :py:func:`event.send <salt.runners.event.send>` runner:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.send 'foo' '{orchestrate: refresh}'
|
||||
salt-run event.send foo '{orchestrate: refresh}'
|
||||
|
||||
After this is called, any reactor sls files matching event tag ``foo`` will
|
||||
execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``.
|
||||
From the Minion
|
||||
---------------
|
||||
|
||||
See :py:mod:`salt.modules.event` for more information.
|
||||
To fire an event to the master from a minion, call :py:func:`event.send
|
||||
<salt.modules.event.send>`:
|
||||
|
||||
Knowing what event is being fired
|
||||
=================================
|
||||
.. code-block:: bash
|
||||
|
||||
The best way to see exactly what events are fired and what data is available in
|
||||
each event is to use the :py:func:`state.event runner
|
||||
salt-call event.send foo '{orchestrate: refresh}'
|
||||
|
||||
To fire an event to the minion's local event bus, call :py:func:`event.fire
|
||||
<salt.modules.event.fire>`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.fire '{orchestrate: refresh}' foo
|
||||
|
||||
Referencing Data Passed in Events
|
||||
---------------------------------
|
||||
|
||||
Assuming any of the above examples, any reactor SLS files triggered by watching
|
||||
the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}``
|
||||
equal to ``'refresh'``.
|
||||
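A reactor SLS watching the ``foo`` tag could then branch on that value, for
example (a sketch; the file path and the chosen reaction are illustrative):

.. code-block:: jinja

    # /srv/reactor/foo.sls (hypothetical path)
    {% if data['data'].get('orchestrate') == 'refresh' %}
    refresh_pillar_everywhere:
      local.saltutil.refresh_pillar:
        - tgt: '*'
    {% endif %}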
|
||||
Getting Information About Events
|
||||
================================
|
||||
|
||||
The best way to see exactly what events have been fired and what data is
|
||||
available in each event is to use the :py:func:`state.event runner
|
||||
<salt.runners.state.event>`.
|
||||
|
||||
.. seealso:: :ref:`Common Salt Events <event-master_events>`
|
||||
@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file).
|
||||
view the result of referencing Jinja variables. If the result is empty then
|
||||
Jinja produced an empty result and the Reactor will ignore it.
|
||||
|
||||
.. _reactor-structure:
|
||||
Passing Event Data to Minions or Orchestration as Pillar
|
||||
--------------------------------------------------------
|
||||
|
||||
Understanding the Structure of Reactor Formulas
|
||||
===============================================
|
||||
|
||||
**I.e., when to use `arg` and `kwarg` and when to specify the function
|
||||
arguments directly.**
|
||||
|
||||
While the reactor system uses the same basic data structure as the state
|
||||
system, the functions that will be called using that data structure are
|
||||
different functions than are called via Salt's state system. The Reactor can
|
||||
call Runner modules using the `runner` prefix, Wheel modules using the `wheel`
|
||||
prefix, and can also cause minions to run Execution modules using the `local`
|
||||
prefix.
|
||||
|
||||
.. versionchanged:: 2014.7.0
|
||||
The ``cmd`` prefix was renamed to ``local`` for consistency with other
|
||||
parts of Salt. A backward-compatible alias was added for ``cmd``.
|
||||
|
||||
The Reactor runs on the master and calls functions that exist on the master. In
|
||||
the case of Runner and Wheel functions the Reactor can just call those
|
||||
functions directly since they exist on the master and are run on the master.
|
||||
|
||||
In the case of functions that exist on minions and are run on minions, the
|
||||
Reactor still needs to call a function on the master in order to send the
|
||||
necessary data to the minion so the minion can execute that function.
|
||||
|
||||
The Reactor calls functions exposed in :ref:`Salt's Python API documentation
|
||||
<client-apis>`, and thus the structure of Reactor files very transparently
|
||||
reflects the function signatures of those functions.
|
||||
|
||||
Calling Execution modules on Minions
|
||||
------------------------------------
|
||||
|
||||
The Reactor sends commands down to minions in the exact same way Salt's CLI
|
||||
interface does. It calls a function locally on the master that sends the name
|
||||
of the function as well as a list of any arguments and a dictionary of any
|
||||
keyword arguments that the minion should use to execute that function.
|
||||
|
||||
Specifically, the Reactor calls the async version of :py:meth:`this function
|
||||
<salt.client.LocalClient.cmd>`. You can see that function has 'arg' and 'kwarg'
|
||||
parameters which are both values that are sent down to the minion.
|
||||
|
||||
Executing remote commands maps to the :strong:`LocalClient` interface which is
|
||||
used by the :strong:`salt` command. This interface more specifically maps to
|
||||
the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This
|
||||
means that the arguments passed are being passed to the :strong:`cmd_async`
|
||||
method, not the remote method. A reaction prefixed with :strong:`local` uses the
|
||||
:strong:`LocalClient` subsystem. The result is, to execute a remote command,
|
||||
a reactor formula would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: '*'
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
The ``arg`` option takes a list of arguments as they would be presented on the
|
||||
command line, so the above declaration is the same as running this salt
|
||||
command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' cmd.run 'rm -rf /tmp/*'
|
||||
|
||||
Use the ``tgt_type`` argument to specify a matcher:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'os:Ubuntu'
|
||||
- tgt_type: grain
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'G@roles:hbase_master'
|
||||
- tgt_type: compound
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
.. note::
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0 (2016.11.x and earlier).
|
||||
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd()
|
||||
<salt.client.LocalClient.cmd>` method can be specified as well.
|
||||
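For example, keyword arguments for the remote function go under ``kwarg``, and
other :py:meth:`cmd <salt.client.LocalClient.cmd>` parameters such as ``ret``
(a returner) can be listed alongside the target (a sketch; the ``mysql``
returner is only illustrative and is assumed to be configured):

.. code-block:: yaml

    update_packages:
      local.pkg.upgrade:
        - tgt: 'os:Ubuntu'
        - tgt_type: grain
        - ret: mysql
        - kwarg:
            refresh: True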
|
||||
Executing Reactors from the Minion
|
||||
----------------------------------
|
||||
|
||||
The minion can be set up to use the Reactor via a reactor engine. This just
|
||||
sets up and listens to the minion's event bus, instead of to the master's.
|
||||
|
||||
The biggest difference is that you have to use the caller method on the
|
||||
Reactor, which is the equivalent of salt-call, to run your commands.
|
||||
|
||||
:mod:`Reactor Engine setup <salt.engines.reactor>`
|
||||
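A minimal minion-side configuration might look like the following sketch (the
event tag and SLS path are illustrative, and the engine is started with its
default options; see the engine documentation linked above for the
authoritative settings). The reaction file itself then uses the ``caller``
prefix, as in the example that follows:

.. code-block:: yaml

    # /etc/salt/minion.d/reactor.conf (hypothetical path)
    engines:
      - reactor: {}

    reactor:
      - 'myco/custom/event':
        - /srv/reactor/clean_tmp.sls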
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
caller.cmd.run:
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Calling Runner modules and Wheel modules
|
||||
----------------------------------------
|
||||
|
||||
Calling Runner modules and Wheel modules from the Reactor uses a more direct
|
||||
syntax since the function is being executed locally instead of sending a
|
||||
command to a remote system to be executed there. There are no 'arg' or 'kwarg'
|
||||
parameters (unless the Runner function or Wheel function accepts a parameter
|
||||
with either of those names.)
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clear_the_grains_cache_for_all_minions:
|
||||
runner.cache.clear_grains
|
||||
|
||||
If :py:func:`the runner takes arguments <salt.runners.cloud.profile>`, then
|
||||
they must be specified as keyword arguments.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spin_up_more_web_machines:
|
||||
runner.cloud.profile:
|
||||
- prof: centos_6
|
||||
- instances:
|
||||
- web11 # These VM names would be generated via Jinja in a
|
||||
- web12 # real-world example.
|
||||
|
||||
To determine the proper names for the arguments, check the documentation
|
||||
or source code for the runner function you wish to call.
|
||||
|
||||
Passing event data to Minions or Orchestrate as Pillar
|
||||
------------------------------------------------------
|
||||
|
||||
An interesting trick to pass data from the Reactor script to
|
||||
An interesting trick to pass data from the Reactor SLS file to
|
||||
:py:func:`state.apply <salt.modules.state.apply_>` is to pass it as inline
|
||||
Pillar data since both functions take a keyword argument named ``pillar``.
|
||||
|
||||
@ -484,10 +567,9 @@ from the event to the state file via inline Pillar.
|
||||
add_new_minion_to_pool:
|
||||
local.state.apply:
|
||||
- tgt: 'haproxy*'
|
||||
- arg:
|
||||
- haproxy.refresh_pool
|
||||
- kwarg:
|
||||
pillar:
|
||||
- args:
|
||||
- mods: haproxy.refresh_pool
|
||||
- pillar:
|
||||
new_minion: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
@ -503,17 +585,16 @@ This works with Orchestrate files as well:
|
||||
|
||||
call_some_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
- args:
|
||||
- mods: orchestrate.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
|
||||
Which is equivalent to the following command at the CLI:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls.
|
||||
salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
Finally, that data is available in the state file using the normal Pillar
|
||||
lookup syntax. The following example is grabbing web server names and IP
|
||||
@ -564,7 +645,7 @@ includes the minion id, which we can use for matching.
|
||||
- 'salt/minion/ink*/start':
|
||||
- /srv/reactor/auth-complete.sls
|
||||
|
||||
In this sls file, we say that if the key was rejected we will delete the key on
|
||||
In this SLS file, we say that if the key was rejected we will delete the key on
|
||||
the master and then also tell the master to ssh in to the minion and tell it to
|
||||
restart the minion, since a minion process will die if the key is rejected.
|
||||
|
||||
@ -580,19 +661,21 @@ authentication every ten seconds by default.
|
||||
{% if not data['result'] and data['id'].startswith('ink') %}
|
||||
minion_remove:
|
||||
wheel.key.delete:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
minion_rejoin:
|
||||
local.cmd.run:
|
||||
- tgt: salt-master.domain.tld
|
||||
- arg:
|
||||
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
- args:
|
||||
- cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
{% endif %}
|
||||
|
||||
{# Ink server is sending new key -- accept this key #}
|
||||
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %}
|
||||
minion_add:
|
||||
wheel.key.accept:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
No if statements are needed here because we already limited this action to just
|
||||
|
1731
doc/topics/releases/2016.11.8.rst
Normal file
1731
doc/topics/releases/2016.11.8.rst
Normal file
File diff suppressed because it is too large
Load Diff
6
doc/topics/releases/2016.11.9.rst
Normal file
6
doc/topics/releases/2016.11.9.rst
Normal file
@ -0,0 +1,6 @@
|
||||
============================
|
||||
Salt 2016.11.9 Release Notes
|
||||
============================
|
||||
|
||||
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
|
||||
|
@ -7,23 +7,9 @@ Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
Changes for v2016.3.7..v2016.3.8
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the master.
|
||||
Security Fix
|
||||
============
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
||||
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
|
||||
|
29
doc/topics/releases/2016.3.9.rst
Normal file
29
doc/topics/releases/2016.3.9.rst
Normal file
@ -0,0 +1,29 @@
|
||||
===========================
|
||||
Salt 2016.3.9 Release Notes
|
||||
===========================
|
||||
|
||||
Version 2016.3.9 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
Changes for v2016.3.7..v2016.3.9
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the master.
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
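A sketch of how these options could be combined in the respective
configuration files (illustrative only; the defaults are described above):

.. code-block:: yaml

    # Master config (illustrative)
    allow_minion_key_revoke: False
    require_minion_sign_messages: True
    drop_messages_signature_fail: True

    # Minion config (illustrative)
    minion_sign_messages: True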
@ -21,6 +21,9 @@ Salt will no longer support Python 2.6. We will provide python2.7 packages on ou
|
||||
|
||||
.. _repo: https://repo.saltstack.com/
|
||||
|
||||
As this will impact the installation of additional dependencies for salt modules, please use pip packages if there is not a package available in a repository. You will need to install the python27-pip package to get access to the correct pip27 executable: ``yum install python27-pip``
|
||||
|
||||
|
||||
============
|
||||
Known Issues
|
||||
============
|
||||
|
3189
doc/topics/releases/2017.7.2.rst
Normal file
3189
doc/topics/releases/2017.7.2.rst
Normal file
File diff suppressed because it is too large
Load Diff
6
doc/topics/releases/2017.7.3.rst
Normal file
6
doc/topics/releases/2017.7.3.rst
Normal file
@ -0,0 +1,6 @@
|
||||
============================
|
||||
Salt 2017.7.3 Release Notes
|
||||
============================
|
||||
|
||||
Version 2017.7.3 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -141,7 +141,7 @@ If the master seems to be unresponsive, a SIGUSR1 can be passed to the
|
||||
salt-master threads to display what piece of code is executing. This debug
|
||||
information can be invaluable in tracking down bugs.
|
||||
|
||||
To pass a SIGUSR1 to the master, first make sure the minion is running in the
|
||||
To pass a SIGUSR1 to the master, first make sure the master is running in the
|
||||
foreground. Stop the service if it is running as a daemon, and start it in the
|
||||
foreground like so:
|
||||
|
||||
|
@ -27,7 +27,7 @@ Installing Dependencies
|
||||
=======================
|
||||
|
||||
Both pygit2_ and GitPython_ are supported Python interfaces to git. If
|
||||
compatible versions of both are installed, pygit2_ will preferred. In these
|
||||
compatible versions of both are installed, pygit2_ will be preferred. In these
|
||||
cases, GitPython_ can be forced using the :conf_master:`gitfs_provider`
|
||||
parameter in the master config file.
|
||||
|
||||
|
@ -33,3 +33,5 @@ Tutorials Index
|
||||
* :ref:`SaltStack Walk-through <tutorial-salt-walk-through>`
|
||||
* :ref:`Writing Salt Tests <tutorial-salt-testing>`
|
||||
* :ref:`Multi-cloud orchestration with Apache Libcloud <tutorial-libcloud>`
|
||||
* :ref:`Running Salt States and Commands in Docker Containers <docker-sls>`
|
||||
* :ref:`Preseed Minion with Accepted Key <tutorial-preseed-key>`
|
||||
|
@ -23,7 +23,7 @@ Supported Operating Systems
|
||||
.. note::
|
||||
|
||||
In the event you do not see your distribution or version available please
|
||||
review the develop branch on GitHub as it main contain updates that are
|
||||
review the develop branch on GitHub as it may contain updates that are
|
||||
not present in the stable release:
|
||||
https://github.com/saltstack/salt-bootstrap/tree/develop
|
||||
|
||||
|
@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER
|
||||
echo -n -e "\033]0;Build: Install Salt\007"
|
||||
sudo rm -rf $SRCDIR/build
|
||||
sudo rm -rf $SRCDIR/dist
|
||||
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install
|
||||
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
|
||||
sudo $PYTHON $SRCDIR/setup.py install
|
||||
|
||||
############################################################################
|
||||
# Build Package
|
||||
|
@ -132,7 +132,7 @@ fi
|
||||
###############################################################################
|
||||
# Remove the salt from the paths.d
|
||||
###############################################################################
|
||||
if [ ! -f "/etc/paths.d/salt" ]; then
|
||||
if [ -f "/etc/paths.d/salt" ]; then
|
||||
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "/etc/paths.d/salt"
|
||||
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
@ -67,7 +67,7 @@ _su_cmd() {
|
||||
|
||||
|
||||
_get_pid() {
|
||||
netstat $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
|
||||
netstat -n $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
|
||||
| sed -r -e "\|\s${SOCK_DIR}/minion_event_${MINION_ID_HASH}_pub\.ipc$|"'!d; s|/.*||; s/.*\s//;' \
|
||||
| uniq
|
||||
}
|
||||
@ -156,7 +156,7 @@ start() {
|
||||
printf "\nPROCESSES:\n" >&2
|
||||
ps wwwaxu | grep '[s]alt-minion' >&2
|
||||
printf "\nSOCKETS:\n" >&2
|
||||
netstat $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
|
||||
netstat -n $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
|
||||
printf "\nLOG_FILE:\n" >&2
|
||||
tail -n 20 "$LOG_FILE" >&2
|
||||
printf "\nENVIRONMENT:\n" >&2
|
||||
|
@ -35,8 +35,9 @@ _salt_get_keys(){
|
||||
}
|
||||
|
||||
_salt(){
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:-"$HOME/.cache/salt-comp-cache_functions"}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:-"last hour"}
|
||||
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
|
||||
|
||||
if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then
|
||||
mkdir -p "$(dirname ${_salt_cache_functions})"
|
||||
|
@ -11,6 +11,7 @@
|
||||
!define PRODUCT_UNINST_KEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}"
|
||||
!define PRODUCT_UNINST_KEY_OTHER "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}"
|
||||
!define PRODUCT_UNINST_ROOT_KEY "HKLM"
|
||||
!define OUTFILE "Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe"
|
||||
|
||||
# Import Libraries
|
||||
!include "MUI2.nsh"
|
||||
@ -52,6 +53,15 @@ ${StrStrAdv}
|
||||
Pop "${ResultVar}"
|
||||
!macroend
|
||||
|
||||
# Part of the Explode function for Strings
|
||||
!define Explode "!insertmacro Explode"
|
||||
!macro Explode Length Separator String
|
||||
Push `${Separator}`
|
||||
Push `${String}`
|
||||
Call Explode
|
||||
Pop `${Length}`
|
||||
!macroend
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Configure Pages, Ordering, and Configuration
|
||||
@ -92,10 +102,17 @@ Var Dialog
|
||||
Var Label
|
||||
Var CheckBox_Minion_Start
|
||||
Var CheckBox_Minion_Start_Delayed
|
||||
Var ConfigMasterHost
|
||||
Var MasterHost
|
||||
Var MasterHost_State
|
||||
Var ConfigMinionName
|
||||
Var MinionName
|
||||
Var MinionName_State
|
||||
Var ExistingConfigFound
|
||||
Var UseExistingConfig
|
||||
Var UseExistingConfig_State
|
||||
Var WarningExistingConfig
|
||||
Var WarningDefaultConfig
|
||||
Var StartMinion
|
||||
Var StartMinionDelayed
|
||||
Var DeleteInstallDir
|
||||
@ -115,27 +132,105 @@ Function pageMinionConfig
|
||||
Abort
|
||||
${EndIf}
|
||||
|
||||
# Master IP or Hostname Dialog Control
|
||||
${NSD_CreateLabel} 0 0 100% 12u "Master IP or Hostname:"
|
||||
Pop $Label
|
||||
|
||||
${NSD_CreateText} 0 13u 100% 12u $MasterHost_State
|
||||
Pop $MasterHost
|
||||
|
||||
# Minion ID Dialog Control
|
||||
${NSD_CreateLabel} 0 30u 100% 12u "Minion Name:"
|
||||
Pop $Label
|
||||
|
||||
${NSD_CreateText} 0 43u 100% 12u $MinionName_State
|
||||
Pop $MinionName
|
||||
|
||||
# Use Existing Config Checkbox
|
||||
${NSD_CreateCheckBox} 0 65u 100% 12u "&Use Existing Config"
|
||||
Pop $UseExistingConfig
|
||||
${NSD_OnClick} $UseExistingConfig pageMinionConfig_OnClick
|
||||
|
||||
# Add Existing Config Warning Label
|
||||
${NSD_CreateLabel} 0 80u 100% 60u "The values above are taken from an \
|
||||
existing configuration found in `c:\salt\conf\minion`. Configuration \
|
||||
settings defined in the `minion.d` directories, if they exist, are not \
|
||||
shown here.$\r$\n\
|
||||
$\r$\n\
|
||||
Clicking `Install` will leave the existing config unchanged."
|
||||
Pop $WarningExistingConfig
|
||||
CreateFont $0 "Arial" 10 500 /ITALIC
|
||||
SendMessage $WarningExistingConfig ${WM_SETFONT} $0 1
|
||||
SetCtlColors $WarningExistingConfig 0xBB0000 transparent
|
||||
|
||||
# Add Default Config Warning Label
|
||||
${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will remove the \
|
||||
the existing minion config file and remove the minion.d directories. \
|
||||
The values above will be used in the new default config."
|
||||
Pop $WarningDefaultConfig
|
||||
CreateFont $0 "Arial" 10 500 /ITALIC
|
||||
SendMessage $WarningDefaultConfig ${WM_SETFONT} $0 1
|
||||
SetCtlColors $WarningDefaultConfig 0xBB0000 transparent
|
||||
|
||||
# If no existing config found, disable the checkbox and stuff
|
||||
# Set UseExistingConfig_State to 0
|
||||
${If} $ExistingConfigFound == 0
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
ShowWindow $UseExistingConfig ${SW_HIDE}
|
||||
ShowWindow $WarningExistingConfig ${SW_HIDE}
|
||||
ShowWindow $WarningDefaultConfig ${SW_HIDE}
|
||||
${Endif}
|
||||
|
||||
${NSD_SetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
Call pageMinionConfig_OnClick
|
||||
|
||||
nsDialogs::Show
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function pageMinionConfig_OnClick
|
||||
|
||||
# You have to pop the top handle to keep the stack clean
|
||||
Pop $R0
|
||||
|
||||
# Assign the current checkbox state to the variable
|
||||
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
# Validate the checkboxes
|
||||
${If} $UseExistingConfig_State == ${BST_CHECKED}
|
||||
# Use Existing Config is checked, show warning
|
||||
ShowWindow $WarningExistingConfig ${SW_SHOW}
|
||||
EnableWindow $MasterHost 0
|
||||
EnableWindow $MinionName 0
|
||||
${NSD_SetText} $MasterHost $ConfigMasterHost
|
||||
${NSD_SetText} $MinionName $ConfigMinionName
|
||||
${If} $ExistingConfigFound == 1
|
||||
ShowWindow $WarningDefaultConfig ${SW_HIDE}
|
||||
${Endif}
|
||||
${Else}
|
||||
# Use Existing Config is not checked, hide the warning
|
||||
ShowWindow $WarningExistingConfig ${SW_HIDE}
|
||||
EnableWindow $MasterHost 1
|
||||
EnableWindow $MinionName 1
|
||||
${NSD_SetText} $MasterHost $MasterHost_State
|
||||
${NSD_SetText} $MinionName $MinionName_State
|
||||
${If} $ExistingConfigFound == 1
|
||||
ShowWindow $WarningDefaultConfig ${SW_SHOW}
|
||||
${Endif}
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function pageMinionConfig_Leave
|
||||
|
||||
${NSD_GetText} $MasterHost $MasterHost_State
|
||||
${NSD_GetText} $MinionName $MinionName_State
|
||||
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
Call RemoveExistingConfig
|
||||
|
||||
FunctionEnd
|
||||
|
||||
@ -194,7 +289,7 @@ FunctionEnd
|
||||
!else
|
||||
Name "${PRODUCT_NAME} ${PRODUCT_VERSION}"
|
||||
!endif
|
||||
OutFile "Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe"
|
||||
OutFile "${OutFile}"
|
||||
InstallDir "c:\salt"
|
||||
InstallDirRegKey HKLM "${PRODUCT_DIR_REGKEY}" ""
|
||||
ShowInstDetails show
|
||||
@ -311,8 +406,6 @@ SectionEnd
|
||||
|
||||
Function .onInit
|
||||
|
||||
Call getMinionConfig
|
||||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
# Check for existing installation
|
||||
@ -364,6 +457,23 @@ Function .onInit
|
||||
|
||||
skipUninstall:
|
||||
|
||||
Call getMinionConfig
|
||||
|
||||
IfSilent 0 +2
|
||||
Call RemoveExistingConfig
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function RemoveExistingConfig
|
||||
|
||||
${If} $ExistingConfigFound == 1
|
||||
${AndIf} $UseExistingConfig_State == 0
|
||||
# Wipe out the Existing Config
|
||||
Delete "$INSTDIR\conf\minion"
|
||||
RMDir /r "$INSTDIR\conf\minion.d"
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
@ -407,7 +517,9 @@ Section -Post
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"
|
||||
|
||||
Call updateMinionConfig
|
||||
${If} $UseExistingConfig_State == 0
|
||||
Call updateMinionConfig
|
||||
${EndIf}
|
||||
|
||||
Push "C:\salt"
|
||||
Call AddToPath
|
||||
@ -534,18 +646,32 @@ FunctionEnd
|
||||
# Helper Functions
|
||||
###############################################################################
|
||||
Function MsiQueryProductState
|
||||
# Used for detecting VCRedist Installation
|
||||
!define INSTALLSTATE_DEFAULT "5"
|
||||
|
||||
!define INSTALLSTATE_DEFAULT "5"
|
||||
|
||||
Pop $R0
|
||||
StrCpy $NeedVcRedist "False"
|
||||
System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0"
|
||||
StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0
|
||||
StrCpy $NeedVcRedist "True"
|
||||
Pop $R0
|
||||
StrCpy $NeedVcRedist "False"
|
||||
System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0"
|
||||
StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0
|
||||
StrCpy $NeedVcRedist "True"
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Trim Function
|
||||
# - Trim whitespace from the beginning and end of a string
|
||||
# - Trims spaces, \r, \n, \t
|
||||
#
|
||||
# Usage:
|
||||
# Push " some string " ; String to Trim
|
||||
# Call Trim
|
||||
# Pop $0 ; Trimmed String: "some string"
|
||||
#
|
||||
# or
|
||||
#
|
||||
# ${Trim} $0 $1 ; Trimmed String, String to Trim
|
||||
#------------------------------------------------------------------------------
|
||||
Function Trim
|
||||
|
||||
Exch $R1 # Original string
|
||||
@ -580,6 +706,95 @@ Function Trim
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Explode Function
|
||||
# - Splits a string based off the passed separator
|
||||
# - Each item in the string is pushed to the stack
|
||||
# - The last item pushed to the stack is the length of the array
|
||||
#
|
||||
# Usage:
|
||||
# Push "," ; Separator
|
||||
# Push "string,to,separate" ; String to explode
|
||||
# Call Explode
|
||||
# Pop $0 ; Number of items in the array
|
||||
#
|
||||
# or
|
||||
#
|
||||
# ${Explode} $0 $1 $2 ; Length, Separator, String
|
||||
#------------------------------------------------------------------------------
|
||||
Function Explode
|
||||
# Initialize variables
|
||||
Var /GLOBAL explString
|
||||
Var /GLOBAL explSeparator
|
||||
Var /GLOBAL explStrLen
|
||||
Var /GLOBAL explSepLen
|
||||
Var /GLOBAL explOffset
|
||||
Var /GLOBAL explTmp
|
||||
Var /GLOBAL explTmp2
|
||||
Var /GLOBAL explTmp3
|
||||
Var /GLOBAL explArrCount
|
||||
|
||||
# Get input from user
|
||||
Pop $explString
|
||||
Pop $explSeparator
|
||||
|
||||
# Calculates initial values
|
||||
StrLen $explStrLen $explString
|
||||
StrLen $explSepLen $explSeparator
|
||||
StrCpy $explArrCount 1
|
||||
|
||||
${If} $explStrLen <= 1 # If we got a single character
|
||||
${OrIf} $explSepLen > $explStrLen # or separator is larger than the string,
|
||||
Push $explString # then we return initial string with no change
|
||||
Push 1 # and set array's length to 1
|
||||
Return
|
||||
${EndIf}
|
||||
|
||||
# Set offset to the last symbol of the string
|
||||
StrCpy $explOffset $explStrLen
|
||||
IntOp $explOffset $explOffset - 1
|
||||
|
||||
# Clear temp string to exclude the possibility of appearance of occasional data
|
||||
StrCpy $explTmp ""
|
||||
StrCpy $explTmp2 ""
|
||||
StrCpy $explTmp3 ""
|
||||
|
||||
# Loop until the offset becomes negative
|
||||
${Do}
|
||||
# If offset becomes negative, it is time to leave the function
|
||||
${IfThen} $explOffset == -1 ${|} ${ExitDo} ${|}
|
||||
|
||||
# Remove everything before and after the searched part ("TempStr")
|
||||
StrCpy $explTmp $explString $explSepLen $explOffset
|
||||
|
||||
${If} $explTmp == $explSeparator
|
||||
# Calculating offset to start copy from
|
||||
IntOp $explTmp2 $explOffset + $explSepLen # Offset equals to the current offset plus length of separator
|
||||
StrCpy $explTmp3 $explString "" $explTmp2
|
||||
|
||||
Push $explTmp3 # Throwing array item to the stack
|
||||
IntOp $explArrCount $explArrCount + 1 # Increasing array's counter
|
||||
|
||||
StrCpy $explString $explString $explOffset 0 # Cutting all characters beginning with the separator entry
|
||||
StrLen $explStrLen $explString
|
||||
${EndIf}
|
||||
|
||||
${If} $explOffset = 0 # If the beginning of the line met and there is no separator,
|
||||
# copying the rest of the string
|
||||
${If} $explSeparator == "" # Fix for the empty separator
|
||||
IntOp $explArrCount $explArrCount - 1
|
||||
${Else}
|
||||
Push $explString
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
|
||||
IntOp $explOffset $explOffset - 1
|
||||
${Loop}
|
||||
|
||||
Push $explArrCount
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
@ -816,6 +1031,9 @@ FunctionEnd
|
||||
###############################################################################
|
||||
Function getMinionConfig
|
||||
|
||||
# Set Config Found Default Value
|
||||
StrCpy $ExistingConfigFound 0
|
||||
|
||||
confFind:
|
||||
IfFileExists "$INSTDIR\conf\minion" confFound confNotFound
|
||||
|
||||
@ -828,24 +1046,42 @@ Function getMinionConfig
|
||||
${EndIf}
|
||||
|
||||
confFound:
|
||||
StrCpy $ExistingConfigFound 1
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
${StrLoc} $2 $1 "master:" ">"
|
||||
${If} $2 == 0
|
||||
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0"
|
||||
${Trim} $2 $2
|
||||
StrCpy $MasterHost_State $2
|
||||
ClearErrors # Clear Errors
|
||||
FileRead $0 $1 # Read the next line
|
||||
IfErrors EndOfFile # Error is probably EOF
|
||||
${StrLoc} $2 $1 "master:" ">" # Find `master:` starting at the beginning
|
||||
${If} $2 == 0 # If it found it in the first position, then it is defined
|
||||
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" # Read everything after `master: `
|
||||
${Trim} $2 $2 # Trim white space
|
||||
${If} $2 == "" # If it's empty, it's probably a list
|
||||
masterLoop:
|
||||
ClearErrors # Clear Errors
|
||||
FileRead $0 $1 # Read the next line
|
||||
IfErrors EndOfFile # Error is probably EOF
|
||||
${StrStrAdv} $2 $1 "- " ">" ">" "0" "0" "0" # Read everything after `- `
|
||||
${Trim} $2 $2 # Trim white space
|
||||
${IfNot} $2 == "" # If it's not empty, we found something
|
||||
${If} $ConfigMasterHost == "" # Is the default `salt` there
|
||||
StrCpy $ConfigMasterHost $2 # If so, make the first item the new entry
|
||||
${Else}
|
||||
StrCpy $ConfigMasterHost "$ConfigMasterHost,$2" # Append the new master, comma separated
|
||||
${EndIf}
|
||||
Goto masterLoop # Check the next one
|
||||
${EndIf}
|
||||
${Else}
|
||||
StrCpy $ConfigMasterHost $2 # A single master entry
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
|
||||
${StrLoc} $2 $1 "id:" ">"
|
||||
${If} $2 == 0
|
||||
${StrStrAdv} $2 $1 "id: " ">" ">" "0" "0" "0"
|
||||
${Trim} $2 $2
|
||||
StrCpy $MinionName_State $2
|
||||
StrCpy $ConfigMinionName $2
|
||||
${EndIf}
|
||||
|
||||
Goto confLoop
|
||||
@ -855,6 +1091,14 @@ Function getMinionConfig
|
||||
|
||||
confReallyNotFound:
|
||||
|
||||
# Set Default Config Values if not found
|
||||
${If} $ConfigMasterHost == ""
|
||||
StrCpy $ConfigMasterHost "salt"
|
||||
${EndIf}
|
||||
${If} $ConfigMinionName == ""
|
||||
StrCpy $ConfigMinionName "hostname"
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
@ -874,7 +1118,22 @@ Function updateMinionConfig
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
|
||||
${Explode} $9 "," $MasterHost_state # Split the hostname on commas, $9 is the number of items found
|
||||
${If} $9 == 1 # 1 means only a single master was passed
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${Else} # Make a multi-master entry
|
||||
StrCpy $2 "master:" # Make the first line "master:"
|
||||
|
||||
loop_explode: # Start a loop to go through the list in the config
|
||||
pop $8 # Pop the next item off the stack
|
||||
${Trim} $8 $8 # Trim any whitespace
|
||||
StrCpy $2 "$2$\r$\n - $8" # Add it to the master variable ($2)
|
||||
IntOp $9 $9 - 1 # Decrement the list count
|
||||
${If} $9 >= 1 # If it's not 0
|
||||
Goto loop_explode # Do it again
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
@ -905,6 +1164,67 @@ Function parseCommandLineSwitches
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Display Help
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/?" $R1
|
||||
IfErrors display_help_not_found
|
||||
|
||||
System::Call 'kernel32::GetStdHandle(i -11)i.r0'
|
||||
System::Call 'kernel32::AttachConsole(i -1)i.r1'
|
||||
${If} $0 = 0
|
||||
${OrIf} $1 = 0
|
||||
System::Call 'kernel32::AllocConsole()'
|
||||
System::Call 'kernel32::GetStdHandle(i -11)i.r0'
|
||||
${EndIf}
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "Help for Salt Minion installation$\n"
|
||||
FileWrite $0 "===============================================================================$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/minion-name=$\t$\tA string value to set the minion name. Default is$\n"
|
||||
FileWrite $0 "$\t$\t$\t'hostname'. Setting the minion name will replace$\n"
|
||||
FileWrite $0 "$\t$\t$\texisting config with a default config. Cannot be$\n"
|
||||
FileWrite $0 "$\t$\t$\tused in conjunction with /use-existing-config=1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/master=$\t$\tA string value to set the IP address or hostname of$\n"
|
||||
FileWrite $0 "$\t$\t$\tthe master. Default value is 'salt'. You may pass a$\n"
|
||||
FileWrite $0 "$\t$\t$\tsingle master, or a comma separated list of masters.$\n"
|
||||
FileWrite $0 "$\t$\t$\tSetting the master will replace existing config with$\n"
|
||||
FileWrite $0 "$\t$\t$\ta default config. Cannot be used in conjunction with$\n"
|
||||
FileWrite $0 "$\t$\t$\t/use-existing-config=1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/start-minion=$\t$\t1 will start the service, 0 will not. Default is 1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/start-minion-delayed$\tSet the minion start type to 'Automatic (Delayed Start)'$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/use-existing-config=$\t1 will use the existing config if present, 0 will$\n"
|
||||
FileWrite $0 "$\t$\t$\treplace existing config with a default config. Default$\n"
|
||||
FileWrite $0 "$\t$\t$\tis 1. If this is set to 1, values passed in$\n"
|
||||
FileWrite $0 "$\t$\t$\t/minion-name and /master will be ignored$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/S$\t$\t$\tInstall Salt silently$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/?$\t$\t$\tDisplay this help screen$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "-------------------------------------------------------------------------------$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "Examples:$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "${OutFile} /S$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "${OutFile} /S /minion-name=myminion /master=master.mydomain.com /start-minion-delayed$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "===============================================================================$\n"
|
||||
FileWrite $0 "Press Enter to continue..."
|
||||
System::Free $0
|
||||
System::Free $1
|
||||
System::Call 'kernel32::FreeConsole()'
|
||||
Abort
|
||||
display_help_not_found:
|
||||
|
||||
# Set default value for Use Existing Config
|
||||
StrCpy $UseExistingConfig_State 1
|
||||
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
@ -930,19 +1250,31 @@ Function parseCommandLineSwitches
|
||||
start_minion_delayed_not_found:
|
||||
|
||||
# Minion Config: Master IP/Name
|
||||
# If setting master, we don't want to use existing config
|
||||
${GetOptions} $R0 "/master=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
StrCpy $MasterHost_State $R1
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
${ElseIf} $MasterHost_State == ""
|
||||
StrCpy $MasterHost_State "salt"
|
||||
${EndIf}
|
||||
|
||||
# Minion Config: Minion ID
|
||||
# If setting minion id, we don't want to use existing config
|
||||
${GetOptions} $R0 "/minion-name=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
StrCpy $MinionName_State $R1
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
${ElseIf} $MinionName_State == ""
|
||||
StrCpy $MinionName_State "hostname"
|
||||
${EndIf}
|
||||
|
||||
# Use Existing Config
|
||||
# Overrides above settings with user passed settings
|
||||
${GetOptions} $R0 "/use-existing-config=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
# Use Existing Config was passed something, set it
|
||||
StrCpy $UseExistingConfig_State $R1
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
@ -12,7 +12,7 @@ found by reading the salt documentation:
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.stringutils
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
@ -31,13 +31,13 @@ class PublisherACL(object):
|
||||
Takes a username as a string and returns a boolean. True indicates that
|
||||
the provided user has been blacklisted
|
||||
'''
|
||||
return not salt.utils.check_whitelist_blacklist(user, blacklist=self.blacklist.get('users', []))
|
||||
return not salt.utils.stringutils.check_whitelist_blacklist(user, blacklist=self.blacklist.get('users', []))
|
||||
|
||||
def cmd_is_blacklisted(self, cmd):
|
||||
# If this is a regular command, it is a single function
|
||||
if isinstance(cmd, six.string_types):
|
||||
cmd = [cmd]
|
||||
for fun in cmd:
|
||||
if not salt.utils.check_whitelist_blacklist(fun, blacklist=self.blacklist.get('modules', [])):
|
||||
if not salt.utils.stringutils.check_whitelist_blacklist(fun, blacklist=self.blacklist.get('modules', [])):
|
||||
return True
|
||||
return False
|
||||
|
@ -28,11 +28,13 @@ from salt.ext.six.moves import input
|
||||
import salt.config
|
||||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.dictupdate
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.utils.user
|
||||
import salt.utils.versions
|
||||
import salt.utils.zeromq
|
||||
import salt.payload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -87,9 +89,10 @@ class LoadAuth(object):
|
||||
fstr = '{0}.auth'.format(load['eauth'])
|
||||
if fstr not in self.auth:
|
||||
return False
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
fcall = salt.utils.args.format_call(
|
||||
self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
try:
|
||||
if 'kwargs' in fcall:
|
||||
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
|
||||
@ -133,9 +136,10 @@ class LoadAuth(object):
|
||||
fstr = '{0}.acl'.format(mod)
|
||||
if fstr not in self.auth:
|
||||
return None
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
fcall = salt.utils.args.format_call(
|
||||
self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
try:
|
||||
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
|
||||
except Exception as e:
|
||||
@ -168,9 +172,10 @@ class LoadAuth(object):
|
||||
fstr = '{0}.groups'.format(load['eauth'])
|
||||
if fstr not in self.auth:
|
||||
return False
|
||||
fcall = salt.utils.format_call(self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
fcall = salt.utils.args.format_call(
|
||||
self.auth[fstr],
|
||||
load,
|
||||
expected_extra_kws=AUTH_INTERNAL_KEYWORDS)
|
||||
try:
|
||||
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
|
||||
except IndexError:
|
||||
@ -333,7 +338,7 @@ class LoadAuth(object):
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
if auth_key != key[salt.utils.user.get_user()]:
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return True
|
||||
@ -369,46 +374,13 @@ class LoadAuth(object):
|
||||
eauth_config = self.opts['external_auth'][eauth]
|
||||
if not groups:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
if group_config.rstrip('%') in groups:
|
||||
group_auth_match = True
|
||||
break
|
||||
# If a group_auth_match is set it means only that we have a
|
||||
# user which matches at least one or more of the groups defined
|
||||
# in the configuration file.
|
||||
|
||||
external_auth_in_db = False
|
||||
for entry in eauth_config:
|
||||
if entry.startswith('^'):
|
||||
external_auth_in_db = True
|
||||
break
|
||||
|
||||
# If neither a catchall, a named membership or a group
|
||||
# membership is found, there is no need to continue. Simply
|
||||
# deny the user access.
|
||||
if not ((name in eauth_config) |
|
||||
('*' in eauth_config) |
|
||||
group_auth_match | external_auth_in_db):
|
||||
# Auth successful, but no matching user found in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
# We now have an authenticated session and it is time to determine
|
||||
# what the user has access to.
|
||||
auth_list = []
|
||||
if name in eauth_config:
|
||||
auth_list = eauth_config[name]
|
||||
elif '*' in eauth_config:
|
||||
auth_list = eauth_config['*']
|
||||
if group_auth_match:
|
||||
auth_list = self.ckminions.fill_auth_list_from_groups(
|
||||
eauth_config,
|
||||
groups,
|
||||
auth_list)
|
||||
auth_list = self.ckminions.fill_auth_list(
|
||||
eauth_config,
|
||||
name,
|
||||
groups)
|
||||
|
||||
auth_list = self.__process_acl(load, auth_list)
|
||||
|
||||
@ -685,7 +657,7 @@ class Resolver(object):
|
||||
|
||||
def _send_token_request(self, load):
|
||||
if self.opts['transport'] in ('zeromq', 'tcp'):
|
||||
master_uri = 'tcp://' + salt.utils.ip_bracket(self.opts['interface']) + \
|
||||
master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \
|
||||
':' + str(self.opts['ret_port'])
|
||||
channel = salt.transport.client.ReqChannel.factory(self.opts,
|
||||
crypt='clear',
|
||||
@ -728,7 +700,7 @@ class Resolver(object):
|
||||
|
||||
# Use current user if empty
|
||||
if 'username' in ret and not ret['username']:
|
||||
ret['username'] = salt.utils.get_user()
|
||||
ret['username'] = salt.utils.user.get_user()
|
||||
|
||||
return ret
|
||||
|
||||
@ -799,7 +771,7 @@ class AuthUser(object):
|
||||
Returns True if the user is the same user as the one running
|
||||
this process and False if not.
|
||||
'''
|
||||
return self.user == salt.utils.get_user()
|
||||
return self.user == salt.utils.user.get_user()
|
||||
|
||||
def sudo_name(self):
|
||||
'''
|
||||
|
@ -110,6 +110,10 @@ class _LDAPConnection(object):
|
||||
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
|
||||
|
||||
if not anonymous:
|
||||
if self.bindpw is None or len(self.bindpw) < 1:
|
||||
raise CommandExecutionError(
|
||||
'LDAP bind password is not set: password cannot be empty if auth.ldap.anonymous is False'
|
||||
)
|
||||
self.ldap.simple_bind_s(self.binddn, self.bindpw)
|
||||
except Exception as ldap_error:
|
||||
raise CommandExecutionError(
|
||||
|
@ -42,7 +42,7 @@ from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
|
||||
from ctypes.util import find_library
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils # Can be removed once get_group_list is moved
|
||||
import salt.utils.user
|
||||
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -214,4 +214,4 @@ def groups(username, *args, **kwargs):
|
||||
|
||||
Uses system groups
|
||||
'''
|
||||
return salt.utils.get_group_list(username)
|
||||
return salt.utils.user.get_group_list(username)
|
||||
|
@ -10,7 +10,7 @@ import re
|
||||
|
||||
# Import Salt libs
|
||||
import salt.loader
|
||||
import salt.utils
|
||||
import salt.utils.event
|
||||
import salt.utils.minion
|
||||
from salt.ext.six.moves import map
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
@ -10,14 +10,19 @@ Beacon to fire events at failed login of users
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'btmp'
|
||||
BTMP = '/var/log/btmp'
|
||||
@ -37,6 +42,15 @@ FIELDS = [
|
||||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'btmp.loc'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(BTMP):
|
||||
@ -44,6 +58,20 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
@ -60,6 +88,45 @@ def validate(config):
|
||||
if not isinstance(config, list):
|
||||
return False, ('Configuration for btmp beacon must '
|
||||
'be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
@ -72,8 +139,40 @@ def beacon(config):
|
||||
|
||||
beacons:
|
||||
btmp: []
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(BTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
@ -83,6 +182,7 @@ def beacon(config):
|
||||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
@ -91,7 +191,30 @@ def beacon(config):
|
||||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
|
||||
|
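The beacon() loop above reads fixed-size records from the btmp file, resuming from the offset saved in __context__[LOC_KEY]. A self-contained sketch of that record loop follows; the format string is an assumed stand-in for the module-level FMT constant (it matches the 384-byte Linux utmp record layout), not a quote of the changeset:

.. code-block:: python

    import struct

    FMT = b'hi32s4s32s256shhiii4i20x'   # assumed utmp-style record layout
    SIZE = struct.calcsize(FMT)

    def read_records(path):
        records = []
        with open(path, 'rb') as fp_:
            while True:
                raw = fp_.read(SIZE)
                if len(raw) != SIZE:    # short read: end of file reached
                    return records
                records.append(struct.unpack(FMT, raw))

    # records = read_records('/var/log/btmp')  # needs read access to the log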
@ -23,7 +23,9 @@ import re
|
||||
|
||||
# Import salt libs
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
|
@ -7,7 +7,7 @@ A simple beacon to watch journald for specific entries
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.data
|
||||
import salt.utils.locales
|
||||
import salt.ext.six
|
||||
from salt.ext.six.moves import map
|
||||
@ -99,7 +99,7 @@ def beacon(config):
|
||||
n_flag += 1
|
||||
if n_flag == len(_config['services'][name]):
|
||||
# Match!
|
||||
sub = salt.utils.simple_types_filter(cur)
|
||||
sub = salt.utils.data.simple_types_filter(cur)
|
||||
sub.update({'tag': name})
|
||||
ret.append(sub)
|
||||
return ret
|
||||
|
@ -8,7 +8,6 @@ from __future__ import absolute_import
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.path
|
||||
import salt.utils.vt
|
||||
|
||||
|
@ -10,14 +10,19 @@ Beacon to fire events at login of users as registered in the wtmp file
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'wtmp'
|
||||
WTMP = '/var/log/wtmp'
|
||||
@ -37,9 +42,15 @@ FIELDS = [
|
||||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'wtmp.loc'
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(WTMP):
|
||||
@ -47,6 +58,20 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
@ -62,6 +87,44 @@ def validate(config):
|
||||
# Configuration for wtmp beacon should be a list of dicts
|
||||
if not isinstance(config, list):
|
||||
return False, ('Configuration for wtmp beacon must be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
@ -74,8 +137,40 @@ def beacon(config):
|
||||
|
||||
beacons:
|
||||
wtmp: []
|
||||
'''
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(WTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
@ -85,6 +180,7 @@ def beacon(config):
|
||||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
@ -93,7 +189,30 @@ def beacon(config):
|
||||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
|
||||
2 salt/cache/__init__.py vendored
@ -73,7 +73,7 @@ class Cache(object):
            self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
        else:
            self.cachedir = cachedir
        self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
        self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
        self.serial = Serial(opts)
        self._modules = None
        self._kwargs = kwargs
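For context, why the fallback in the hunk above changed: with the old expression a missing 'cache' option made self.driver the entire defaults dictionary instead of the default driver name. The values below are illustrative, not the real DEFAULT_MASTER_OPTS:

.. code-block:: python

    DEFAULT_MASTER_OPTS = {'cache': 'localfs', 'timeout': 5}  # illustrative subset
    opts = {}  # no explicit 'cache' option configured

    old_driver = opts.get('cache', DEFAULT_MASTER_OPTS)           # whole dict (wrong)
    new_driver = opts.get('cache', DEFAULT_MASTER_OPTS['cache'])  # 'localfs'
    print(old_driver, new_driver)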
1 salt/cache/localfs.py vendored
@ -18,7 +18,6 @@ import shutil
import tempfile

from salt.exceptions import SaltCacheError
import salt.utils
import salt.utils.atomicfile
import salt.utils.files

21 salt/cache/redis_cache.py vendored
@ -481,18 +481,17 @@ def list_(bank):
    Lists entries stored in the specified bank.
    '''
    redis_server = _get_redis_server()
    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
    bank_keys = None
    bank_redis_key = _get_bank_redis_key(bank)
    try:
        bank_keys = redis_server.smembers(bank_keys_redis_key)
        banks = redis_server.smembers(bank_redis_key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                       rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    if not bank_keys:
    if not banks:
        return []
    return list(bank_keys)
    return list(banks)


def contains(bank, key):
@ -500,15 +499,11 @@ def contains(bank, key):
    Checks if the specified bank contains the specified key.
    '''
    redis_server = _get_redis_server()
    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
    bank_keys = None
    bank_redis_key = _get_bank_redis_key(bank)
    try:
        bank_keys = redis_server.smembers(bank_keys_redis_key)
        return redis_server.sismember(bank_redis_key, key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                           rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    if not bank_keys:
        return False
    return key in bank_keys
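For context, a hedged sketch of the Redis layout that the corrected list_() and contains() above rely on: each bank is a Redis SET whose members are the keys stored in that bank. The key prefix below is an illustrative stand-in for the module's _get_bank_redis_key(), and a local Redis server is assumed:

.. code-block:: python

    import redis  # assumes the redis-py client and a server on localhost:6379

    def bank_redis_key(bank):
        return '$BANK_' + bank  # illustrative prefix only

    conn = redis.StrictRedis(host='localhost', port=6379)
    key = bank_redis_key('minions/minion1')

    conn.sadd(key, 'data', 'mine')        # storing adds members to the bank set
    print(sorted(conn.smembers(key)))     # list_()    -> members of the set
    print(conn.sismember(key, 'data'))    # contains() -> membership test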
@ -11,7 +11,7 @@ import copy
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils # Can be removed once print_cli is moved
|
||||
import salt.utils.stringutils
|
||||
import salt.client
|
||||
import salt.output
|
||||
import salt.exceptions
|
||||
@ -73,7 +73,7 @@ class Batch(object):
|
||||
m = next(six.iterkeys(ret))
|
||||
except StopIteration:
|
||||
if not self.quiet:
|
||||
salt.utils.print_cli('No minions matched the target.')
|
||||
salt.utils.stringutils.print_cli('No minions matched the target.')
|
||||
break
|
||||
if m is not None:
|
||||
fret.add(m)
|
||||
@ -95,7 +95,7 @@ class Batch(object):
|
||||
return int(self.opts['batch'])
|
||||
except ValueError:
|
||||
if not self.quiet:
|
||||
salt.utils.print_cli('Invalid batch data sent: {0}\nData must be in the '
|
||||
salt.utils.stringutils.print_cli('Invalid batch data sent: {0}\nData must be in the '
|
||||
'form of %10, 10% or 3'.format(self.opts['batch']))
|
||||
|
||||
def __update_wait(self, wait):
|
||||
@ -143,10 +143,11 @@ class Batch(object):
|
||||
# sure that the main while loop finishes even with unresp minions
|
||||
minion_tracker = {}
|
||||
|
||||
# We already know some minions didn't respond to the ping, so inform
|
||||
# the user we won't be attempting to run a job on them
|
||||
for down_minion in self.down_minions:
|
||||
salt.utils.print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))
|
||||
if not self.quiet:
|
||||
# We already know some minions didn't respond to the ping, so inform
|
||||
# the user we won't be attempting to run a job on them
|
||||
for down_minion in self.down_minions:
|
||||
salt.utils.stringutils.print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))
|
||||
|
||||
# Iterate while we still have things to execute
|
||||
while len(ret) < len(self.minions):
|
||||
@ -171,7 +172,7 @@ class Batch(object):
|
||||
|
||||
if next_:
|
||||
if not self.quiet:
|
||||
salt.utils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
|
||||
salt.utils.stringutils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
|
||||
# create a new iterator for this batch of minions
|
||||
new_iter = self.local.cmd_iter_no_block(
|
||||
*args,
|
||||
@ -218,14 +219,14 @@ class Batch(object):
|
||||
if part['data']['id'] in minion_tracker[queue]['minions']:
|
||||
minion_tracker[queue]['minions'].remove(part['data']['id'])
|
||||
else:
|
||||
salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id']))
|
||||
salt.utils.stringutils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id']))
|
||||
else:
|
||||
parts.update(part)
|
||||
for id in part:
|
||||
if id in minion_tracker[queue]['minions']:
|
||||
minion_tracker[queue]['minions'].remove(id)
|
||||
else:
|
||||
salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id))
|
||||
salt.utils.stringutils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id))
|
||||
except StopIteration:
|
||||
# if an iterator is done:
|
||||
# - set it to inactive
|
||||
|
@ -20,12 +20,13 @@ import salt.minion
|
||||
import salt.output
|
||||
import salt.payload
|
||||
import salt.transport
|
||||
import salt.utils # Can be removed once print_cli, activate_profile, and output_profile are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.files
|
||||
import salt.utils.jid
|
||||
import salt.utils.kinds as kinds
|
||||
import salt.utils.minion
|
||||
import salt.utils.profile
|
||||
import salt.utils.stringutils
|
||||
import salt.defaults.exitcodes
|
||||
from salt.cli import daemons
|
||||
from salt.log import LOG_LEVELS
|
||||
@ -113,7 +114,7 @@ class BaseCaller(object):
|
||||
docs[name] = func.__doc__
|
||||
for name in sorted(docs):
|
||||
if name.startswith(self.opts.get('fun', '')):
|
||||
salt.utils.print_cli('{0}:\n{1}\n'.format(name, docs[name]))
|
||||
salt.utils.stringutils.print_cli('{0}:\n{1}\n'.format(name, docs[name]))
|
||||
|
||||
def print_grains(self):
|
||||
'''
|
||||
@ -128,11 +129,11 @@ class BaseCaller(object):
|
||||
'''
|
||||
profiling_enabled = self.opts.get('profiling_enabled', False)
|
||||
try:
|
||||
pr = salt.utils.activate_profile(profiling_enabled)
|
||||
pr = salt.utils.profile.activate_profile(profiling_enabled)
|
||||
try:
|
||||
ret = self.call()
|
||||
finally:
|
||||
salt.utils.output_profile(
|
||||
salt.utils.profile.output_profile(
|
||||
pr,
|
||||
stats_path=self.opts.get('profiling_path', '/tmp/stats'),
|
||||
stop=True)
|
||||
@ -209,7 +210,7 @@ class BaseCaller(object):
|
||||
ret['return'] = func(*args, **kwargs)
|
||||
except TypeError as exc:
|
||||
sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc))
|
||||
salt.utils.print_cli(func.__doc__)
|
||||
salt.utils.stringutils.print_cli(func.__doc__)
|
||||
active_level = LOG_LEVELS.get(
|
||||
self.opts['log_level'].lower(), logging.ERROR)
|
||||
if active_level <= logging.DEBUG:
|
||||
|
@ -19,7 +19,6 @@ import sys
|
||||
# Import salt libs
|
||||
import salt.client
|
||||
import salt.output
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.itertools
|
||||
@ -127,9 +126,10 @@ class SaltCP(object):
|
||||
if os.path.isfile(fn_):
|
||||
files.update(self._file_dict(fn_))
|
||||
elif os.path.isdir(fn_):
|
||||
salt.utils.print_cli(fn_ + ' is a directory, only files are supported '
|
||||
'in non-chunked mode. Use "--chunked" command '
|
||||
'line argument.')
|
||||
salt.utils.stringutils.print_cli(
|
||||
fn_ + ' is a directory, only files are supported '
|
||||
'in non-chunked mode. Use "--chunked" command '
|
||||
'line argument.')
|
||||
sys.exit(1)
|
||||
return files
|
||||
|
||||
|
@ -44,7 +44,7 @@ from salt.utils import migrations
|
||||
import salt.utils.kinds as kinds
|
||||
|
||||
try:
|
||||
from salt.utils import ip_bracket
|
||||
from salt.utils.zeromq import ip_bracket
|
||||
import salt.utils.parsers
|
||||
from salt.utils.verify import check_user, verify_env, verify_socket
|
||||
except ImportError as exc:
|
||||
|
@ -2,8 +2,8 @@
|
||||
from __future__ import print_function
|
||||
from __future__ import absolute_import
|
||||
|
||||
import salt.utils # Can be removed once activate_profile and output_profile are moved
|
||||
import salt.utils.parsers
|
||||
import salt.utils.profile
|
||||
from salt.utils.verify import check_user, verify_log
|
||||
from salt.exceptions import SaltClientError
|
||||
import salt.defaults.exitcodes # pylint: disable=W0611
|
||||
@ -35,7 +35,7 @@ class SaltRun(salt.utils.parsers.SaltRunOptionParser):
|
||||
# someone tries to use the runners via the python API
|
||||
try:
|
||||
if check_user(self.config['user']):
|
||||
pr = salt.utils.activate_profile(profiling_enabled)
|
||||
pr = salt.utils.profile.activate_profile(profiling_enabled)
|
||||
try:
|
||||
ret = runner.run()
|
||||
# In older versions ret['data']['retcode'] was used
|
||||
@ -49,7 +49,7 @@ class SaltRun(salt.utils.parsers.SaltRunOptionParser):
|
||||
elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
|
||||
self.exit(ret['data']['retcode'])
|
||||
finally:
|
||||
salt.utils.output_profile(
|
||||
salt.utils.profile.output_profile(
|
||||
pr,
|
||||
stats_path=self.options.profiling_path,
|
||||
stop=True)
|
||||
|
@ -7,8 +7,9 @@ sys.modules['pkg_resources'] = None
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils # Can be removed once print_cli is moved
|
||||
import salt.utils.job
|
||||
import salt.utils.parsers
|
||||
import salt.utils.stringutils
|
||||
from salt.utils.args import yamlify_arg
|
||||
from salt.utils.verify import verify_log
|
||||
from salt.exceptions import (
|
||||
@ -93,7 +94,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
|
||||
# potentially switch to batch execution
|
||||
if self.options.batch_safe_limit > 1:
|
||||
if len(self._preview_target()) >= self.options.batch_safe_limit:
|
||||
salt.utils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
|
||||
salt.utils.stringutils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
|
||||
self.options.batch = self.options.batch_safe_size
|
||||
self._run_batch()
|
||||
return
|
||||
@ -140,7 +141,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
|
||||
|
||||
if self.config['async']:
|
||||
jid = self.local_client.cmd_async(**kwargs)
|
||||
salt.utils.print_cli('Executed command with job ID: {0}'.format(jid))
|
||||
salt.utils.stringutils.print_cli('Executed command with job ID: {0}'.format(jid))
|
||||
return
|
||||
|
||||
# local will be None when there was an error
|
||||
@ -279,12 +280,12 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
|
||||
|
||||
def _print_errors_summary(self, errors):
|
||||
if errors:
|
||||
salt.utils.print_cli('\n')
|
||||
salt.utils.print_cli('---------------------------')
|
||||
salt.utils.print_cli('Errors')
|
||||
salt.utils.print_cli('---------------------------')
|
||||
salt.utils.stringutils.print_cli('\n')
|
||||
salt.utils.stringutils.print_cli('---------------------------')
|
||||
salt.utils.stringutils.print_cli('Errors')
|
||||
salt.utils.stringutils.print_cli('---------------------------')
|
||||
for error in errors:
|
||||
salt.utils.print_cli(self._format_error(error))
|
||||
salt.utils.stringutils.print_cli(self._format_error(error))
|
||||
|
||||
def _print_returns_summary(self, ret):
|
||||
'''
|
||||
@ -314,22 +315,22 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
|
||||
return_counter += 1
|
||||
if self._get_retcode(ret[each_minion]):
|
||||
failed_minions.append(each_minion)
|
||||
salt.utils.print_cli('\n')
|
||||
salt.utils.print_cli('-------------------------------------------')
|
||||
salt.utils.print_cli('Summary')
|
||||
salt.utils.print_cli('-------------------------------------------')
|
||||
salt.utils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
|
||||
salt.utils.print_cli('# of minions returned: {0}'.format(return_counter))
|
||||
salt.utils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))
|
||||
salt.utils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
|
||||
salt.utils.stringutils.print_cli('\n')
|
||||
salt.utils.stringutils.print_cli('-------------------------------------------')
|
||||
salt.utils.stringutils.print_cli('Summary')
|
||||
salt.utils.stringutils.print_cli('-------------------------------------------')
|
||||
salt.utils.stringutils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
|
||||
salt.utils.stringutils.print_cli('# of minions returned: {0}'.format(return_counter))
|
||||
salt.utils.stringutils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))
|
||||
salt.utils.stringutils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
|
||||
if self.options.verbose:
|
||||
if not_connected_minions:
|
||||
salt.utils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
|
||||
salt.utils.stringutils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
|
||||
if not_response_minions:
|
||||
salt.utils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
|
||||
salt.utils.stringutils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
|
||||
if failed_minions:
|
||||
salt.utils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
|
||||
salt.utils.print_cli('-------------------------------------------')
|
||||
salt.utils.stringutils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
|
||||
salt.utils.stringutils.print_cli('-------------------------------------------')
|
||||
|
||||
def _progress_end(self, out):
|
||||
import salt.output
|
||||
@ -421,6 +422,6 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
|
||||
salt.output.display_output({fun: docs[fun]}, 'nested', self.config)
|
||||
else:
|
||||
for fun in sorted(docs):
|
||||
salt.utils.print_cli('{0}:'.format(fun))
|
||||
salt.utils.print_cli(docs[fun])
|
||||
salt.utils.print_cli('')
|
||||
salt.utils.stringutils.print_cli('{0}:'.format(fun))
|
||||
salt.utils.stringutils.print_cli(docs[fun])
|
||||
salt.utils.stringutils.print_cli('')
|
||||
|
@ -32,15 +32,16 @@ import salt.cache
|
||||
import salt.payload
|
||||
import salt.transport
|
||||
import salt.loader
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.event
|
||||
import salt.utils.files
|
||||
import salt.utils.jid
|
||||
import salt.utils.minions
|
||||
import salt.utils.platform
|
||||
import salt.utils.user
|
||||
import salt.utils.verify
|
||||
import salt.utils.versions
|
||||
import salt.utils.jid
|
||||
import salt.utils.zeromq
|
||||
import salt.syspaths as syspaths
|
||||
from salt.exceptions import (
|
||||
EauthAuthenticationError, SaltInvocationError, SaltReqTimeoutError,
|
||||
@ -157,7 +158,7 @@ class LocalClient(object):
|
||||
)
|
||||
self.opts = salt.config.client_config(c_path)
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.salt_user = salt.utils.get_specific_user()
|
||||
self.salt_user = salt.utils.user.get_specific_user()
|
||||
self.skip_perm_errors = skip_perm_errors
|
||||
self.key = self.__read_master_key()
|
||||
self.auto_reconnect = auto_reconnect
|
||||
@ -844,6 +845,10 @@ class LocalClient(object):
|
||||
The function signature is the same as :py:meth:`cmd` with the
|
||||
following exceptions.
|
||||
|
||||
Normally :py:meth:`cmd_iter` does not yield results for minions that
|
||||
are not connected. If you want it to return results for disconnected
|
||||
minions set `expect_minions=True` in `kwargs`.
|
||||
|
||||
:return: A generator yielding the individual minion returns
|
||||
|
||||
.. code-block:: python
|
||||
@ -1786,7 +1791,7 @@ class LocalClient(object):
|
||||
timeout,
|
||||
**kwargs)
|
||||
|
||||
master_uri = u'tcp://' + salt.utils.ip_bracket(self.opts[u'interface']) + \
|
||||
master_uri = u'tcp://' + salt.utils.zeromq.ip_bracket(self.opts[u'interface']) + \
|
||||
u':' + str(self.opts[u'ret_port'])
|
||||
channel = salt.transport.Channel.factory(self.opts,
|
||||
crypt=u'clear',
|
||||
@ -1894,7 +1899,7 @@ class LocalClient(object):
|
||||
timeout,
|
||||
**kwargs)
|
||||
|
||||
master_uri = u'tcp://' + salt.utils.ip_bracket(self.opts[u'interface']) + \
|
||||
master_uri = u'tcp://' + salt.utils.zeromq.ip_bracket(self.opts[u'interface']) + \
|
||||
u':' + str(self.opts[u'ret_port'])
|
||||
channel = salt.transport.client.AsyncReqChannel.factory(self.opts,
|
||||
io_loop=io_loop,
|
||||
|
@ -16,7 +16,6 @@ import copy as pycopy
|
||||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
import salt.minion
|
||||
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.doc
|
||||
import salt.utils.error
|
||||
@ -27,6 +26,7 @@ import salt.utils.lazy
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.state
|
||||
import salt.utils.user
|
||||
import salt.utils.versions
|
||||
import salt.transport
|
||||
import salt.log.setup
|
||||
@ -96,7 +96,7 @@ class ClientFuncsDict(collections.MutableMapping):
|
||||
|
||||
async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid'))
|
||||
|
||||
user = salt.utils.get_specific_user()
|
||||
user = salt.utils.user.get_specific_user()
|
||||
return self.client._proc_function(
|
||||
key,
|
||||
low,
|
||||
@ -364,29 +364,19 @@ class SyncClientMixin(object):
|
||||
# packed into the top level object. The plan is to move away from
|
||||
# that since the caller knows what is an arg vs a kwarg, but while
|
||||
# we make the transition we will load "kwargs" using format_call if
|
||||
# there are no kwargs in the low object passed in
|
||||
f_call = None
|
||||
if u'arg' not in low:
|
||||
f_call = salt.utils.format_call(
|
||||
# there are no kwargs in the low object passed in.
|
||||
|
||||
if u'arg' in low and u'kwarg' in low:
|
||||
args = low[u'arg']
|
||||
kwargs = low[u'kwarg']
|
||||
else:
|
||||
f_call = salt.utils.args.format_call(
|
||||
self.functions[fun],
|
||||
low,
|
||||
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
|
||||
)
|
||||
args = f_call.get(u'args', ())
|
||||
else:
|
||||
args = low[u'arg']
|
||||
|
||||
if u'kwarg' not in low:
|
||||
log.critical(
|
||||
u'kwargs must be passed inside the low data within the '
|
||||
u'\'kwarg\' key. See usage of '
|
||||
u'salt.utils.args.parse_input() and '
|
||||
u'salt.minion.load_args_and_kwargs() elsewhere in the '
|
||||
u'codebase.'
|
||||
)
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = low[u'kwarg']
|
||||
kwargs = f_call.get(u'kwargs', {})
|
||||
|
||||
# Update the event data with loaded args and kwargs
|
||||
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
|
||||
@ -478,7 +468,7 @@ class AsyncClientMixin(object):
|
||||
# Shutdown the multiprocessing before daemonizing
|
||||
salt.log.setup.shutdown_multiprocessing_logging()
|
||||
|
||||
salt.utils.daemonize()
|
||||
salt.utils.process.daemonize()
|
||||
|
||||
# Reconfigure multiprocessing logging after daemonizing
|
||||
salt.log.setup.setup_multiprocessing_logging()
|
||||
|
@ -36,11 +36,12 @@ import salt.minion
|
||||
import salt.roster
|
||||
import salt.serializers.yaml
|
||||
import salt.state
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.atomicfile
|
||||
import salt.utils.event
|
||||
import salt.utils.files
|
||||
import salt.utils.hashutils
|
||||
import salt.utils.json
|
||||
import salt.utils.network
|
||||
import salt.utils.path
|
||||
import salt.utils.stringutils
|
||||
@ -496,7 +497,7 @@ class SSH(object):
|
||||
**target)
|
||||
stdout, stderr, retcode = single.cmd_block()
|
||||
try:
|
||||
data = salt.utils.find_json(stdout)
|
||||
data = salt.utils.json.find_json(stdout)
|
||||
return {host: data.get(u'local', data)}
|
||||
except Exception:
|
||||
if stderr:
|
||||
@ -524,7 +525,7 @@ class SSH(object):
|
||||
stdout, stderr, retcode = single.run()
|
||||
# This job is done, yield
|
||||
try:
|
||||
data = salt.utils.find_json(stdout)
|
||||
data = salt.utils.json.find_json(stdout)
|
||||
if len(data) < 2 and u'local' in data:
|
||||
ret[u'ret'] = data[u'local']
|
||||
else:
|
||||
@ -1507,7 +1508,7 @@ def mod_data(fsclient):
|
||||
if not os.path.isfile(mod_path):
|
||||
continue
|
||||
mods_data[os.path.basename(fn_)] = mod_path
|
||||
chunk = salt.utils.get_hash(mod_path)
|
||||
chunk = salt.utils.hashutils.get_hash(mod_path)
|
||||
ver_base += chunk
|
||||
if mods_data:
|
||||
if ref in ret:
|
||||
|
@ -15,7 +15,6 @@ import subprocess
|
||||
|
||||
# Import salt libs
|
||||
import salt.defaults.exitcodes
|
||||
import salt.utils
|
||||
import salt.utils.nb_popen
|
||||
import salt.utils.vt
|
||||
|
||||
|
@ -137,7 +137,7 @@ def need_deployment():
|
||||
sys.exit(EX_THIN_DEPLOY)
|
||||
|
||||
|
||||
# Adapted from salt.utils.get_hash()
|
||||
# Adapted from salt.utils.hashutils.get_hash()
|
||||
def get_hash(path, form=u'sha1', chunk_size=4096):
|
||||
'''
|
||||
Generate a hash digest string for a file.
|
||||
|
@ -13,7 +13,7 @@ import copy
|
||||
|
||||
# Import salt libs
|
||||
import salt.loader
|
||||
import salt.utils
|
||||
import salt.utils.data
|
||||
import salt.client.ssh
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -121,7 +121,7 @@ class FunctionWrapper(object):
|
||||
u'stderr': stderr,
|
||||
u'retcode': retcode}
|
||||
try:
|
||||
ret = json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
ret = json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
if len(ret) < 2 and u'local' in ret:
|
||||
ret = ret[u'local']
|
||||
ret = ret.get(u'return', {})
|
||||
|
@ -9,7 +9,8 @@ import re
|
||||
import os
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.data
|
||||
import salt.utils.files
|
||||
import salt.syspaths as syspaths
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -81,7 +82,7 @@ def manage_mode(mode):
|
||||
# config.manage_mode should no longer be invoked from the __salt__ dunder
|
||||
# in Salt code, this function is only being left here for backwards
|
||||
# compatibility.
|
||||
return salt.utils.normalize_mode(mode)
|
||||
return salt.utils.files.normalize_mode(mode)
|
||||
|
||||
|
||||
def valid_fileproto(uri):
|
||||
@ -215,16 +216,16 @@ def get(key, default=u''):
|
||||
|
||||
salt '*' config.get pkg:apache
|
||||
'''
|
||||
ret = salt.utils.traverse_dict_and_list(__opts__, key, u'_|-')
|
||||
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, u'_|-')
|
||||
if ret != u'_|-':
|
||||
return ret
|
||||
ret = salt.utils.traverse_dict_and_list(__grains__, key, u'_|-')
|
||||
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, u'_|-')
|
||||
if ret != u'_|-':
|
||||
return ret
|
||||
ret = salt.utils.traverse_dict_and_list(__pillar__, key, u'_|-')
|
||||
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, u'_|-')
|
||||
if ret != u'_|-':
|
||||
return ret
|
||||
ret = salt.utils.traverse_dict_and_list(__pillar__.get(u'master', {}), key, u'_|-')
|
||||
ret = salt.utils.data.traverse_dict_and_list(__pillar__.get(u'master', {}), key, u'_|-')
|
||||
if ret != u'_|-':
|
||||
return ret
|
||||
return default
|
||||
|
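For context, a minimal re-implementation sketch of the traversal that config.get performs above: walk a colon-delimited key through nested dicts and fall through to the next source when the '_|-' sentinel comes back. The real salt.utils.data.traverse_dict_and_list also descends into lists, which this sketch omits:

.. code-block:: python

    def traverse(data, key, default, delimiter=':'):
        ptr = data
        for step in key.split(delimiter):
            if isinstance(ptr, dict) and step in ptr:
                ptr = ptr[step]
            else:
                return default
        return ptr

    opts = {'pkg': {'apache': 'httpd'}}
    print(traverse(opts, 'pkg:apache', '_|-'))  # 'httpd'
    print(traverse(opts, 'pkg:nginx', '_|-'))   # '_|-' -> caller tries the next source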
@ -11,7 +11,7 @@ import math
|
||||
import json
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.data
|
||||
import salt.utils.dictupdate
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.exceptions import SaltException
|
||||
@ -75,10 +75,11 @@ def get(key, default=u'', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
|
||||
grains = __grains__
|
||||
else:
|
||||
grains = json.loads(json.dumps(__grains__))
|
||||
return salt.utils.traverse_dict_and_list(__grains__,
|
||||
key,
|
||||
default,
|
||||
delimiter)
|
||||
return salt.utils.data.traverse_dict_and_list(
|
||||
__grains__,
|
||||
key,
|
||||
default,
|
||||
delimiter)
|
||||
|
||||
|
||||
def has_value(key):
|
||||
@ -99,7 +100,9 @@ def has_value(key):
|
||||
|
||||
salt '*' grains.has_value pkg:apache
|
||||
'''
|
||||
return True if salt.utils.traverse_dict_and_list(__grains__, key, False) else False
|
||||
return True \
|
||||
if salt.utils.data.traverse_dict_and_list(__grains__, key, False) \
|
||||
else False
|
||||
|
||||
|
||||
def items(sanitize=False):
|
||||
@ -118,7 +121,7 @@ def items(sanitize=False):
|
||||
|
||||
salt '*' grains.items sanitize=True
|
||||
'''
|
||||
if salt.utils.is_true(sanitize):
|
||||
if salt.utils.data.is_true(sanitize):
|
||||
out = dict(__grains__)
|
||||
for key, func in six.iteritems(_SANITIZERS):
|
||||
if key in out:
|
||||
@ -151,7 +154,7 @@ def item(*args, **kwargs):
|
||||
ret[arg] = __grains__[arg]
|
||||
except KeyError:
|
||||
pass
|
||||
if salt.utils.is_true(kwargs.get(u'sanitize')):
|
||||
if salt.utils.data.is_true(kwargs.get(u'sanitize')):
|
||||
for arg, func in six.iteritems(_SANITIZERS):
|
||||
if arg in ret:
|
||||
ret[arg] = func(ret[arg])
|
||||
|
@ -9,7 +9,8 @@ import collections
|
||||
|
||||
# Import salt libs
|
||||
import salt.pillar
|
||||
import salt.utils
|
||||
import salt.utils.data
|
||||
import salt.utils.dictupdate
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
|
||||
|
||||
@ -51,15 +52,16 @@ def get(key, default=u'', merge=False, delimiter=DEFAULT_TARGET_DELIM):
|
||||
salt '*' pillar.get pkg:apache
|
||||
'''
|
||||
if merge:
|
||||
ret = salt.utils.traverse_dict_and_list(__pillar__, key, {}, delimiter)
|
||||
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, {}, delimiter)
|
||||
if isinstance(ret, collections.Mapping) and \
|
||||
isinstance(default, collections.Mapping):
|
||||
return salt.utils.dictupdate.update(default, ret)
|
||||
|
||||
return salt.utils.traverse_dict_and_list(__pillar__,
|
||||
key,
|
||||
default,
|
||||
delimiter)
|
||||
return salt.utils.data.traverse_dict_and_list(
|
||||
__pillar__,
|
||||
key,
|
||||
default,
|
||||
delimiter)
|
||||
|
||||
|
||||
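For context, when merge=True the pillar.get wrapper above deep-merges the looked-up pillar value into the supplied default via salt.utils.dictupdate.update(). A minimal sketch of that recursive merge follows; it is illustrative only, and the real helper also handles list merging:

.. code-block:: python

    def update(dest, upd):
        for key, val in upd.items():
            if isinstance(dest.get(key), dict) and isinstance(val, dict):
                update(dest[key], val)   # recurse into nested dicts
            else:
                dest[key] = val          # pillar value wins otherwise
        return dest

    default = {'ssh': {'port': 22, 'banner': False}}
    pillar_value = {'ssh': {'port': 2222}}
    print(update(default, pillar_value))
    # {'ssh': {'port': 2222, 'banner': False}}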
def item(*args):
|
||||
@ -126,7 +128,7 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
|
||||
|
||||
salt '*' pillar.keys web:sites
|
||||
'''
|
||||
ret = salt.utils.traverse_dict_and_list(
|
||||
ret = salt.utils.data.traverse_dict_and_list(
|
||||
__pillar__, key, KeyError, delimiter)
|
||||
|
||||
if ret is KeyError:
|
||||
|
@ -13,7 +13,9 @@ import logging
|
||||
# Import salt libs
|
||||
import salt.client.ssh.shell
|
||||
import salt.client.ssh.state
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.data
|
||||
import salt.utils.hashutils
|
||||
import salt.utils.thin
|
||||
import salt.roster
|
||||
import salt.state
|
||||
@ -100,7 +102,7 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
|
||||
__pillar__,
|
||||
st_kwargs[u'id_'],
|
||||
roster_grains)
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
|
||||
__opts__[u'thin_dir'],
|
||||
test,
|
||||
@ -125,7 +127,7 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
@ -177,7 +179,7 @@ def low(data, **kwargs):
|
||||
__pillar__,
|
||||
st_kwargs[u'id_'],
|
||||
roster_grains)
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
|
||||
__opts__[u'thin_dir'],
|
||||
trans_tar_sum,
|
||||
@ -201,7 +203,7 @@ def low(data, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
@ -250,7 +252,7 @@ def high(data, **kwargs):
|
||||
__pillar__,
|
||||
st_kwargs[u'id_'],
|
||||
roster_grains)
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
|
||||
__opts__[u'thin_dir'],
|
||||
trans_tar_sum,
|
||||
@ -274,7 +276,7 @@ def high(data, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
@ -353,7 +355,7 @@ def highstate(test=None, **kwargs):
|
||||
__pillar__,
|
||||
st_kwargs[u'id_'],
|
||||
roster_grains)
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
|
||||
__opts__[u'thin_dir'],
|
||||
test,
|
||||
@ -378,7 +380,7 @@ def highstate(test=None, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
@ -402,7 +404,7 @@ def top(topfn, test=None, **kwargs):
|
||||
__pillar__.update(kwargs.get(u'pillar', {}))
|
||||
st_kwargs = __salt__.kwargs
|
||||
__opts__[u'grains'] = __grains__
|
||||
if salt.utils.test_mode(test=test, **kwargs):
|
||||
if salt.utils.args.test_mode(test=test, **kwargs):
|
||||
__opts__[u'test'] = True
|
||||
else:
|
||||
__opts__[u'test'] = __opts__.get(u'test', None)
|
||||
@ -433,7 +435,7 @@ def top(topfn, test=None, **kwargs):
|
||||
__pillar__,
|
||||
st_kwargs[u'id_'],
|
||||
roster_grains)
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
|
||||
__opts__[u'thin_dir'],
|
||||
test,
|
||||
@ -458,7 +460,7 @@ def top(topfn, test=None, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
@ -519,7 +521,7 @@ def show_sls(mods, saltenv=u'base', test=None, **kwargs):
|
||||
__pillar__.update(kwargs.get(u'pillar', {}))
|
||||
__opts__[u'grains'] = __grains__
|
||||
opts = copy.copy(__opts__)
|
||||
if salt.utils.test_mode(test=test, **kwargs):
|
||||
if salt.utils.args.test_mode(test=test, **kwargs):
|
||||
opts[u'test'] = True
|
||||
else:
|
||||
opts[u'test'] = __opts__.get(u'test', None)
|
||||
@ -562,7 +564,7 @@ def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
|
||||
__opts__[u'grains'] = __grains__
|
||||
|
||||
opts = copy.copy(__opts__)
|
||||
if salt.utils.test_mode(test=test, **kwargs):
|
||||
if salt.utils.args.test_mode(test=test, **kwargs):
|
||||
opts[u'test'] = True
|
||||
else:
|
||||
opts[u'test'] = __opts__.get(u'test', None)
|
||||
@ -651,7 +653,7 @@ def single(fun, name, test=None, **kwargs):
|
||||
opts = copy.deepcopy(__opts__)
|
||||
|
||||
# Set test mode
|
||||
if salt.utils.test_mode(test=test, **kwargs):
|
||||
if salt.utils.args.test_mode(test=test, **kwargs):
|
||||
opts[u'test'] = True
|
||||
else:
|
||||
opts[u'test'] = __opts__.get(u'test', None)
|
||||
@ -695,7 +697,7 @@ def single(fun, name, test=None, **kwargs):
|
||||
roster_grains)
|
||||
|
||||
# Create a hash so we can verify the tar on the target system
|
||||
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
|
||||
|
||||
# We use state.pkg to execute the "state package"
|
||||
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
|
||||
@ -728,7 +730,7 @@ def single(fun, name, test=None, **kwargs):
|
||||
|
||||
# Read in the JSON data and return the data structure
|
||||
try:
|
||||
return json.loads(stdout, object_hook=salt.utils.decode_dict)
|
||||
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
|
||||
except Exception as e:
|
||||
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
|
||||
log.error(str(e))
|
||||
|
@ -29,10 +29,11 @@ from salt.exceptions import (
|
||||
import salt.config
|
||||
import salt.client
|
||||
import salt.loader
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.cloud
|
||||
import salt.utils.context
|
||||
import salt.utils.crypt
|
||||
import salt.utils.data
|
||||
import salt.utils.dictupdate
|
||||
import salt.utils.files
|
||||
import salt.syspaths
|
||||
@ -233,7 +234,7 @@ class CloudClient(object):
|
||||
if a.get('provider', '')]
|
||||
if providers:
|
||||
_providers = opts.get('providers', {})
|
||||
for provider in list(_providers):
|
||||
for provider in list(_providers).copy():
|
||||
if provider not in providers:
|
||||
_providers.pop(provider)
|
||||
return opts
|
||||
@ -243,7 +244,7 @@ class CloudClient(object):
|
||||
Pass the cloud function and low data structure to run
|
||||
'''
|
||||
l_fun = getattr(self, fun)
|
||||
f_call = salt.utils.format_call(l_fun, low)
|
||||
f_call = salt.utils.args.format_call(l_fun, low)
|
||||
return l_fun(*f_call.get('args', ()), **f_call.get('kwargs', {}))
|
||||
|
||||
def list_sizes(self, provider=None):
|
||||
@ -251,7 +252,7 @@ class CloudClient(object):
|
||||
List all available sizes in configured cloud systems
|
||||
'''
|
||||
mapper = salt.cloud.Map(self._opts_defaults())
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.size_list(provider)
|
||||
)
|
||||
|
||||
@ -260,7 +261,7 @@ class CloudClient(object):
|
||||
List all available images in configured cloud systems
|
||||
'''
|
||||
mapper = salt.cloud.Map(self._opts_defaults())
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.image_list(provider)
|
||||
)
|
||||
|
||||
@ -269,7 +270,7 @@ class CloudClient(object):
|
||||
List all available locations in configured cloud systems
|
||||
'''
|
||||
mapper = salt.cloud.Map(self._opts_defaults())
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.location_list(provider)
|
||||
)
|
||||
|
||||
@ -343,7 +344,7 @@ class CloudClient(object):
|
||||
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
|
||||
if isinstance(names, six.string_types):
|
||||
names = names.split(',')
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
|
||||
)
|
||||
|
||||
@ -357,7 +358,7 @@ class CloudClient(object):
|
||||
kwarg.update(kwargs)
|
||||
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
|
||||
dmap = mapper.map_data()
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.run_map(dmap)
|
||||
)
|
||||
|
||||
@ -368,7 +369,7 @@ class CloudClient(object):
|
||||
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
|
||||
if isinstance(names, six.string_types):
|
||||
names = names.split(',')
|
||||
return salt.utils.simple_types_filter(
|
||||
return salt.utils.data.simple_types_filter(
|
||||
mapper.destroy(names)
|
||||
)
|
||||
|
||||
@ -407,7 +408,7 @@ class CloudClient(object):
|
||||
vm_['profile'] = None
|
||||
vm_['provider'] = provider
|
||||
|
||||
ret[name] = salt.utils.simple_types_filter(
|
||||
ret[name] = salt.utils.data.simple_types_filter(
|
||||
mapper.create(vm_))
|
||||
return ret
|
||||
|
||||
@ -442,7 +443,7 @@ class CloudClient(object):
|
||||
extra_['provider'] = provider
|
||||
extra_['profile'] = None
|
||||
extra_['action'] = action
|
||||
ret[name] = salt.utils.simple_types_filter(
|
||||
ret[name] = salt.utils.data.simple_types_filter(
|
||||
mapper.extras(extra_)
|
||||
)
|
||||
return ret
|
||||
@ -1416,7 +1417,7 @@ class Cloud(object):
|
||||
if name in vms:
|
||||
prov = vms[name]['provider']
|
||||
driv = vms[name]['driver']
|
||||
msg = six.u('{0} already exists under {1}:{2}').format(
|
||||
msg = u'{0} already exists under {1}:{2}'.format(
|
||||
name, prov, driv
|
||||
)
|
||||
log.error(msg)
|
||||
@ -2098,7 +2099,7 @@ class Map(Cloud):
|
||||
master_temp_pub = salt.utils.files.mkstemp()
|
||||
with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
|
||||
mtp.write(pub)
|
||||
master_finger = salt.utils.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
|
||||
master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
|
||||
os.unlink(master_temp_pub)
|
||||
|
||||
if master_profile.get('make_minion', True) is True:
|
||||
@ -2183,7 +2184,7 @@ class Map(Cloud):
|
||||
# mitigate man-in-the-middle attacks
|
||||
master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
|
||||
if os.path.isfile(master_pub):
|
||||
master_finger = salt.utils.pem_finger(master_pub, sum_type=self.opts['hash_type'])
|
||||
master_finger = salt.utils.crypt.pem_finger(master_pub, sum_type=self.opts['hash_type'])
|
||||
|
||||
opts = self.opts.copy()
|
||||
if self.opts['parallel']:
|
||||
@ -2300,7 +2301,7 @@ def create_multiprocessing(parallel_data, queue=None):
|
||||
This function will be called from another process when running a map in
|
||||
parallel mode. The result from the create is always a json object.
|
||||
'''
|
||||
salt.utils.reinit_crypto()
|
||||
salt.utils.crypt.reinit_crypto()
|
||||
|
||||
parallel_data['opts']['output'] = 'json'
|
||||
cloud = Cloud(parallel_data['opts'])
|
||||
@ -2323,7 +2324,7 @@ def create_multiprocessing(parallel_data, queue=None):
|
||||
output.pop('deploy_kwargs', None)
|
||||
|
||||
return {
|
||||
parallel_data['name']: salt.utils.simple_types_filter(output)
|
||||
parallel_data['name']: salt.utils.data.simple_types_filter(output)
|
||||
}
|
||||
|
||||
|
||||
@ -2332,7 +2333,7 @@ def destroy_multiprocessing(parallel_data, queue=None):
|
||||
This function will be called from another process when running a map in
|
||||
parallel mode. The result from the destroy is always a json object.
|
||||
'''
|
||||
salt.utils.reinit_crypto()
|
||||
salt.utils.crypt.reinit_crypto()
|
||||
|
||||
parallel_data['opts']['output'] = 'json'
|
||||
clouds = salt.loader.clouds(parallel_data['opts'])
|
||||
@ -2359,7 +2360,7 @@ def destroy_multiprocessing(parallel_data, queue=None):
|
||||
return {parallel_data['name']: {'Error': str(exc)}}
|
||||
|
||||
return {
|
||||
parallel_data['name']: salt.utils.simple_types_filter(output)
|
||||
parallel_data['name']: salt.utils.data.simple_types_filter(output)
|
||||
}
|
||||
|
||||
|
||||
@ -2368,7 +2369,7 @@ def run_parallel_map_providers_query(data, queue=None):
|
||||
This function will be called from another process when building the
|
||||
providers map.
|
||||
'''
|
||||
salt.utils.reinit_crypto()
|
||||
salt.utils.crypt.reinit_crypto()
|
||||
|
||||
cloud = Cloud(data['opts'])
|
||||
try:
|
||||
@ -2382,7 +2383,7 @@ def run_parallel_map_providers_query(data, queue=None):
|
||||
return (
|
||||
data['alias'],
|
||||
data['driver'],
|
||||
salt.utils.simple_types_filter(
|
||||
salt.utils.data.simple_types_filter(
|
||||
cloud.clouds[data['fun']]()
|
||||
)
|
||||
)
|
||||
|
@ -21,13 +21,13 @@ from salt.ext.six.moves import input
|
||||
|
||||
# Import salt libs
|
||||
import salt.cloud
|
||||
import salt.utils.cloud
|
||||
import salt.config
|
||||
import salt.defaults.exitcodes
|
||||
import salt.output
|
||||
import salt.syspaths as syspaths
|
||||
import salt.utils
|
||||
import salt.utils.cloud
|
||||
import salt.utils.parsers
|
||||
import salt.utils.user
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||
from salt.utils.verify import check_user, verify_env, verify_files, verify_log
|
||||
|
||||
@ -48,7 +48,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
|
||||
|
||||
salt_master_user = self.config.get('user')
|
||||
if salt_master_user is None:
|
||||
salt_master_user = salt.utils.get_user()
|
||||
salt_master_user = salt.utils.user.get_user()
|
||||
|
||||
if not check_user(salt_master_user):
|
||||
self.error(
|
||||
|
@ -42,6 +42,7 @@ from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=i
|
||||
|
||||
# Import salt cloud libs
|
||||
import salt.utils.cloud
|
||||
import salt.utils.data
|
||||
import salt.config as config
|
||||
from salt.exceptions import (
|
||||
SaltCloudNotFound,
|
||||
@ -823,7 +824,7 @@ def query(params=None):
|
||||
|
||||
content = request.text
|
||||
|
||||
result = json.loads(content, object_hook=salt.utils.decode_dict)
|
||||
result = json.loads(content, object_hook=salt.utils.data.decode_dict)
|
||||
if 'Code' in result:
|
||||
raise SaltCloudSystemExit(
|
||||
pprint.pformat(result.get('Message', {}))
|
||||
|
@ -10,6 +10,7 @@ The Azure cloud module is used to control access to Microsoft Azure
|
||||
:depends:
|
||||
* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc5
|
||||
* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
|
||||
* `Microsoft Azure CLI <https://pypi.python.org/pypi/azure-cli>`_ >= 2.0.12
|
||||
:configuration:
|
||||
Required provider parameters:
|
||||
|
||||
@ -62,8 +63,8 @@ import yaml
|
||||
import collections
|
||||
import salt.cache
|
||||
import salt.config as config
|
||||
import salt.utils
|
||||
import salt.utils.cloud
|
||||
import salt.utils.data
|
||||
import salt.utils.files
|
||||
from salt.utils.versions import LooseVersion
|
||||
from salt.ext import six
|
||||
@ -146,7 +147,13 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
if get_dependencies() is False:
|
||||
return False
|
||||
return (
|
||||
False,
|
||||
'The following dependencies are required to use the AzureARM driver: '
|
||||
'Microsoft Azure SDK for Python >= 2.0rc5, '
|
||||
'Microsoft Azure Storage SDK for Python >= 0.32, '
|
||||
'Microsoft Azure CLI >= 2.0.12'
|
||||
)
|
||||
|
||||
global cache # pylint: disable=global-statement,invalid-name
|
||||
cache = salt.cache.Cache(__opts__)
|
||||
@ -401,8 +408,9 @@ def list_nodes(conn=None, call=None): # pylint: disable=unused-argument
|
||||
pass
|
||||
|
||||
for node in nodes:
|
||||
if not nodes[node]['resource_group'] == active_resource_group:
|
||||
continue
|
||||
if active_resource_group is not None:
|
||||
if nodes[node]['resource_group'] != active_resource_group:
|
||||
continue
|
||||
ret[node] = {'name': node}
|
||||
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
|
||||
ret[node][prop] = nodes[node].get(prop)
|
||||
@ -567,7 +575,7 @@ def show_instance(name, resource_group=None, call=None): # pylint: disable=unus
|
||||
data['resource_group'] = resource_group
|
||||
|
||||
__utils__['cloud.cache_node'](
|
||||
salt.utils.simple_types_filter(data),
|
||||
salt.utils.data.simple_types_filter(data),
|
||||
__active_provider_name__,
|
||||
__opts__
|
||||
)
|
||||
@ -659,11 +667,12 @@ def list_subnets(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
for subnet in subnets:
|
||||
ret[subnet.name] = make_safe(subnet)
|
||||
ret[subnet.name]['ip_configurations'] = {}
|
||||
for ip_ in subnet.ip_configurations:
|
||||
comps = ip_.id.split('/')
|
||||
name = comps[-1]
|
||||
ret[subnet.name]['ip_configurations'][name] = make_safe(ip_)
|
||||
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
|
||||
if subnet.ip_configurations:
|
||||
for ip_ in subnet.ip_configurations:
|
||||
comps = ip_.id.split('/')
|
||||
name = comps[-1]
|
||||
ret[subnet.name]['ip_configurations'][name] = make_safe(ip_)
|
||||
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
|
||||
ret[subnet.name]['resource_group'] = kwargs['resource_group']
|
||||
return ret
|
||||
|
||||
@ -1451,7 +1460,7 @@ def make_safe(data):
|
||||
'''
|
||||
Turn object data into something serializable
|
||||
'''
|
||||
return salt.utils.simple_types_filter(object_to_dict(data))
|
||||
return salt.utils.data.simple_types_filter(object_to_dict(data))
|
||||
|
||||
|
||||
def create_security_group(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
|
371 salt/cloud/clouds/clc.py Normal file
@ -0,0 +1,371 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
CenturyLink Cloud Module
========================
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
The CLC cloud module allows you to manage CLC via the CLC SDK.
|
||||
|
||||
:codeauthor: Stephan Looney <slooney@stephanlooney.com>
|
||||
|
||||
|
||||
Dependencies
|
||||
============
|
||||
|
||||
- clc-sdk Python Module
|
||||
- flask
|
||||
|
||||
CLC SDK
|
||||
-------
|
||||
|
||||
clc-sdk can be installed via pip:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install clc-sdk
|
||||
|
||||
.. note::
|
||||
For sdk reference see: https://github.com/CenturyLinkCloud/clc-python-sdk
|
||||
|
||||
Flask
-----
flask can be installed via pip:

.. code-block:: bash

    pip install flask

Configuration
|
||||
=============
|
||||
|
||||
To use this module, set up the clc-sdk, user, password, and key in the
|
||||
cloud configuration at
|
||||
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/clc.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-clc-config:
|
||||
driver: clc
|
||||
user: 'web-user'
|
||||
password: 'verybadpass'
|
||||
token: ''
|
||||
token_pass: ''
|
||||
accountalias: 'ACT'
|
||||
.. note::
|
||||
|
||||
The ``provider`` parameter in cloud provider configuration was renamed to ``driver``.
|
||||
This change was made to avoid confusion with the ``provider`` parameter that is
|
||||
used in cloud profile configuration. Cloud provider configuration now uses ``driver``
|
||||
to refer to the salt-cloud driver that provides the underlying functionality to
|
||||
connect to a cloud provider, while cloud profile configuration continues to use
|
||||
``provider`` to refer to the cloud provider configuration that you define.
|
||||
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import time
|
||||
import json
|
||||
# Import salt libs
|
||||
import importlib
|
||||
from salt.exceptions import SaltCloudSystemExit
|
||||
# Import salt cloud libs
|
||||
import salt.config as config
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Attempt to import clc-sdk lib
|
||||
try:
|
||||
# When running this on Linode's Ubuntu 16.x images, the following line is required for the clc sdk libraries to load
|
||||
importlib.import_module('clc')
|
||||
import clc
|
||||
HAS_CLC = True
|
||||
except ImportError:
|
||||
HAS_CLC = False
|
||||
# Disable InsecureRequestWarning generated on python > 2.6
|
||||
try:
|
||||
from requests.packages.urllib3 import disable_warnings # pylint: disable=no-name-in-module
|
||||
disable_warnings()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
__virtualname__ = 'clc'
|
||||
|
||||
|
||||
# Only load in this module if the CLC configurations are in place
|
||||
def __virtual__():
|
||||
'''
|
||||
Check for CLC configuration and if required libs are available.
|
||||
'''
|
||||
if get_configured_provider() is False or get_dependencies() is False:
|
||||
return False
|
||||
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def get_configured_provider():
|
||||
return config.is_provider_configured(
|
||||
__opts__,
|
||||
__active_provider_name__ or __virtualname__,
|
||||
('token', 'token_pass', 'user', 'password', )
|
||||
)
|
||||
|
||||
|
||||
def get_dependencies():
|
||||
'''
|
||||
Warn if dependencies aren't met.
|
||||
'''
|
||||
deps = {
|
||||
'clc': HAS_CLC,
|
||||
}
|
||||
return config.check_driver_dependencies(
|
||||
__virtualname__,
|
||||
deps
|
||||
)
|
||||
|
||||
|
||||
def get_creds():
|
||||
user = config.get_cloud_config_value(
|
||||
'user', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
password = config.get_cloud_config_value(
|
||||
'password', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
accountalias = config.get_cloud_config_value(
|
||||
'accountalias', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
token = config.get_cloud_config_value(
|
||||
'token', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
token_pass = config.get_cloud_config_value(
|
||||
'token_pass', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
creds = {'user': user, 'password': password, 'token': token, 'token_pass': token_pass, 'accountalias': accountalias}
|
||||
return creds
|
||||
|
||||
|
||||
def list_nodes_full(call=None, for_output=True):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    servers_raw = clc.v1.Server.GetServers(location=None)
    servers_raw = json.dumps(servers_raw)
    servers = json.loads(servers_raw)
    return servers
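
# Hedged usage sketch, mirroring the CLI form shown in get_group_estimate below:
#   salt-cloud -f list_nodes_full clc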


def get_queue_data(call=None, for_output=True):
    '''
    Return data on the CLC request queue
    '''
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    cl_queue = clc.v1.Queue.List()
    return cl_queue


def get_monthly_estimate(call=None, for_output=True):
    '''
    Return the monthly cost estimate for the account
    '''
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_monthly_estimate function must be called with -f or --function.'
        )
    try:
        billing_raw = clc.v1.Billing.GetAccountSummary(alias=creds["accountalias"])
        billing_raw = json.dumps(billing_raw)
        billing = json.loads(billing_raw)
        billing = round(billing["MonthlyEstimate"], 2)
        return {"Monthly Estimate": billing}
    except RuntimeError:
        return {"Monthly Estimate": 0}


def get_month_to_date(call=None, for_output=True):
    '''
    Return the month-to-date total for the account
    '''
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_month_to_date function must be called with -f or --function.'
        )
    try:
        billing_raw = clc.v1.Billing.GetAccountSummary(alias=creds["accountalias"])
        billing_raw = json.dumps(billing_raw)
        billing = json.loads(billing_raw)
        billing = round(billing["MonthToDateTotal"], 2)
        return {"Month To Date": billing}
    except RuntimeError:
        return 0
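
# Hedged usage sketch for the two billing functions above:
#   salt-cloud -f get_monthly_estimate clc
#   salt-cloud -f get_month_to_date clc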


def get_server_alerts(call=None, for_output=True, **kwargs):
    '''
    Return a list of alerts from CLC as reported by their infra
    '''
    servername = ""
    for key, value in kwargs.items():
        if key == "servername":
            servername = value
    creds = get_creds()
    clc.v2.SetCredentials(creds["user"], creds["password"])
    alerts = clc.v2.Server(servername).Alerts()
    return alerts
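
# Hedged usage sketch ('web01' is an illustrative server name):
#   salt-cloud -f get_server_alerts clc servername=web01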


def get_group_estimate(call=None, for_output=True, **kwargs):
    '''
    Return the monthly estimate for a group of servers

    usage: "salt-cloud -f get_group_estimate clc group=Dev location=VA1"
    '''
    group = ""
    location = ""
    for key, value in kwargs.items():
        if key == "group":
            group = value
        if key == "location":
            location = value
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_group_estimate function must be called with -f or --function.'
        )
    try:
        billing_raw = clc.v1.Billing.GetGroupEstimate(group=group, alias=creds["accountalias"], location=location)
        billing_raw = json.dumps(billing_raw)
        billing = json.loads(billing_raw)
        estimate = round(billing["MonthlyEstimate"], 2)
        month_to_date = round(billing["MonthToDate"], 2)
        return {"Monthly Estimate": estimate, "Month to Date": month_to_date}
    except RuntimeError:
        return 0


def avail_images(call=None):
    '''
    returns a list of images available to you
    '''
    all_servers = list_nodes_full()
    templates = {}
    for server in all_servers:
        if server["IsTemplate"]:
            templates.update({"Template Name": server["Name"]})
    return templates


def avail_locations(call=None):
    '''
    returns a list of locations available to you
    '''
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    locations = clc.v1.Account.GetLocations()
    return locations


def avail_sizes(call=None):
    '''
    use templates for this
    '''
    return {"Sizes": "Sizes are built into templates. Choose appropriate template"}
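
# Hedged usage sketch, using the standard salt-cloud listing options:
#   salt-cloud --list-images clc
#   salt-cloud --list-locations clc
#   salt-cloud --list-sizes clc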


def get_build_status(req_id, nodename):
    '''
    Get the build status from CLC to make sure we don't return too early
    '''
    counter = 0
    req_id = str(req_id)
    while counter < 10:
        queue = clc.v1.Blueprint.GetStatus(request_id=(req_id))
        if queue["PercentComplete"] == 100:
            server_name = queue["Servers"][0]
            creds = get_creds()
            clc.v2.SetCredentials(creds["user"], creds["password"])
            ip_addresses = clc.v2.Server(server_name).ip_addresses
            internal_ip_address = ip_addresses[0]["internal"]
            return internal_ip_address
        else:
            counter = counter + 1
            log.info("Creating Cloud VM " + nodename + " Time out in " + str(10 - counter) + " minutes")
            time.sleep(60)


def create(vm_):
    '''
    get the system build going
    '''
    creds = get_creds()
    clc.v1.SetCredentials(creds["token"], creds["token_pass"])
    cloud_profile = config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('token',)
    )
    group = config.get_cloud_config_value(
        'group', vm_, __opts__, search_global=False, default=None,
    )
    name = vm_['name']
    description = config.get_cloud_config_value(
        'description', vm_, __opts__, search_global=False, default=None,
    )
    ram = config.get_cloud_config_value(
        'ram', vm_, __opts__, search_global=False, default=None,
    )
    backup_level = config.get_cloud_config_value(
        'backup_level', vm_, __opts__, search_global=False, default=None,
    )
    template = config.get_cloud_config_value(
        'template', vm_, __opts__, search_global=False, default=None,
    )
    password = config.get_cloud_config_value(
        'password', vm_, __opts__, search_global=False, default=None,
    )
    cpu = config.get_cloud_config_value(
        'cpu', vm_, __opts__, search_global=False, default=None,
    )
    network = config.get_cloud_config_value(
        'network', vm_, __opts__, search_global=False, default=None,
    )
    location = config.get_cloud_config_value(
        'location', vm_, __opts__, search_global=False, default=None,
    )
    if len(name) > 6:
        name = name[0:6]
    if len(password) < 9:
        password = ''
    clc_return = clc.v1.Server.Create(alias=None, location=(location), name=(name), template=(template), cpu=(cpu), ram=(ram), backup_level=(backup_level), group=(group), network=(network), description=(description), password=(password))
    req_id = clc_return["RequestID"]
    vm_['ssh_host'] = get_build_status(req_id, name)
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    return_message = {"Server Name": name, "IP Address": vm_['ssh_host']}
    ret.update(return_message)
    return return_message
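
# Hedged usage sketch: build a VM from a profile that supplies the values read
# above (profile name is illustrative; see the sample profile in the module docstring):
#   salt-cloud -p my-clc-profile dbsrv1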


def destroy(name, call=None):
    '''
    destroy the vm
    '''
    return {"status": "destroying must be done via https://control.ctl.io at this time"}