Mirror of https://github.com/valitydev/salt.git, synced 2024-11-08 01:18:58 +00:00.
Merge branch '2016.11' into issue-35840-fix-preserve-minion-cache-2016.11
This commit is contained in commit 93a68e32a5.

conf/master (27 lines changed)
@ -59,15 +59,14 @@
|
||||
|
||||
# Directory for custom modules. This directory can contain subdirectories for
|
||||
# each of Salt's module types such as "runners", "output", "wheel", "modules",
|
||||
# "states", "returners", etc.
|
||||
#extension_modules: <no default>
|
||||
# "states", "returners", "engines", "utils", etc.
|
||||
#extension_modules: /var/cache/salt/master/extmods
|
||||
|
||||
# Directory for custom modules. This directory can contain subdirectories for
|
||||
# each of Salt's module types such as "runners", "output", "wheel", "modules",
|
||||
# "states", "returners", "engines", etc.
|
||||
# "states", "returners", "engines", "utils", etc.
|
||||
# Like 'extension_modules' but can take an array of paths
|
||||
#module_dirs: <no default>
|
||||
# - /var/cache/salt/minion/extmods
|
||||
#module_dirs: []
|
||||
|
||||
# Verify and set permissions on configuration directories at startup:
|
||||
#verify_env: True
|
||||
@ -302,6 +301,9 @@
|
||||
# public keys from the minions. Note that this is insecure.
|
||||
#auto_accept: False
|
||||
|
||||
# The size of key that should be generated when creating new keys.
|
||||
#keysize: 2048
|
||||
|
||||
# Time in minutes that an incoming public key with a matching name found in
|
||||
# pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys
|
||||
# are removed when the master checks the minion_autosign directory.
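# For example, to accept matching autosign keys for two hours, you could set
# (commented out here; the value is illustrative, not a recommendation):
#autosign_timeout: 120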
|
||||
@ -916,6 +918,21 @@
|
||||
#pillar_cache_backend: disk
|
||||
|
||||
|
||||
###### Reactor Settings #####
###########################################
# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/
#reactor: []

# Set the TTL for the cache of the reactor configuration.
#reactor_refresh_interval: 60

# Configure the number of workers for the runner/wheel in the reactor.
#reactor_worker_threads: 10

# Define the queue size for workers in the reactor.
#reactor_worker_hwm: 10000
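
# A minimal illustrative reactor mapping (commented out; the event tag and the
# SLS path below are examples only and do not ship with Salt):
#reactor:
#  - 'salt/minion/*/start':
#    - /srv/reactor/start.sls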
|
||||
|
||||
|
||||
##### Syndic settings #####
|
||||
##########################################
|
||||
# The Salt syndic is used to pass commands through a master from a higher
|
||||
|
conf/minion (18 lines changed)
@ -615,6 +615,9 @@
|
||||
# you do so at your own risk!
|
||||
#open_mode: False
|
||||
|
||||
# The size of key that should be generated when creating new keys.
|
||||
#keysize: 2048
|
||||
|
||||
# Enable permissive access to the salt keys. This allows you to run the
|
||||
# master or minion as root, but have a non-root group be given access to
|
||||
# your pki_dir. To make the access explicit, root must belong to the group
|
||||
@ -656,6 +659,21 @@
|
||||
# ssl_version: PROTOCOL_TLSv1_2
|
||||
|
||||
|
||||
###### Reactor Settings #####
###########################################
# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/
#reactor: []

# Set the TTL for the cache of the reactor configuration.
#reactor_refresh_interval: 60

# Configure the number of workers for the runner/wheel in the reactor.
#reactor_worker_threads: 10

# Define the queue size for workers in the reactor.
#reactor_worker_hwm: 10000
|
||||
|
||||
|
||||
###### Thread settings #####
|
||||
###########################################
|
||||
# Disable multiprocessing support, by default when a minion receives a
|
||||
|
doc/_themes/saltstack2/layout.html (vendored, 2 lines changed)
@ -252,7 +252,7 @@
|
||||
|
||||
<div class="col-sm-6">
|
||||
|
||||
<a href="http://saltstack.com/support" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/footer-support.png', 1) }}"/></a>
|
||||
<a href="http://saltconf.com" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.jpg', 1) }}"/></a>
|
||||
|
||||
|
||||
</div>
|
||||
|
doc/_themes/saltstack2/static/images/DOCBANNER.jpg (vendored, new binary file, 790 KiB; binary file not shown)
doc/conf.py (11 lines changed)
@ -72,6 +72,7 @@ MOCK_MODULES = [
|
||||
'Crypto.Signature',
|
||||
'Crypto.Signature.PKCS1_v1_5',
|
||||
'M2Crypto',
|
||||
'msgpack',
|
||||
'yaml',
|
||||
'yaml.constructor',
|
||||
'yaml.nodes',
|
||||
@ -239,9 +240,9 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
|
||||
project = 'Salt'
|
||||
|
||||
version = salt.version.__version__
|
||||
latest_release = '2016.11.5' # latest release
|
||||
previous_release = '2016.3.6' # latest release from previous branch
|
||||
previous_release_dir = '2016.3' # path on web server for previous branch
|
||||
latest_release = '2017.7.1' # latest release
|
||||
previous_release = '2016.11.7' # latest release from previous branch
|
||||
previous_release_dir = '2016.11' # path on web server for previous branch
|
||||
next_release = '' # next release
|
||||
next_release_dir = '' # path on web server for next release branch
|
||||
|
||||
@ -252,8 +253,8 @@ if on_saltstack:
|
||||
copyright = time.strftime("%Y")
|
||||
|
||||
# < --- START do not merge these settings to other branches START ---> #
|
||||
build_type = 'latest' # latest, previous, develop, next
|
||||
release = latest_release # version, latest_release, previous_release
|
||||
build_type = 'previous' # latest, previous, develop, next
|
||||
release = previous_release # version, latest_release, previous_release
|
||||
# < --- END do not merge these settings to other branches END ---> #
|
||||
|
||||
# Set google custom search engine
|
||||
|
doc/faq.rst (26 lines changed)
@ -319,7 +319,27 @@ Restart using states
|
||||
********************
|
||||
|
||||
Now we can apply the workaround to restart the Minion in a reliable way.
|
||||
The following example works on both UNIX-like and Windows operating systems:
|
||||
The following example works on UNIX-like operating systems:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- if grains['os'] != 'Windows' %}
|
||||
Restart Salt Minion:
|
||||
cmd.run:
|
||||
- name: 'salt-call --local service.restart salt-minion'
|
||||
- bg: True
|
||||
- onchanges:
|
||||
- pkg: Upgrade Salt Minion
|
||||
{%- endif %}
|
||||
|
||||
Note that restarting the ``salt-minion`` service on Windows operating systems is
|
||||
not always necessary when performing an upgrade. The installer stops the
|
||||
``salt-minion`` service, removes it, deletes the contents of the ``\salt\bin``
|
||||
directory, installs the new code, re-creates the ``salt-minion`` service, and
|
||||
starts it (by default). The restart step **would** be necessary during the
|
||||
upgrade process, however, if the minion config was edited after the upgrade or
|
||||
installation. If a minion restart is necessary, the state above can be edited
|
||||
as follows:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
@ -335,8 +355,8 @@ The following example works on both UNIX-like and Windows operating systems:
|
||||
- pkg: Upgrade Salt Minion
|
||||
|
||||
However, it requires more advanced tricks to upgrade from a legacy version of
|
||||
Salt (before ``2016.3.0``), where executing commands in the background is not
|
||||
supported:
|
||||
Salt (before ``2016.3.0``) on UNIX-like operating systems, where executing
|
||||
commands in the background is not supported:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
|
@ -33,6 +33,10 @@ Output Options
|
||||
|
||||
Write the output to the specified file.
|
||||
|
||||
.. option:: --out-file-append, --output-file-append
|
||||
|
||||
Append the output to the specified file.
|
||||
|
||||
.. option:: --no-color
|
||||
|
||||
Disable all colored output
|
||||
@ -46,3 +50,14 @@ Output Options
|
||||
|
||||
``green`` denotes success, ``red`` denotes failure, ``blue`` denotes
changes and success, and ``yellow`` denotes an expected future change in configuration.
|
||||
|
||||
.. option:: --state-output=STATE_OUTPUT, --state_output=STATE_OUTPUT
|
||||
|
||||
Override the configured state_output value for minion
|
||||
output. One of 'full', 'terse', 'mixed', 'changes' or
|
||||
'filter'. Default: 'none'.
|
||||
|
||||
.. option:: --state-verbose=STATE_VERBOSE, --state_verbose=STATE_VERBOSE
|
||||
|
||||
Override the configured state_verbose value for minion
|
||||
output. Set to True or False. Default: none.
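
Both values can also be set persistently in the minion configuration instead of
on the command line; this is a sketch with illustrative values, not recommended
defaults:

.. code-block:: yaml

    state_output: mixed
    state_verbose: False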
|
||||
|
@ -39,6 +39,13 @@ specified target expression.
|
||||
destination will be assumed to be a directory. Finally, recursion is now
|
||||
supported, allowing for entire directories to be copied.
|
||||
|
||||
.. versionchanged:: 2016.11.7,2017.7.2
|
||||
Reverted back to the old copy mode to preserve backward compatibility. The
|
||||
new functionality added in 2016.6.6 and 2017.7.0 is now available using the
|
||||
``-C`` or ``--chunked`` CLI arguments. Note that compression, recursive
|
||||
copying, and support for copying large files is only available in chunked
|
||||
mode.
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
@ -56,9 +63,16 @@ Options
|
||||
.. include:: _includes/target-selection.rst
|
||||
|
||||
|
||||
.. option:: -C, --chunked
|
||||
|
||||
Use the new chunked mode to copy files. This mode supports large files, recursive
directory copying, and compression.
|
||||
|
||||
.. versionadded:: 2016.11.7,2017.7.2
|
||||
|
||||
.. option:: -n, --no-compression
|
||||
|
||||
Disable gzip compression.
|
||||
Disable gzip compression in chunked mode.
|
||||
|
||||
.. versionadded:: 2016.3.7,2016.11.6,Nitrogen
|
||||
|
||||
|
@ -91,64 +91,6 @@ The user to run the Salt processes
|
||||
|
||||
user: root
|
||||
|
||||
.. conf_master:: max_open_files
|
||||
|
||||
``max_open_files``
|
||||
------------------
|
||||
|
||||
Default: ``100000``
|
||||
|
||||
Each minion connecting to the master uses AT LEAST one file descriptor, the
|
||||
master subscription connection. If enough minions connect you might start
|
||||
seeing on the console(and then salt-master crashes):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
Too many open files (tcp_listener.cpp:335)
|
||||
Aborted (core dumped)
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
max_open_files: 100000
|
||||
|
||||
By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for
|
||||
max open files.
|
||||
|
||||
To set a different value than the default one, uncomment, and configure this
|
||||
setting. Remember that this value CANNOT be higher than the hard limit. Raising
|
||||
the hard limit depends on the OS and/or distribution, a good way to find the
|
||||
limit is to search the internet for something like this:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
raise max open files hard limit debian
|
||||
|
||||
.. conf_master:: worker_threads
|
||||
|
||||
``worker_threads``
|
||||
------------------
|
||||
|
||||
Default: ``5``
|
||||
|
||||
The number of threads to start for receiving commands and replies from minions.
|
||||
If minions are stalling on replies because you have many minions, raise the
|
||||
worker_threads value.
|
||||
|
||||
Worker threads should not be put below 3 when using the peer system, but can
|
||||
drop down to 1 worker otherwise.
|
||||
|
||||
.. note::
|
||||
When the master daemon starts, it is expected behaviour to see
|
||||
multiple salt-master processes, even if 'worker_threads' is set to '1'. At
|
||||
a minimum, a controlling process will start along with a Publisher, an
|
||||
EventPublisher, and a number of MWorker processes will be started. The
|
||||
number of MWorker processes is tuneable by the 'worker_threads'
|
||||
configuration value while the others are not.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
worker_threads: 5
|
||||
|
||||
.. conf_master:: ret_port
|
||||
|
||||
``ret_port``
|
||||
@ -238,8 +180,8 @@ The directory to store the pki authentication keys.
|
||||
|
||||
Directory for custom modules. This directory can contain subdirectories for
|
||||
each of Salt's module types such as ``runners``, ``output``, ``wheel``,
|
||||
``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to
|
||||
:conf_master:`root_dir`.
|
||||
``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc.
|
||||
This path is appended to :conf_master:`root_dir`.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -850,6 +792,94 @@ what you are doing! Transports are explained in :ref:`Salt Transports
|
||||
ret_port: 4606
|
||||
zeromq: []
|
||||
|
||||
``sock_pool_size``
|
||||
------------------
|
||||
|
||||
Default: 1
|
||||
|
||||
To avoid blocking while writing data to a socket, we support a socket pool for
Salt applications. For example, a job targeting a large number of hosts can
block for a long period while writing. The option is used by the ZMQ and TCP
transports; the other transport methods do not need a socket pool by
definition. Most Salt tools, including the CLI, are fine with a single socket
in the pool. On the other hand, it is highly recommended to set the pool size
larger than 1 for other Salt applications, especially the Salt API, which must
write data to sockets concurrently.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
sock_pool_size: 15
|
||||
|
||||
.. conf_master:: ipc_mode
|
||||
|
||||
``ipc_mode``
|
||||
------------
|
||||
|
||||
Default: ``ipc``
|
||||
|
||||
The IPC strategy (i.e., sockets versus TCP, etc.). Windows platforms lack
POSIX IPC and must rely on TCP-based inter-process communication, so ``ipc_mode``
is set to ``tcp`` by default on Windows.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ipc_mode: ipc
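
For example, on Windows, where POSIX IPC is unavailable, the default behavior
described above corresponds to:

.. code-block:: yaml

    ipc_mode: tcp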
|
||||
|
||||
.. conf_master:: tcp_master_pub_port
|
||||
|
||||
``tcp_master_pub_port``
|
||||
-----------------------
|
||||
|
||||
Default: ``4512``
|
||||
|
||||
The TCP port on which events for the master should be published if ``ipc_mode`` is TCP.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_master_pub_port: 4512
|
||||
|
||||
.. conf_master:: tcp_master_pull_port
|
||||
|
||||
``tcp_master_pull_port``
|
||||
------------------------
|
||||
|
||||
Default: ``4513``
|
||||
|
||||
The TCP port on which events for the master should be pulled if ``ipc_mode`` is TCP.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_master_pull_port: 4513
|
||||
|
||||
.. conf_master:: tcp_master_publish_pull
|
||||
|
||||
``tcp_master_publish_pull``
|
||||
---------------------------
|
||||
|
||||
Default: ``4514``
|
||||
|
||||
The TCP port on which events for the master should be pulled from and then republished onto
|
||||
the event bus on the master.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_master_publish_pull: 4514
|
||||
|
||||
.. conf_master:: tcp_master_workers
|
||||
|
||||
``tcp_master_workers``
|
||||
----------------------
|
||||
|
||||
Default: ``4515``
|
||||
|
||||
The TCP port for ``mworkers`` to connect to on the master.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_master_workers: 4515
|
||||
|
||||
|
||||
Salt-SSH Configuration
|
||||
======================
|
||||
|
||||
@ -1083,6 +1113,19 @@ public keys from minions.
|
||||
|
||||
auto_accept: False
|
||||
|
||||
.. conf_master:: keysize
|
||||
|
||||
``keysize``
|
||||
-----------
|
||||
|
||||
Default: ``2048``
|
||||
|
||||
The size of key that should be generated when creating new keys.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
keysize: 2048
|
||||
|
||||
.. conf_master:: autosign_timeout
|
||||
|
||||
``autosign_timeout``
|
||||
@ -1127,6 +1170,24 @@ minion IDs for which keys will automatically be rejected. Will override both
|
||||
membership in the :conf_master:`autosign_file` and the
|
||||
:conf_master:`auto_accept` setting.
|
||||
|
||||
.. conf_master:: permissive_pki_access
|
||||
|
||||
``permissive_pki_access``
|
||||
-------------------------
|
||||
|
||||
Default: ``False``
|
||||
|
||||
Enable permissive access to the salt keys. This allows you to run the
|
||||
master or minion as root, but have a non-root group be given access to
|
||||
your pki_dir. To make the access explicit, root must belong to the group
|
||||
you've given access to. This is potentially quite insecure. If an autosign_file
|
||||
is specified, enabling permissive_pki_access will allow group access to that
|
||||
specific file.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
permissive_pki_access: False
|
||||
|
||||
.. conf_master:: publisher_acl
|
||||
|
||||
``publisher_acl``
|
||||
@ -1171,6 +1232,20 @@ This is completely disabled by default.
|
||||
- cmd.*
|
||||
- test.echo
|
||||
|
||||
.. conf_master:: sudo_acl
|
||||
|
||||
``sudo_acl``
|
||||
------------
|
||||
|
||||
Default: ``False``
|
||||
|
||||
Enforce ``publisher_acl`` and ``publisher_acl_blacklist`` when users have sudo
|
||||
access to the salt command.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
sudo_acl: False
|
||||
|
||||
.. conf_master:: external_auth
|
||||
|
||||
``external_auth``
|
||||
@ -1327,6 +1402,18 @@ Do not disable this unless it is absolutely clear what this does.
|
||||
|
||||
rotate_aes_key: True
|
||||
|
||||
.. conf_master:: publish_session
|
||||
|
||||
``publish_session``
|
||||
-------------------
|
||||
|
||||
Default: ``86400``
|
||||
|
||||
The number of seconds between AES key rotations on the master.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
publish_session: 86400
|
||||
|
||||
.. conf_master:: ssl
|
||||
|
||||
@ -1358,6 +1445,24 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23
|
||||
``preserve_minion_cache``
-------------------------
|
||||
|
||||
Default: ``False``
|
||||
|
||||
By default, the master deletes its cache of minion data when the key for that
|
||||
minion is removed. To preserve the cache after key deletion, set
|
||||
``preserve_minion_cache`` to True.
|
||||
|
||||
WARNING: This may have security implications if compromised minions auth with
a previously deleted minion ID.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
preserve_minion_cache: False
|
||||
|
||||
.. conf_master:: allow_minion_key_revoke
|
||||
|
||||
``allow_minion_key_revoke``
|
||||
---------------------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
Controls whether a minion can request its own key revocation. When True
|
||||
@ -1369,6 +1474,128 @@ the master will drop the request and the minion's key will remain accepted.
|
||||
|
||||
allow_minion_key_revoke: True
|
||||
|
||||
|
||||
Master Large Scale Tuning Settings
|
||||
==================================
|
||||
|
||||
.. conf_master:: max_open_files
|
||||
|
||||
``max_open_files``
|
||||
------------------
|
||||
|
||||
Default: ``100000``
|
||||
|
||||
Each minion connecting to the master uses AT LEAST one file descriptor, the
|
||||
master subscription connection. If enough minions connect, you might start
seeing errors like the following on the console (and then salt-master crashes):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
Too many open files (tcp_listener.cpp:335)
|
||||
Aborted (core dumped)
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
max_open_files: 100000
|
||||
|
||||
By default this value will be the value of ``ulimit -Hn``, i.e., the hard limit for
|
||||
max open files.
|
||||
|
||||
To set a different value than the default one, uncomment, and configure this
|
||||
setting. Remember that this value CANNOT be higher than the hard limit. Raising
|
||||
the hard limit depends on the OS and/or distribution, a good way to find the
|
||||
limit is to search the internet for something like this:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
raise max open files hard limit debian
|
||||
|
||||
.. conf_master:: worker_threads
|
||||
|
||||
``worker_threads``
|
||||
------------------
|
||||
|
||||
Default: ``5``
|
||||
|
||||
The number of threads to start for receiving commands and replies from minions.
|
||||
If minions are stalling on replies because you have many minions, raise the
|
||||
worker_threads value.
|
||||
|
||||
Worker threads should not be put below 3 when using the peer system, but can
|
||||
drop down to 1 worker otherwise.
|
||||
|
||||
.. note::
|
||||
When the master daemon starts, it is expected behaviour to see
|
||||
multiple salt-master processes, even if 'worker_threads' is set to '1'. At
|
||||
a minimum, a controlling process will start along with a Publisher, an
|
||||
EventPublisher, and a number of MWorker processes will be started. The
|
||||
number of MWorker processes is tuneable by the 'worker_threads'
|
||||
configuration value while the others are not.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
worker_threads: 5
|
||||
|
||||
.. conf_master:: pub_hwm
|
||||
|
||||
``pub_hwm``
|
||||
-----------
|
||||
|
||||
Default: ``1000``
|
||||
|
||||
The zeromq high water mark on the publisher interface.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
pub_hwm: 1000
|
||||
|
||||
.. conf_master:: zmq_backlog
|
||||
|
||||
``zmq_backlog``
|
||||
---------------
|
||||
|
||||
Default: ``1000``
|
||||
|
||||
The listen queue size of the ZeroMQ backlog.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
zmq_backlog: 1000
|
||||
|
||||
.. conf_master:: salt_event_pub_hwm
|
||||
.. conf_master:: event_publisher_pub_hwm
|
||||
|
||||
``salt_event_pub_hwm`` and ``event_publisher_pub_hwm``
|
||||
------------------------------------------------------
|
||||
|
||||
These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and
``event_publisher_pub_hwm``, are significant for masters with thousands of
minions. When they are set too low, responses will randomly go missing from
the CLI and even from the job cache. Masters that have fast CPUs and many
cores with an appropriate ``worker_threads`` value will not need these set as
high.
|
||||
|
||||
The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
salt_event_pub_hwm: 20000
|
||||
|
||||
The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
event_publisher_pub_hwm: 10000
|
||||
|
||||
As an example, a single-master deployment with 8,000 minions, 2.4GHz CPUs,
24 cores, and 32GiB of memory uses these settings:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
salt_event_pub_hwm: 128000
|
||||
event_publisher_pub_hwm: 64000
|
||||
|
||||
|
||||
Master Module Management
|
||||
========================
|
||||
|
||||
@ -2922,6 +3149,26 @@ configuration.
|
||||
|
||||
pillar_opts: False
|
||||
|
||||
.. conf_master:: pillar_safe_render_error
|
||||
|
||||
``pillar_safe_render_error``
|
||||
----------------------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
The pillar_safe_render_error option prevents the master from passing pillar
|
||||
render errors to the minion. This is set on by default because the error could
|
||||
contain templating data which would give that minion information it shouldn't
|
||||
have, like a password! When set ``True`` the error message will only show:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
Rendering SLS 'my.sls' failed. Please see master log for details.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
pillar_safe_render_error: True
|
||||
|
||||
.. _master-configuration-ext-pillar:
|
||||
|
||||
.. conf_master:: ext_pillar
|
||||
@ -3505,6 +3752,63 @@ can be utilized:
|
||||
|
||||
pillar_cache_backend: disk
|
||||
|
||||
|
||||
Master Reactor Settings
|
||||
=======================
|
||||
|
||||
.. conf_master:: reactor
|
||||
|
||||
``reactor``
|
||||
-----------
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
Defines a salt reactor. See the :ref:`Reactor <reactor>` documentation for more
|
||||
information.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor: []
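
As a sketch of the expected structure (the event tag and SLS path here are
purely illustrative), a reactor entry maps an event tag to one or more reactor
SLS files:

.. code-block:: yaml

    reactor:
      - 'salt/minion/*/start':
        - /srv/reactor/start.sls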
|
||||
|
||||
.. conf_master:: reactor_refresh_interval
|
||||
|
||||
``reactor_refresh_interval``
|
||||
----------------------------
|
||||
|
||||
Default: ``60``
|
||||
|
||||
The TTL for the cache of the reactor configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor_refresh_interval: 60
|
||||
|
||||
.. conf_master:: reactor_worker_threads
|
||||
|
||||
``reactor_worker_threads``
|
||||
--------------------------
|
||||
|
||||
Default: ``10``
|
||||
|
||||
The number of workers for the runner/wheel in the reactor.
|
||||
|
||||
.. code-block:: yaml

    reactor_worker_threads: 10
|
||||
|
||||
.. conf_master:: reactor_worker_hwm
|
||||
|
||||
``reactor_worker_hwm``
|
||||
----------------------
|
||||
|
||||
Default: ``10000``
|
||||
|
||||
The queue size for workers in the reactor.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor_worker_hwm: 10000
|
||||
|
||||
|
||||
Syndic Server Settings
|
||||
======================
|
||||
|
||||
@ -3950,6 +4254,64 @@ option then the master will log a warning message.
|
||||
- master.d/*
|
||||
- /etc/roles/webserver
|
||||
|
||||
|
||||
Keepalive Settings
|
||||
==================
|
||||
|
||||
.. conf_master:: tcp_keepalive
|
||||
|
||||
``tcp_keepalive``
|
||||
-----------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt
|
||||
connectivity issues in messy network environments with misbehaving firewalls.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive: True
|
||||
|
||||
.. conf_master:: tcp_keepalive_cnt
|
||||
|
||||
``tcp_keepalive_cnt``
|
||||
---------------------
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_cnt: -1
|
||||
|
||||
.. conf_master:: tcp_keepalive_idle
|
||||
|
||||
``tcp_keepalive_idle``
|
||||
----------------------
|
||||
|
||||
Default: ``300``
|
||||
|
||||
Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_idle: 300
|
||||
|
||||
.. conf_master:: tcp_keepalive_intvl
|
||||
|
||||
``tcp_keepalive_intvl``
|
||||
-----------------------
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_intvl: -1
|
||||
|
||||
|
||||
.. _winrepo-master-config-opts:
|
||||
|
||||
Windows Software Repo Settings
|
||||
@ -4088,7 +4450,7 @@ URL of the repository:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
winrepo_remotes:
|
||||
winrepo_remotes_ng:
|
||||
- '<commit_id> https://github.com/saltstack/salt-winrepo-ng.git'
|
||||
|
||||
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
|
||||
|
@ -750,6 +750,20 @@ seconds each iteration.
|
||||
|
||||
acceptance_wait_time_max: 0
|
||||
|
||||
.. conf_minion:: rejected_retry
|
||||
|
||||
``rejected_retry``
|
||||
------------------
|
||||
|
||||
Default: ``False``
|
||||
|
||||
If the master rejects the minion's public key, retry instead of exiting.
|
||||
Rejected keys will be handled the same as waiting on acceptance.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
rejected_retry: False
|
||||
|
||||
.. conf_minion:: random_reauth_delay
|
||||
|
||||
``random_reauth_delay``
|
||||
@ -1180,7 +1194,7 @@ If certain returners should be disabled, this is the place
|
||||
.. conf_minion:: enable_whitelist_modules
|
||||
|
||||
``whitelist_modules``
|
||||
----------------------------
|
||||
---------------------
|
||||
|
||||
Default: ``[]`` (Module whitelisting is disabled. Adding anything to the config option
|
||||
will cause only the listed modules to be enabled. Modules not in the list will
|
||||
@ -1272,6 +1286,20 @@ A list of extra directories to search for Salt renderers
|
||||
render_dirs:
|
||||
- /var/lib/salt/renderers
|
||||
|
||||
.. conf_minion:: utils_dirs
|
||||
|
||||
``utils_dirs``
|
||||
--------------
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
A list of extra directories to search for Salt utilities
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
utils_dirs:
|
||||
- /var/lib/salt/utils
|
||||
|
||||
.. conf_minion:: cython_enable
|
||||
|
||||
``cython_enable``
|
||||
@ -1320,6 +1348,20 @@ below.
|
||||
providers:
|
||||
service: systemd
|
||||
|
||||
.. conf_minion:: modules_max_memory
|
||||
|
||||
``modules_max_memory``
|
||||
----------------------
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Specify a max size (in bytes) for modules on import. This feature is currently
|
||||
only supported on *nix operating systems and requires psutil.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
modules_max_memory: -1
|
||||
|
||||
|
||||
Top File Settings
|
||||
=================
|
||||
@ -1451,6 +1493,52 @@ environment lacks one.
|
||||
|
||||
default_top: dev
|
||||
|
||||
.. conf_minion:: startup_states
|
||||
|
||||
``startup_states``
|
||||
------------------
|
||||
|
||||
Default: ``''``
|
||||
|
||||
States to run when the minion daemon starts. To enable, set ``startup_states`` to:
|
||||
|
||||
- ``highstate``: Execute state.highstate
|
||||
- ``sls``: Read in the sls_list option and execute the named sls files
|
||||
- ``top``: Read top_file option and execute based on that file on the Master
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
startup_states: ''
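
For instance, to run specific SLS files at startup, combine ``startup_states``
with :conf_minion:`sls_list` (reusing the illustrative values shown below):

.. code-block:: yaml

    startup_states: sls
    sls_list:
      - edit.vim
      - hyper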
|
||||
|
||||
.. conf_minion:: sls_list
|
||||
|
||||
``sls_list``
|
||||
------------
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
List of states to run when the minion starts up if ``startup_states`` is set to ``sls``.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
sls_list:
|
||||
- edit.vim
|
||||
- hyper
|
||||
|
||||
.. conf_minion:: top_file
|
||||
|
||||
``top_file``
|
||||
------------
|
||||
|
||||
Default: ``''``
|
||||
|
||||
Top file to execute if ``startup_states`` is set to ``top``.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
top_file: ''
|
||||
|
||||
|
||||
State Management Settings
|
||||
=========================
|
||||
|
||||
@ -1467,7 +1555,7 @@ The default renderer used for local state executions
|
||||
|
||||
renderer: yaml_jinja
|
||||
|
||||
.. conf_master:: test
|
||||
.. conf_minion:: test
|
||||
|
||||
``test``
|
||||
--------
|
||||
@ -1888,6 +1976,35 @@ before the initial key exchange. The master fingerprint can be found by running
|
||||
|
||||
master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13'
|
||||
|
||||
.. conf_minion:: keysize
|
||||
|
||||
``keysize``
|
||||
-----------
|
||||
|
||||
Default: ``2048``
|
||||
|
||||
The size of key that should be generated when creating new keys.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
keysize: 2048
|
||||
|
||||
.. conf_minion:: permissive_pki_access
|
||||
|
||||
``permissive_pki_access``
|
||||
-------------------------
|
||||
|
||||
Default: ``False``
|
||||
|
||||
Enable permissive access to the salt keys. This allows you to run the
|
||||
master or minion as root, but have a non-root group be given access to
|
||||
your pki_dir. To make the access explicit, root must belong to the group
|
||||
you've given access to. This is potentially quite insecure.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
permissive_pki_access: False
|
||||
|
||||
.. conf_minion:: verify_master_pubkey_sign
|
||||
|
||||
``verify_master_pubkey_sign``
|
||||
@ -1995,7 +2112,7 @@ blocked. If `cmd_whitelist_glob` is NOT SET, then all shell commands are permitt
|
||||
- 'cat /etc/fstab'
|
||||
|
||||
|
||||
.. conf_master:: ssl
|
||||
.. conf_minion:: ssl
|
||||
|
||||
``ssl``
|
||||
-------
|
||||
@ -2021,6 +2138,62 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23
|
||||
ssl_version: PROTOCOL_TLSv1_2
|
||||
|
||||
|
||||
Reactor Settings
|
||||
================
|
||||
|
||||
.. conf_minion:: reactor
|
||||
|
||||
``reactor``
|
||||
-----------
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
Defines a salt reactor. See the :ref:`Reactor <reactor>` documentation for more
|
||||
information.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor: []
|
||||
|
||||
.. conf_minion:: reactor_refresh_interval
|
||||
|
||||
``reactor_refresh_interval``
|
||||
----------------------------
|
||||
|
||||
Default: ``60``
|
||||
|
||||
The TTL for the cache of the reactor configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor_refresh_interval: 60
|
||||
|
||||
.. conf_minion:: reactor_worker_threads
|
||||
|
||||
``reactor_worker_threads``
|
||||
--------------------------
|
||||
|
||||
Default: ``10``
|
||||
|
||||
The number of workers for the runner/wheel in the reactor.
|
||||
|
||||
.. code-block:: yaml

    reactor_worker_threads: 10
|
||||
|
||||
.. conf_minion:: reactor_worker_hwm
|
||||
|
||||
``reactor_worker_hwm``
|
||||
----------------------
|
||||
|
||||
Default: ``10000``
|
||||
|
||||
The queue size for workers in the reactor.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor_worker_hwm: 10000
|
||||
|
||||
|
||||
Thread Settings
|
||||
===============
|
||||
|
||||
@ -2291,6 +2464,62 @@ option then the minion will log a warning message.
|
||||
- /etc/roles/webserver
|
||||
|
||||
|
||||
Keepalive Settings
|
||||
==================
|
||||
|
||||
.. conf_minion:: tcp_keepalive
|
||||
|
||||
``tcp_keepalive``
|
||||
-----------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt
|
||||
connectivity issues in messy network environments with misbehaving firewalls.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive: True
|
||||
|
||||
.. conf_minion:: tcp_keepalive_cnt
|
||||
|
||||
``tcp_keepalive_cnt``
|
||||
---------------------
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_cnt: -1
|
||||
|
||||
.. conf_minion:: tcp_keepalive_idle
|
||||
|
||||
``tcp_keepalive_idle``
|
||||
----------------------
|
||||
|
||||
Default: ``300``
|
||||
|
||||
Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_idle: 300
|
||||
|
||||
.. conf_minion:: tcp_keepalive_intvl
|
||||
|
||||
``tcp_keepalive_intvl``
|
||||
-----------------------
|
||||
|
||||
Default: ``-1``
|
||||
|
||||
Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
tcp_keepalive_intvl: -1
|
||||
|
||||
|
||||
Frozen Build Update Settings
|
||||
============================
|
||||
@ -2392,6 +2621,36 @@ out.
|
||||
|
||||
winrepo_dir: 'D:\winrepo'
|
||||
|
||||
.. conf_minion:: winrepo_dir_ng
|
||||
|
||||
``winrepo_dir_ng``
|
||||
------------------
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
A new :ref:`ng <windows-package-manager>` repo was added.
|
||||
|
||||
Default: ``/srv/salt/win/repo-ng``
|
||||
|
||||
Location on the minion where the :conf_minion:`winrepo_remotes_ng` are checked
|
||||
out for 2015.8.0 and later minions.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
winrepo_dir_ng: /srv/salt/win/repo-ng
|
||||
|
||||
.. conf_minion:: winrepo_source_dir
|
||||
|
||||
``winrepo_source_dir``
|
||||
----------------------
|
||||
|
||||
Default: ``salt://win/repo-ng/``
|
||||
|
||||
The source location for the winrepo sls files.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
winrepo_source_dir: salt://win/repo-ng/
|
||||
|
||||
.. conf_minion:: winrepo_cachefile
|
||||
.. conf_minion:: win_repo_cachefile
|
||||
|
||||
@ -2444,3 +2703,33 @@ URL of the the repository:
|
||||
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
|
||||
ID is useful in that it allows one to revert back to a previous version in the
|
||||
event that an error is introduced in the latest revision of the repo.
|
||||
|
||||
.. conf_minion:: winrepo_remotes_ng
|
||||
|
||||
``winrepo_remotes_ng``
|
||||
----------------------
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
A new :ref:`ng <windows-package-manager>` repo was added.
|
||||
|
||||
Default: ``['https://github.com/saltstack/salt-winrepo-ng.git']``
|
||||
|
||||
List of git repositories to checkout and include in the winrepo for
|
||||
2015.8.0 and later minions.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
winrepo_remotes_ng:
|
||||
- https://github.com/saltstack/salt-winrepo-ng.git
|
||||
|
||||
To specify a specific revision of the repository, prepend a commit ID to the
|
||||
URL of the repository:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
winrepo_remotes_ng:
|
||||
- '<commit_id> https://github.com/saltstack/salt-winrepo-ng.git'
|
||||
|
||||
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
|
||||
ID is useful in that it allows one to revert back to a previous version in the
|
||||
event that an error is introduced in the latest revision of the repo.
|
||||
|
@ -405,6 +405,29 @@ similar to the following:
|
||||
return __virtualname__
|
||||
return False
|
||||
|
||||
The ``__virtual__()`` function can return a ``True`` or ``False`` boolean, a tuple,
or a string. If it returns a ``True`` value, the ``__virtualname__`` module-level
attribute can be set as seen in the above example. This is the name by which the
module will be referred to.
|
||||
|
||||
When ``__virtual__()`` returns a tuple, the first item should be a boolean and the
|
||||
second should be a string. This is typically done when the module should not load. The
|
||||
first value of the tuple is ``False`` and the second is the error message to display
|
||||
for why the module did not load.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if git exists on the system
|
||||
'''
|
||||
if salt.utils.which('git') is None:
|
||||
return (False,
|
||||
'The git execution module cannot be loaded: git unavailable.')
|
||||
else:
|
||||
return True
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
@ -21,7 +21,7 @@ Or you may specify a map which includes all VMs to perform the action on:
|
||||
|
||||
$ salt-cloud -a reboot -m /path/to/mapfile
|
||||
|
||||
The following is a list of actions currently supported by salt-cloud:
|
||||
The following is an example list of actions currently supported by ``salt-cloud``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
@ -36,5 +36,5 @@ The following is a list of actions currently supported by salt-cloud:
|
||||
- start
|
||||
- stop
|
||||
|
||||
Another useful reference for viewing more salt-cloud actions is the
|
||||
:ref:Salt Cloud Feature Matrix <salt-cloud-feature-matrix>
|
||||
Another useful reference for viewing more ``salt-cloud`` actions is the
|
||||
:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.
|
||||
|
@ -78,6 +78,7 @@ parameters are discussed in more detail below.
|
||||
# RHEL -> ec2-user
|
||||
# CentOS -> ec2-user
|
||||
# Ubuntu -> ubuntu
|
||||
# Debian -> admin
|
||||
#
|
||||
ssh_username: ec2-user
|
||||
|
||||
|
@ -371,7 +371,6 @@ both.
|
||||
compute_name: cloudServersOpenStack
|
||||
protocol: ipv4
|
||||
compute_region: DFW
|
||||
protocol: ipv4
|
||||
user: myuser
|
||||
tenant: 5555555
|
||||
password: mypass
|
||||
|
@ -26,5 +26,5 @@ gathering information about instances on a provider basis:
|
||||
$ salt-cloud -f list_nodes_full linode
|
||||
$ salt-cloud -f list_nodes_select linode
|
||||
|
||||
Another useful reference for viewing salt-cloud functions is the
|
||||
Another useful reference for viewing ``salt-cloud`` functions is the
|
||||
:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.
|
||||
|
@ -64,7 +64,9 @@ automatically installed salt-cloud for you. Use your distribution's package
|
||||
manager to install the ``salt-cloud`` package from the same repo that you
|
||||
used to install Salt. These repos will automatically be setup by Salt Bootstrap.
|
||||
|
||||
If there is no salt-cloud package, install with ``pip install salt-cloud``.
|
||||
Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when
|
||||
installing Salt. The ``-L`` option will install ``salt-cloud`` and the required
|
||||
``libcloud`` package.
|
||||
|
||||
.. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap
|
||||
|
||||
|
@ -49,7 +49,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
joyent_512
|
||||
joyent_512:
|
||||
provider: my-joyent-config
|
||||
size: g4-highcpu-512M
|
||||
image: ubuntu-16.04
|
||||
|
@ -12,7 +12,9 @@ automatically installed salt-cloud for you. Use your distribution's package
|
||||
manager to install the ``salt-cloud`` package from the same repo that you
|
||||
used to install Salt. These repos will automatically be setup by Salt Bootstrap.
|
||||
|
||||
If there is no salt-cloud package, install with ``pip install salt-cloud``.
|
||||
Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when
|
||||
installing Salt. The ``-L`` option will install ``salt-cloud`` and the required
|
||||
``libcloud`` package.
|
||||
|
||||
.. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap
|
||||
|
||||
|
@ -260,6 +260,13 @@ The Salt development team will back-port bug fixes made to ``develop`` to the
|
||||
current release branch if the contributor cannot create the pull request
|
||||
against that branch.
|
||||
|
||||
Release Branches
|
||||
----------------
|
||||
|
||||
For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases.
|
||||
|
||||
Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well.
|
||||
|
||||
Keeping Salt Forks in Sync
|
||||
==========================
|
||||
|
||||
|
File diff suppressed because it is too large.

doc/topics/releases/2016.11.7.rst (new file, 15 lines)
@ -0,0 +1,15 @@
|
||||
============================
|
||||
Salt 2016.11.7 Release Notes
|
||||
============================
|
||||
|
||||
Version 2016.11.7 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
|
||||
|
||||
Changes for v2016.11.6..v2016.11.7
|
||||
----------------------------------
|
||||
|
||||
Security Fix
|
||||
============
|
||||
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
@ -4,23 +4,12 @@ Salt 2016.3.7 Release Notes
|
||||
|
||||
Version 2016.3.7 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the msater.
|
||||
Changes for v2016.3.6..v2016.3.7
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
Security Fix
|
||||
============
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
||||
|
doc/topics/releases/2016.3.8.rst (new file, 29 lines)
@ -0,0 +1,29 @@
|
||||
===========================
|
||||
Salt 2016.3.8 Release Notes
|
||||
===========================
|
||||
|
||||
Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
Changes for v2016.3.7..v2016.3.8
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
controls whether a minion can request that the master revoke its key. When True, a minion
can request a key revocation and the master will comply. If it is False, the key will not
be revoked by the master.
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
@ -8,7 +8,7 @@ Installing/Testing a Salt Release Candidate
|
||||
|
||||
It's time for a new feature release of Salt! Follow the instructions below to
|
||||
install the latest release candidate of Salt, and try :ref:`all the shiny new
|
||||
features <release-2016-11-0>`! Be sure to report any bugs you find on `Github
|
||||
features <release-2017-7-0>`! Be sure to report any bugs you find on `Github
|
||||
<https://github.com/saltstack/salt/issues/new/>`_.
|
||||
|
||||
Installing Using Packages
|
||||
@ -32,32 +32,12 @@ Builds for a few platforms are available as part of the RC at https://repo.salts
|
||||
|
||||
Available builds:
|
||||
|
||||
- Amazon Linux
|
||||
- Debian 8
|
||||
- macOS
|
||||
- RHEL 7
|
||||
- SmartOS (see below)
|
||||
- Ubuntu 16.04
|
||||
- Ubuntu16
|
||||
- Redhat7
|
||||
- Windows
|
||||
|
||||
.. FreeBSD
|
||||
|
||||
SmartOS
|
||||
-------
|
||||
Release candidate builds for SmartOS are available at http://pkg.blackdot.be/extras/salt-2016.11rc/.
|
||||
|
||||
On a base64 2015Q4-x86_64 based native zone the package can be installed by the following:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2015Q4_x86_64.tgz
|
||||
|
||||
When using the 2016Q2-tools release on the global zone by the following:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2016Q2_TOOLS.tgz
|
||||
|
||||
Installing Using Bootstrap
|
||||
==========================
|
||||
|
||||
@ -67,14 +47,14 @@ You can install a release candidate of Salt using `Salt Bootstrap
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o install_salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh install_salt.sh -P git v2016.11.0rc2
|
||||
sudo sh install_salt.sh -P git v2017.7.0rc1
|
||||
|
||||
If you want to also install a master using Salt Bootstrap, use the ``-M`` flag:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o install_salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh install_salt.sh -P -M git v2016.11.0rc2
|
||||
sudo sh install_salt.sh -P -M git v2017.7.0rc1
|
||||
|
||||
If you want to install only a master and not a minion using Salt Bootstrap, use
|
||||
the ``-M`` and ``-N`` flags:
|
||||
@ -82,13 +62,13 @@ the ``-M`` and ``-N`` flags:
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o install_salt.sh -L https://bootstrap.saltstack.com
|
||||
sudo sh install_salt.sh -P -M -N git v2016.11.0rc2
|
||||
sudo sh install_salt.sh -P -M -N git v2017.7.0rc1
|
||||
|
||||
Installing Using PyPI
|
||||
=====================
|
||||
|
||||
Installing from the `source archive
|
||||
<https://pypi.python.org/packages/7a/87/3b29ac215208bed9559d6c4df24175ddd1d52e62c5c00ae3afb3b7d9144d/salt-2016.11.0rc2.tar.gz>`_ on
|
||||
<https://pypi.python.org/packages/5c/cf/13c14f8bcd7b5076b9a8c3580f9582c1c4ea8b0458793ac6744ea66c0baf/salt-2017.7.0rc1.tar.gz>`_ on
|
||||
`PyPI <https://pypi.python.org/pypi>`_ is fairly straightforward.
|
||||
|
||||
.. note::
|
||||
@ -126,4 +106,4 @@ Then install salt using the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo pip install salt==2016.11.0rc2
|
||||
sudo pip install salt==2017.7.0rc1
|
||||
|
@ -64,7 +64,8 @@ Deploy ssh key for salt-ssh
|
||||
===========================
|
||||
|
||||
By default, salt-ssh will generate key pairs for ssh, the default path will be
|
||||
/etc/salt/pki/master/ssh/salt-ssh.rsa
|
||||
``/etc/salt/pki/master/ssh/salt-ssh.rsa``. The key generation happens when you run
|
||||
``salt-ssh`` for the first time.
|
||||
|
||||
You can use ssh-copy-id (the OpenSSH key deployment tool) to deploy keys to your servers.
|
||||
|
||||
|
@ -28,6 +28,7 @@ hit `Enter`. Also, you can convert tabs to 2 spaces by these commands in Vim:
|
||||
|
||||
Indentation
|
||||
===========
|
||||
|
||||
The suggested syntax for YAML files is to use 2 spaces for indentation,
|
||||
but YAML will follow whatever indentation system the individual file
|
||||
uses. Indentation of two spaces works very well for SLS files given the
|
||||
@ -112,8 +113,24 @@ PyYAML will load these values as boolean ``True`` or ``False``. Un-capitalized
|
||||
versions will also be loaded as booleans (``true``, ``false``, ``yes``, ``no``,
|
||||
``on``, and ``off``). This can be especially problematic when constructing
|
||||
Pillar data. Make sure that your Pillars which need to use the string versions
|
||||
of these values are enclosed in quotes. Pillars will be parsed twice by salt,
|
||||
so you'll need to wrap your values in multiple quotes, for example '"false"'.
|
||||
of these values are enclosed in quotes. Pillars will be parsed twice by salt,
|
||||
so you'll need to wrap your values in multiple quotes, including double quotation
|
||||
marks (``" "``) and single quotation marks (``' '``). Note that spaces are included
|
||||
in the quotation type examples for clarity.
|
||||
|
||||
Multiple quoting examples look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- '"false"'
|
||||
- "'True'"
|
||||
- "'YES'"
|
||||
- '"No"'
|
||||
|
||||
.. note::
|
||||
|
||||
When using multiple quotes in this manner, they must be different. Using ``"" ""``
|
||||
or ``'' ''`` won't work in this case (spaces are included in examples for clarity).
|
||||
|
||||
The '%' Sign
|
||||
============
|
||||
|
@ -166,13 +166,15 @@ Ubuntu 14.04 LTS and Debian Wheezy (7.x) also have a compatible version packaged
|
||||
|
||||
# apt-get install python-git
|
||||
|
||||
If your master is running an older version (such as Ubuntu 12.04 LTS or Debian
|
||||
Squeeze), then you will need to install GitPython using either pip_ or
|
||||
easy_install (it is recommended to use pip). Version 0.3.2.RC1 is now marked as
|
||||
the stable release in PyPI, so it should be a simple matter of running ``pip
|
||||
install GitPython`` (or ``easy_install GitPython``) as root.
|
||||
GitPython_ requires the ``git`` CLI utility to work. If installed from a system
|
||||
package, then git should already be installed, but if installed via pip_ then
|
||||
it may still be necessary to install git separately. For MacOS users,
|
||||
GitPython_ comes bundled in with the Salt installer, but git must still be
|
||||
installed for it to work properly. Git can be installed in several ways,
|
||||
including by installing XCode_.
|
||||
|
||||
.. _`pip`: http://www.pip-installer.org/
|
||||
.. _pip: http://www.pip-installer.org/
|
||||
.. _XCode: https://developer.apple.com/xcode/
|
||||
|
||||
.. warning::
|
||||
|
||||
|
@ -110,7 +110,7 @@ To pass through a file that contains jinja + yaml templating (the default):
|
||||
method='POST',
|
||||
data_file='/srv/salt/somefile.jinja',
|
||||
data_render=True,
|
||||
template_data={'key1': 'value1', 'key2': 'value2'}
|
||||
template_dict={'key1': 'value1', 'key2': 'value2'}
|
||||
)
|
||||
|
||||
To pass through a file that contains mako templating:
|
||||
@ -123,7 +123,7 @@ To pass through a file that contains mako templating:
|
||||
data_file='/srv/salt/somefile.mako',
|
||||
data_render=True,
|
||||
data_renderer='mako',
|
||||
template_data={'key1': 'value1', 'key2': 'value2'}
|
||||
template_dict={'key1': 'value1', 'key2': 'value2'}
|
||||
)
|
||||
|
||||
Because this function uses Salt's own rendering system, any Salt renderer can
|
||||
@ -140,7 +140,7 @@ However, this can be changed to ``master`` if necessary.
|
||||
method='POST',
|
||||
data_file='/srv/salt/somefile.jinja',
|
||||
data_render=True,
|
||||
template_data={'key1': 'value1', 'key2': 'value2'},
|
||||
template_dict={'key1': 'value1', 'key2': 'value2'},
|
||||
opts=__opts__
|
||||
)
|
||||
|
||||
@ -149,7 +149,7 @@ However, this can be changed to ``master`` if necessary.
|
||||
method='POST',
|
||||
data_file='/srv/salt/somefile.jinja',
|
||||
data_render=True,
|
||||
template_data={'key1': 'value1', 'key2': 'value2'},
|
||||
template_dict={'key1': 'value1', 'key2': 'value2'},
|
||||
node='master'
|
||||
)
|
||||
|
||||
@ -170,11 +170,11 @@ a Python dict.
|
||||
header_file='/srv/salt/headers.jinja',
|
||||
header_render=True,
|
||||
header_renderer='jinja',
|
||||
template_data={'key1': 'value1', 'key2': 'value2'}
|
||||
template_dict={'key1': 'value1', 'key2': 'value2'}
|
||||
)
|
||||
|
||||
Because much of the data that would be templated between headers and data may be
|
||||
the same, the ``template_data`` is the same for both. Correcting possible
|
||||
the same, the ``template_dict`` is the same for both. Correcting possible
|
||||
variable name collisions is up to the user.
|
||||
|
||||
Authentication
|
||||
|
@ -75,7 +75,7 @@ The default location for the pillar is in /srv/pillar.
|
||||
|
||||
.. note::
|
||||
|
||||
The pillar location can be configured via the `pillar_roots` option inside
|
||||
The pillar location can be configured via the ``pillar_roots`` option inside
|
||||
the master configuration file. It must not be in a subdirectory of the state
|
||||
tree or file_roots. If the pillar is under file_roots, any pillar targeting
|
||||
can be bypassed by minions.
|
||||
@ -242,7 +242,7 @@ set in the minion's pillar, then the default of ``httpd`` will be used.
|
||||
.. note::
|
||||
|
||||
Under the hood, pillar is just a Python dict, so Python dict methods such
|
||||
as `get` and `items` can be used.
|
||||
as ``get`` and ``items`` can be used.
|
||||
|
||||
Pillar Makes Simple States Grow Easily
|
||||
======================================
|
||||
@ -303,6 +303,18 @@ Where the vimrc source location can now be changed via pillar:
|
||||
|
||||
Ensuring that the right vimrc is sent out to the correct minions.
|
||||
|
||||
The pillar top file must include a reference to the new sls pillar file:
|
||||
|
||||
``/srv/pillar/top.sls``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
base:
|
||||
'*':
|
||||
- pkg
|
||||
- edit.vim
|
||||
|
||||
|
||||
Setting Pillar Data on the Command Line
|
||||
=======================================
|
||||
|
||||
|
@ -54,7 +54,7 @@ types like so:
|
||||
|
||||
salt '*' mymodule.observe_the_awesomeness
|
||||
'''
|
||||
print __utils__['foo.bar']()
|
||||
return __utils__['foo.bar']()
|
||||
|
||||
Utility modules, like any other kind of Salt extension, support using a
|
||||
:ref:`__virtual__ function <modules-virtual-name>` to conditionally load them,
|
||||
@ -81,11 +81,56 @@ the ``foo`` utility module with a ``__virtual__`` function.
|
||||
def bar():
|
||||
return 'baz'
|
||||
|
||||
You can also write your utility modules in an object-oriented fashion:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
My OOP-style utils module
|
||||
-------------------------
|
||||
|
||||
This module contains common functions for use in my other custom types.
|
||||
'''
|
||||
|
||||
class Foo(object):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def bar(self):
|
||||
return 'baz'
|
||||
|
||||
And import them into other custom modules:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
My awesome execution module
|
||||
---------------------------
|
||||
'''
|
||||
|
||||
import mymodule
|
||||
|
||||
def observe_the_awesomeness():
|
||||
'''
|
||||
Prints information from my utility module
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' mymodule.observe_the_awesomeness
|
||||
'''
|
||||
foo = mymodule.Foo()
|
||||
return foo.bar()
|
||||
|
||||
These are, of course, contrived examples, but they should serve to show some of
|
||||
the possibilities opened up by writing utility modules. Keep in mind though
|
||||
that States still have access to all of the execution modules, so it is not
|
||||
that states still have access to all of the execution modules, so it is not
|
||||
necessary to write a utility module to make a function available to both a
|
||||
state and an execution module. One good use case for utililty modules is one
|
||||
state and an execution module. One good use case for utility modules is one
|
||||
where it is necessary to invoke the same function from a custom :ref:`outputter
|
||||
<all-salt.output>`/returner, as well as an execution module.
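As one possible shape for that use case (a minimal sketch; the returner layout, the stored ``note`` key, and the explicit ``salt.loader.utils`` call are illustrative assumptions rather than a prescribed pattern), a custom returner could reuse the same ``foo.bar`` utility function:

.. code-block:: python

    # -*- coding: utf-8 -*-
    '''
    My awesome returner
    -------------------
    '''

    import salt.loader


    def returner(ret):
        '''
        Annotate the job return with a value from the shared ``foo`` utility
        module before handing it to whatever backend stores it.
        '''
        utils = salt.loader.utils(__opts__)  # __opts__ is injected by Salt
        ret['note'] = utils['foo.bar']()
        # ... persist ``ret`` to the chosen backend here ...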
|
||||
|
||||
|
@ -140,7 +140,7 @@ packages:
|
||||
- 2015.8.0 and later minions: https://github.com/saltstack/salt-winrepo-ng
|
||||
- Earlier releases: https://github.com/saltstack/salt-winrepo
|
||||
|
||||
By default, these repositories are mirrored to ``/srv/salt/win/repo_ng``
|
||||
By default, these repositories are mirrored to ``/srv/salt/win/repo-ng``
|
||||
and ``/srv/salt/win/repo``.
|
||||
|
||||
This location can be changed in the master config file by setting the
|
||||
|
@ -15,66 +15,130 @@
|
||||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Post install started on:" > /tmp/postinstall.txt
|
||||
date >> /tmp/postinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Check for existing minion config, copy if it doesn't exist
|
||||
###############################################################################
|
||||
if [ ! -f /etc/salt/minion ]; then
|
||||
echo "Config copy: Started..." >> /tmp/postinstall.txt
|
||||
cp /etc/salt/minion.dist /etc/salt/minion
|
||||
echo "Config copy: Successful" >> /tmp/postinstall.txt
|
||||
if [ ! -f "$CONFIG_DIR/minion" ]; then
|
||||
echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt"
|
||||
cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion"
|
||||
echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Create symlink to salt-config.sh
|
||||
###############################################################################
|
||||
# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/usr/local/sbin" ]; then
|
||||
mkdir /usr/local/sbin
|
||||
if [ ! -d "$SBIN_DIR" ]; then
|
||||
echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir "$SBIN_DIR"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config
|
||||
echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt"
|
||||
ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Add salt to paths.d
|
||||
###############################################################################
|
||||
# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/etc/paths.d" ]; then
|
||||
echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir /etc/paths.d
|
||||
echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt'
|
||||
sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt'
|
||||
echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt"
|
||||
sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt"
|
||||
sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt"
|
||||
echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Register Salt as a service
|
||||
###############################################################################
|
||||
echo "Service start: Enabling service..." >> /tmp/postinstall.txt
|
||||
launchctl enable system/com.saltstack.salt.minion
|
||||
echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt
|
||||
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
setup_services_maverick() {
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi;
|
||||
echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service is running" >> /tmp/postinstall.txt
|
||||
else
|
||||
echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt
|
||||
launchctl kickstart -kp system/com.saltstack.salt.minion
|
||||
fi
|
||||
echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service start: Successful" >> /tmp/postinstall.txt
|
||||
return 0
|
||||
}
|
||||
|
||||
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
|
||||
setup_services_yosemite_and_later() {
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl enable system/com.saltstack.salt.minion
|
||||
echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Post install completed successfully" >> /tmp/postinstall.txt
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt"
|
||||
else
|
||||
echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl kickstart -kp system/com.saltstack.salt.minion
|
||||
echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
setup_services_maverick;
|
||||
;;
|
||||
* )
|
||||
setup_services_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
@ -6,7 +6,8 @@
|
||||
# Date: December 2015
|
||||
#
|
||||
# Description: This script stops the salt minion service before attempting to
|
||||
# install Salt on macOS
|
||||
# install Salt on macOS. It also removes the /opt/salt/bin
|
||||
# directory, symlink to salt-config, and salt from paths.d.
|
||||
#
|
||||
# Requirements:
|
||||
# - None
|
||||
@ -15,26 +16,126 @@
|
||||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > /tmp/preinstall.txt
|
||||
date >> /tmp/preinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Stop the service
|
||||
###############################################################################
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop service: Started..." >> /tmp/preinstall.txt
|
||||
# /bin/launchctl unload "/Library/LaunchDaemons/com.saltstack.salt.minion.plist"
|
||||
launchctl disable system/com.saltstack.salt.minion
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Stop service: Successful" >> /tmp/preinstall.txt
|
||||
stop_service_maverick() {
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service_yosemite_and_later() {
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.minion
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
stop_service_maverick;
|
||||
;;
|
||||
* )
|
||||
stop_service_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Remove the Symlink to salt-config.sh
|
||||
###############################################################################
|
||||
if [ -L "$SBIN_DIR/salt-config" ]; then
|
||||
echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "$SBIN_DIR/salt-config"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt
|
||||
###############################################################################
|
||||
# Remove the $INSTALL_DIR directory
|
||||
###############################################################################
|
||||
if [ -d "$INSTALL_DIR" ]; then
|
||||
echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm -rf "$INSTALL_DIR"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Remove salt from paths.d
|
||||
###############################################################################
|
||||
if [ ! -f "/etc/paths.d/salt" ]; then
|
||||
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "/etc/paths.d/salt"
|
||||
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
@ -42,6 +42,13 @@ if not %errorLevel%==0 (
|
||||
)
|
||||
@echo.
|
||||
|
||||
:: Remove build and dist directories
|
||||
@echo %0 :: Remove build and dist directories...
|
||||
@echo ---------------------------------------------------------------------
|
||||
rd /s /q "%SrcDir%\build"
|
||||
rd /s /q "%SrcDir%\dist"
|
||||
@echo.
|
||||
|
||||
:: Install Current Version of salt
|
||||
@echo %0 :: Install Current Version of salt...
|
||||
@echo ---------------------------------------------------------------------
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-call
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-cp
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-key
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-master
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -12,5 +12,4 @@ Set Script=%SaltDir%\bin\Scripts\salt-minion
|
||||
net stop salt-minion
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" -l debug
|
||||
|
||||
"%Python%" -E -s "%Script%" -l debug
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-minion
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt-run
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
|
||||
Set Script=%SaltDir%\bin\Scripts\salt
|
||||
|
||||
:: Launch Script
|
||||
"%Python%" "%Script%" %*
|
||||
|
||||
"%Python%" -E -s "%Script%" %*
|
||||
|
@ -334,8 +334,7 @@ Section -Post
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
|
||||
; Register the Salt-Minion Service
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppEnvironmentExtra PYTHONHOME="
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1"
|
||||
|
||||
|
@ -31,6 +31,7 @@ import salt.config
|
||||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.payload
|
||||
|
||||
@ -193,8 +194,13 @@ class LoadAuth(object):
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
|
||||
with salt.utils.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
try:
|
||||
with salt.utils.files.set_umask(0o177):
|
||||
with salt.utils.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
|
||||
return {}
|
||||
return tdata
|
||||
|
||||
def get_tok(self, tok):
|
||||
@ -473,14 +479,12 @@ class Resolver(object):
|
||||
tdata = self._send_token_request(load)
|
||||
if 'token' not in tdata:
|
||||
return tdata
|
||||
oldmask = os.umask(0o177)
|
||||
try:
|
||||
with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_:
|
||||
fp_.write(tdata['token'])
|
||||
with salt.utils.files.set_umask(0o177):
|
||||
with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_:
|
||||
fp_.write(tdata['token'])
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
finally:
|
||||
os.umask(oldmask)
|
||||
return tdata
|
||||
|
||||
def mk_token(self, load):
|
||||
|
@ -19,8 +19,9 @@ import sys
|
||||
# Import salt libs
|
||||
import salt.client
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.itertools
|
||||
import salt.utils.minions
|
||||
from salt.utils import parsers, to_bytes
|
||||
from salt.utils import parsers, to_bytes, print_cli
|
||||
from salt.utils.verify import verify_log
|
||||
import salt.output
|
||||
|
||||
@ -100,10 +101,69 @@ class SaltCP(object):
|
||||
empty_dirs.update(empty_dirs_)
|
||||
return files, sorted(empty_dirs)
|
||||
|
||||
def _file_dict(self, fn_):
|
||||
'''
|
||||
Take a path and return the contents of the file as a string
|
||||
'''
|
||||
if not os.path.isfile(fn_):
|
||||
err = 'The referenced file, {0}, is not available.'.format(fn_)
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
with salt.utils.fopen(fn_, 'r') as fp_:
|
||||
data = fp_.read()
|
||||
return {fn_: data}
|
||||
|
||||
def _load_files(self):
|
||||
'''
|
||||
Parse the files indicated in opts['src'] and load them into a python
|
||||
object for transport
|
||||
'''
|
||||
files = {}
|
||||
for fn_ in self.opts['src']:
|
||||
if os.path.isfile(fn_):
|
||||
files.update(self._file_dict(fn_))
|
||||
elif os.path.isdir(fn_):
|
||||
print_cli(fn_ + ' is a directory, only files are supported in non-chunked mode. '
|
||||
'Use "--chunked" command line argument.')
|
||||
sys.exit(1)
|
||||
return files
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Make the salt client call
|
||||
'''
|
||||
if self.opts['chunked']:
|
||||
ret = self.run_chunked()
|
||||
else:
|
||||
ret = self.run_oldstyle()
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
|
||||
def run_oldstyle(self):
|
||||
'''
|
||||
Make the salt client call in old-style all-in-one call method
|
||||
'''
|
||||
arg = [self._load_files(), self.opts['dest']]
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
args = [self.opts['tgt'],
|
||||
'cp.recv',
|
||||
arg,
|
||||
self.opts['timeout'],
|
||||
]
|
||||
|
||||
selected_target_option = self.opts.get('selected_target_option', None)
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
return local.cmd(*args)
|
||||
|
||||
def run_chunked(self):
|
||||
'''
|
||||
Make the salt client call in the new chunked multi-call fashion
|
||||
'''
|
||||
files, empty_dirs = self._list_files()
|
||||
dest = self.opts['dest']
|
||||
gzip = self.opts['gzip']
|
||||
@ -165,7 +225,7 @@ class SaltCP(object):
|
||||
)
|
||||
args = [
|
||||
tgt,
|
||||
'cp.recv',
|
||||
'cp.recv_chunked',
|
||||
[remote_path, chunk, append, gzip, mode],
|
||||
timeout,
|
||||
]
|
||||
@ -211,14 +271,11 @@ class SaltCP(object):
|
||||
else '',
|
||||
tgt,
|
||||
)
|
||||
args = [tgt, 'cp.recv', [remote_path, None], timeout]
|
||||
args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout]
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
|
||||
ret.setdefault(minion_id, {})[remote_path] = minion_ret
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
return ret
|
||||
|
@ -408,8 +408,6 @@ class SyncClientMixin(object):
|
||||
)
|
||||
data['success'] = False
|
||||
|
||||
namespaced_event.fire_event(data, 'ret')
|
||||
|
||||
if self.store_job:
|
||||
try:
|
||||
salt.utils.job.store_job(
|
||||
@ -427,6 +425,9 @@ class SyncClientMixin(object):
|
||||
log.error('Could not store job cache info. '
|
||||
'Job details for this run may be unavailable.')
|
||||
|
||||
# Outputters _can_ mutate data so write to the job cache first!
|
||||
namespaced_event.fire_event(data, 'ret')
|
||||
|
||||
# if we fired an event, make sure to delete the event object.
|
||||
# This will ensure that we call destroy, which will do the 0MQ linger
|
||||
log.info('Runner completed: {0}'.format(data['jid']))
|
||||
|
@ -961,10 +961,18 @@ def ssh_interface(vm_):
|
||||
Return the ssh_interface type to connect to. Either 'public_ips' (default)
|
||||
or 'private_ips'.
|
||||
'''
|
||||
return config.get_cloud_config_value(
|
||||
ret = config.get_cloud_config_value(
|
||||
'ssh_interface', vm_, __opts__, default='public_ips',
|
||||
search_global=False
|
||||
)
|
||||
if ret not in ('public_ips', 'private_ips'):
|
||||
log.warning((
|
||||
'Invalid ssh_interface: {0}. '
|
||||
'Allowed options are ("public_ips", "private_ips"). '
|
||||
'Defaulting to "public_ips".'
|
||||
).format(ret))
|
||||
ret = 'public_ips'
|
||||
return ret
|
||||
|
||||
|
||||
def get_ssh_gateway_config(vm_):
|
||||
|
@ -726,12 +726,18 @@ def request_instance(vm_=None, call=None):
|
||||
|
||||
else:
|
||||
pool = floating_ip_conf.get('pool', 'public')
|
||||
for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
|
||||
if opts['fixed_ip'] is None and opts['pool'] == pool:
|
||||
floating_ip = fl_ip
|
||||
break
|
||||
if floating_ip is None:
|
||||
try:
|
||||
floating_ip = conn.floating_ip_create(pool)['ip']
|
||||
except Exception:
|
||||
log.info('A new IP address was unable to be allocated. '
|
||||
'An IP address will be pulled from the already allocated list. '
|
||||
'This will cause a race condition when building in parallel.')
|
||||
for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
|
||||
if opts['fixed_ip'] is None and opts['pool'] == pool:
|
||||
floating_ip = fl_ip
|
||||
break
|
||||
if floating_ip is None:
|
||||
log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name']))
|
||||
|
||||
def __query_node_data(vm_):
|
||||
try:
|
||||
|
@ -135,6 +135,14 @@ Alternatively, one could use the private IP to connect by specifying:
|
||||
ssh_interface: private_ips
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
When using floating ips from networks, if the OpenStack driver is unable to
|
||||
allocate a new ip address for the server, it will check for
|
||||
unassociated ip addresses in the floating ip pool. If SaltCloud is running
|
||||
in parallel mode, it is possible that more than one server will attempt to
|
||||
use the same ip address.
|
||||
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
@ -866,40 +874,43 @@ def _assign_floating_ips(vm_, conn, kwargs):
|
||||
pool = OpenStack_1_1_FloatingIpPool(
|
||||
net['floating'], conn.connection
|
||||
)
|
||||
for idx in pool.list_floating_ips():
|
||||
if idx.node_id is None:
|
||||
floating.append(idx)
|
||||
try:
|
||||
floating.append(pool.create_floating_ip())
|
||||
except Exception as e:
|
||||
log.debug('Cannot allocate IP from floating pool \'%s\'. Checking for unassociated ips.',
|
||||
net['floating'])
|
||||
for idx in pool.list_floating_ips():
|
||||
if idx.node_id is None:
|
||||
floating.append(idx)
|
||||
break
|
||||
if not floating:
|
||||
try:
|
||||
floating.append(pool.create_floating_ip())
|
||||
except Exception as e:
|
||||
raise SaltCloudSystemExit(
|
||||
'Floating pool \'{0}\' does not have any more '
|
||||
'please create some more or use a different '
|
||||
'pool.'.format(net['floating'])
|
||||
)
|
||||
raise SaltCloudSystemExit(
|
||||
'There are no more floating IP addresses '
|
||||
'available, please create some more'
|
||||
)
|
||||
# otherwise, attempt to obtain list without specifying pool
|
||||
# this is the same as 'nova floating-ip-list'
|
||||
elif ssh_interface(vm_) != 'private_ips':
|
||||
try:
|
||||
# This try/except is here because it appears some
|
||||
# *cough* Rackspace *cough*
|
||||
# OpenStack providers return a 404 Not Found for the
|
||||
# floating ip pool URL if there are no pools setup
|
||||
pool = OpenStack_1_1_FloatingIpPool(
|
||||
'', conn.connection
|
||||
)
|
||||
for idx in pool.list_floating_ips():
|
||||
if idx.node_id is None:
|
||||
floating.append(idx)
|
||||
try:
|
||||
floating.append(pool.create_floating_ip())
|
||||
except Exception as e:
|
||||
log.debug('Cannot allocate IP from the default floating pool. Checking for unassociated ips.')
|
||||
for idx in pool.list_floating_ips():
|
||||
if idx.node_id is None:
|
||||
floating.append(idx)
|
||||
break
|
||||
if not floating:
|
||||
try:
|
||||
floating.append(pool.create_floating_ip())
|
||||
except Exception as e:
|
||||
raise SaltCloudSystemExit(
|
||||
'There are no more floating IP addresses '
|
||||
'available, please create some more'
|
||||
)
|
||||
log.warning(
|
||||
'There are no more floating IP addresses '
|
||||
'available, please create some more if necessary'
|
||||
)
|
||||
except Exception as e:
|
||||
if str(e).startswith('404'):
|
||||
pass
|
||||
|
File diff suppressed because it is too large
@ -25,7 +25,7 @@ try:
|
||||
)
|
||||
HAS_LIBCLOUD = True
|
||||
LIBCLOUD_VERSION_INFO = tuple([
|
||||
int(part) for part in libcloud.__version__.replace('-', '.').split('.')[:3]
|
||||
int(part) for part in libcloud.__version__.replace('-', '.').replace('rc', '.').split('.')[:3]
|
||||
])
|
||||
|
||||
except ImportError:
|
||||
@ -150,7 +150,7 @@ def avail_locations(conn=None, call=None):
|
||||
|
||||
ret[img_name] = {}
|
||||
for attr in dir(img):
|
||||
if attr.startswith('_'):
|
||||
if attr.startswith('_') or attr == 'driver':
|
||||
continue
|
||||
|
||||
attr_value = getattr(img, attr)
|
||||
@ -187,7 +187,7 @@ def avail_images(conn=None, call=None):
|
||||
|
||||
ret[img_name] = {}
|
||||
for attr in dir(img):
|
||||
if attr.startswith('_'):
|
||||
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
|
||||
continue
|
||||
attr_value = getattr(img, attr)
|
||||
if isinstance(attr_value, string_types) and not six.PY3:
|
||||
@ -222,7 +222,7 @@ def avail_sizes(conn=None, call=None):
|
||||
|
||||
ret[size_name] = {}
|
||||
for attr in dir(size):
|
||||
if attr.startswith('_'):
|
||||
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
|
||||
continue
|
||||
|
||||
try:
|
||||
|
@ -199,6 +199,9 @@ VALID_OPTS = {
|
||||
# The directory containing unix sockets for things like the event bus
|
||||
'sock_dir': str,
|
||||
|
||||
# The pool size of unix sockets, it is necessary to avoid blocking waiting for zeromq and tcp communications.
|
||||
'sock_pool_size': int,
|
||||
|
||||
# Specifies how the file server should backup files, if enabled. The backups
|
||||
# live in the cache dir.
|
||||
'backup_mode': str,
|
||||
@ -328,7 +331,7 @@ VALID_OPTS = {
|
||||
# The TCP port on which minion events should be pulled if ipc_mode is TCP
|
||||
'tcp_pull_port': int,
|
||||
|
||||
# The TCP port on which events for the master should be pulled if ipc_mode is TCP
|
||||
# The TCP port on which events for the master should be published if ipc_mode is TCP
|
||||
'tcp_master_pub_port': int,
|
||||
|
||||
# The TCP port on which events for the master should be pulled if ipc_mode is TCP
|
||||
@ -1005,6 +1008,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'grains_deep_merge': False,
|
||||
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
|
||||
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
|
||||
'sock_pool_size': 1,
|
||||
'backup_mode': '',
|
||||
'renderer': 'yaml_jinja',
|
||||
'renderer_whitelist': [],
|
||||
@ -1230,6 +1234,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'user': salt.utils.get_user(),
|
||||
'worker_threads': 5,
|
||||
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
|
||||
'sock_pool_size': 1,
|
||||
'ret_port': 4506,
|
||||
'timeout': 5,
|
||||
'keep_jobs': 24,
|
||||
@ -2122,6 +2127,7 @@ def syndic_config(master_config_path,
|
||||
'sock_dir': os.path.join(
|
||||
opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
|
||||
),
|
||||
'sock_pool_size': master_opts['sock_pool_size'],
|
||||
'cachedir': master_opts['cachedir'],
|
||||
}
|
||||
opts.update(syndic_opts)
|
||||
@ -2130,7 +2136,7 @@ def syndic_config(master_config_path,
|
||||
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
|
||||
'autosign_file', 'autoreject_file', 'token_dir'
|
||||
]
|
||||
for config_key in ('log_file', 'key_logfile'):
|
||||
for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
|
||||
# If this is not a URI and instead a local path
|
||||
if urlparse(opts.get(config_key, '')).scheme == '':
|
||||
prepend_root_dirs.append(config_key)
|
||||
|
@ -372,17 +372,18 @@ class AsyncAuth(object):
|
||||
loop_instance_map = AsyncAuth.instance_map[io_loop]
|
||||
|
||||
key = cls.__key(opts)
|
||||
if key not in loop_instance_map:
|
||||
auth = loop_instance_map.get(key)
|
||||
if auth is None:
|
||||
log.debug('Initializing new AsyncAuth for {0}'.format(key))
|
||||
# we need to make a local variable for this, as we are going to store
|
||||
# it in a WeakValueDictionary-- which will remove the item if no one
|
||||
# references it-- this forces a reference while we return to the caller
|
||||
new_auth = object.__new__(cls)
|
||||
new_auth.__singleton_init__(opts, io_loop=io_loop)
|
||||
loop_instance_map[key] = new_auth
|
||||
auth = object.__new__(cls)
|
||||
auth.__singleton_init__(opts, io_loop=io_loop)
|
||||
loop_instance_map[key] = auth
|
||||
else:
|
||||
log.debug('Re-using AsyncAuth for {0}'.format(key))
|
||||
return loop_instance_map[key]
|
||||
return auth
|
||||
|
||||
@classmethod
|
||||
def __key(cls, opts, io_loop=None):
|
||||
@ -1008,14 +1009,15 @@ class SAuth(AsyncAuth):
|
||||
Only create one instance of SAuth per __key()
|
||||
'''
|
||||
key = cls.__key(opts)
|
||||
if key not in SAuth.instances:
|
||||
auth = SAuth.instances.get(key)
|
||||
if auth is None:
|
||||
log.debug('Initializing new SAuth for {0}'.format(key))
|
||||
new_auth = object.__new__(cls)
|
||||
new_auth.__singleton_init__(opts)
|
||||
SAuth.instances[key] = new_auth
|
||||
auth = object.__new__(cls)
|
||||
auth.__singleton_init__(opts)
|
||||
SAuth.instances[key] = auth
|
||||
else:
|
||||
log.debug('Re-using SAuth for {0}'.format(key))
|
||||
return SAuth.instances[key]
|
||||
return auth
|
||||
|
||||
@classmethod
|
||||
def __key(cls, opts, io_loop=None):
|
||||
|
@ -716,7 +716,7 @@ class RemoteFuncs(object):
|
||||
load.get('saltenv', load.get('env')),
|
||||
load.get('ext'),
|
||||
self.mminion.functions,
|
||||
pillar=load.get('pillar_override', {}))
|
||||
pillar_override=load.get('pillar_override', {}))
|
||||
pillar_dirs = {}
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
if self.opts.get('minion_data_cache', False):
|
||||
|
@ -50,8 +50,8 @@ def start(docker_url='unix://var/run/docker.sock',
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
docker_events:
|
||||
docker_url: unix://var/run/docker.sock
|
||||
- docker_events:
|
||||
docker_url: unix://var/run/docker.sock
|
||||
|
||||
The config above sets up engines to listen
|
||||
for events from the Docker daemon and publish
|
||||
|
@ -14,22 +14,22 @@ keys make the engine interactive.
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
hipchat:
|
||||
token: 'XXXXXX'
|
||||
room: 'salt'
|
||||
control: True
|
||||
valid_users:
|
||||
- SomeUser
|
||||
valid_commands:
|
||||
- test.ping
|
||||
- cmd.run
|
||||
- list_jobs
|
||||
- list_commands
|
||||
aliases:
|
||||
list_jobs:
|
||||
cmd: jobs.list_jobs
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:hipchat:valid_commands target=saltmaster
|
||||
- hipchat:
|
||||
token: 'XXXXXX'
|
||||
room: 'salt'
|
||||
control: True
|
||||
valid_users:
|
||||
- SomeUser
|
||||
valid_commands:
|
||||
- test.ping
|
||||
- cmd.run
|
||||
- list_jobs
|
||||
- list_commands
|
||||
aliases:
|
||||
list_jobs:
|
||||
cmd: jobs.list_jobs
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:hipchat:valid_commands target=saltmaster
|
||||
'''
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
@ -12,13 +12,13 @@ them onto a logstash endpoint via HTTP requests.
|
||||
|
||||
engines:
|
||||
- http_logstash:
|
||||
url: http://blabla.com/salt-stuff
|
||||
tags:
|
||||
- salt/job/*/new
|
||||
- salt/job/*/ret/*
|
||||
funs:
|
||||
- probes.results
|
||||
- bgp.config
|
||||
url: http://blabla.com/salt-stuff
|
||||
tags:
|
||||
- salt/job/*/new
|
||||
- salt/job/*/ret/*
|
||||
funs:
|
||||
- probes.results
|
||||
- bgp.config
|
||||
'''
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
@ -24,6 +24,9 @@ master config.
|
||||
:configuration:
|
||||
|
||||
Example configuration
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
- logentries:
|
||||
endpoint: data.logentries.com
|
||||
|
@ -8,6 +8,9 @@ them onto a logstash endpoint.
|
||||
:configuration:
|
||||
|
||||
Example configuration
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
- logstash:
|
||||
host: log.my_network.com
|
||||
|
@ -7,10 +7,10 @@ Example Config in Master or Minion config
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
reactor:
|
||||
refresh_interval: 60
|
||||
worker_threads: 10
|
||||
worker_hwm: 10000
|
||||
- reactor:
|
||||
refresh_interval: 60
|
||||
worker_threads: 10
|
||||
worker_hwm: 10000
|
||||
|
||||
reactor:
|
||||
- 'salt/cloud/*/destroyed':
|
||||
|
@ -8,6 +8,9 @@ events based on the channels they are subscribed to.
|
||||
:configuration:
|
||||
|
||||
Example configuration
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
- redis_sentinel:
|
||||
hosts:
|
||||
|
@ -12,21 +12,21 @@ prefaced with a ``!``.
|
||||
.. code-block:: yaml
|
||||
|
||||
engines:
|
||||
slack:
|
||||
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'
|
||||
control: True
|
||||
valid_users:
|
||||
- garethgreenaway
|
||||
valid_commands:
|
||||
- test.ping
|
||||
- cmd.run
|
||||
- list_jobs
|
||||
- list_commands
|
||||
aliases:
|
||||
list_jobs:
|
||||
cmd: jobs.list_jobs
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster
|
||||
- slack:
|
||||
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'
|
||||
control: True
|
||||
valid_users:
|
||||
- garethgreenaway
|
||||
valid_commands:
|
||||
- test.ping
|
||||
- cmd.run
|
||||
- list_jobs
|
||||
- list_commands
|
||||
aliases:
|
||||
list_jobs:
|
||||
cmd: jobs.list_jobs
|
||||
list_commands:
|
||||
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster
|
||||
|
||||
:depends: slackclient
|
||||
'''
|
||||
@ -202,7 +202,7 @@ def start(token,
|
||||
# Default to trying to run as a client module.
|
||||
else:
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd('{0}'.format(target), cmd, args, kwargs)
|
||||
ret = local.cmd('{0}'.format(target), cmd, arg=args, kwarg=kwargs)
|
||||
|
||||
if ret:
|
||||
return_text = json.dumps(ret, sort_keys=True, indent=1)
|
||||
|
@ -73,7 +73,7 @@ class SudoExecutor(ModuleExecutorBase):
|
||||
'-c', salt.syspaths.CONFIG_DIR,
|
||||
'--',
|
||||
data.get('fun')]
|
||||
if data['fun'] == 'state.sls':
|
||||
if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
|
||||
kwargs['concurrent'] = True
|
||||
for arg in args:
|
||||
self.cmd.append(_cmd_quote(str(arg)))
|
||||
|
@ -2,7 +2,7 @@
|
||||
'''
|
||||
Subversion Fileserver Backend
|
||||
|
||||
After enabling this backend, branches, and tags in a remote subversion
|
||||
After enabling this backend, branches and tags in a remote subversion
|
||||
repository are exposed to salt as different environments. To enable this
|
||||
backend, add ``svn`` to the :conf_master:`fileserver_backend` option in the
|
||||
Master config file.
|
||||
@ -694,7 +694,7 @@ def file_hash(load, fnd):
|
||||
|
||||
def _file_lists(load, form):
|
||||
'''
|
||||
Return a dict containing the file lists for files, dirs, emtydirs and symlinks
|
||||
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.warn_until(
|
||||
|
@ -20,6 +20,7 @@ import re
|
||||
import platform
|
||||
import logging
|
||||
import locale
|
||||
import datetime
|
||||
import salt.exceptions
|
||||
|
||||
__proxyenabled__ = ['*']
|
||||
@ -771,6 +772,8 @@ def _virtual(osdata):
|
||||
grains['virtual_subtype'] = 'ovirt'
|
||||
elif 'Google' in output:
|
||||
grains['virtual'] = 'gce'
|
||||
elif 'BHYVE' in output:
|
||||
grains['virtual'] = 'bhyve'
|
||||
except IOError:
|
||||
pass
|
||||
elif osdata['kernel'] == 'FreeBSD':
|
||||
@ -919,28 +922,19 @@ def _windows_platform_data():
|
||||
|
||||
os_release = platform.release()
|
||||
info = salt.utils.win_osinfo.get_os_version_info()
|
||||
server = {'Vista': '2008Server',
|
||||
'7': '2008ServerR2',
|
||||
'8': '2012Server',
|
||||
'8.1': '2012ServerR2',
|
||||
'10': '2016Server'}
|
||||
|
||||
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function
|
||||
# started reporting the Desktop version instead of the Server version on
|
||||
# Server versions of Windows, so we need to look those up
|
||||
# Check for Python >=2.7.12 or >=3.5.2
|
||||
ver = pythonversion()['pythonversion']
|
||||
if ((six.PY2 and
|
||||
salt.utils.compare_versions(ver, '>=', [2, 7, 12, 'final', 0]))
|
||||
or
|
||||
(six.PY3 and
|
||||
salt.utils.compare_versions(ver, '>=', [3, 5, 2, 'final', 0]))):
|
||||
# (Product Type 1 is Desktop, Everything else is Server)
|
||||
if info['ProductType'] > 1:
|
||||
server = {'Vista': '2008Server',
|
||||
'7': '2008ServerR2',
|
||||
'8': '2012Server',
|
||||
'8.1': '2012ServerR2',
|
||||
'10': '2016Server'}
|
||||
os_release = server.get(os_release,
|
||||
'Grain not found. Update lookup table '
|
||||
'in the `_windows_platform_data` '
|
||||
'function in `grains\\core.py`')
|
||||
# So, if you find a Server Platform that's a key in the server
|
||||
# dictionary, then lookup the actual Server Release.
|
||||
if info['ProductType'] > 1 and os_release in server:
|
||||
os_release = server[os_release]
|
||||
|
||||
service_pack = None
|
||||
if info['ServicePackMajor'] > 0:
|
||||
@ -1047,6 +1041,7 @@ _OS_NAME_MAP = {
|
||||
'synology': 'Synology',
|
||||
'nilrt': 'NILinuxRT',
|
||||
'manjaro': 'Manjaro',
|
||||
'manjarolin': 'Manjaro',
|
||||
'antergos': 'Antergos',
|
||||
'sles': 'SUSE',
|
||||
'slesexpand': 'RES',
|
||||
@ -1687,12 +1682,14 @@ def ip_fqdn():
|
||||
ret[key] = []
|
||||
else:
|
||||
try:
|
||||
start_time = datetime.datetime.utcnow()
|
||||
info = socket.getaddrinfo(_fqdn, None, socket_type)
|
||||
ret[key] = list(set(item[4][0] for item in info))
|
||||
except socket.error:
|
||||
if __opts__['__role'] == 'master':
|
||||
log.warning('Unable to find IPv{0} record for "{1}" causing a 10 second timeout when rendering grains. '
|
||||
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn))
|
||||
timediff = datetime.datetime.utcnow() - start_time
|
||||
if timediff.seconds > 5 and __opts__['__role'] == 'master':
|
||||
log.warning('Unable to find IPv{0} record for "{1}" causing a {2} second timeout when rendering grains. '
|
||||
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn, timediff))
|
||||
ret[key] = []
|
||||
|
||||
return ret
|
||||
|
@ -182,7 +182,7 @@ def minion_mods(
|
||||
generated modules in __context__
|
||||
|
||||
:param dict utils: Utility functions which should be made available to
|
||||
Salt modules in __utils__. See `utils_dir` in
|
||||
Salt modules in __utils__. See `utils_dirs` in
|
||||
salt.config for additional information about
|
||||
configuration.
|
||||
|
||||
|
@ -1339,7 +1339,7 @@ class AESFuncs(object):
|
||||
load['id'],
|
||||
load.get('saltenv', load.get('env')),
|
||||
ext=load.get('ext'),
|
||||
pillar=load.get('pillar_override', {}),
|
||||
pillar_override=load.get('pillar_override', {}),
|
||||
pillarenv=load.get('pillarenv'))
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
self.fs_.update_opts()
|
||||
|
@ -1263,7 +1263,7 @@ class Minion(MinionBase):
|
||||
ret = yield channel.send(load, timeout=timeout)
|
||||
raise tornado.gen.Return(ret)
|
||||
|
||||
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
|
||||
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
|
||||
'''
|
||||
Fire an event on the master, or drop message if unable to send.
|
||||
'''
|
||||
@ -1282,10 +1282,6 @@ class Minion(MinionBase):
|
||||
else:
|
||||
return
|
||||
|
||||
def timeout_handler(*_):
|
||||
log.info('fire_master failed: master could not be contacted. Request timed out.')
|
||||
return True
|
||||
|
||||
if sync:
|
||||
try:
|
||||
self._send_req_sync(load, timeout)
|
||||
@ -1296,6 +1292,12 @@ class Minion(MinionBase):
|
||||
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
|
||||
return False
|
||||
else:
|
||||
if timeout_handler is None:
|
||||
def handle_timeout(*_):
|
||||
log.info('fire_master failed: master could not be contacted. Request timed out.')
|
||||
return True
|
||||
timeout_handler = handle_timeout
|
||||
|
||||
with tornado.stack_context.ExceptionStackContext(timeout_handler):
|
||||
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
|
||||
return True
|
||||
@ -2216,13 +2218,15 @@ class Minion(MinionBase):
|
||||
if ping_interval > 0 and self.connected:
|
||||
def ping_master():
|
||||
try:
|
||||
if not self._fire_master('ping', 'minion_ping'):
|
||||
def ping_timeout_handler(*_):
|
||||
if not self.opts.get('auth_safemode', True):
|
||||
log.error('** Master Ping failed. Attempting to restart minion**')
|
||||
delay = self.opts.get('random_reauth_delay', 5)
|
||||
log.info('delaying random_reauth_delay {0}s'.format(delay))
|
||||
# regular sys.exit raises an exception -- which isn't sufficient in a thread
|
||||
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
|
||||
|
||||
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
|
||||
except Exception:
|
||||
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
|
||||
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
|
||||
@ -2237,7 +2241,7 @@ class Minion(MinionBase):
|
||||
except Exception:
|
||||
log.critical('The beacon errored: ', exc_info=True)
|
||||
if beacons and self.connected:
|
||||
self._fire_master(events=beacons)
|
||||
self._fire_master(events=beacons, sync=False)
|
||||
|
||||
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
|
||||
|
||||
|
@ -446,11 +446,15 @@ def config(name, config, edit=True):
|
||||
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
|
||||
'''
|
||||
|
||||
configs = []
|
||||
for entry in config:
|
||||
key = next(six.iterkeys(entry))
|
||||
configs = _parse_config(entry[key], key)
|
||||
if edit:
|
||||
with salt.utils.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configs)
|
||||
return configs
|
||||
configs.append(_parse_config(entry[key], key))
|
||||
|
||||
# Python auto-correct line endings
|
||||
configstext = "\n".join(configs)
|
||||
if edit:
|
||||
with salt.utils.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configstext)
|
||||
return configstext
|
||||
|
@ -135,19 +135,6 @@ def _reconstruct_ppa_name(owner_name, ppa_name):
|
||||
return 'ppa:{0}/{1}'.format(owner_name, ppa_name)
|
||||
|
||||
|
||||
def _get_repo(**kwargs):
|
||||
'''
|
||||
Check the kwargs for either 'fromrepo' or 'repo' and return the value.
|
||||
'fromrepo' takes precedence over 'repo'.
|
||||
'''
|
||||
for key in ('fromrepo', 'repo'):
|
||||
try:
|
||||
return kwargs[key]
|
||||
except KeyError:
|
||||
pass
|
||||
return ''
|
||||
|
||||
|
||||
def _check_apt():
|
||||
'''
|
||||
Abort if python-apt is not installed
|
||||
@ -242,18 +229,11 @@ def latest_version(*names, **kwargs):
|
||||
'''
|
||||
refresh = salt.utils.is_true(kwargs.pop('refresh', True))
|
||||
show_installed = salt.utils.is_true(kwargs.pop('show_installed', False))
|
||||
|
||||
if 'repo' in kwargs:
|
||||
# Remember to kill _get_repo() too when removing this warning.
|
||||
salt.utils.warn_until(
|
||||
'Hydrogen',
|
||||
'The \'repo\' argument to apt.latest_version is deprecated, and '
|
||||
'will be removed in Salt {version}. Please use \'fromrepo\' '
|
||||
'instead.'
|
||||
raise SaltInvocationError(
|
||||
'The \'repo\' argument is invalid, use \'fromrepo\' instead'
|
||||
)
|
||||
fromrepo = _get_repo(**kwargs)
|
||||
kwargs.pop('fromrepo', None)
|
||||
kwargs.pop('repo', None)
|
||||
fromrepo = kwargs.pop('fromrepo', None)
|
||||
cache_valid_time = kwargs.pop('cache_valid_time', 0)
|
||||
|
||||
if len(names) == 0:
|
||||
@ -1380,9 +1360,10 @@ def _get_upgradable(dist_upgrade=True, **kwargs):
|
||||
cmd.append('dist-upgrade')
|
||||
else:
|
||||
cmd.append('upgrade')
|
||||
fromrepo = _get_repo(**kwargs)
|
||||
if fromrepo:
|
||||
cmd.extend(['-o', 'APT::Default-Release={0}'.format(fromrepo)])
|
||||
try:
|
||||
cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
call = __salt__['cmd.run_all'](cmd,
|
||||
python_shell=False,
|
||||
|
@ -1892,7 +1892,7 @@ def list_policy_versions(policy_name,
|
||||
return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions')
|
||||
except boto.exception.BotoServerError as e:
|
||||
log.debug(e)
|
||||
msg = 'Failed to list {0} policy vesions.'
|
||||
msg = 'Failed to list {0} policy versions.'
|
||||
log.error(msg.format(policy_name))
|
||||
return []
|
||||
|
||||
|
@ -211,7 +211,7 @@ def _gather_pillar(pillarenv, pillar_override):
|
||||
__grains__,
|
||||
__opts__['id'],
|
||||
__opts__['environment'],
|
||||
pillar=pillar_override,
|
||||
pillar_override=pillar_override,
|
||||
pillarenv=pillarenv
|
||||
)
|
||||
ret = pillar.compile_pillar()
|
||||
@ -526,10 +526,25 @@ def _run(cmd,
|
||||
try:
|
||||
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError(
|
||||
msg = (
|
||||
'Unable to run command \'{0}\' with the context \'{1}\', '
|
||||
'reason: {2}'.format(cmd, kwargs, exc)
|
||||
'reason: '.format(
|
||||
cmd if _check_loglevel(output_loglevel) is not None
|
||||
else 'REDACTED',
|
||||
kwargs
|
||||
)
|
||||
)
|
||||
try:
|
||||
if exc.filename is None:
|
||||
msg += 'command not found'
|
||||
else:
|
||||
msg += '{0}: {1}'.format(exc, exc.filename)
|
||||
except AttributeError:
|
||||
# Both IOError and OSError have the filename attribute, so this
|
||||
# is a precaution in case the exception classes in the previous
|
||||
# try/except are changed.
|
||||
msg += 'unknown'
|
||||
raise CommandExecutionError(msg)
|
||||
|
||||
try:
|
||||
proc.run()
|
||||
|
@ -1972,6 +1972,8 @@ def acl_create(consul_url=None, **kwargs):
|
||||
Create a new ACL token.
|
||||
|
||||
:param consul_url: The Consul server URL.
|
||||
:param id: Unique identifier for the ACL to create;
|
||||
leave it blank to let the Consul server generate one
|
||||
:param name: Meaningful indicator of the ACL's purpose.
|
||||
:param type: Type is either client or management. A management
|
||||
token is comparable to a root user and has the
|
||||
@ -2002,6 +2004,9 @@ def acl_create(consul_url=None, **kwargs):
|
||||
else:
|
||||
raise SaltInvocationError('Required argument "name" is missing.')
|
||||
|
||||
if 'id' in kwargs:
|
||||
data['ID'] = kwargs['id']
|
||||
|
||||
if 'type' in kwargs:
|
||||
data['Type'] = kwargs['type']
|
||||
|
||||
@ -2120,7 +2125,7 @@ def acl_delete(consul_url=None, **kwargs):
|
||||
ret['res'] = False
|
||||
return ret
|
||||
|
||||
function = 'acl/delete/{0}'.format(kwargs['id'])
|
||||
function = 'acl/destroy/{0}'.format(kwargs['id'])
|
||||
res = _query(consul_url=consul_url,
|
||||
data=data,
|
||||
method='PUT',
|
||||
|
@ -48,7 +48,7 @@ def _gather_pillar(pillarenv, pillar_override):
|
||||
__grains__,
|
||||
__opts__['id'],
|
||||
__opts__['environment'],
|
||||
pillar=pillar_override,
|
||||
pillar_override=pillar_override,
|
||||
pillarenv=pillarenv
|
||||
)
|
||||
ret = pillar.compile_pillar()
|
||||
@ -57,7 +57,36 @@ def _gather_pillar(pillarenv, pillar_override):
|
||||
return ret
|
||||
|
||||
|
||||
def recv(dest, chunk, append=False, compressed=True, mode=None):
|
||||
def recv(files, dest):
|
||||
'''
|
||||
Used with salt-cp, pass the files dict, and the destination.
|
||||
|
||||
This function receives small files copied from the master via salt-cp.
|
||||
It does not work via the CLI.
|
||||
'''
|
||||
ret = {}
|
||||
for path, data in six.iteritems(files):
|
||||
if os.path.basename(path) == os.path.basename(dest) \
|
||||
and not os.path.isdir(dest):
|
||||
final = dest
|
||||
elif os.path.isdir(dest):
|
||||
final = os.path.join(dest, os.path.basename(path))
|
||||
elif os.path.isdir(os.path.dirname(dest)):
|
||||
final = dest
|
||||
else:
|
||||
return 'Destination unavailable'
|
||||
|
||||
try:
|
||||
with salt.utils.fopen(final, 'w+') as fp_:
|
||||
fp_.write(data)
|
||||
ret[final] = True
|
||||
except IOError:
|
||||
ret[final] = False
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def recv_chunked(dest, chunk, append=False, compressed=True, mode=None):
|
||||
'''
|
||||
This function receives files copied to the minion using ``salt-cp`` and is
|
||||
not intended to be used directly on the CLI.
|
||||
|
@ -60,17 +60,22 @@ def __virtual__():
return (False, 'The debbuild module could not be loaded: unsupported OS family')


def _check_repo_sign_utils_support():
util_name = 'debsign'
if salt.utils.which(util_name):
def _check_repo_sign_utils_support(name):
'''
Check for the specified command name in the search path
'''
if salt.utils.which(name):
return True
else:
raise CommandExecutionError(
'utility \'{0}\' needs to be installed'.format(util_name)
'utility \'{0}\' needs to be installed or made available in search path'.format(name)
)


def _check_repo_gpg_phrase_utils_support():
'''
Check that /usr/lib/gnupg2/gpg-preset-passphrase is installed
'''
util_name = '/usr/lib/gnupg2/gpg-preset-passphrase'
if __salt__['file.file_exists'](util_name):
return True
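The reworked helper is a plain "is this tool available" guard; a self-contained sketch of the same pattern, using shutil.which in place of salt.utils.which.

import shutil

def require_tool(name):
    # mirrors _check_repo_sign_utils_support(name): True if found, raise otherwise
    if shutil.which(name):
        return True
    raise RuntimeError(
        'utility {0!r} needs to be installed or made available in search path'.format(name))

# require_tool('debsign')  # raises unless debsign is on the PATH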
@ -170,8 +175,8 @@ def _get_repo_dists_env(env):
|
||||
'ORIGIN': ('O', 'Origin', 'SaltStack'),
|
||||
'LABEL': ('O', 'Label', 'salt_debian'),
|
||||
'SUITE': ('O', 'Suite', 'stable'),
|
||||
'VERSION': ('O', 'Version', '8.1'),
|
||||
'CODENAME': ('M', 'Codename', 'jessie'),
|
||||
'VERSION': ('O', 'Version', '9.0'),
|
||||
'CODENAME': ('M', 'Codename', 'stretch'),
|
||||
'ARCHS': ('M', 'Architectures', 'i386 amd64 source'),
|
||||
'COMPONENTS': ('M', 'Components', 'main'),
|
||||
'DESCRIPTION': ('O', 'Description', 'SaltStack debian package repo'),
|
||||
@ -205,7 +210,7 @@ def _get_repo_dists_env(env):
|
||||
else:
|
||||
env_dists += '{0}: {1}\n'.format(key, value)
|
||||
|
||||
## ensure mandatories are included
|
||||
# ensure mandatories are included
|
||||
env_keys = list(env.keys())
|
||||
for key in env_keys:
|
||||
if key in dflts_keys and dflts_dict[key][0] == 'M' and key not in env_man_seen:
|
||||
@ -312,7 +317,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
for src in sources:
_get_src(tree_base, src, saltenv)

#.dsc then assumes sources already built
# .dsc then assumes sources already built
if spec_pathfile.endswith('.dsc'):
for efile in os.listdir(tree_base):
full = os.path.join(tree_base, efile)
@ -578,7 +583,8 @@ def make_repo(repodir,
|
||||
with salt.utils.fopen(repoconfopts, 'w') as fow:
|
||||
fow.write('{0}'.format(repocfg_opts))
|
||||
|
||||
local_fingerprint = None
|
||||
local_keygrip_to_use = None
|
||||
local_key_fingerprint = None
|
||||
local_keyid = None
|
||||
phrase = ''
|
||||
|
||||
@ -587,17 +593,14 @@ def make_repo(repodir,
|
||||
gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome)
|
||||
gpg_tty_info_dict = {}
|
||||
|
||||
# test if using older than gnupg 2.1, env file exists
|
||||
# if using older than gnupg 2.1, then env file exists
|
||||
older_gnupg = __salt__['file.file_exists'](gpg_info_file)
|
||||
|
||||
# interval of 0.125 is really too fast on some systems
|
||||
interval = 0.5
|
||||
|
||||
if keyid is not None:
|
||||
with salt.utils.fopen(repoconfdist, 'a') as fow:
|
||||
fow.write('SignWith: {0}\n'.format(keyid))
|
||||
|
||||
## import_keys
|
||||
# import_keys
|
||||
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
|
||||
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
|
||||
|
||||
@ -621,21 +624,37 @@ def make_repo(repodir,
|
||||
local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
|
||||
for gpg_key in local_keys:
|
||||
if keyid == gpg_key['keyid'][8:]:
|
||||
local_fingerprint = gpg_key['fingerprint']
|
||||
local_keygrip_to_use = gpg_key['fingerprint']
|
||||
local_key_fingerprint = gpg_key['fingerprint']
|
||||
local_keyid = gpg_key['keyid']
|
||||
break
|
||||
|
||||
if not older_gnupg:
|
||||
_check_repo_sign_utils_support('gpg2')
|
||||
cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.which('gpg2'))
|
||||
local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas)
|
||||
local_keys2 = iter(local_keys2_keygrip.splitlines())
|
||||
try:
|
||||
for line in local_keys2:
|
||||
if line.startswith('sec'):
|
||||
line_fingerprint = next(local_keys2).lstrip().rstrip()
|
||||
if local_key_fingerprint == line_fingerprint:
|
||||
lkeygrip = next(local_keys2).split('=')
|
||||
local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
|
||||
break
|
||||
except StopIteration:
|
||||
raise SaltInvocationError(
|
||||
'unable to find keygrip associated with fingerprint \'{0}\' for keyid \'{1}\''
|
||||
.format(local_key_fingerprint, local_keyid)
|
||||
)
|
||||
|
||||
if local_keyid is None:
|
||||
raise SaltInvocationError(
|
||||
'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
|
||||
.format(keyid, gnupghome)
|
||||
)
|
||||
|
||||
_check_repo_sign_utils_support()
|
||||
|
||||
if use_passphrase:
|
||||
_check_repo_gpg_phrase_utils_support()
|
||||
phrase = __salt__['pillar.get']('gpg_passphrase')
|
||||
_check_repo_sign_utils_support('debsign')
|
||||
|
||||
if older_gnupg:
|
||||
with salt.utils.fopen(gpg_info_file, 'r') as fow:
|
||||
@ -656,10 +675,30 @@ def make_repo(repodir,
|
||||
__salt__['environ.setenv'](gpg_tty_info_dict)
|
||||
break
|
||||
|
||||
## sign_it_here
|
||||
for file in os.listdir(repodir):
|
||||
if file.endswith('.dsc'):
|
||||
abs_file = os.path.join(repodir, file)
|
||||
if use_passphrase:
|
||||
_check_repo_gpg_phrase_utils_support()
|
||||
phrase = __salt__['pillar.get']('gpg_passphrase')
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_keygrip_to_use)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
for debfile in os.listdir(repodir):
|
||||
abs_file = os.path.join(repodir, debfile)
|
||||
if debfile.endswith('.changes'):
|
||||
os.remove(abs_file)
|
||||
|
||||
if debfile.endswith('.dsc'):
|
||||
# sign_it_here
|
||||
if older_gnupg:
|
||||
if local_keyid is not None:
|
||||
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
else:
|
||||
# interval of 0.125 is really too fast on some systems
|
||||
interval = 0.5
|
||||
if local_keyid is not None:
|
||||
number_retries = timeout / interval
|
||||
times_looped = 0
|
||||
error_msg = 'Failed to debsign file {0}'.format(abs_file)
|
||||
@ -702,27 +741,6 @@ def make_repo(repodir,
|
||||
finally:
|
||||
proc.close(terminate=True, kill=True)
|
||||
|
||||
if use_passphrase:
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --forget {0}'.format(local_fingerprint)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_fingerprint)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
for debfile in os.listdir(repodir):
|
||||
abs_file = os.path.join(repodir, debfile)
|
||||
if debfile.endswith('.changes'):
|
||||
os.remove(abs_file)
|
||||
|
||||
if debfile.endswith('.dsc'):
|
||||
if older_gnupg:
|
||||
if local_keyid is not None:
|
||||
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
else:
|
||||
number_retries = timeout / interval
|
||||
times_looped = 0
|
||||
error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file)
|
||||
@ -747,8 +765,7 @@ def make_repo(repodir,

if times_looped > number_retries:
raise SaltInvocationError(
'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'
.format(abs_file, int(times_looped * interval))
'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
)
time.sleep(interval)

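The timeout handling in this hunk is a simple poll loop; a rough standalone sketch of the same retry arithmetic (function name and exception type invented).

import time

def wait_for(check, timeout=15.0, interval=0.5):
    # timeout / interval retries, sleeping `interval` seconds between attempts
    number_retries = timeout / interval
    times_looped = 0
    while not check():
        if times_looped > number_retries:
            raise RuntimeError(
                'timed out after {0} seconds'.format(int(times_looped * interval)))
        times_looped += 1
        time.sleep(interval)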
@ -770,8 +787,4 @@ def make_repo(repodir,
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(codename, abs_file)
|
||||
res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
if use_passphrase and local_keyid is not None:
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --forget {0}'.format(local_fingerprint)
|
||||
res = __salt__['cmd.run_all'](cmd, runas=runas)
|
||||
|
||||
return res
|
||||
|
@ -22,6 +22,10 @@ import salt.utils.decorators as decorators
|
||||
from salt.utils.decorators import depends
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
__func_alias__ = {
|
||||
'format_': 'format'
|
||||
}
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
HAS_HDPARM = salt.utils.which('hdparm') is not None
|
||||
|
@ -1,6 +1,10 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Compendium of generic DNS utilities
|
||||
Compendium of generic DNS utilities.
|
||||
|
||||
.. note::
|
||||
|
||||
Some functions in the ``dnsutil`` execution module depend on ``dig``.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
@ -232,7 +236,7 @@ def check_ip(ip_addr):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt ns1 dig.check_ip 127.0.0.1
|
||||
salt ns1 dnsutil.check_ip 127.0.0.1
|
||||
'''
|
||||
if _has_dig():
|
||||
return __salt__['dig.check_ip'](ip_addr)
|
||||
@ -242,7 +246,7 @@ def check_ip(ip_addr):
|
||||
|
||||
def A(host, nameserver=None):
|
||||
'''
|
||||
Return the A record(s) for `host`.
|
||||
Return the A record(s) for ``host``.
|
||||
|
||||
Always returns a list.
|
||||
|
||||
@ -267,7 +271,7 @@ def A(host, nameserver=None):
|
||||
|
||||
def AAAA(host, nameserver=None):
|
||||
'''
|
||||
Return the AAAA record(s) for `host`.
|
||||
Return the AAAA record(s) for ``host``.
|
||||
|
||||
Always returns a list.
|
||||
|
||||
@ -302,7 +306,7 @@ def NS(domain, resolve=True, nameserver=None):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt ns1 dig.NS google.com
|
||||
salt ns1 dnsutil.NS google.com
|
||||
|
||||
'''
|
||||
if _has_dig():
|
||||
@ -323,7 +327,7 @@ def SPF(domain, record='SPF', nameserver=None):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt ns1 dig.SPF google.com
|
||||
salt ns1 dnsutil.SPF google.com
|
||||
'''
|
||||
if _has_dig():
|
||||
return __salt__['dig.SPF'](domain, record, nameserver)
|
||||
@ -346,7 +350,7 @@ def MX(domain, resolve=False, nameserver=None):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt ns1 dig.MX google.com
|
||||
salt ns1 dnsutil.MX google.com
|
||||
'''
|
||||
if _has_dig():
|
||||
return __salt__['dig.MX'](domain, resolve, nameserver)
|
||||
|
@ -1899,6 +1899,10 @@ def login(*registries):
|
||||
cmd = ['docker', 'login', '-u', username, '-p', password]
|
||||
if registry.lower() != 'hub':
|
||||
cmd.append(registry)
|
||||
log.debug(
|
||||
'Attempting to login to docker registry \'%s\' as user \'%s\'',
|
||||
registry, username
|
||||
)
|
||||
login_cmd = __salt__['cmd.run_all'](
|
||||
cmd,
|
||||
python_shell=False,
|
||||
@ -4399,7 +4403,7 @@ def save(name,
|
||||
ret['Size_Human'] = _size_fmt(ret['Size'])
|
||||
|
||||
# Process push
|
||||
if kwargs.get(push, False):
|
||||
if kwargs.get('push', False):
|
||||
ret['Push'] = __salt__['cp.push'](path)
|
||||
|
||||
return ret
|
||||
@ -5617,7 +5621,7 @@ def _gather_pillar(pillarenv, pillar_override, **grains):
|
||||
# Not sure if these two are correct
|
||||
__opts__['id'],
|
||||
__opts__['environment'],
|
||||
pillar=pillar_override,
|
||||
pillar_override=pillar_override,
|
||||
pillarenv=pillarenv
|
||||
)
|
||||
ret = pillar.compile_pillar()
|
||||
|
@ -710,18 +710,21 @@ def check_hash(path, file_hash):
|
||||
|
||||
hash
|
||||
The hash to check against the file specified in the ``path`` argument.
|
||||
For versions 2016.11.4 and newer, the hash can be specified without an
|
||||
|
||||
.. versionchanged:: 2016.11.4
|
||||
|
||||
For this and newer versions the hash can be specified without an
|
||||
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
|
||||
but for earlier releases it is necessary to also specify the hash type
|
||||
in the format ``<hash_type>:<hash_value>`` (e.g.
|
||||
``md5:e138491e9d5b97023cea823fe17bac22``).
|
||||
in the format ``<hash_type>=<hash_value>`` (e.g.
|
||||
``md5=e138491e9d5b97023cea823fe17bac22``).
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
|
||||
salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22
|
||||
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
|
||||
'''
|
||||
path = os.path.expanduser(path)
|
||||
|
||||
@ -1884,6 +1887,7 @@ def replace(path,
|
||||
show_changes=True,
|
||||
ignore_if_missing=False,
|
||||
preserve_inode=True,
|
||||
backslash_literal=False,
|
||||
):
|
||||
'''
|
||||
.. versionadded:: 0.17.0
|
||||
@ -1984,6 +1988,14 @@ def replace(path,
|
||||
filename. Hard links will then share an inode with the backup, instead
|
||||
(if using ``backup`` to create a backup copy).
|
||||
|
||||
backslash_literal : False
|
||||
.. versionadded:: 2016.11.7
|
||||
|
||||
Interpret backslashes as literal backslashes for the repl and not
|
||||
escape characters. This will help when using append/prepend so that
|
||||
the backslashes are not interpreted for the repl on the second run of
|
||||
the state.
|
||||
|
||||
If an equal sign (``=``) appears in an argument to a Salt command it is
|
||||
interpreted as a keyword argument in the format ``key=val``. That
|
||||
processing can be bypassed in order to pass an equal sign through to the
|
||||
@ -2080,7 +2092,10 @@ def replace(path,
|
||||
if re.search(cpattern, r_data):
|
||||
return True # `with` block handles file closure
|
||||
else:
|
||||
result, nrepl = re.subn(cpattern, repl, r_data, count)
|
||||
result, nrepl = re.subn(cpattern,
|
||||
repl.replace('\\', '\\\\') if backslash_literal else repl,
|
||||
r_data,
|
||||
count)
|
||||
|
||||
# found anything? (even if no change)
|
||||
if nrepl > 0:
|
||||
@ -2138,8 +2153,10 @@ def replace(path,
|
||||
r_data = mmap.mmap(r_file.fileno(),
|
||||
0,
|
||||
access=mmap.ACCESS_READ)
|
||||
result, nrepl = re.subn(cpattern, repl,
|
||||
r_data, count)
|
||||
result, nrepl = re.subn(cpattern,
|
||||
repl.replace('\\', '\\\\') if backslash_literal else repl,
|
||||
r_data,
|
||||
count)
|
||||
try:
|
||||
w_file.write(salt.utils.to_str(result))
|
||||
except (OSError, IOError) as exc:
|
||||
|
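The new backslash_literal branch doubles every backslash in the replacement before handing it to re.subn(), so the regex engine stops treating them as escape sequences. A small illustration (Python 3 semantics, values invented):

import re

repl = r'C:\new\path'            # replacement text containing backslashes
data = 'location=PLACEHOLDER'

# With the backslashes doubled (the backslash_literal=True behaviour) they survive verbatim:
print(re.subn('PLACEHOLDER', repl.replace('\\', '\\\\'), data)[0])
# -> location=C:\new\path

# Without doubling, re.subn() would treat '\n' as a newline and reject '\p' as a
# bad escape, which is the problem the new option works around.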
@ -24,6 +24,8 @@ import salt.utils.kickstart
|
||||
import salt.syspaths
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -325,6 +327,8 @@ def _bootstrap_yum(
|
||||
'''
|
||||
if pkgs is None:
|
||||
pkgs = []
|
||||
elif isinstance(pkgs, six.string_types):
|
||||
pkgs = pkgs.split(',')
|
||||
|
||||
default_pkgs = ('yum', 'centos-release', 'iputils')
|
||||
for pkg in default_pkgs:
|
||||
@ -333,6 +337,8 @@ def _bootstrap_yum(
|
||||
|
||||
if exclude_pkgs is None:
|
||||
exclude_pkgs = []
|
||||
elif isinstance(exclude_pkgs, six.string_types):
|
||||
exclude_pkgs = exclude_pkgs.split(',')
|
||||
|
||||
for pkg in exclude_pkgs:
|
||||
pkgs.remove(pkg)
|
||||
@ -393,15 +399,27 @@ def _bootstrap_deb(
|
||||
if repo_url is None:
|
||||
repo_url = 'http://ftp.debian.org/debian/'
|
||||
|
||||
if not salt.utils.which('debootstrap'):
|
||||
log.error('Required tool debootstrap is not installed.')
|
||||
return False
|
||||
|
||||
if isinstance(pkgs, (list, tuple)):
|
||||
pkgs = ','.join(pkgs)
|
||||
if isinstance(exclude_pkgs, (list, tuple)):
|
||||
exclude_pkgs = ','.join(exclude_pkgs)
|
||||
|
||||
deb_args = [
|
||||
'debootstrap',
|
||||
'--foreign',
|
||||
'--arch',
|
||||
_cmd_quote(arch),
|
||||
'--include',
|
||||
] + pkgs + [
|
||||
'--exclude',
|
||||
] + exclude_pkgs + [
|
||||
_cmd_quote(arch)]
|
||||
|
||||
if pkgs:
|
||||
deb_args += ['--include', _cmd_quote(pkgs)]
|
||||
if exclude_pkgs:
|
||||
deb_args += ['--exclude', _cmd_quote(exclude_pkgs)]
|
||||
|
||||
deb_args += [
|
||||
_cmd_quote(flavor),
|
||||
_cmd_quote(root),
|
||||
_cmd_quote(repo_url),
|
||||
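The change above stops emitting empty --include/--exclude flags when no packages were given; a condensed sketch of the resulting argument building (package and path values invented).

from shlex import quote

def build_debootstrap_args(arch, flavor, root, repo_url, pkgs=None, exclude_pkgs=None):
    # the flags are only appended when there is actually a value to pass
    args = ['debootstrap', '--foreign', '--arch', quote(arch)]
    if pkgs:
        args += ['--include', quote(','.join(pkgs))]
    if exclude_pkgs:
        args += ['--exclude', quote(','.join(exclude_pkgs))]
    return args + [quote(flavor), quote(root), quote(repo_url)]

# build_debootstrap_args('amd64', 'stretch', '/tmp/root', 'http://ftp.debian.org/debian/')
# -> no stray --include/--exclude pair ends up on the command line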
@ -469,6 +487,8 @@ def _bootstrap_pacman(
|
||||
|
||||
if pkgs is None:
|
||||
pkgs = []
|
||||
elif isinstance(pkgs, six.string_types):
|
||||
pkgs = pkgs.split(',')
|
||||
|
||||
default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub')
|
||||
for pkg in default_pkgs:
|
||||
@ -477,6 +497,8 @@ def _bootstrap_pacman(
|
||||
|
||||
if exclude_pkgs is None:
|
||||
exclude_pkgs = []
|
||||
elif isinstance(exclude_pkgs, six.string_types):
|
||||
exclude_pkgs = exclude_pkgs.split(',')
|
||||
|
||||
for pkg in exclude_pkgs:
|
||||
pkgs.remove(pkg)
|
||||
|
@ -170,13 +170,24 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
|
||||
identity = [identity]
|
||||
|
||||
# try each of the identities, independently
|
||||
tmp_identity_file = None
|
||||
for id_file in identity:
|
||||
if 'salt://' in id_file:
|
||||
_id_file = id_file
|
||||
id_file = __salt__['cp.cache_file'](id_file, saltenv)
|
||||
with salt.utils.files.set_umask(0o077):
|
||||
tmp_identity_file = salt.utils.mkstemp()
|
||||
_id_file = id_file
|
||||
id_file = __salt__['cp.get_file'](id_file,
|
||||
tmp_identity_file,
|
||||
saltenv)
|
||||
if not id_file:
|
||||
log.error('identity {0} does not exist.'.format(_id_file))
|
||||
__salt__['file.remove'](tmp_identity_file)
|
||||
continue
|
||||
else:
|
||||
if user:
|
||||
os.chown(id_file,
|
||||
__salt__['file.user_to_uid'](user),
|
||||
-1)
|
||||
else:
|
||||
if not __salt__['file.file_exists'](id_file):
|
||||
missing_keys.append(id_file)
|
||||
@ -247,6 +258,11 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
if not salt.utils.is_windows() and 'GIT_SSH' in env:
os.remove(env['GIT_SSH'])

# Clean up the temporary identity file
if tmp_identity_file and os.path.exists(tmp_identity_file):
log.debug('Removing identity file {0}'.format(tmp_identity_file))
__salt__['file.remove'](tmp_identity_file)

# If the command was successful, no need to try additional IDs
if result['retcode'] == 0:
return result

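The new identity handling fetches a salt:// key into a private temporary file and always removes it afterwards; a generic sketch of that pattern outside of Salt (helper names invented). tempfile.mkstemp() already creates the file readable only by the owner, which is what the set_umask(0o077) above is after.

import os
import tempfile

def with_private_copy(fetch, use):
    fd, tmp_path = tempfile.mkstemp()   # created with mode 0600
    os.close(fd)
    try:
        fetch(tmp_path)                 # e.g. copy 'salt://keys/id_rsa' into tmp_path
        return use(tmp_path)            # e.g. run git with GIT_SSH pointing at the key
    finally:
        if os.path.exists(tmp_path):
            os.remove(tmp_path)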
@ -35,7 +35,7 @@ import salt.utils
|
||||
|
||||
# Import 3rd-party libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
from salt.exceptions import SaltInvocationError
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
# pylint: enable=import-error,no-name-in-module
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -87,6 +87,19 @@ def _connect():
password=jenkins_password)


def _retrieve_config_xml(config_xml, saltenv):
'''
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
'''
ret = __salt__['cp.cache_file'](config_xml, saltenv)

if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))

return ret


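The helper above centralises the cache-or-fail logic; its intended use by create_job() and update_job() further down in this diff looks like the following (the salt:// path is illustrative only).

config_xml_file = _retrieve_config_xml('salt://jenkins/jobs/build.xml', 'base')
with salt.utils.fopen(config_xml_file) as _fp:
    config_xml = _fp.read()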
def get_version():
|
||||
'''
|
||||
Return version of Jenkins
|
||||
@ -144,7 +157,7 @@ def job_exists(name=None):
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
if server.job_exists(name):
|
||||
@ -168,12 +181,12 @@ def get_job_info(name=None):
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exist.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
job_info = server.get_job_info(name)
|
||||
if job_info:
|
||||
@ -197,17 +210,19 @@ def build_job(name=None, parameters=None):
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exist.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
|
||||
|
||||
try:
|
||||
server.build_job(name, parameters)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error building job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@ -232,15 +247,15 @@ def create_job(name=None,
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
if job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` already exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' already exists'.format(name))
|
||||
|
||||
if not config_xml:
|
||||
config_xml = jenkins.EMPTY_CONFIG_XML
|
||||
else:
|
||||
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv)
|
||||
config_xml_file = _retrieve_config_xml(config_xml, saltenv)
|
||||
|
||||
with salt.utils.fopen(config_xml_file) as _fp:
|
||||
config_xml = _fp.read()
|
||||
@ -249,7 +264,9 @@ def create_job(name=None,
|
||||
try:
|
||||
server.create_job(name, config_xml)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error creating job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return config_xml
|
||||
|
||||
|
||||
@ -274,12 +291,12 @@ def update_job(name=None,
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
if not config_xml:
|
||||
config_xml = jenkins.EMPTY_CONFIG_XML
|
||||
else:
|
||||
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv)
|
||||
config_xml_file = _retrieve_config_xml(config_xml, saltenv)
|
||||
|
||||
with salt.utils.fopen(config_xml_file) as _fp:
|
||||
config_xml = _fp.read()
|
||||
@ -288,7 +305,9 @@ def update_job(name=None,
|
||||
try:
|
||||
server.reconfig_job(name, config_xml)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error updating job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return config_xml
|
||||
|
||||
|
||||
@ -307,17 +326,19 @@ def delete_job(name=None):
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.delete_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error deleting job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@ -336,17 +357,19 @@ def enable_job(name=None):
|
||||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.enable_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error enabling job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@ -366,17 +389,19 @@ def disable_job(name=None):
|
||||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.disable_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error disabling job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@ -396,12 +421,12 @@ def job_status(name=None):
|
||||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
return server.get_job_info('empty')['buildable']
|
||||
|
||||
@ -422,12 +447,12 @@ def get_job_config(name=None):
|
||||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
job_info = server.get_job_config(name)
|
||||
return job_info
|
||||
|
@ -262,7 +262,7 @@ def create_keytab(name, keytab, enctypes=None):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt 'kdc.example.com' host/host1.example.com host1.example.com.keytab
|
||||
salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab
|
||||
'''
|
||||
ret = {}
|
||||
|
||||
|
@ -216,7 +216,7 @@ def align_check(device, part_type, partition):
|
||||
'Invalid partition passed to partition.align_check'
|
||||
)
|
||||
|
||||
cmd = 'parted -m -s {0} align-check {1} {2}'.format(
|
||||
cmd = 'parted -m {0} align-check {1} {2}'.format(
|
||||
device, part_type, partition
|
||||
)
|
||||
out = __salt__['cmd.run'](cmd).splitlines()
|
||||
|
@ -110,7 +110,7 @@ def get(key,
|
||||
default = ''
|
||||
opt_merge_lists = __opts__.get('pillar_merge_lists', False) if \
|
||||
merge_nested_lists is None else merge_nested_lists
|
||||
pillar_dict = __pillar__ if saltenv is None else items(saltenv=saltenv)
|
||||
pillar_dict = __pillar__ if saltenv is None else items(pillarenv=saltenv)
|
||||
|
||||
if merge:
|
||||
if isinstance(default, dict):
|
||||
@ -203,7 +203,7 @@ def items(*args, **kwargs):
|
||||
__opts__,
|
||||
__grains__,
|
||||
__opts__['id'],
|
||||
pillar=kwargs.get('pillar'),
|
||||
pillar_override=kwargs.get('pillar'),
|
||||
pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv'])
|
||||
|
||||
return pillar.compile_pillar()
|
||||
@ -411,7 +411,7 @@ def ext(external, pillar=None):
|
||||
__opts__['id'],
|
||||
__opts__['environment'],
|
||||
ext=external,
|
||||
pillar=pillar)
|
||||
pillar_override=pillar)
|
||||
|
||||
ret = pillar_obj.compile_pillar()
|
||||
|
||||
|
@ -345,9 +345,10 @@ def summary():
|
||||
|
||||
def plugin_sync():
|
||||
'''
|
||||
Runs a plugin synch between the puppet master and agent
|
||||
Runs a plugin sync between the puppet master and agent
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' puppet.plugin_sync
|
||||
|
@ -72,6 +72,7 @@ def _safe_output(line):
|
||||
'''
|
||||
return not any([
|
||||
line.startswith('Listing') and line.endswith('...'),
|
||||
line.startswith('Listing') and '\t' not in line,
|
||||
'...done' in line,
|
||||
line.startswith('WARNING:')
|
||||
])
|
||||
@ -471,11 +472,11 @@ def set_user_tags(name, tags, runas=None):
|
||||
if runas is None:
|
||||
runas = salt.utils.get_user()
|
||||
|
||||
if tags and isinstance(tags, (list, tuple)):
|
||||
tags = ' '.join(tags)
|
||||
if not isinstance(tags, (list, tuple)):
|
||||
tags = [tags]
|
||||
|
||||
res = __salt__['cmd.run'](
|
||||
['rabbitmqctl', 'set_user_tags', name, tags],
|
||||
['rabbitmqctl', 'set_user_tags', name] + list(tags),
|
||||
runas=runas,
|
||||
python_shell=False)
|
||||
msg = "Tag(s) set"
|
||||
|
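The set_user_tags fix passes each tag as its own argv element instead of one space-joined string; a tiny illustration of the difference (user and tag names invented).

name = 'bob'
tags = ['administrator', 'monitoring']

# old behaviour: a single argument containing a space
old_cmd = ['rabbitmqctl', 'set_user_tags', name, ' '.join(tags)]

# new behaviour: rabbitmqctl sees each tag separately
new_cmd = ['rabbitmqctl', 'set_user_tags', name] + list(tags)
# -> ['rabbitmqctl', 'set_user_tags', 'bob', 'administrator', 'monitoring']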
@ -828,7 +828,7 @@ def _parse_network_settings(opts, current):
|
||||
_raise_error_network('hostname', ['server1.example.com'])
|
||||
|
||||
if 'nozeroconf' in opts:
|
||||
nozeroconf = salt.utils.dequote(opts['nozerconf'])
|
||||
nozeroconf = salt.utils.dequote(opts['nozeroconf'])
|
||||
if nozeroconf in valid:
|
||||
if nozeroconf in _CONFIG_TRUE:
|
||||
result['nozeroconf'] = true_val
|
||||
|
@ -76,7 +76,8 @@ def _get_top_file_envs():
|
||||
return __context__['saltutil._top_file_envs']
|
||||
except KeyError:
|
||||
try:
|
||||
st_ = salt.state.HighState(__opts__)
|
||||
st_ = salt.state.HighState(__opts__,
|
||||
initial_pillar=__pillar__)
|
||||
top = st_.get_top()
|
||||
if top:
|
||||
envs = list(st_.top_matches(top).keys()) or 'base'
|
||||
@ -186,10 +187,14 @@ def sync_beacons(saltenv=None, refresh=True):

Sync beacons from ``salt://_beacons`` to the minion

saltenv : base
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.

If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for beacons to sync. If no top files are
found, then the ``base`` environment will be synced.

refresh : True
If ``True``, refresh the available beacons on the minion. This refresh
will be performed even if no new beacons are synced. Set to ``False``
@ -215,10 +220,14 @@ def sync_sdb(saltenv=None):
|
||||
|
||||
Sync sdb modules from ``salt://_sdb`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for sdb modules to sync. If no top files
|
||||
are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : False
|
||||
This argument has no effect and is included for consistency with the
|
||||
other sync functions.
|
||||
@ -241,10 +250,14 @@ def sync_modules(saltenv=None, refresh=True):
|
||||
|
||||
Sync execution modules from ``salt://_modules`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for execution modules to sync. If no top
|
||||
files are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new execution modules are
|
||||
@ -287,10 +300,14 @@ def sync_states(saltenv=None, refresh=True):
|
||||
|
||||
Sync state modules from ``salt://_states`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for state modules to sync. If no top
|
||||
files are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available states on the minion. This refresh
|
||||
will be performed even if no new state modules are synced. Set to
|
||||
@ -349,10 +366,14 @@ def sync_grains(saltenv=None, refresh=True):
|
||||
|
||||
Sync grains modules from ``salt://_grains`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for grains modules to sync. If no top
|
||||
files are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules and recompile
|
||||
pillar data for the minion. This refresh will be performed even if no
|
||||
@ -380,10 +401,14 @@ def sync_renderers(saltenv=None, refresh=True):
|
||||
|
||||
Sync renderers from ``salt://_renderers`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for renderers to sync. If no top files
|
||||
are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new renderers are synced.
|
||||
@ -410,10 +435,14 @@ def sync_returners(saltenv=None, refresh=True):
|
||||
|
||||
Sync beacons from ``salt://_returners`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for returners to sync. If no top files
|
||||
are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new returners are synced. Set
|
||||
@ -438,10 +467,14 @@ def sync_proxymodules(saltenv=None, refresh=False):
|
||||
|
||||
Sync proxy modules from ``salt://_proxy`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for proxy modules to sync. If no top
|
||||
files are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new proxy modules are synced.
|
||||
@ -467,10 +500,14 @@ def sync_engines(saltenv=None, refresh=False):
|
||||
|
||||
Sync engine modules from ``salt://_engines`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for engines to sync. If no top files are
|
||||
found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new engine modules are synced.
|
||||
@ -493,10 +530,14 @@ def sync_output(saltenv=None, refresh=True):
|
||||
'''
|
||||
Sync outputters from ``salt://_output`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for outputters to sync. If no top files
|
||||
are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new outputters are synced.
|
||||
@ -524,10 +565,14 @@ def sync_utils(saltenv=None, refresh=True):
|
||||
|
||||
Sync utility modules from ``salt://_utils`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for utility modules to sync. If no top
|
||||
files are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new utility modules are
|
||||
@ -553,10 +598,14 @@ def sync_log_handlers(saltenv=None, refresh=True):
|
||||
|
||||
Sync log handlers from ``salt://_log_handlers`` to the minion
|
||||
|
||||
saltenv : base
|
||||
saltenv
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
If not passed, then all environments configured in the :ref:`top files
|
||||
<states-top>` will be checked for log handlers to sync. If no top files
|
||||
are found, then the ``base`` environment will be synced.
|
||||
|
||||
refresh : True
|
||||
If ``True``, refresh the available execution modules on the minion.
|
||||
This refresh will be performed even if no new log handlers are synced.
|
||||
|
@ -285,7 +285,7 @@ def install_semod(module_path):
|
||||
|
||||
salt '*' selinux.install_semod [salt://]path/to/module.pp
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
if module_path.find('salt://') == 0:
|
||||
module_path = __salt__['cp.cache_file'](module_path)
|
||||
@ -303,7 +303,7 @@ def remove_semod(module):
|
||||
|
||||
salt '*' selinux.remove_semod module_name
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
cmd = 'semodule -r {0}'.format(module)
|
||||
return not __salt__['cmd.retcode'](cmd)
|
||||
|
@ -252,16 +252,38 @@ def _check_queue(queue, kwargs):
return conflict


def _get_opts(localconfig=None):
def _get_opts(**kwargs):
'''
Return a copy of the opts for use, optionally load a local config on top
'''
opts = copy.deepcopy(__opts__)
if localconfig:
opts = salt.config.minion_config(localconfig, defaults=opts)

if 'localconfig' in kwargs:
return salt.config.minion_config(kwargs['localconfig'], defaults=opts)

if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
if saltenv is not None and not isinstance(saltenv, six.string_types):
opts['environment'] = str(kwargs['saltenv'])
else:
opts['environment'] = kwargs['saltenv']

if 'pillarenv' in kwargs:
pillarenv = kwargs['pillarenv']
if pillarenv is not None and not isinstance(pillarenv, six.string_types):
opts['pillarenv'] = str(kwargs['pillarenv'])
else:
opts['pillarenv'] = kwargs['pillarenv']

return opts


def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None


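Since _get_opts() now inspects **kwargs, the state functions below simply forward whatever the caller passed. Illustration only, with invented values:

kwargs = {'saltenv': 'dev', 'pillarenv': 'qa'}
opts = _get_opts(**kwargs)
# opts is a deep copy of __opts__ with
#   opts['environment'] == 'dev'
#   opts['pillarenv']   == 'qa'
# whereas a 'localconfig' kwarg would instead load that file on top of the
# copy and return immediately.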
def low(data, queue=False, **kwargs):
|
||||
'''
|
||||
Execute a single low data call
|
||||
@ -325,24 +347,32 @@ def high(data, test=None, queue=False, **kwargs):
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
opts = _get_opts(kwargs.get('localconfig'))
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
opts['test'] = _get_test_value(test, **kwargs)
|
||||
|
||||
pillar = kwargs.get('pillar')
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar is not None \
|
||||
and not isinstance(pillar, dict):
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
try:
|
||||
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc, proxy=__proxy__,
|
||||
context=__context__)
|
||||
st_ = salt.state.State(opts,
|
||||
pillar_override,
|
||||
pillar_enc=pillar_enc,
|
||||
proxy=__proxy__,
|
||||
context=__context__,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
except NameError:
|
||||
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc)
|
||||
st_ = salt.state.State(opts,
|
||||
pillar_override,
|
||||
pillar_enc=pillar_enc,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
ret = st_.call_high(data)
|
||||
_set_retcode(ret)
|
||||
@ -371,15 +401,15 @@ def template(tem, queue=False, **kwargs):
|
||||
)
|
||||
kwargs.pop('env')
|
||||
|
||||
if 'saltenv' in kwargs:
|
||||
saltenv = kwargs['saltenv']
|
||||
else:
|
||||
saltenv = ''
|
||||
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
st_ = salt.state.HighState(__opts__, context=__context__)
|
||||
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
st_ = salt.state.HighState(opts,
|
||||
context=__context__,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -388,7 +418,11 @@ def template(tem, queue=False, **kwargs):
|
||||
|
||||
if not tem.endswith('.sls'):
|
||||
tem = '{sls}.sls'.format(sls=tem)
|
||||
high_state, errors = st_.render_state(tem, saltenv, '', None, local=True)
|
||||
high_state, errors = st_.render_state(tem,
|
||||
kwargs.get('saltenv', ''),
|
||||
'',
|
||||
None,
|
||||
local=True)
|
||||
if errors:
|
||||
__context__['retcode'] = 1
|
||||
return errors
|
||||
@ -410,10 +444,15 @@ def template_str(tem, queue=False, **kwargs):
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
try:
|
||||
st_ = salt.state.State(__opts__, proxy=__proxy__)
|
||||
st_ = salt.state.State(opts,
|
||||
proxy=__proxy__,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
except NameError:
|
||||
st_ = salt.state.State(__opts__)
|
||||
st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
|
||||
ret = st_.call_template_str(tem)
|
||||
_set_retcode(ret)
|
||||
return ret
|
||||
@ -688,9 +727,7 @@ def run_request(name='default', **kwargs):
|
||||
return {}
|
||||
|
||||
|
||||
def highstate(test=None,
|
||||
queue=False,
|
||||
**kwargs):
|
||||
def highstate(test=None, queue=False, **kwargs):
|
||||
'''
|
||||
Retrieve the state data from the salt master for this minion and execute it
|
||||
|
||||
@ -732,7 +769,7 @@ def highstate(test=None,
|
||||
states to be run with their own custom minion configuration, including
|
||||
different pillars, file_roots, etc.
|
||||
|
||||
mock:
|
||||
mock
|
||||
The mock option allows for the state run to execute without actually
|
||||
calling any states. This then returns a mocked return which will show
|
||||
the requisite ordering as well as fully validate the state run.
|
||||
@ -765,7 +802,7 @@ def highstate(test=None,
|
||||
return conflict
|
||||
orig_test = __opts__.get('test', None)
|
||||
|
||||
opts = _get_opts(kwargs.get('localconfig'))
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
opts['test'] = _get_test_value(test, **kwargs)
|
||||
|
||||
@ -778,36 +815,32 @@ def highstate(test=None,
|
||||
)
|
||||
kwargs.pop('env')
|
||||
|
||||
if 'saltenv' in kwargs:
|
||||
opts['environment'] = kwargs['saltenv']
|
||||
|
||||
pillar = kwargs.get('pillar')
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar is not None \
|
||||
and not isinstance(pillar, dict):
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
if 'pillarenv' in kwargs:
|
||||
opts['pillarenv'] = kwargs['pillarenv']
|
||||
|
||||
try:
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar,
|
||||
pillar_override,
|
||||
kwargs.get('__pub_jid'),
|
||||
pillar_enc=pillar_enc,
|
||||
proxy=__proxy__,
|
||||
context=__context__,
|
||||
mocked=kwargs.get('mock', False))
|
||||
mocked=kwargs.get('mock', False),
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar,
|
||||
pillar_override,
|
||||
kwargs.get('__pub_jid'),
|
||||
pillar_enc=pillar_enc,
|
||||
mocked=kwargs.get('mock', False))
|
||||
mocked=kwargs.get('mock', False),
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -844,13 +877,7 @@ def highstate(test=None,
|
||||
return ret
|
||||
|
||||
|
||||
def sls(mods,
|
||||
saltenv=None,
|
||||
test=None,
|
||||
exclude=None,
|
||||
queue=False,
|
||||
pillarenv=None,
|
||||
**kwargs):
|
||||
def sls(mods, test=None, exclude=None, queue=False, **kwargs):
|
||||
'''
|
||||
Execute the states in one or more SLS files
|
||||
|
||||
@ -894,7 +921,7 @@ def sls(mods,
|
||||
multiple state runs can safely be run at the same time. Do *not*
|
||||
use this flag for performance optimization.
|
||||
|
||||
saltenv : None
|
||||
saltenv
|
||||
Specify a salt fileserver environment to be used when applying states
|
||||
|
||||
.. versionchanged:: 0.17.0
|
||||
@ -946,16 +973,6 @@ def sls(mods,
|
||||
)
|
||||
kwargs.pop('env')
|
||||
|
||||
if saltenv is None:
|
||||
if __opts__.get('environment', None):
|
||||
saltenv = __opts__['environment']
|
||||
else:
|
||||
saltenv = 'base'
|
||||
|
||||
if not pillarenv:
|
||||
if __opts__.get('pillarenv', None):
|
||||
pillarenv = __opts__['pillarenv']
|
||||
|
||||
# Modification to __opts__ lost after this if-else
|
||||
if queue:
|
||||
_wait(kwargs.get('__pub_jid'))
|
||||
@ -965,10 +982,6 @@ def sls(mods,
|
||||
__context__['retcode'] = 1
|
||||
return conflict
|
||||
|
||||
# Ensure desired environment
|
||||
__opts__['environment'] = saltenv
|
||||
__opts__['pillarenv'] = pillarenv
|
||||
|
||||
if isinstance(mods, list):
|
||||
disabled = _disabled(mods)
|
||||
else:
|
||||
@ -976,20 +989,28 @@ def sls(mods,
|
||||
|
||||
if disabled:
|
||||
for state in disabled:
|
||||
log.debug('Salt state {0} run is disabled. To re-enable, run state.enable {0}'.format(state))
|
||||
log.debug(
|
||||
'Salt state %s is disabled. To re-enable, run '
|
||||
'state.enable %s', state, state
|
||||
)
|
||||
__context__['retcode'] = 1
|
||||
return disabled
|
||||
|
||||
orig_test = __opts__.get('test', None)
|
||||
opts = _get_opts(kwargs.get('localconfig'))
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
opts['test'] = _get_test_value(test, **kwargs)
|
||||
|
||||
pillar = kwargs.get('pillar')
|
||||
# Since this is running a specific SLS file (or files), fall back to the
|
||||
# 'base' saltenv if none is configured and none was passed.
|
||||
if opts['environment'] is None:
|
||||
opts['environment'] = 'base'
|
||||
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar is not None \
|
||||
and not isinstance(pillar, dict):
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
@ -1000,20 +1021,23 @@ def sls(mods,
|
||||
__opts__['cachedir'],
|
||||
'{0}.cache.p'.format(kwargs.get('cache_name', 'highstate'))
|
||||
)
|
||||
|
||||
try:
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar,
|
||||
pillar_override,
|
||||
kwargs.get('__pub_jid'),
|
||||
pillar_enc=pillar_enc,
|
||||
proxy=__proxy__,
|
||||
context=__context__,
|
||||
mocked=kwargs.get('mock', False))
|
||||
mocked=kwargs.get('mock', False),
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar,
|
||||
pillar_override,
|
||||
kwargs.get('__pub_jid'),
|
||||
pillar_enc=pillar_enc,
|
||||
mocked=kwargs.get('mock', False))
|
||||
mocked=kwargs.get('mock', False),
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
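The try/except NameError around salt.state.HighState() exists because __proxy__ is presumably only injected on proxy minions; a minimal sketch of that fallback pattern, stripped of the extra arguments used above.

def build_highstate(opts, pillar_override, pillar_enc, initial_pillar):
    try:
        return salt.state.HighState(opts,
                                    pillar_override,
                                    pillar_enc=pillar_enc,
                                    proxy=__proxy__,   # NameError when __proxy__ is undefined
                                    initial_pillar=initial_pillar)
    except NameError:
        return salt.state.HighState(opts,
                                    pillar_override,
                                    pillar_enc=pillar_enc,
                                    initial_pillar=initial_pillar)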
@ -1036,7 +1060,7 @@ def sls(mods,
|
||||
st_.push_active()
|
||||
ret = {}
|
||||
try:
|
||||
high_, errors = st_.render_highstate({saltenv: mods})
|
||||
high_, errors = st_.render_highstate({opts['environment']: mods})
|
||||
|
||||
if errors:
|
||||
__context__['retcode'] = 1
|
||||
@ -1086,11 +1110,7 @@ def sls(mods,
|
||||
return ret
|
||||
|
||||
|
||||
def top(topfn,
|
||||
test=None,
|
||||
queue=False,
|
||||
saltenv=None,
|
||||
**kwargs):
|
||||
def top(topfn, test=None, queue=False, **kwargs):
|
||||
'''
|
||||
Execute a specific top file instead of the default. This is useful to apply
|
||||
configurations from a different environment (for example, dev or prod), without
|
||||
@ -1108,20 +1128,24 @@ def top(topfn,
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
orig_test = __opts__.get('test', None)
|
||||
opts = _get_opts(kwargs.get('localconfig'))
|
||||
opts = _get_opts(**kwargs)
|
||||
opts['test'] = _get_test_value(test, **kwargs)
|
||||
|
||||
pillar = kwargs.get('pillar')
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar is not None \
|
||||
and not isinstance(pillar, dict):
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc, context=__context__)
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar_override,
|
||||
pillar_enc=pillar_enc,
|
||||
context=__context__,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
err = ['Pillar failed to render with the following messages:']
|
||||
@ -1132,8 +1156,8 @@ def top(topfn,
|
||||
st_.opts['state_top'] = salt.utils.url.create(topfn)
|
||||
ret = {}
|
||||
orchestration_jid = kwargs.get('orchestration_jid')
|
||||
if saltenv:
|
||||
st_.opts['state_top_saltenv'] = saltenv
|
||||
if 'saltenv' in kwargs:
|
||||
st_.opts['state_top_saltenv'] = kwargs['saltenv']
|
||||
try:
|
||||
snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy'))
|
||||
ret = st_.call_highstate(
|
||||
@ -1167,17 +1191,22 @@ def show_highstate(queue=False, **kwargs):
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
pillar = kwargs.get('pillar')
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar is not None \
|
||||
and not isinstance(pillar, dict):
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
|
||||
opts = _get_opts(**kwargs)
|
||||
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar_override,
|
||||
pillar_enc=pillar_enc,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1207,7 +1236,11 @@ def show_lowstate(queue=False, **kwargs):
if conflict is not None:
assert False
return conflict
st_ = salt.state.HighState(__opts__)

opts = _get_opts(**kwargs)

st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1222,13 +1255,7 @@ def show_lowstate(queue=False, **kwargs):
return ret


def sls_id(
id_,
mods,
saltenv='base',
test=None,
queue=False,
**kwargs):
def sls_id(id_, mods, test=None, queue=False, **kwargs):
'''
Call a single ID from the named module(s) and handle all requisites

@ -1246,14 +1273,21 @@ def sls_id(
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']

# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'

try:
st_ = salt.state.HighState(opts, proxy=__proxy__)
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts)
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1265,7 +1299,7 @@ def sls_id(
split_mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: split_mods})
high_, errors = st_.render_highstate({opts['environment']: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1285,16 +1319,12 @@ def sls_id(
if not ret:
raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, saltenv)
'\'{2}\''.format(id_, mods, opts['environment'])
)
return ret


def show_low_sls(mods,
saltenv='base',
test=None,
queue=False,
**kwargs):
def show_low_sls(mods, test=None, queue=False, **kwargs):
'''
Display the low data from a specific sls. The default environment is
``base``, use ``saltenv`` to specify a different environment.
@ -1304,6 +1334,7 @@ def show_low_sls(mods,
.. code-block:: bash

salt '*' state.show_low_sls foo
salt '*' state.show_low_sls foo saltenv=dev
'''
if 'env' in kwargs:
salt.utils.warn_until(
@ -1318,11 +1349,15 @@ def show_low_sls(mods,
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
st_ = salt.state.HighState(opts)

# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'

st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1333,7 +1368,7 @@ def show_low_sls(mods,
mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: mods})
high_, errors = st_.render_highstate({opts['environment']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1347,7 +1382,7 @@ def show_low_sls(mods,
return ret


def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
def show_sls(mods, test=None, queue=False, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master. The default environment is ``base``, use ``saltenv`` to specify a
@ -1377,24 +1412,29 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)

opts['test'] = _get_test_value(test, **kwargs)

pillar = kwargs.get('pillar')
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'

pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)

if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']

st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1405,7 +1445,7 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: mods})
high_, errors = st_.render_highstate({opts['environment']: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
@ -1428,8 +1468,6 @@ def show_top(queue=False, **kwargs):

salt '*' state.show_top
'''
opts = copy.deepcopy(__opts__)

if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
@ -1439,12 +1477,13 @@ def show_top(queue=False, **kwargs):
)
kwargs.pop('env')

if 'saltenv' in kwargs:
opts['environment'] = kwargs['saltenv']
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
st_ = salt.state.HighState(opts)

opts = _get_opts(**kwargs)

st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
@ -1490,23 +1529,30 @@ def single(fun, name, test=None, queue=False, **kwargs):
'__id__': name,
'name': name})
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)

pillar = kwargs.get('pillar')
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)

try:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc, proxy=__proxy__)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc)
st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
@ -1549,7 +1595,11 @@ def clear_cache():
return ret


def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
'''
Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state
@ -1587,10 +1637,10 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json):
with salt.utils.fopen(pillar_json, 'r') as fp_:
pillar = json.load(fp_)
pillar_override = json.load(fp_)
else:
pillar = None
popts = _get_opts(kwargs.get('localconfig'))
pillar_override = None
popts = _get_opts(**kwargs)
popts['fileclient'] = 'local'
popts['file_roots'] = {}
popts['test'] = _get_test_value(test, **kwargs)
@ -1600,7 +1650,7 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
if not os.path.isdir(full):
continue
popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar=pillar)
st_ = salt.state.State(popts, pillar_override=pillar_override)
snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_chunks(lowstate)
try:

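The state.py hunks above all follow the same pattern: build the opts with ``_get_opts(**kwargs)``, fall back to the ``base`` saltenv when none is configured and none was passed, and hand a pre-resolved pillar to the state object via ``initial_pillar=_get_initial_pillar(opts)``. A minimal, self-contained sketch of just the saltenv fallback (plain dicts only; ``resolve_saltenv`` is an illustrative helper, not part of Salt):

.. code-block:: python

    def resolve_saltenv(opts, passed_saltenv=None):
        # Work on a copy so the caller's opts dict is not mutated.
        resolved = dict(opts)
        if passed_saltenv is not None:
            resolved['environment'] = passed_saltenv
        # Mirror the "if opts['environment'] is None" checks in the hunks above.
        if resolved.get('environment') is None:
            resolved['environment'] = 'base'
        return resolved

    print(resolve_saltenv({'environment': None})['environment'])         # base
    print(resolve_saltenv({'environment': None}, 'dev')['environment'])  # dev
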
@ -214,7 +214,7 @@ def uptime():
raise CommandExecutionError("File {ut_path} was not found.".format(ut_path=ut_path))
seconds = int(float(salt.utils.fopen(ut_path).read().split()[0]))
elif salt.utils.is_sunos():
# note: some flavors/vesions report the host uptime inside a zone
# note: some flavors/versions report the host uptime inside a zone
# https://support.oracle.com/epmos/faces/BugDisplay?id=15611584
res = __salt__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time')
if res['retcode'] > 0:

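The SunOS branch above shells out to ``kstat`` for the boot time; a rough, self-contained sketch of turning such output into an uptime figure (the sample line is fabricated for illustration and the parsing is simpler than the module's):

.. code-block:: python

    import time

    # Illustrative `kstat -p unix:0:system_misc:boot_time` output:
    # statistic name and value separated by whitespace (value made up here).
    sample_output = "unix:0:system_misc:boot_time\t1483228800"

    boot_time = int(sample_output.split()[-1])
    uptime_seconds = int(time.time()) - boot_time
    print(uptime_seconds)
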
@ -242,6 +242,8 @@ def _copy_function(module_name, name=None):
elif hasattr(mod, '__call__'):
mod_sig = inspect.getargspec(mod.__call__)
parameters = mod_sig.args
log.debug('Parameters accepted by module {0}: {1}'.format(module_name,
parameters))
additional_args = {}
for arg in set(parameters).intersection(set(methods.keys())):
additional_args[arg] = methods.pop(arg)
@ -251,12 +253,15 @@ def _copy_function(module_name, name=None):
else:
modinstance = mod()
except TypeError:
modinstance = None
methods = {}
log.exception('Module failed to instantiate')
raise
valid_methods = {}
log.debug('Called methods are: {0}'.format(methods))
for meth_name in methods:
if not meth_name.startswith('_'):
methods[meth_name] = methods[meth_name]
for meth, arg in methods.items():
valid_methods[meth_name] = methods[meth_name]
log.debug('Valid methods are: {0}'.format(valid_methods))
for meth, arg in valid_methods.items():
result = _get_method_result(mod, modinstance, meth, arg)
assertion_result = _apply_assertion(arg, result)
if not assertion_result:
@ -285,13 +290,17 @@ def _register_functions():
functions, and then register them in the module namespace so that they
can be called via salt.
"""
for module_ in modules.__all__:
try:
modules_ = [_to_snake_case(module_) for module_ in modules.__all__]
except AttributeError:
modules_ = [module_ for module_ in modules.modules]

for mod_name in modules_:
mod_name = _to_snake_case(module_)
mod_func = _copy_function(mod_name, str(mod_name))
mod_func.__doc__ = _build_doc(module_)
mod_func.__doc__ = _build_doc(mod_name)
__all__.append(mod_name)
globals()[mod_name] = mod_func


if TESTINFRA_PRESENT:
_register_functions()

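The ``_register_functions`` hunk above converts each Testinfra module name to snake_case, wraps it with ``_copy_function`` and publishes it in the module namespace. A rough sketch of that registration idea; the regex-based converter and the stand-in class names are assumptions for illustration, not the module's actual helpers:

.. code-block:: python

    import re

    def to_snake_case(name):
        # Assumed behaviour of the _to_snake_case helper referenced above.
        step = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step).lower()

    def make_wrapper(mod_name):
        # Stand-in for _copy_function: returns a callable to expose via salt.
        def wrapper(*args, **kwargs):
            return 'called {0}'.format(mod_name)
        wrapper.__doc__ = 'Wrapper for {0}'.format(mod_name)
        return wrapper

    __all__ = []
    for class_name in ['PipPackage', 'Service', 'Sysctl']:   # stand-ins for modules.__all__
        mod_name = to_snake_case(class_name)
        globals()[mod_name] = make_wrapper(mod_name)
        __all__.append(mod_name)

    print(__all__)          # ['pip_package', 'service', 'sysctl']
    print(service())        # called service
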
@ -42,9 +42,10 @@ def get_cert_serial(cert_file):

salt '*' certutil.get_cert_serial <certificate name>
'''
cmd = "certutil.exe -verify {0}".format(cert_file)
cmd = "certutil.exe -silent -verify {0}".format(cert_file)
out = __salt__['cmd.run'](cmd)
matches = re.search(r"Serial: (.*)", out)
# match serial number by paragraph to work with multiple languages
matches = re.search(r":\s*(\w*)\r\n\r\n", out)
if matches is not None:
return matches.groups()[0].strip()
else:
@ -66,7 +67,8 @@ def get_stored_cert_serials(store):
'''
cmd = "certutil.exe -store {0}".format(store)
out = __salt__['cmd.run'](cmd)
matches = re.findall(r"Serial Number: (.*)\r", out)
# match serial numbers by header position to work with multiple languages
matches = re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", out)
return matches

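The regex changes above anchor on output layout (the blank line after the value, the ``=`` header row) rather than on the English words ``Serial`` / ``Serial Number``, so they keep working on non-English Windows installs. A quick illustration with made-up ``certutil.exe -verify`` output (not captured from a real system):

.. code-block:: python

    import re

    verify_out = (
        "Issuer: CN=Test Root\r\n"
        "Serial: 5b3c9d2e\r\n"
        "\r\n"
        "Signature verified\r\n"
    )

    # Same pattern as the hunk above: take the word that ends a paragraph.
    match = re.search(r":\s*(\w*)\r\n\r\n", verify_out)
    if match is not None:
        print(match.groups()[0].strip())    # 5b3c9d2e
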
File diff suppressed because it is too large
@ -861,56 +861,93 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
r'''
Install the passed package(s) on the system using winrepo

:param name:
The name of a single package, or a comma-separated list of packages to
install. (no spaces after the commas)
:type name: str, list, or None
Args:

:param bool refresh: Boolean value representing whether or not to refresh
the winrepo db
name (str):
The name of a single package, or a comma-separated list of packages
to install. (no spaces after the commas)

:param pkgs: A list of packages to install from a software repository.
All packages listed under ``pkgs`` will be installed via a single
command.
refresh (bool):
Boolean value representing whether or not to refresh the winrepo db

:type pkgs: list or None
pkgs (list):
A list of packages to install from a software repository. All
packages listed under ``pkgs`` will be installed via a single
command.

*Keyword Arguments (kwargs)*
You can specify a version by passing the item as a dict:

:param str version:
The specific version to install. If omitted, the latest version will be
installed. If passed with multiple install, the version will apply to
all packages. Recommended for single installation only.
CLI Example:

:param str cache_file:
A single file to copy down for use with the installer. Copied to the
same location as the installer. Use this over ``cache_dir`` if there
are many files in the directory and you only need a specific file and
don't want to cache additional files that may reside in the installer
directory. Only applies to files on ``salt://``
.. code-block:: bash

:param bool cache_dir:
True will copy the contents of the installer directory. This is useful
for installations that are not a single file. Only applies to
directories on ``salt://``
# will install the latest version of foo and bar
salt '*' pkg.install pkgs='["foo", "bar"]'

:param str saltenv: Salt environment. Default 'base'
# will install the latest version of foo and version 1.2.3 of bar
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]'

:param bool report_reboot_exit_codes:
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function
Kwargs:

version (str):
The specific version to install. If omitted, the latest version will
be installed. Recommended for use when installing a single package.

If passed with a list of packages in the ``pkgs`` parameter, the
version will be ignored.

CLI Example:

.. code-block:: bash

# Version is ignored
salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3

If passed with a comma separated list in the ``name`` parameter, the
version will apply to all packages in the list.

CLI Example:

.. code-block:: bash

# Version 1.2.3 will apply to packages foo and bar
salt '*' pkg.install foo,bar version=1.2.3

cache_file (str):
A single file to copy down for use with the installer. Copied to the
same location as the installer. Use this over ``cache_dir`` if there
are many files in the directory and you only need a specific file
and don't want to cache additional files that may reside in the
installer directory. Only applies to files on ``salt://``

cache_dir (bool):
True will copy the contents of the installer directory. This is
useful for installations that are not a single file. Only applies to
directories on ``salt://``

extra_install_flags (str):
Additional install flags that will be appended to the
``install_flags`` defined in the software definition file. Only
applies when single package is passed.

saltenv (str):
Salt environment. Default 'base'

report_reboot_exit_codes (bool):
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function

*win_system.set_reboot_required_witnessed*

will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
3010 is the only recognized exit code. The value of this param
defaults to True.
will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
3010 is the only recognized exit code. The value of this param
defaults to True.

.. versionadded:: 2016.11.0
.. versionadded:: 2016.11.0

:return: Return a dict containing the new package names and versions::
:rtype: dict
Returns:
dict: Return a dict containing the new package names and versions:

If the package is installed by ``pkg.install``:

@ -989,13 +1026,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# "sources" argument
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]

if len(pkg_params) > 1:
if kwargs.get('extra_install_flags') is not None:
log.warning('\'extra_install_flags\' argument will be ignored for '
'multiple package targets')

# Windows expects an Options dictionary containing 'version'
for pkg in pkg_params:
pkg_params[pkg] = {'version': pkg_params[pkg]}

if pkg_params is None or len(pkg_params) == 0:
log.error('No package definition found')
return {}

if not pkgs and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
# Only use the 'version' param if a single item was passed to the 'name'
# parameter
pkg_params = {
name: {
'version': kwargs.get('version'),
@ -1020,10 +1066,15 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name)
continue

# Get the version number passed or the latest available
# Get the version number passed or the latest available (must be a string)
version_num = ''
if options:
version_num = options.get('version', False)
version_num = options.get('version', '')
# Using the salt cmdline with version=5.3 might be interpreted
# as a float, so it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)

if not version_num:
version_num = _get_latest_pkg_version(pkginfo)
@ -1160,21 +1211,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))

# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
if pkginfo['version_num'].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.shlex_split(install_flags))
else:
cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags)

# Install the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):

# Build Scheduled Task Parameters
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
if pkginfo['version_num'].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.shlex_split(install_flags))
else:
cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags)

# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
@ -1188,21 +1240,43 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
start_time='01:00',
ac_only=False,
stop_if_on_batteries=False)

# Run Scheduled Task
if not __salt__['task.run_wait'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
else:
# Build the install command
cmd = []
if use_msiexec:
cmd.extend([msiexec, '/i', cached_pkg])
if pkginfo[version_num].get('allusers', True):
cmd.append('ALLUSERS="1"')
# Special handling for installing salt
if pkg_name in ['salt-minion', 'salt-minion-py3']:
ret[pkg_name] = {'install status': 'task started'}
if not __salt__['task.run'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
else:

# Make sure the task is running, try for 5 secs
from time import time
t_end = time() + 5
while time() < t_end:
task_running = __salt__['task.status'](
'update-salt-software') == 'Running'
if task_running:
break

if not task_running:
log.error(
'Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}

# All other packages run with task scheduler
else:
cmd.append(cached_pkg)
cmd.extend(salt.utils.shlex_split(install_flags))
if not __salt__['task.run_wait'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
else:

# Combine cmd and arguments
cmd = [cmd]
cmd.extend(arguments)

# Launch the command
result = __salt__['cmd.run_all'](cmd,
cache_path,
@ -1355,6 +1429,11 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
continue

if version_num is not None:
# Using the salt cmdline with version=5.3 might be interpreted
# as a float, so it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)
if version_num not in pkginfo and 'latest' in pkginfo:
version_num = 'latest'
elif 'latest' in pkginfo:

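Two details from the hunks above are easy to miss: multiple targets get their version wrapped in the Options dictionary the Windows code expects, and versions typed on the salt command line (e.g. ``version=5.3``) can arrive as floats and are coerced to strings before any matching. A small sketch of both, using plain ``str`` instead of ``six.string_types`` and made-up package data:

.. code-block:: python

    # Stand-in for the output of pkg_resource.parse_targets()
    pkg_params = {'foo': '1.2.3', 'bar': None}

    if len(pkg_params) > 1:
        # Wrap each version in an Options dict, as in the hunk above.
        for pkg in pkg_params:
            pkg_params[pkg] = {'version': pkg_params[pkg]}

    version_num = pkg_params['foo'].get('version', '')
    if not isinstance(version_num, str) and version_num is not None:
        version_num = str(version_num)

    print(pkg_params)         # {'foo': {'version': '1.2.3'}, 'bar': {'version': None}}
    print(repr(version_num))  # '1.2.3'
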
@ -1,9 +1,18 @@
# -*- coding: utf-8 -*-
'''
Microsoft certificate management via the Pki PowerShell module.
Microsoft certificate management via the PKI Client PowerShell module.
https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient

The PKI Client PowerShell module is only available on Windows 8+ and Windows
Server 2012+.
https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx

:platform: Windows

:depends:
- PowerShell 4
- PKI Client Module (Windows 8+ / Windows Server 2012+)

.. versionadded:: 2016.11.0
'''
# Import python libs
@ -29,11 +38,17 @@ __virtualname__ = 'win_pki'

def __virtual__():
'''
Only works on Windows systems with the PKI PowerShell module installed.
Requires Windows
Requires Windows 8+ / Windows Server 2012+
Requires PowerShell
Requires PKI Client PowerShell module installed.
'''
if not salt.utils.is_windows():
return False, 'Only available on Windows Systems'

if salt.utils.version_cmp(__grains__['osversion'], '6.2.9200') == -1:
return False, 'Only available on Windows 8+ / Windows Server 2012 +'

if not __salt__['cmd.shell_info']('powershell')['installed']:
return False, 'Powershell not available'

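The new ``__virtual__`` gate above compares the ``osversion`` grain against ``6.2.9200`` (the Windows 8 / Server 2012 build). A rough illustration of that comparison using a simple numeric tuple; ``salt.utils.version_cmp`` handles more cases, so this is only the idea:

.. code-block:: python

    def version_tuple(ver):
        return tuple(int(part) for part in ver.split('.'))

    osversion = '6.1.7601'   # illustrative grain value (Windows 7 / Server 2008 R2)
    if version_tuple(osversion) < version_tuple('6.2.9200'):
        print('Only available on Windows 8+ / Windows Server 2012+')
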
@ -1259,7 +1259,7 @@ def status(name, location='\\'):
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()

# get the folder to delete the folder from
# get the folder where the task is defined
task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name)

@ -945,12 +945,26 @@ def set_wu_settings(level=None,
Change Windows Update settings. If no parameters are passed, the current
value will be returned.

Supported:
- Windows Vista / Server 2008
- Windows 7 / Server 2008R2
- Windows 8 / Server 2012
- Windows 8.1 / Server 2012R2

.. note::
Microsoft began using the Unified Update Platform (UUP) starting with
Windows 10 / Server 2016. The Windows Update settings have changed and
the ability to 'Save' Windows Update settings has been removed. Windows
Update settings are read-only. See MSDN documentation:
https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx

:param int level:
Number from 1 to 4 indicating the update level:
1. Never check for updates
2. Check for updates but let me choose whether to download and install them
3. Download updates but let me choose whether to install them
4. Install updates automatically

:param bool recommended:
Boolean value that indicates whether to include optional or recommended
updates when a search for updates and installation of updates is
@ -993,8 +1007,28 @@ def set_wu_settings(level=None,
salt '*' win_wua.set_wu_settings level=4 recommended=True featured=False

"""
ret = {}
ret['Success'] = True
# The AutomaticUpdateSettings.Save() method used in this function does not
# work on Windows 10 / Server 2016. It is called throughout this function
# like this:
#
# obj_au = win32com.client.Dispatch('Microsoft.Update.AutoUpdate')
# obj_au_settings = obj_au.Settings
# obj_au_settings.Save()
#
# The `Save()` method reports success but doesn't actually change anything.
# Windows Update settings are read-only in Windows 10 / Server 2016. There's
# a little blurb on MSDN that mentions this, but gives no alternative for
# changing these settings in Windows 10 / Server 2016.
#
# https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx
#
# Apparently the Windows Update framework in Windows Vista - Windows 8.1 has
# been changed quite a bit in Windows 10 / Server 2016. It is now called the
# Unified Update Platform (UUP). I haven't found an API or a Powershell
# commandlet for working with the UUP. Perhaps there will be something
# forthcoming. The `win_lgpo` module might be an option for changing the
# Windows Update settings using local group policy.
ret = {'Success': True}

# Initialize the PyCom system
pythoncom.CoInitialize()

@ -330,10 +330,14 @@ def _parse_subject(subject):
for nid_name, nid_num in six.iteritems(subject.nid):
if nid_num in nids:
continue
val = getattr(subject, nid_name)
if val:
ret[nid_name] = val
nids.append(nid_num)
try:
val = getattr(subject, nid_name)
if val:
ret[nid_name] = val
nids.append(nid_num)
except TypeError as e:
if e.args and e.args[0] == 'No string argument provided':
pass

return ret

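The x509 hunk above wraps the subject attribute lookup in a try/except so that unset fields, which raise ``TypeError('No string argument provided')`` in the underlying library, are simply skipped. A self-contained sketch of that guard using a fake subject object (not M2Crypto itself):

.. code-block:: python

    class FakeSubject(object):
        # Minimal stand-in: 'CN' is set, everything else raises like an
        # unset field would.
        nid = {'CN': 13, 'O': 17, 'emailAddress': 48}

        def __getattr__(self, name):
            if name == 'CN':
                return 'example.com'
            raise TypeError('No string argument provided')

    def parse_subject(subject):
        ret = {}
        nids = []
        for nid_name, nid_num in subject.nid.items():
            if nid_num in nids:
                continue
            try:
                val = getattr(subject, nid_name)
                if val:
                    ret[nid_name] = val
                    nids.append(nid_num)
            except TypeError as exc:
                # Skip only the specific "unset field" error, as in the hunk.
                if exc.args and exc.args[0] == 'No string argument provided':
                    pass
                else:
                    raise
        return ret

    print(parse_subject(FakeSubject()))   # {'CN': 'example.com'}
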
Some files were not shown because too many files have changed in this diff