Merge branch '2016.11' into issue-35840-fix-preserve-minion-cache-2016.11

This commit is contained in:
Nicole Thomas 2017-08-29 11:28:15 -04:00 committed by GitHub
commit 93a68e32a5
170 changed files with 5548 additions and 2950 deletions


@ -59,15 +59,14 @@
# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
-# "states", "returners", etc.
+# "states", "returners", "engines", "utils", etc.
-#extension_modules: <no default>
+#extension_modules: /var/cache/salt/master/extmods

# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
-# "states", "returners", "engines", etc.
+# "states", "returners", "engines", "utils", etc.
# Like 'extension_modules' but can take an array of paths
-#module_dirs: <no default>
+#module_dirs: []
# - /var/cache/salt/minion/extmods

# Verify and set permissions on configuration directories at startup:
#verify_env: True
@ -302,6 +301,9 @@
# public keys from the minions. Note that this is insecure. # public keys from the minions. Note that this is insecure.
#auto_accept: False #auto_accept: False
# The size of key that should be generated when creating new keys.
#keysize: 2048
# Time in minutes that an incoming public key with a matching name found in # Time in minutes that an incoming public key with a matching name found in
# pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys # pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys
# are removed when the master checks the minion_autosign directory. # are removed when the master checks the minion_autosign directory.
@ -916,6 +918,21 @@
#pillar_cache_backend: disk #pillar_cache_backend: disk
###### Reactor Settings #####
###########################################
# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/
#reactor: []
#Set the TTL for the cache of the reactor configuration.
#reactor_refresh_interval: 60
#Configure the number of workers for the runner/wheel in the reactor.
#reactor_worker_threads: 10
#Define the queue size for workers in the reactor.
#reactor_worker_hwm: 10000
##### Syndic settings ##### ##### Syndic settings #####
########################################## ##########################################
# The Salt syndic is used to pass commands through a master from a higher # The Salt syndic is used to pass commands through a master from a higher


@ -615,6 +615,9 @@
# you do so at your own risk! # you do so at your own risk!
#open_mode: False #open_mode: False
# The size of key that should be generated when creating new keys.
#keysize: 2048
# Enable permissive access to the salt keys. This allows you to run the # Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to # master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group # your pki_dir. To make the access explicit, root must belong to the group
@ -656,6 +659,21 @@
# ssl_version: PROTOCOL_TLSv1_2 # ssl_version: PROTOCOL_TLSv1_2
###### Reactor Settings #####
###########################################
# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/
#reactor: []
#Set the TTL for the cache of the reactor configuration.
#reactor_refresh_interval: 60
#Configure the number of workers for the runner/wheel in the reactor.
#reactor_worker_threads: 10
#Define the queue size for workers in the reactor.
#reactor_worker_hwm: 10000
###### Thread settings ##### ###### Thread settings #####
########################################### ###########################################
# Disable multiprocessing support, by default when a minion receives a # Disable multiprocessing support, by default when a minion receives a


@ -252,7 +252,7 @@
<div class="col-sm-6">
-  <a href="http://saltstack.com/support" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/footer-support.png', 1) }}"/></a>
+  <a href="http://saltconf.com" target="_blank"><img class="nolightbox footer-banner center" src="{{ pathto('_static/images/DOCBANNER.jpg', 1) }}"/></a>
</div>

Binary image file added (790 KiB); contents not shown.


@ -72,6 +72,7 @@ MOCK_MODULES = [
'Crypto.Signature', 'Crypto.Signature',
'Crypto.Signature.PKCS1_v1_5', 'Crypto.Signature.PKCS1_v1_5',
'M2Crypto', 'M2Crypto',
'msgpack',
'yaml', 'yaml',
'yaml.constructor', 'yaml.constructor',
'yaml.nodes', 'yaml.nodes',
@ -239,9 +240,9 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
-latest_release = '2016.11.5' # latest release
+latest_release = '2017.7.1' # latest release
-previous_release = '2016.3.6' # latest release from previous branch
+previous_release = '2016.11.7' # latest release from previous branch
-previous_release_dir = '2016.3' # path on web server for previous branch
+previous_release_dir = '2016.11' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
@ -252,8 +253,8 @@ if on_saltstack:
copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
-build_type = 'latest' # latest, previous, develop, next
+build_type = 'previous' # latest, previous, develop, next
-release = latest_release # version, latest_release, previous_release
+release = previous_release # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine


@ -319,7 +319,27 @@ Restart using states
********************

Now we can apply the workaround to restart the Minion in a reliable way.
-The following example works on both UNIX-like and Windows operating systems:
+The following example works on UNIX-like operating systems:

.. code-block:: jinja

    {%- if grains['os'] != 'Windows' %}
    Restart Salt Minion:
      cmd.run:
        - name: 'salt-call --local service.restart salt-minion'
        - bg: True
        - onchanges:
          - pkg: Upgrade Salt Minion
    {%- endif %}
Note that restarting the ``salt-minion`` service on Windows operating systems is
not always necessary when performing an upgrade. The installer stops the
``salt-minion`` service, removes it, deletes the contents of the ``\salt\bin``
directory, installs the new code, re-creates the ``salt-minion`` service, and
starts it (by default). The restart step **would** be necessary during the
upgrade process, however, if the minion config was edited after the upgrade or
installation. If a minion restart is necessary, the state above can be edited
as follows:
.. code-block:: jinja
@ -335,8 +355,8 @@ The following example works on both UNIX-like and Windows operating systems:
- pkg: Upgrade Salt Minion

However, it requires more advanced tricks to upgrade from a legacy version of
-Salt (before ``2016.3.0``), where executing commands in the background is not
-supported:
+Salt (before ``2016.3.0``) on UNIX-like operating systems, where executing
+commands in the background is not supported:

.. code-block:: jinja


@ -33,6 +33,10 @@ Output Options
Write the output to the specified file. Write the output to the specified file.
.. option:: --out-file-append, --output-file-append
Append the output to the specified file.
.. option:: --no-color .. option:: --no-color
Disable all colored output Disable all colored output
@ -46,3 +50,14 @@ Output Options
``green`` denotes success, ``red`` denotes failure, ``blue`` denotes
changes and success and ``yellow`` denotes an expected future change in configuration.
.. option:: --state-output=STATE_OUTPUT, --state_output=STATE_OUTPUT
Override the configured state_output value for minion
output. One of 'full', 'terse', 'mixed', 'changes' or
'filter'. Default: 'none'.
.. option:: --state-verbose=STATE_VERBOSE, --state_verbose=STATE_VERBOSE
Override the configured state_verbose value for minion
output. Set to True or False. Default: none.
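
For reference, the two overrides can be combined on a normal state run. The target and
function below are illustrative; only the flags themselves come from the options
documented above:

.. code-block:: bash

    # Show only states that changed, and suppress verbose per-state output
    salt '*' state.apply --state-output=changes --state-verbose=False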


@ -39,6 +39,13 @@ specified target expression.
destination will be assumed to be a directory. Finally, recursion is now
supported, allowing for entire directories to be copied.
.. versionchanged:: 2016.11.7,2017.7.2
Reverted back to the old copy mode to preserve backward compatibility. The
new functionality added in 2016.6.6 and 2017.7.0 is now available using the
``-C`` or ``--chunked`` CLI arguments. Note that compression, recursive
copying, and support for copying large files is only available in chunked
mode.
Options Options
======= =======
@ -56,9 +63,16 @@ Options
.. include:: _includes/target-selection.rst .. include:: _includes/target-selection.rst
.. option:: -C, --chunked
Use new chunked mode to copy files. This mode supports large files, recursive
directories copying and compression.
.. versionadded:: 2016.11.7,2017.7.2
.. option:: -n, --no-compression

-Disable gzip compression.
+Disable gzip compression in chunked mode.

.. versionadded:: 2016.3.7,2016.11.6,Nitrogen
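
As a usage sketch (the target and paths are made up), chunked mode is selected with the
flag described above and is what enables recursive copying:

.. code-block:: bash

    # Recursively copy a directory to all minions using chunked mode
    salt-cp --chunked '*' /srv/files/app-conf /etc/app-conf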


@ -91,64 +91,6 @@ The user to run the Salt processes
user: root user: root
.. conf_master:: max_open_files
``max_open_files``
------------------
Default: ``100000``
Each minion connecting to the master uses AT LEAST one file descriptor, the
master subscription connection. If enough minions connect you might start
seeing on the console(and then salt-master crashes):
.. code-block:: bash
Too many open files (tcp_listener.cpp:335)
Aborted (core dumped)
.. code-block:: yaml
max_open_files: 100000
By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for
max open files.
To set a different value than the default one, uncomment, and configure this
setting. Remember that this value CANNOT be higher than the hard limit. Raising
the hard limit depends on the OS and/or distribution, a good way to find the
limit is to search the internet for something like this:
.. code-block:: text
raise max open files hard limit debian
.. conf_master:: worker_threads
``worker_threads``
------------------
Default: ``5``
The number of threads to start for receiving commands and replies from minions.
If minions are stalling on replies because you have many minions, raise the
worker_threads value.
Worker threads should not be put below 3 when using the peer system, but can
drop down to 1 worker otherwise.
.. note::
When the master daemon starts, it is expected behaviour to see
multiple salt-master processes, even if 'worker_threads' is set to '1'. At
a minimum, a controlling process will start along with a Publisher, an
EventPublisher, and a number of MWorker processes will be started. The
number of MWorker processes is tuneable by the 'worker_threads'
configuration value while the others are not.
.. code-block:: yaml
worker_threads: 5
.. conf_master:: ret_port .. conf_master:: ret_port
``ret_port`` ``ret_port``
@ -238,8 +180,8 @@ The directory to store the pki authentication keys.
Directory for custom modules. This directory can contain subdirectories for
each of Salt's module types such as ``runners``, ``output``, ``wheel``,
-``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to
-:conf_master:`root_dir`.
+``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc.
+This path is appended to :conf_master:`root_dir`.

.. code-block:: yaml
@ -850,6 +792,94 @@ what you are doing! Transports are explained in :ref:`Salt Transports
ret_port: 4606 ret_port: 4606
zeromq: [] zeromq: []
``sock_pool_size``
------------------
Default: 1
To avoid blocking while writing data to a socket, Salt supports a socket
pool. For example, a job with a large target host list can otherwise cause
long blocking waits. The option is used by the ZMQ and TCP transports; the
other transport methods do not need a socket pool by definition. Most Salt
tools, including the CLI, are fine with a single socket in the pool. However,
it is highly recommended to set the pool size larger than 1 for applications
that must write to the socket concurrently, especially the Salt API.
.. code-block:: yaml
sock_pool_size: 15
.. conf_master:: ipc_mode
``ipc_mode``
------------
Default: ``ipc``
The ipc strategy. (i.e., sockets versus tcp, etc.) Windows platforms lack
POSIX IPC and must rely on TCP based inter-process communications. ``ipc_mode``
is set to ``tcp`` by default on Windows.
.. code-block:: yaml
ipc_mode: ipc
.. conf_master:: tcp_master_pub_port
``tcp_master_pub_port``
-----------------------
Default: ``4512``
The TCP port on which events for the master should be published if ``ipc_mode`` is TCP.
.. code-block:: yaml
tcp_master_pub_port: 4512
.. conf_master:: tcp_master_pull_port
``tcp_master_pull_port``
------------------------
Default: ``4513``
The TCP port on which events for the master should be pulled if ``ipc_mode`` is TCP.
.. code-block:: yaml
tcp_master_pull_port: 4513
.. conf_master:: tcp_master_publish_pull
``tcp_master_publish_pull``
---------------------------
Default: ``4514``
The TCP port on which events for the master should be pulled from and then republished onto
the event bus on the master.
.. code-block:: yaml
tcp_master_publish_pull: 4514
.. conf_master:: tcp_master_workers
``tcp_master_workers``
----------------------
Default: ``4515``
The TCP port for ``mworkers`` to connect to on the master.
.. code-block:: yaml
tcp_master_workers: 4515
Salt-SSH Configuration Salt-SSH Configuration
====================== ======================
@ -1083,6 +1113,19 @@ public keys from minions.
auto_accept: False auto_accept: False
.. conf_master:: keysize
``keysize``
-----------
Default: ``2048``
The size of key that should be generated when creating new keys.
.. code-block:: yaml
keysize: 2048
.. conf_master:: autosign_timeout .. conf_master:: autosign_timeout
``autosign_timeout`` ``autosign_timeout``
@ -1127,6 +1170,24 @@ minion IDs for which keys will automatically be rejected. Will override both
membership in the :conf_master:`autosign_file` and the membership in the :conf_master:`autosign_file` and the
:conf_master:`auto_accept` setting. :conf_master:`auto_accept` setting.
.. conf_master:: permissive_pki_access
``permissive_pki_access``
-------------------------
Default: ``False``
Enable permissive access to the salt keys. This allows you to run the
master or minion as root, but have a non-root group be given access to
your pki_dir. To make the access explicit, root must belong to the group
you've given access to. This is potentially quite insecure. If an autosign_file
is specified, enabling permissive_pki_access will allow group access to that
specific file.
.. code-block:: yaml
permissive_pki_access: False
.. conf_master:: publisher_acl .. conf_master:: publisher_acl
``publisher_acl`` ``publisher_acl``
@ -1171,6 +1232,20 @@ This is completely disabled by default.
- cmd.* - cmd.*
- test.echo - test.echo
.. conf_master:: sudo_acl
``sudo_acl``
------------
Default: ``False``
Enforce ``publisher_acl`` and ``publisher_acl_blacklist`` when users have sudo
access to the salt command.
.. code-block:: yaml
sudo_acl: False
.. conf_master:: external_auth .. conf_master:: external_auth
``external_auth`` ``external_auth``
@ -1327,6 +1402,18 @@ Do not disable this unless it is absolutely clear what this does.
rotate_aes_key: True rotate_aes_key: True
.. conf_master:: publish_session
``publish_session``
-------------------
Default: ``86400``
The number of seconds between AES key rotations on the master.
.. code-block:: yaml
publish_session: 86400
.. conf_master:: ssl .. conf_master:: ssl
@ -1358,6 +1445,24 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23
``allow_minion_key_revoke`` ``allow_minion_key_revoke``
--------------------------- ---------------------------
Default: ``False``
By default, the master deletes its cache of minion data when the key for that
minion is removed. To preserve the cache after key deletion, set
``preserve_minion_cache`` to True.
WARNING: This may have security implications if compromised minions auth with
a previous deleted minion ID.
.. code-block:: yaml
preserve_minion_cache: False
.. conf_master:: allow_minion_key_revoke
``allow_minion_key_revoke``
---------------------------
Default: ``True`` Default: ``True``
Controls whether a minion can request its own key revocation. When True Controls whether a minion can request its own key revocation. When True
@ -1369,6 +1474,128 @@ the master will drop the request and the minion's key will remain accepted.
rotate_aes_key: True rotate_aes_key: True
Master Large Scale Tuning Settings
==================================
.. conf_master:: max_open_files
``max_open_files``
------------------
Default: ``100000``
Each minion connecting to the master uses AT LEAST one file descriptor, the
master subscription connection. If enough minions connect you might start
seeing the following on the console (and then salt-master crashes):
.. code-block:: bash
Too many open files (tcp_listener.cpp:335)
Aborted (core dumped)
.. code-block:: yaml
max_open_files: 100000
By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for
max open files.
To set a different value than the default one, uncomment, and configure this
setting. Remember that this value CANNOT be higher than the hard limit. Raising
the hard limit depends on the OS and/or distribution, a good way to find the
limit is to search the internet for something like this:
.. code-block:: text
raise max open files hard limit debian
.. conf_master:: worker_threads
``worker_threads``
------------------
Default: ``5``
The number of threads to start for receiving commands and replies from minions.
If minions are stalling on replies because you have many minions, raise the
worker_threads value.
Worker threads should not be put below 3 when using the peer system, but can
drop down to 1 worker otherwise.
.. note::
When the master daemon starts, it is expected behaviour to see
multiple salt-master processes, even if 'worker_threads' is set to '1'. At
a minimum, a controlling process will start along with a Publisher, an
EventPublisher, and a number of MWorker processes will be started. The
number of MWorker processes is tuneable by the 'worker_threads'
configuration value while the others are not.
.. code-block:: yaml
worker_threads: 5
.. conf_master:: pub_hwm
``pub_hwm``
-----------
Default: ``1000``
The zeromq high water mark on the publisher interface.
.. code-block:: yaml
pub_hwm: 1000
.. conf_master:: zmq_backlog
``zmq_backlog``
---------------
Default: ``1000``
The listen queue size of the ZeroMQ backlog.
.. code-block:: yaml
zmq_backlog: 1000
.. conf_master:: salt_event_pub_hwm
.. conf_master:: event_publisher_pub_hwm
``salt_event_pub_hwm`` and ``event_publisher_pub_hwm``
------------------------------------------------------
These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and
``event_publisher_pub_hwm`` are significant for masters with thousands of
minions. When these are insufficiently high it will manifest in random
responses missing in the CLI and even missing from the job cache. Masters
that have fast CPUs and many cores with appropriate ``worker_threads``
will not need these set as high.
The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is:
.. code-block:: yaml
salt_event_pub_hwm: 20000
The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is:
.. code-block:: yaml
event_publisher_pub_hwm: 10000
As an example, a single-master deployment with 8,000 minions, 2.4GHz CPUs,
24 cores, and 32GiB of memory has these settings:
.. code-block:: yaml
salt_event_pub_hwm: 128000
event_publisher_pub_hwm: 64000
Master Module Management Master Module Management
======================== ========================
@ -2922,6 +3149,26 @@ configuration.
pillar_opts: False pillar_opts: False
.. conf_master:: pillar_safe_render_error
``pillar_safe_render_error``
----------------------------
Default: ``True``
The pillar_safe_render_error option prevents the master from passing pillar
render errors to the minion. This is set on by default because the error could
contain templating data which would give that minion information it shouldn't
have, like a password! When set ``True`` the error message will only show:
.. code-block:: shell
Rendering SLS 'my.sls' failed. Please see master log for details.
.. code-block:: yaml
pillar_safe_render_error: True
.. _master-configuration-ext-pillar: .. _master-configuration-ext-pillar:
.. conf_master:: ext_pillar .. conf_master:: ext_pillar
@ -3505,6 +3752,63 @@ can be utilized:
pillar_cache_backend: disk pillar_cache_backend: disk
Master Reactor Settings
=======================
.. conf_master:: reactor
``reactor``
-----------
Default: ``[]``
Defines a salt reactor. See the :ref:`Reactor <reactor>` documentation for more
information.
.. code-block:: yaml
reactor: []
.. conf_master:: reactor_refresh_interval
``reactor_refresh_interval``
----------------------------
Default: ``60``
The TTL for the cache of the reactor configuration.
.. code-block:: yaml
reactor_refresh_interval: 60
.. conf_master:: reactor_worker_threads
``reactor_worker_threads``
--------------------------
Default: ``10``
The number of workers for the runner/wheel in the reactor.
.. code-block:: yaml
reactor_worker_threads: 10
.. conf_master:: reactor_worker_hwm
``reactor_worker_hwm``
----------------------
Default: ``10000``
The queue size for workers in the reactor.
.. code-block:: yaml
reactor_worker_hwm: 10000
Syndic Server Settings Syndic Server Settings
====================== ======================
@ -3950,6 +4254,64 @@ option then the master will log a warning message.
- master.d/* - master.d/*
- /etc/roles/webserver - /etc/roles/webserver
Keepalive Settings
==================
.. conf_master:: tcp_keepalive
``tcp_keepalive``
-----------------
Default: ``True``
The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt
connectivity issues in messy network environments with misbehaving firewalls.
.. code-block:: yaml
tcp_keepalive: True
.. conf_master:: tcp_keepalive_cnt
``tcp_keepalive_cnt``
---------------------
Default: ``-1``
Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_cnt: -1
.. conf_master:: tcp_keepalive_idle
``tcp_keepalive_idle``
----------------------
Default: ``300``
Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_idle: 300
.. conf_master:: tcp_keepalive_intvl
``tcp_keepalive_intvl``
-----------------------
Default: ``-1``
Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_intvl: -1
.. _winrepo-master-config-opts: .. _winrepo-master-config-opts:
Windows Software Repo Settings Windows Software Repo Settings
@ -4088,7 +4450,7 @@ URL of the repository:
.. code-block:: yaml .. code-block:: yaml
-winrepo_remotes:
+winrepo_remotes_ng:
  - '<commit_id> https://github.com/saltstack/salt-winrepo-ng.git'

Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit


@ -750,6 +750,20 @@ seconds each iteration.
acceptance_wait_time_max: 0 acceptance_wait_time_max: 0
.. conf_minion:: rejected_retry
``rejected_retry``
------------------
Default: ``False``
If the master rejects the minion's public key, retry instead of exiting.
Rejected keys will be handled the same as waiting on acceptance.
.. code-block:: yaml
rejected_retry: False
.. conf_minion:: random_reauth_delay .. conf_minion:: random_reauth_delay
``random_reauth_delay`` ``random_reauth_delay``
@ -1180,7 +1194,7 @@ If certain returners should be disabled, this is the place
.. conf_minion:: enable_whitelist_modules .. conf_minion:: enable_whitelist_modules
``whitelist_modules`` ``whitelist_modules``
---------------------------- ---------------------
Default: ``[]`` (Module whitelisting is disabled. Adding anything to the config option Default: ``[]`` (Module whitelisting is disabled. Adding anything to the config option
will cause only the listed modules to be enabled. Modules not in the list will will cause only the listed modules to be enabled. Modules not in the list will
@ -1272,6 +1286,20 @@ A list of extra directories to search for Salt renderers
render_dirs: render_dirs:
- /var/lib/salt/renderers - /var/lib/salt/renderers
.. conf_minion:: utils_dirs
``utils_dirs``
--------------
Default: ``[]``
A list of extra directories to search for Salt utilities
.. code-block:: yaml
utils_dirs:
- /var/lib/salt/utils
.. conf_minion:: cython_enable .. conf_minion:: cython_enable
``cython_enable`` ``cython_enable``
@ -1320,6 +1348,20 @@ below.
providers: providers:
service: systemd service: systemd
.. conf_minion:: modules_max_memory
``modules_max_memory``
----------------------
Default: ``-1``
Specify a max size (in bytes) for modules on import. This feature is currently
only supported on *nix operating systems and requires psutil.
.. code-block:: yaml
modules_max_memory: -1
Top File Settings Top File Settings
================= =================
@ -1451,6 +1493,52 @@ environment lacks one.
default_top: dev default_top: dev
.. conf_minion:: startup_states
``startup_states``
------------------
Default: ``''``
States to run when the minion daemon starts. To enable, set ``startup_states`` to:
- ``highstate``: Execute state.highstate
- ``sls``: Read in the sls_list option and execute the named sls files
- ``top``: Read top_file option and execute based on that file on the Master
.. code-block:: yaml
startup_states: ''
.. conf_minion:: sls_list
``sls_list``
------------
Default: ``[]``
List of states to run when the minion starts up if ``startup_states`` is set to ``sls``.
.. code-block:: yaml
sls_list:
- edit.vim
- hyper
.. conf_minion:: top_file
``top_file``
------------
Default: ``''``
Top file to execute if ``startup_states`` is set to ``top``.
.. code-block:: yaml
top_file: ''
State Management Settings State Management Settings
========================= =========================
@ -1467,7 +1555,7 @@ The default renderer used for local state executions
renderer: yaml_jinja renderer: yaml_jinja
-.. conf_master:: test
+.. conf_minion:: test
``test`` ``test``
-------- --------
@ -1888,6 +1976,35 @@ before the initial key exchange. The master fingerprint can be found by running
master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13' master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13'
.. conf_minion:: keysize
``keysize``
-----------
Default: ``2048``
The size of key that should be generated when creating new keys.
.. code-block:: yaml
keysize: 2048
.. conf_minion:: permissive_pki_access
``permissive_pki_access``
-------------------------
Default: ``False``
Enable permissive access to the salt keys. This allows you to run the
master or minion as root, but have a non-root group be given access to
your pki_dir. To make the access explicit, root must belong to the group
you've given access to. This is potentially quite insecure.
.. code-block:: yaml
permissive_pki_access: False
.. conf_minion:: verify_master_pubkey_sign .. conf_minion:: verify_master_pubkey_sign
``verify_master_pubkey_sign`` ``verify_master_pubkey_sign``
@ -1995,7 +2112,7 @@ blocked. If `cmd_whitelist_glob` is NOT SET, then all shell commands are permitt
- 'cat /etc/fstab' - 'cat /etc/fstab'
-.. conf_master:: ssl
+.. conf_minion:: ssl
``ssl`` ``ssl``
------- -------
@ -2021,6 +2138,62 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23
ssl_version: PROTOCOL_TLSv1_2 ssl_version: PROTOCOL_TLSv1_2
Reactor Settings
================
.. conf_minion:: reactor
``reactor``
-----------
Default: ``[]``
Defines a salt reactor. See the :ref:`Reactor <reactor>` documentation for more
information.
.. code-block:: yaml
reactor: []
.. conf_minion:: reactor_refresh_interval
``reactor_refresh_interval``
----------------------------
Default: ``60``
The TTL for the cache of the reactor configuration.
.. code-block:: yaml
reactor_refresh_interval: 60
.. conf_minion:: reactor_worker_threads
``reactor_worker_threads``
--------------------------
Default: ``10``
The number of workers for the runner/wheel in the reactor.
.. code-block:: yaml
reactor_worker_threads: 10
.. conf_minion:: reactor_worker_hwm
``reactor_worker_hwm``
----------------------
Default: ``10000``
The queue size for workers in the reactor.
.. code-block:: yaml
reactor_worker_hwm: 10000
Thread Settings Thread Settings
=============== ===============
@ -2291,6 +2464,62 @@ option then the minion will log a warning message.
- /etc/roles/webserver - /etc/roles/webserver
Keepalive Settings
==================
.. conf_minion:: tcp_keepalive
``tcp_keepalive``
-----------------
Default: ``True``
The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt
connectivity issues in messy network environments with misbehaving firewalls.
.. code-block:: yaml
tcp_keepalive: True
.. conf_minion:: tcp_keepalive_cnt
``tcp_keepalive_cnt``
---------------------
Default: ``-1``
Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_cnt: -1
.. conf_minion:: tcp_keepalive_idle
``tcp_keepalive_idle``
----------------------
Default: ``300``
Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_idle: 300
.. conf_minion:: tcp_keepalive_intvl
``tcp_keepalive_intvl``
-----------------------
Default: ``-1``
Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects.
.. code-block:: yaml
tcp_keepalive_intvl: -1
Frozen Build Update Settings Frozen Build Update Settings
============================ ============================
@ -2392,6 +2621,36 @@ out.
winrepo_dir: 'D:\winrepo' winrepo_dir: 'D:\winrepo'
.. conf_minion:: winrepo_dir_ng
``winrepo_dir_ng``
------------------
.. versionadded:: 2015.8.0
A new :ref:`ng <windows-package-manager>` repo was added.
Default: ``/srv/salt/win/repo-ng``
Location on the minion where the :conf_minion:`winrepo_remotes_ng` are checked
out for 2015.8.0 and later minions.
.. code-block:: yaml
winrepo_dir_ng: /srv/salt/win/repo-ng
.. conf_minion:: winrepo_source_dir
``winrepo_source_dir``
----------------------
Default: ``salt://win/repo-ng/``
The source location for the winrepo sls files.
.. code-block:: yaml
winrepo_source_dir: salt://win/repo-ng/
.. conf_minion:: winrepo_cachefile .. conf_minion:: winrepo_cachefile
.. conf_minion:: win_repo_cachefile .. conf_minion:: win_repo_cachefile
@ -2444,3 +2703,33 @@ URL of the the repository:
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
ID is useful in that it allows one to revert back to a previous version in the ID is useful in that it allows one to revert back to a previous version in the
event that an error is introduced in the latest revision of the repo. event that an error is introduced in the latest revision of the repo.
.. conf_minion:: winrepo_remotes_ng
``winrepo_remotes_ng``
----------------------
.. versionadded:: 2015.8.0
A new :ref:`ng <windows-package-manager>` repo was added.
Default: ``['https://github.com/saltstack/salt-winrepo-ng.git']``
List of git repositories to checkout and include in the winrepo for
2015.8.0 and later minions.
.. code-block:: yaml
winrepo_remotes_ng:
- https://github.com/saltstack/salt-winrepo-ng.git
To specify a specific revision of the repository, prepend a commit ID to the
URL of the repository:
.. code-block:: yaml
winrepo_remotes_ng:
- '<commit_id> https://github.com/saltstack/salt-winrepo-ng.git'
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
ID is useful in that it allows one to revert back to a previous version in the
event that an error is introduced in the latest revision of the repo.


@ -405,6 +405,29 @@ similar to the following:
return __virtualname__ return __virtualname__
return False return False
The ``__virtual__()`` function can return a ``True`` or ``False`` boolean, a tuple,
or a string. If it returns a ``True`` value, this ``__virtualname__`` module-level
attribute can be set as seen in the above example. This is the string that the module
should be referred to as.
When ``__virtual__()`` returns a tuple, the first item should be a boolean and the
second should be a string. This is typically done when the module should not load. The
first value of the tuple is ``False`` and the second is the error message to display
for why the module did not load.
For example:
.. code-block:: python
def __virtual__():
    '''
    Only load if git exists on the system
    '''
    if salt.utils.which('git') is None:
        return (False,
                'The git execution module cannot be loaded: git unavailable.')
    else:
        return True
Documentation Documentation
============= =============


@ -21,7 +21,7 @@ Or you may specify a map which includes all VMs to perform the action on:
$ salt-cloud -a reboot -m /path/to/mapfile

-The following is a list of actions currently supported by salt-cloud:
+The following is an example list of actions currently supported by ``salt-cloud``:

.. code-block:: yaml

@ -36,5 +36,5 @@ The following is a list of actions currently supported by salt-cloud:

- start
- stop

-Another useful reference for viewing more salt-cloud actions is the
-:ref:Salt Cloud Feature Matrix <salt-cloud-feature-matrix>
+Another useful reference for viewing more ``salt-cloud`` actions is the
+:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.


@ -78,6 +78,7 @@ parameters are discussed in more detail below.
# RHEL -> ec2-user # RHEL -> ec2-user
# CentOS -> ec2-user # CentOS -> ec2-user
# Ubuntu -> ubuntu # Ubuntu -> ubuntu
# Debian -> admin
# #
ssh_username: ec2-user ssh_username: ec2-user


@ -371,7 +371,6 @@ both.
compute_name: cloudServersOpenStack compute_name: cloudServersOpenStack
protocol: ipv4 protocol: ipv4
compute_region: DFW compute_region: DFW
protocol: ipv4
user: myuser user: myuser
tenant: 5555555 tenant: 5555555
password: mypass password: mypass


@ -26,5 +26,5 @@ gathering information about instances on a provider basis:
$ salt-cloud -f list_nodes_full linode
$ salt-cloud -f list_nodes_select linode

-Another useful reference for viewing salt-cloud functions is the
+Another useful reference for viewing ``salt-cloud`` functions is the
:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.


@ -64,7 +64,9 @@ automatically installed salt-cloud for you. Use your distribution's package
manager to install the ``salt-cloud`` package from the same repo that you
used to install Salt. These repos will automatically be setup by Salt Bootstrap.

-If there is no salt-cloud package, install with ``pip install salt-cloud``.
+Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when
+installing Salt. The ``-L`` option will install ``salt-cloud`` and the required
+``libcloud`` package.

.. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap
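
For example, on a typical Linux host the bootstrap script can be fetched and run with
``-L`` as described above (the download command matches the one used elsewhere in these
docs):

.. code-block:: bash

    curl -o install_salt.sh -L https://bootstrap.saltstack.com
    sudo sh install_salt.sh -P -L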


@ -49,7 +49,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the
.. code-block:: yaml

-joyent_512
+joyent_512:
  provider: my-joyent-config
  size: g4-highcpu-512M
  image: ubuntu-16.04


@ -12,7 +12,9 @@ automatically installed salt-cloud for you. Use your distribution's package
manager to install the ``salt-cloud`` package from the same repo that you
used to install Salt. These repos will automatically be setup by Salt Bootstrap.

-If there is no salt-cloud package, install with ``pip install salt-cloud``.
+Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when
+installing Salt. The ``-L`` option will install ``salt-cloud`` and the required
+``libcloud`` package.

.. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap


@ -260,6 +260,13 @@ The Salt development team will back-port bug fixes made to ``develop`` to the
current release branch if the contributor cannot create the pull request current release branch if the contributor cannot create the pull request
against that branch. against that branch.
Release Branches
----------------
For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases.
Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well.
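
As a sketch of what that means for a contributor (the remote and branch names are
assumed), a fix aimed at such a release would be based on the release branch rather than
on ``develop``:

.. code-block:: bash

    # Hypothetical example: start a fix from the 2017.7.1 release branch
    git fetch upstream
    git checkout -b my-bugfix upstream/2017.7.1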
Keeping Salt Forks in Sync Keeping Salt Forks in Sync
========================== ==========================

File diff suppressed because it is too large.


@ -0,0 +1,15 @@
============================
Salt 2016.11.7 Release Notes
============================
Version 2016.11.7 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
Changes for v2016.11.6..v2016.11.7
----------------------------------
Security Fix
============
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com


@ -4,23 +4,12 @@ Salt 2016.3.7 Release Notes
Version 2016.3.7 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.

-New master configuration option `allow_minion_key_revoke`, defaults to True. This option
-controls whether a minion can request that the master revoke its key. When True, a minion
-can request a key revocation and the master will comply. If it is False, the key will not
-be revoked by the msater.
-
-New master configuration option `require_minion_sign_messages`
-This requires that minions cryptographically sign the messages they
-publish to the master. If minions are not signing, then log this information
-at loglevel 'INFO' and drop the message without acting on it.
-
-New master configuration option `drop_messages_signature_fail`
-Drop messages from minions when their signatures do not validate.
-Note that when this option is False but `require_minion_sign_messages` is True
-minions MUST sign their messages but the validity of their signatures
-is ignored.
-
-New minion configuration option `minion_sign_messages`
-Causes the minion to cryptographically sign the payload of messages it places
-on the event bus for the master. The payloads are signed with the minion's
-private key so the master can verify the signature with its public key.
+Changes for v2016.3.6..v2016.3.7
+--------------------------------
+
+Security Fix
+============
+
+CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
+
+Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com


@ -0,0 +1,29 @@
===========================
Salt 2016.3.8 Release Notes
===========================
Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Changes for v2016.3.7..v2016.3.8
--------------------------------
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
controls whether a minion can request that the master revoke its key. When True, a minion
can request a key revocation and the master will comply. If it is False, the key will not
be revoked by the master.
New master configuration option `require_minion_sign_messages`
This requires that minions cryptographically sign the messages they
publish to the master. If minions are not signing, then log this information
at loglevel 'INFO' and drop the message without acting on it.
New master configuration option `drop_messages_signature_fail`
Drop messages from minions when their signatures do not validate.
Note that when this option is False but `require_minion_sign_messages` is True
minions MUST sign their messages but the validity of their signatures
is ignored.
New minion configuration option `minion_sign_messages`
Causes the minion to cryptographically sign the payload of messages it places
on the event bus for the master. The payloads are signed with the minion's
private key so the master can verify the signature with its public key.


@ -8,7 +8,7 @@ Installing/Testing a Salt Release Candidate
It's time for a new feature release of Salt! Follow the instructions below to
install the latest release candidate of Salt, and try :ref:`all the shiny new
-features <release-2016-11-0>`! Be sure to report any bugs you find on `Github
+features <release-2017-7-0>`! Be sure to report any bugs you find on `Github
<https://github.com/saltstack/salt/issues/new/>`_.
Installing Using Packages Installing Using Packages
@ -32,32 +32,12 @@ Builds for a few platforms are available as part of the RC at https://repo.salts
Available builds:

-- Amazon Linux
-- Debian 8
-- macOS
-- RHEL 7
-- SmartOS (see below)
-- Ubuntu 16.04
+- Ubuntu16
+- Redhat7
- Windows

.. FreeBSD
SmartOS
-------
Release candidate builds for SmartOS are available at http://pkg.blackdot.be/extras/salt-2016.11rc/.
On a base64 2015Q4-x86_64 based native zone the package can be installed by the following:
.. code-block:: bash
pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2015Q4_x86_64.tgz
When using the 2016Q2-tools release on the global zone by the following:
.. code-block:: bash
pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2016Q2_TOOLS.tgz
Installing Using Bootstrap Installing Using Bootstrap
========================== ==========================
@ -67,14 +47,14 @@ You can install a release candidate of Salt using `Salt Bootstrap
.. code-block:: bash

curl -o install_salt.sh -L https://bootstrap.saltstack.com
-sudo sh install_salt.sh -P git v2016.11.0rc2
+sudo sh install_salt.sh -P git v2017.7.0rc1

If you want to also install a master using Salt Bootstrap, use the ``-M`` flag:

.. code-block:: bash

curl -o install_salt.sh -L https://bootstrap.saltstack.com
-sudo sh install_salt.sh -P -M git v2016.11.0rc2
+sudo sh install_salt.sh -P -M git v2017.7.0rc1

If you want to install only a master and not a minion using Salt Bootstrap, use
the ``-M`` and ``-N`` flags:
@ -82,13 +62,13 @@ the ``-M`` and ``-N`` flags:
.. code-block:: bash

curl -o install_salt.sh -L https://bootstrap.saltstack.com
-sudo sh install_salt.sh -P -M -N git v2016.11.0rc2
+sudo sh install_salt.sh -P -M -N git v2017.7.0rc1

Installing Using PyPI
=====================

Installing from the `source archive
-<https://pypi.python.org/packages/7a/87/3b29ac215208bed9559d6c4df24175ddd1d52e62c5c00ae3afb3b7d9144d/salt-2016.11.0rc2.tar.gz>`_ on
+<https://pypi.python.org/packages/5c/cf/13c14f8bcd7b5076b9a8c3580f9582c1c4ea8b0458793ac6744ea66c0baf/salt-2017.7.0rc1.tar.gz>`_ on
`PyPI <https://pypi.python.org/pypi>`_ is fairly straightforward.

.. note::
@ -126,4 +106,4 @@ Then install salt using the following command:
.. code-block:: bash

-sudo pip install salt==2016.11.0rc2
+sudo pip install salt==2017.7.0rc1


@ -64,7 +64,8 @@ Deploy ssh key for salt-ssh
===========================

By default, salt-ssh will generate key pairs for ssh, the default path will be
-/etc/salt/pki/master/ssh/salt-ssh.rsa
+``/etc/salt/pki/master/ssh/salt-ssh.rsa``. The key generation happens when you run
+``salt-ssh`` for the first time.

You can use ssh-copy-id (the OpenSSH key deployment tool) to deploy keys to your servers.
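
A minimal sketch of that deployment (the user and host are placeholders; the ``.pub``
path assumes the default key location mentioned above):

.. code-block:: bash

    ssh-copy-id -i /etc/salt/pki/master/ssh/salt-ssh.rsa.pub user@target.example.com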


@ -28,6 +28,7 @@ hit `Enter`. Also, you can convert tabs to 2 spaces by these commands in Vim:
Indentation Indentation
=========== ===========
The suggested syntax for YAML files is to use 2 spaces for indentation, The suggested syntax for YAML files is to use 2 spaces for indentation,
but YAML will follow whatever indentation system that the individual file but YAML will follow whatever indentation system that the individual file
uses. Indentation of two spaces works very well for SLS files given the uses. Indentation of two spaces works very well for SLS files given the
@ -113,7 +114,23 @@ versions will also be loaded as booleans (``true``, ``false``, ``yes``, ``no``,
``on``, and ``off``). This can be especially problematic when constructing
Pillar data. Make sure that your Pillars which need to use the string versions
of these values are enclosed in quotes. Pillars will be parsed twice by salt,
-so you'll need to wrap your values in multiple quotes, for example '"false"'.
+so you'll need to wrap your values in multiple quotes, including double quotation
marks (``" "``) and single quotation marks (``' '``). Note that spaces are included
in the quotation type examples for clarity.
Multiple quoting examples look like this:
.. code-block:: yaml
- '"false"'
- "'True'"
- "'YES'"
- '"No"'
.. note::
When using multiple quotes in this manner, they must be different. Using ``"" ""``
or ``'' ''`` won't work in this case (spaces are included in examples for clarity).
The '%' Sign The '%' Sign
============ ============


@ -166,13 +166,15 @@ Ubuntu 14.04 LTS and Debian Wheezy (7.x) also have a compatible version packaged
# apt-get install python-git

-If your master is running an older version (such as Ubuntu 12.04 LTS or Debian
-Squeeze), then you will need to install GitPython using either pip_ or
-easy_install (it is recommended to use pip). Version 0.3.2.RC1 is now marked as
-the stable release in PyPI, so it should be a simple matter of running ``pip
-install GitPython`` (or ``easy_install GitPython``) as root.
+GitPython_ requires the ``git`` CLI utility to work. If installed from a system
+package, then git should already be installed, but if installed via pip_ then
+it may still be necessary to install git separately. For MacOS users,
+GitPython_ comes bundled in with the Salt installer, but git must still be
+installed for it to work properly. Git can be installed in several ways,
+including by installing XCode_.

-.. _`pip`: http://www.pip-installer.org/
+.. _pip: http://www.pip-installer.org/
+.. _XCode: https://developer.apple.com/xcode/
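
A minimal sketch of both install paths (assuming pip is available, and that macOS users
want the git CLI via the Xcode command line tools):

.. code-block:: bash

    # Install GitPython from PyPI
    pip install GitPython

    # On macOS, install the git CLI via the Xcode command line tools
    xcode-select --install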
.. warning::


@ -110,7 +110,7 @@ To pass through a file that contains jinja + yaml templating (the default):
method='POST', method='POST',
data_file='/srv/salt/somefile.jinja', data_file='/srv/salt/somefile.jinja',
data_render=True, data_render=True,
-template_data={'key1': 'value1', 'key2': 'value2'}
+template_dict={'key1': 'value1', 'key2': 'value2'}
) )
To pass through a file that contains mako templating: To pass through a file that contains mako templating:
@ -123,7 +123,7 @@ To pass through a file that contains mako templating:
data_file='/srv/salt/somefile.mako', data_file='/srv/salt/somefile.mako',
data_render=True, data_render=True,
data_renderer='mako', data_renderer='mako',
-template_data={'key1': 'value1', 'key2': 'value2'}
+template_dict={'key1': 'value1', 'key2': 'value2'}
) )
Because this function uses Salt's own rendering system, any Salt renderer can Because this function uses Salt's own rendering system, any Salt renderer can
@ -140,7 +140,7 @@ However, this can be changed to ``master`` if necessary.
method='POST', method='POST',
data_file='/srv/salt/somefile.jinja', data_file='/srv/salt/somefile.jinja',
data_render=True, data_render=True,
-template_data={'key1': 'value1', 'key2': 'value2'},
+template_dict={'key1': 'value1', 'key2': 'value2'},
opts=__opts__ opts=__opts__
) )
@ -149,7 +149,7 @@ However, this can be changed to ``master`` if necessary.
method='POST', method='POST',
data_file='/srv/salt/somefile.jinja', data_file='/srv/salt/somefile.jinja',
data_render=True, data_render=True,
-template_data={'key1': 'value1', 'key2': 'value2'},
+template_dict={'key1': 'value1', 'key2': 'value2'},
node='master' node='master'
) )
@ -170,11 +170,11 @@ a Python dict.
header_file='/srv/salt/headers.jinja', header_file='/srv/salt/headers.jinja',
header_render=True, header_render=True,
header_renderer='jinja', header_renderer='jinja',
-template_data={'key1': 'value1', 'key2': 'value2'}
+template_dict={'key1': 'value1', 'key2': 'value2'}
) )
Because much of the data that would be templated between headers and data may be
-the same, the ``template_data`` is the same for both. Correcting possible
+the same, the ``template_dict`` is the same for both. Correcting possible
variable name collisions is up to the user.
Authentication Authentication


@ -75,7 +75,7 @@ The default location for the pillar is in /srv/pillar.
.. note:: .. note::
-The pillar location can be configured via the `pillar_roots` option inside
+The pillar location can be configured via the ``pillar_roots`` option inside
the master configuration file. It must not be in a subdirectory of the state the master configuration file. It must not be in a subdirectory of the state
tree or file_roots. If the pillar is under file_roots, any pillar targeting tree or file_roots. If the pillar is under file_roots, any pillar targeting
can be bypassed by minions. can be bypassed by minions.
@ -242,7 +242,7 @@ set in the minion's pillar, then the default of ``httpd`` will be used.
.. note:: .. note::
Under the hood, pillar is just a Python dict, so Python dict methods such Under the hood, pillar is just a Python dict, so Python dict methods such
-as `get` and `items` can be used.
+as ``get`` and ``items`` can be used.
Pillar Makes Simple States Grow Easily Pillar Makes Simple States Grow Easily
====================================== ======================================
@ -303,6 +303,18 @@ Where the vimrc source location can now be changed via pillar:
Ensuring that the right vimrc is sent out to the correct minions. Ensuring that the right vimrc is sent out to the correct minions.
The pillar top file must include a reference to the new sls pillar file:
``/srv/pillar/top.sls``:
.. code-block:: yaml
base:
'*':
- pkg
- edit.vim
Setting Pillar Data on the Command Line Setting Pillar Data on the Command Line
======================================= =======================================


@ -54,7 +54,7 @@ types like so:
salt '*' mymodule.observe_the_awesomeness salt '*' mymodule.observe_the_awesomeness
''' '''
-print __utils__['foo.bar']()
+return __utils__['foo.bar']()
Utility modules, like any other kind of Salt extension, support using a Utility modules, like any other kind of Salt extension, support using a
:ref:`__virtual__ function <modules-virtual-name>` to conditionally load them, :ref:`__virtual__ function <modules-virtual-name>` to conditionally load them,
@ -81,11 +81,56 @@ the ``foo`` utility module with a ``__virtual__`` function.
def bar(): def bar():
return 'baz' return 'baz'
Also you could even write your utility modules in object oriented fashion:
.. code-block:: python
# -*- coding: utf-8 -*-
'''
My OOP-style utils module
-------------------------
This module contains common functions for use in my other custom types.
'''
class Foo(object):
    def __init__(self):
        pass

    def bar(self):
        return 'baz'
And import them into other custom modules:
.. code-block:: python
# -*- coding: utf-8 -*-
'''
My awesome execution module
---------------------------
'''
import mymodule
def observe_the_awesomeness():
    '''
    Prints information from my utility module

    CLI Example:

    .. code-block:: bash

        salt '*' mymodule.observe_the_awesomeness
    '''
    foo = mymodule.Foo()
    return foo.bar()
These are, of course, contrived examples, but they should serve to show some of
the possibilities opened up by writing utility modules. Keep in mind though
-that States still have access to all of the execution modules, so it is not
+that states still have access to all of the execution modules, so it is not
necessary to write a utility module to make a function available to both a
-state and an execution module. One good use case for utililty modules is one
+state and an execution module. One good use case for utility modules is one
where it is necessary to invoke the same function from a custom :ref:`outputter
<all-salt.output>`/returner, as well as an execution module.

View File

@ -140,7 +140,7 @@ packages:
- 2015.8.0 and later minions: https://github.com/saltstack/salt-winrepo-ng - 2015.8.0 and later minions: https://github.com/saltstack/salt-winrepo-ng
- Earlier releases: https://github.com/saltstack/salt-winrepo - Earlier releases: https://github.com/saltstack/salt-winrepo
By default, these repositories are mirrored to ``/srv/salt/win/repo_ng`` By default, these repositories are mirrored to ``/srv/salt/win/repo-ng``
and ``/srv/salt/win/repo``. and ``/srv/salt/win/repo``.
This location can be changed in the master config file by setting the This location can be changed in the master config file by setting the

View File

@ -15,66 +15,130 @@
# This script is run as a part of the macOS Salt Installation # This script is run as a part of the macOS Salt Installation
# #
############################################################################### ###############################################################################
echo "Post install started on:" > /tmp/postinstall.txt
date >> /tmp/postinstall.txt ###############################################################################
# Define Variables
###############################################################################
# Get Minor Version
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
# Path Variables
INSTALL_DIR="/opt/salt"
BIN_DIR="$INSTALL_DIR/bin"
CONFIG_DIR="/etc/salt"
TEMP_DIR="/tmp"
SBIN_DIR="/usr/local/sbin"
###############################################################################
# Set up logging and error handling
###############################################################################
echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
quit_on_error() { quit_on_error() {
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt"
exit -1 exit -1
} }
############################################################################### ###############################################################################
# Check for existing minion config, copy if it doesn't exist # Check for existing minion config, copy if it doesn't exist
############################################################################### ###############################################################################
if [ ! -f /etc/salt/minion ]; then if [ ! -f "$CONFIG_DIR/minion" ]; then
echo "Config copy: Started..." >> /tmp/postinstall.txt echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt"
cp /etc/salt/minion.dist /etc/salt/minion cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion"
echo "Config copy: Successful" >> /tmp/postinstall.txt echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt"
fi fi
############################################################################### ###############################################################################
# Create symlink to salt-config.sh # Create symlink to salt-config.sh
############################################################################### ###############################################################################
# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt if [ ! -d "$SBIN_DIR" ]; then
if [ ! -d "/usr/local/sbin" ]; then echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt"
mkdir /usr/local/sbin mkdir "$SBIN_DIR"
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
fi fi
ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt"
ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config"
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
############################################################################### ###############################################################################
# Add salt to paths.d # Add salt to paths.d
############################################################################### ###############################################################################
# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt
if [ ! -d "/etc/paths.d" ]; then if [ ! -d "/etc/paths.d" ]; then
echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt"
mkdir /etc/paths.d mkdir /etc/paths.d
echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
fi fi
sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt' echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt"
sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt' sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt"
sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt"
echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt"
############################################################################### ###############################################################################
# Register Salt as a service # Register Salt as a service
############################################################################### ###############################################################################
echo "Service start: Enabling service..." >> /tmp/postinstall.txt setup_services_maverick() {
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt"
fi;
echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
return 0
}
setup_services_yosemite_and_later() {
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl enable system/com.saltstack.salt.minion launchctl enable system/com.saltstack.salt.minion
echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt"
echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Service is running" >> /tmp/postinstall.txt echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt"
else else
echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt"
launchctl kickstart -kp system/com.saltstack.salt.minion launchctl kickstart -kp system/com.saltstack.salt.minion
echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt"
fi fi
echo "Service start: Successful" >> /tmp/postinstall.txt echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt"
launchctl disable system/com.saltstack.salt.master launchctl disable system/com.saltstack.salt.master
launchctl disable system/com.saltstack.salt.syndic launchctl disable system/com.saltstack.salt.syndic
launchctl disable system/com.saltstack.salt.api launchctl disable system/com.saltstack.salt.api
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
echo "Post install completed successfully" >> /tmp/postinstall.txt return 0
}
echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt"
case $MINOR in
9 )
setup_services_maverick;
;;
* )
setup_services_yosemite_and_later;
;;
esac
echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt"
echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
exit 0 exit 0

View File

@ -6,7 +6,8 @@
# Date: December 2015 # Date: December 2015
# #
# Description: This script stops the salt minion service before attempting to # Description: This script stops the salt minion service before attempting to
# install Salt on macOS # install Salt on macOS. It also removes the /opt/salt/bin
# directory, symlink to salt-config, and salt from paths.d.
# #
# Requirements: # Requirements:
# - None # - None
@ -15,26 +16,126 @@
# This script is run as a part of the macOS Salt Installation # This script is run as a part of the macOS Salt Installation
# #
############################################################################### ###############################################################################
echo "Preinstall started on:" > /tmp/preinstall.txt
date >> /tmp/preinstall.txt ###############################################################################
# Define Variables
###############################################################################
# Get Minor Version
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
# Path Variables
INSTALL_DIR="/opt/salt"
BIN_DIR="$INSTALL_DIR/bin"
CONFIG_DIR="/etc/salt"
TEMP_DIR="/tmp"
SBIN_DIR="/usr/local/sbin"
###############################################################################
# Set up logging and error handling
###############################################################################
echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
quit_on_error() { quit_on_error() {
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt"
exit -1 exit -1
} }
############################################################################### ###############################################################################
# Stop the service # Stop the service
############################################################################### ###############################################################################
stop_service_maverick() {
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Stop service: Started..." >> /tmp/preinstall.txt echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt"
# /bin/launchctl unload "/Library/LaunchDaemons/com.saltstack.salt.minion.plist" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt"
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
}
stop_service_yosemite_and_later() {
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.minion launchctl disable system/com.saltstack.salt.minion
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
echo "Stop service: Successful" >> /tmp/preinstall.txt echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.master
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.syndic
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt"
launchctl disable system/com.saltstack.salt.api
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
}
echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt"
case $MINOR in
9 )
stop_service_maverick;
;;
* )
stop_service_yosemite_and_later;
;;
esac
echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt"
###############################################################################
# Remove the Symlink to salt-config.sh
###############################################################################
if [ -L "$SBIN_DIR/salt-config" ]; then
echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt"
rm "$SBIN_DIR/salt-config"
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi fi
echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt ###############################################################################
# Remove the $INSTALL_DIR directory
###############################################################################
if [ -d "$INSTALL_DIR" ]; then
echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt"
rm -rf "$INSTALL_DIR"
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
###############################################################################
# Remove salt from paths.d
###############################################################################
if [ ! -f "/etc/paths.d/salt" ]; then
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
rm "/etc/paths.d/salt"
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
fi
echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt"
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
exit 0 exit 0

View File

@ -42,6 +42,13 @@ if not %errorLevel%==0 (
) )
@echo. @echo.
:: Remove build and dist directories
@echo %0 :: Remove build and dist directories...
@echo ---------------------------------------------------------------------
rd /s /q "%SrcDir%\build"
rd /s /q "%SrcDir%\dist"
@echo.
:: Install Current Version of salt :: Install Current Version of salt
@echo %0 :: Install Current Version of salt... @echo %0 :: Install Current Version of salt...
@echo --------------------------------------------------------------------- @echo ---------------------------------------------------------------------

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-call Set Script=%SaltDir%\bin\Scripts\salt-call
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-cp Set Script=%SaltDir%\bin\Scripts\salt-cp
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-key Set Script=%SaltDir%\bin\Scripts\salt-key
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-master Set Script=%SaltDir%\bin\Scripts\salt-master
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -12,5 +12,4 @@ Set Script=%SaltDir%\bin\Scripts\salt-minion
net stop salt-minion net stop salt-minion
:: Launch Script :: Launch Script
"%Python%" "%Script%" -l debug "%Python%" -E -s "%Script%" -l debug

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-minion Set Script=%SaltDir%\bin\Scripts\salt-minion
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt-run Set Script=%SaltDir%\bin\Scripts\salt-run
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt Set Script=%SaltDir%\bin\Scripts\salt
:: Launch Script :: Launch Script
"%Python%" "%Script%" %* "%Python%" -E -s "%Script%" %*

View File

@ -334,8 +334,7 @@ Section -Post
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\" WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
; Register the Salt-Minion Service ; Register the Salt-Minion Service
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
nsExec::Exec "nssm.exe set salt-minion AppEnvironmentExtra PYTHONHOME="
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com" nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1" nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1"

View File

@ -31,6 +31,7 @@ import salt.config
import salt.loader import salt.loader
import salt.transport.client import salt.transport.client
import salt.utils import salt.utils
import salt.utils.files
import salt.utils.minions import salt.utils.minions
import salt.payload import salt.payload
@ -193,8 +194,13 @@ class LoadAuth(object):
if 'groups' in load: if 'groups' in load:
tdata['groups'] = load['groups'] tdata['groups'] = load['groups']
try:
with salt.utils.files.set_umask(0o177):
with salt.utils.fopen(t_path, 'w+b') as fp_: with salt.utils.fopen(t_path, 'w+b') as fp_:
fp_.write(self.serial.dumps(tdata)) fp_.write(self.serial.dumps(tdata))
except (IOError, OSError):
log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
return {}
return tdata return tdata
def get_tok(self, tok): def get_tok(self, tok):
@ -473,14 +479,12 @@ class Resolver(object):
tdata = self._send_token_request(load) tdata = self._send_token_request(load)
if 'token' not in tdata: if 'token' not in tdata:
return tdata return tdata
oldmask = os.umask(0o177)
try: try:
with salt.utils.files.set_umask(0o177):
with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_: with salt.utils.fopen(self.opts['token_file'], 'w+') as fp_:
fp_.write(tdata['token']) fp_.write(tdata['token'])
except (IOError, OSError): except (IOError, OSError):
pass pass
finally:
os.umask(oldmask)
return tdata return tdata
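The pattern adopted in both hunks can be reduced to the following sketch; the token path is a hypothetical value used only for illustration:

.. code-block:: python

    # salt.utils.files.set_umask applies a temporary umask and restores the
    # previous one when the block exits, replacing manual os.umask()
    # bookkeeping in a try/finally.
    import salt.utils
    import salt.utils.files

    token_path = '/tmp/example_token'  # hypothetical path

    with salt.utils.files.set_umask(0o177):
        # files created here end up with 0600 permissions
        with salt.utils.fopen(token_path, 'w+') as fp_:
            fp_.write('token-data')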
def mk_token(self, load): def mk_token(self, load):

View File

@ -19,8 +19,9 @@ import sys
# Import salt libs # Import salt libs
import salt.client import salt.client
import salt.utils.gzip_util import salt.utils.gzip_util
import salt.utils.itertools
import salt.utils.minions import salt.utils.minions
from salt.utils import parsers, to_bytes from salt.utils import parsers, to_bytes, print_cli
from salt.utils.verify import verify_log from salt.utils.verify import verify_log
import salt.output import salt.output
@ -100,10 +101,69 @@ class SaltCP(object):
empty_dirs.update(empty_dirs_) empty_dirs.update(empty_dirs_)
return files, sorted(empty_dirs) return files, sorted(empty_dirs)
def _file_dict(self, fn_):
'''
Take a path and return the contents of the file as a string
'''
if not os.path.isfile(fn_):
err = 'The referenced file, {0} is not available.'.format(fn_)
sys.stderr.write(err + '\n')
sys.exit(42)
with salt.utils.fopen(fn_, 'r') as fp_:
data = fp_.read()
return {fn_: data}
def _load_files(self):
'''
Parse the files indicated in opts['src'] and load them into a python
object for transport
'''
files = {}
for fn_ in self.opts['src']:
if os.path.isfile(fn_):
files.update(self._file_dict(fn_))
elif os.path.isdir(fn_):
print_cli(fn_ + ' is a directory, only files are supported in non-chunked mode. '
'Use "--chunked" command line argument.')
sys.exit(1)
return files
def run(self): def run(self):
''' '''
Make the salt client call Make the salt client call
''' '''
if self.opts['chunked']:
ret = self.run_chunked()
else:
ret = self.run_oldstyle()
salt.output.display_output(
ret,
self.opts.get('output', 'nested'),
self.opts)
def run_oldstyle(self):
'''
Make the salt client call using the old-style all-in-one method
'''
arg = [self._load_files(), self.opts['dest']]
local = salt.client.get_local_client(self.opts['conf_file'])
args = [self.opts['tgt'],
'cp.recv',
arg,
self.opts['timeout'],
]
selected_target_option = self.opts.get('selected_target_option', None)
if selected_target_option is not None:
args.append(selected_target_option)
return local.cmd(*args)
def run_chunked(self):
'''
Make the salt client call in the new chunked multi-call fashion
'''
files, empty_dirs = self._list_files() files, empty_dirs = self._list_files()
dest = self.opts['dest'] dest = self.opts['dest']
gzip = self.opts['gzip'] gzip = self.opts['gzip']
@ -165,7 +225,7 @@ class SaltCP(object):
) )
args = [ args = [
tgt, tgt,
'cp.recv', 'cp.recv_chunked',
[remote_path, chunk, append, gzip, mode], [remote_path, chunk, append, gzip, mode],
timeout, timeout,
] ]
@ -211,14 +271,11 @@ class SaltCP(object):
else '', else '',
tgt, tgt,
) )
args = [tgt, 'cp.recv', [remote_path, None], timeout] args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout]
if selected_target_option is not None: if selected_target_option is not None:
args.append(selected_target_option) args.append(selected_target_option)
for minion_id, minion_ret in six.iteritems(local.cmd(*args)): for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
ret.setdefault(minion_id, {})[remote_path] = minion_ret ret.setdefault(minion_id, {})[remote_path] = minion_ret
salt.output.display_output( return ret
ret,
self.opts.get('output', 'nested'),
self.opts)

View File

@ -408,8 +408,6 @@ class SyncClientMixin(object):
) )
data['success'] = False data['success'] = False
namespaced_event.fire_event(data, 'ret')
if self.store_job: if self.store_job:
try: try:
salt.utils.job.store_job( salt.utils.job.store_job(
@ -427,6 +425,9 @@ class SyncClientMixin(object):
log.error('Could not store job cache info. ' log.error('Could not store job cache info. '
'Job details for this run may be unavailable.') 'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, 'ret')
# if we fired an event, make sure to delete the event object. # if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger # This will ensure that we call destroy, which will do the 0MQ linger
log.info('Runner completed: {0}'.format(data['jid'])) log.info('Runner completed: {0}'.format(data['jid']))

View File

@ -961,10 +961,18 @@ def ssh_interface(vm_):
Return the ssh_interface type to connect to. Either 'public_ips' (default) Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'. or 'private_ips'.
''' '''
return config.get_cloud_config_value( ret = config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips', 'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False search_global=False
) )
if ret not in ('public_ips', 'private_ips'):
log.warning((
'Invalid ssh_interface: {0}. '
'Allowed options are ("public_ips", "private_ips"). '
'Defaulting to "public_ips".'
).format(ret))
ret = 'public_ips'
return ret
def get_ssh_gateway_config(vm_): def get_ssh_gateway_config(vm_):

View File

@ -726,12 +726,18 @@ def request_instance(vm_=None, call=None):
else: else:
pool = floating_ip_conf.get('pool', 'public') pool = floating_ip_conf.get('pool', 'public')
try:
floating_ip = conn.floating_ip_create(pool)['ip']
except Exception:
log.info('A new IP address could not be allocated. '
'An IP address will be pulled from the already allocated list. '
'This will cause a race condition when building in parallel.')
for fl_ip, opts in six.iteritems(conn.floating_ip_list()): for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
if opts['fixed_ip'] is None and opts['pool'] == pool: if opts['fixed_ip'] is None and opts['pool'] == pool:
floating_ip = fl_ip floating_ip = fl_ip
break break
if floating_ip is None: if floating_ip is None:
floating_ip = conn.floating_ip_create(pool)['ip'] log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name']))
def __query_node_data(vm_): def __query_node_data(vm_):
try: try:

View File

@ -135,6 +135,14 @@ Alternatively, one could use the private IP to connect by specifying:
ssh_interface: private_ips ssh_interface: private_ips
.. note::
When using floating ips from networks, if the OpenStack driver is unable to
allocate a new ip address for the server, it will check for
unassociated ip addresses in the floating ip pool. If SaltCloud is running
in parallel mode, it is possible that more than one server will attempt to
use the same ip address.
''' '''
# Import python libs # Import python libs
@ -866,39 +874,42 @@ def _assign_floating_ips(vm_, conn, kwargs):
pool = OpenStack_1_1_FloatingIpPool( pool = OpenStack_1_1_FloatingIpPool(
net['floating'], conn.connection net['floating'], conn.connection
) )
for idx in pool.list_floating_ips():
if idx.node_id is None:
floating.append(idx)
if not floating:
try: try:
floating.append(pool.create_floating_ip()) floating.append(pool.create_floating_ip())
except Exception as e: except Exception as e:
log.debug('Cannot allocate IP from floating pool \'%s\'. Checking for unassociated ips.',
net['floating'])
for idx in pool.list_floating_ips():
if idx.node_id is None:
floating.append(idx)
break
if not floating:
raise SaltCloudSystemExit( raise SaltCloudSystemExit(
'Floating pool \'{0}\' does not have any more ' 'There are no more floating IP addresses '
'please create some more or use a different ' 'available, please create some more'
'pool.'.format(net['floating'])
) )
# otherwise, attempt to obtain list without specifying pool # otherwise, attempt to obtain list without specifying pool
# this is the same as 'nova floating-ip-list' # this is the same as 'nova floating-ip-list'
elif ssh_interface(vm_) != 'private_ips': elif ssh_interface(vm_) != 'private_ips':
try: try:
# This try/except is here because it appears some # This try/except is here because it appears some
# *cough* Rackspace *cough*
# OpenStack providers return a 404 Not Found for the # OpenStack providers return a 404 Not Found for the
# floating ip pool URL if there are no pools setup # floating ip pool URL if there are no pools setup
pool = OpenStack_1_1_FloatingIpPool( pool = OpenStack_1_1_FloatingIpPool(
'', conn.connection '', conn.connection
) )
for idx in pool.list_floating_ips():
if idx.node_id is None:
floating.append(idx)
if not floating:
try: try:
floating.append(pool.create_floating_ip()) floating.append(pool.create_floating_ip())
except Exception as e: except Exception as e:
raise SaltCloudSystemExit( log.debug('Cannot allocate IP from the default floating pool. Checking for unassociated ips.')
for idx in pool.list_floating_ips():
if idx.node_id is None:
floating.append(idx)
break
if not floating:
log.warning(
'There are no more floating IP addresses ' 'There are no more floating IP addresses '
'available, please create some more' 'available, please create some more if necessary'
) )
except Exception as e: except Exception as e:
if str(e).startswith('404'): if str(e).startswith('404'):

File diff suppressed because it is too large

View File

@ -25,7 +25,7 @@ try:
) )
HAS_LIBCLOUD = True HAS_LIBCLOUD = True
LIBCLOUD_VERSION_INFO = tuple([ LIBCLOUD_VERSION_INFO = tuple([
int(part) for part in libcloud.__version__.replace('-', '.').split('.')[:3] int(part) for part in libcloud.__version__.replace('-', '.').replace('rc', '.').split('.')[:3]
]) ])
except ImportError: except ImportError:
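The added ``replace('rc', '.')`` matters for release-candidate builds of libcloud; a short illustration with an assumed version string:

.. code-block:: python

    # Without the 'rc' replacement, int('0rc2') would raise ValueError for a
    # version string such as '2.0.0rc2'.
    version = '2.0.0rc2'  # hypothetical libcloud.__version__
    info = tuple(
        int(part) for part in version.replace('-', '.').replace('rc', '.').split('.')[:3]
    )
    assert info == (2, 0, 0)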
@ -150,7 +150,7 @@ def avail_locations(conn=None, call=None):
ret[img_name] = {} ret[img_name] = {}
for attr in dir(img): for attr in dir(img):
if attr.startswith('_'): if attr.startswith('_') or attr == 'driver':
continue continue
attr_value = getattr(img, attr) attr_value = getattr(img, attr)
@ -187,7 +187,7 @@ def avail_images(conn=None, call=None):
ret[img_name] = {} ret[img_name] = {}
for attr in dir(img): for attr in dir(img):
if attr.startswith('_'): if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue continue
attr_value = getattr(img, attr) attr_value = getattr(img, attr)
if isinstance(attr_value, string_types) and not six.PY3: if isinstance(attr_value, string_types) and not six.PY3:
@ -222,7 +222,7 @@ def avail_sizes(conn=None, call=None):
ret[size_name] = {} ret[size_name] = {}
for attr in dir(size): for attr in dir(size):
if attr.startswith('_'): if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue continue
try: try:

View File

@ -199,6 +199,9 @@ VALID_OPTS = {
# The directory containing unix sockets for things like the event bus # The directory containing unix sockets for things like the event bus
'sock_dir': str, 'sock_dir': str,
# The pool size of unix sockets; a pool is needed to avoid blocking while waiting for zeromq and tcp communications.
'sock_pool_size': int,
# Specifies how the file server should backup files, if enabled. The backups # Specifies how the file server should backup files, if enabled. The backups
# live in the cache dir. # live in the cache dir.
'backup_mode': str, 'backup_mode': str,
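A hedged sketch of overriding the new ``sock_pool_size`` option when building minion opts programmatically (it can equally be set in the YAML config file; the config path is an assumption):

.. code-block:: python

    # sock_pool_size defaults to 1 (see DEFAULT_MINION_OPTS/DEFAULT_MASTER_OPTS
    # below); a larger pool can help when many concurrent IPC sends would
    # otherwise block.
    import salt.config

    opts = salt.config.minion_config('/etc/salt/minion')  # assumed path
    opts['sock_pool_size'] = 15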
@ -328,7 +331,7 @@ VALID_OPTS = {
# The TCP port on which minion events should be pulled if ipc_mode is TCP # The TCP port on which minion events should be pulled if ipc_mode is TCP
'tcp_pull_port': int, 'tcp_pull_port': int,
# The TCP port on which events for the master should be pulled if ipc_mode is TCP # The TCP port on which events for the master should be published if ipc_mode is TCP
'tcp_master_pub_port': int, 'tcp_master_pub_port': int,
# The TCP port on which events for the master should be pulled if ipc_mode is TCP # The TCP port on which events for the master should be pulled if ipc_mode is TCP
@ -1005,6 +1008,7 @@ DEFAULT_MINION_OPTS = {
'grains_deep_merge': False, 'grains_deep_merge': False,
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'), 'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'), 'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'sock_pool_size': 1,
'backup_mode': '', 'backup_mode': '',
'renderer': 'yaml_jinja', 'renderer': 'yaml_jinja',
'renderer_whitelist': [], 'renderer_whitelist': [],
@ -1230,6 +1234,7 @@ DEFAULT_MASTER_OPTS = {
'user': salt.utils.get_user(), 'user': salt.utils.get_user(),
'worker_threads': 5, 'worker_threads': 5,
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'), 'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
'sock_pool_size': 1,
'ret_port': 4506, 'ret_port': 4506,
'timeout': 5, 'timeout': 5,
'keep_jobs': 24, 'keep_jobs': 24,
@ -2122,6 +2127,7 @@ def syndic_config(master_config_path,
'sock_dir': os.path.join( 'sock_dir': os.path.join(
opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir']) opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
), ),
'sock_pool_size': master_opts['sock_pool_size'],
'cachedir': master_opts['cachedir'], 'cachedir': master_opts['cachedir'],
} }
opts.update(syndic_opts) opts.update(syndic_opts)
@ -2130,7 +2136,7 @@ def syndic_config(master_config_path,
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', 'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir' 'autosign_file', 'autoreject_file', 'token_dir'
] ]
for config_key in ('log_file', 'key_logfile'): for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
# If this is not a URI and instead a local path # If this is not a URI and instead a local path
if urlparse(opts.get(config_key, '')).scheme == '': if urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key) prepend_root_dirs.append(config_key)

View File

@ -372,17 +372,18 @@ class AsyncAuth(object):
loop_instance_map = AsyncAuth.instance_map[io_loop] loop_instance_map = AsyncAuth.instance_map[io_loop]
key = cls.__key(opts) key = cls.__key(opts)
if key not in loop_instance_map: auth = loop_instance_map.get(key)
if auth is None:
log.debug('Initializing new AsyncAuth for {0}'.format(key)) log.debug('Initializing new AsyncAuth for {0}'.format(key))
# we need to make a local variable for this, as we are going to store # we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one # it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller # references it-- this forces a reference while we return to the caller
new_auth = object.__new__(cls) auth = object.__new__(cls)
new_auth.__singleton_init__(opts, io_loop=io_loop) auth.__singleton_init__(opts, io_loop=io_loop)
loop_instance_map[key] = new_auth loop_instance_map[key] = auth
else: else:
log.debug('Re-using AsyncAuth for {0}'.format(key)) log.debug('Re-using AsyncAuth for {0}'.format(key))
return loop_instance_map[key] return auth
@classmethod @classmethod
def __key(cls, opts, io_loop=None): def __key(cls, opts, io_loop=None):
@ -1008,14 +1009,15 @@ class SAuth(AsyncAuth):
Only create one instance of SAuth per __key() Only create one instance of SAuth per __key()
''' '''
key = cls.__key(opts) key = cls.__key(opts)
if key not in SAuth.instances: auth = SAuth.instances.get(key)
if auth is None:
log.debug('Initializing new SAuth for {0}'.format(key)) log.debug('Initializing new SAuth for {0}'.format(key))
new_auth = object.__new__(cls) auth = object.__new__(cls)
new_auth.__singleton_init__(opts) auth.__singleton_init__(opts)
SAuth.instances[key] = new_auth SAuth.instances[key] = auth
else: else:
log.debug('Re-using SAuth for {0}'.format(key)) log.debug('Re-using SAuth for {0}'.format(key))
return SAuth.instances[key] return auth
@classmethod @classmethod
def __key(cls, opts, io_loop=None): def __key(cls, opts, io_loop=None):

View File

@ -716,7 +716,7 @@ class RemoteFuncs(object):
load.get('saltenv', load.get('env')), load.get('saltenv', load.get('env')),
load.get('ext'), load.get('ext'),
self.mminion.functions, self.mminion.functions,
pillar=load.get('pillar_override', {})) pillar_override=load.get('pillar_override', {}))
pillar_dirs = {} pillar_dirs = {}
data = pillar.compile_pillar(pillar_dirs=pillar_dirs) data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False): if self.opts.get('minion_data_cache', False):

View File

@ -50,7 +50,7 @@ def start(docker_url='unix://var/run/docker.sock',
.. code-block:: yaml .. code-block:: yaml
engines: engines:
docker_events: - docker_events:
docker_url: unix://var/run/docker.sock docker_url: unix://var/run/docker.sock
The config above sets up engines to listen The config above sets up engines to listen

View File

@ -14,7 +14,7 @@ keys make the engine interactive.
.. code-block:: yaml .. code-block:: yaml
engines: engines:
hipchat: - hipchat:
token: 'XXXXXX' token: 'XXXXXX'
room: 'salt' room: 'salt'
control: True control: True

View File

@ -24,6 +24,9 @@ master config.
:configuration: :configuration:
Example configuration Example configuration
.. code-block:: yaml
engines: engines:
- logentries: - logentries:
endpoint: data.logentries.com endpoint: data.logentries.com

View File

@ -8,6 +8,9 @@ them onto a logstash endpoint.
:configuration: :configuration:
Example configuration Example configuration
.. code-block:: yaml
engines: engines:
- logstash: - logstash:
host: log.my_network.com host: log.my_network.com

View File

@ -7,7 +7,7 @@ Example Config in Master or Minion config
.. code-block:: yaml .. code-block:: yaml
engines: engines:
reactor: - reactor:
refresh_interval: 60 refresh_interval: 60
worker_threads: 10 worker_threads: 10
worker_hwm: 10000 worker_hwm: 10000

View File

@ -8,6 +8,9 @@ events based on the channels they are subscribed to.
:configuration: :configuration:
Example configuration Example configuration
.. code-block:: yaml
engines: engines:
- redis_sentinel: - redis_sentinel:
hosts: hosts:

View File

@ -12,7 +12,7 @@ prefaced with a ``!``.
.. code-block:: yaml .. code-block:: yaml
engines: engines:
slack: - slack:
token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx'
control: True control: True
valid_users: valid_users:
@ -202,7 +202,7 @@ def start(token,
# Default to trying to run as a client module. # Default to trying to run as a client module.
else: else:
local = salt.client.LocalClient() local = salt.client.LocalClient()
ret = local.cmd('{0}'.format(target), cmd, args, kwargs) ret = local.cmd('{0}'.format(target), cmd, arg=args, kwarg=kwargs)
if ret: if ret:
return_text = json.dumps(ret, sort_keys=True, indent=1) return_text = json.dumps(ret, sort_keys=True, indent=1)
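The corrected call passes positional and keyword arguments through the named ``arg`` and ``kwarg`` parameters; a hedged sketch with placeholder target and function:

.. code-block:: python

    # With the old positional form, the kwargs dict landed in LocalClient.cmd's
    # later positional parameters (e.g. the timeout) instead of being sent to
    # the remote function.
    import salt.client

    local = salt.client.LocalClient()
    ret = local.cmd('minion1', 'test.arg', arg=['hello'], kwarg={'who': 'world'})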

View File

@ -73,7 +73,7 @@ class SudoExecutor(ModuleExecutorBase):
'-c', salt.syspaths.CONFIG_DIR, '-c', salt.syspaths.CONFIG_DIR,
'--', '--',
data.get('fun')] data.get('fun')]
if data['fun'] == 'state.sls': if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
kwargs['concurrent'] = True kwargs['concurrent'] = True
for arg in args: for arg in args:
self.cmd.append(_cmd_quote(str(arg))) self.cmd.append(_cmd_quote(str(arg)))

View File

@ -2,7 +2,7 @@
''' '''
Subversion Fileserver Backend Subversion Fileserver Backend
After enabling this backend, branches, and tags in a remote subversion After enabling this backend, branches and tags in a remote subversion
repository are exposed to salt as different environments. To enable this repository are exposed to salt as different environments. To enable this
backend, add ``svn`` to the :conf_master:`fileserver_backend` option in the backend, add ``svn`` to the :conf_master:`fileserver_backend` option in the
Master config file. Master config file.
@ -694,7 +694,7 @@ def file_hash(load, fnd):
def _file_lists(load, form): def _file_lists(load, form):
''' '''
Return a dict containing the file lists for files, dirs, emtydirs and symlinks Return a dict containing the file lists for files, dirs, emptydirs and symlinks
''' '''
if 'env' in load: if 'env' in load:
salt.utils.warn_until( salt.utils.warn_until(

View File

@ -20,6 +20,7 @@ import re
import platform import platform
import logging import logging
import locale import locale
import datetime
import salt.exceptions import salt.exceptions
__proxyenabled__ = ['*'] __proxyenabled__ = ['*']
@ -771,6 +772,8 @@ def _virtual(osdata):
grains['virtual_subtype'] = 'ovirt' grains['virtual_subtype'] = 'ovirt'
elif 'Google' in output: elif 'Google' in output:
grains['virtual'] = 'gce' grains['virtual'] = 'gce'
elif 'BHYVE' in output:
grains['virtual'] = 'bhyve'
except IOError: except IOError:
pass pass
elif osdata['kernel'] == 'FreeBSD': elif osdata['kernel'] == 'FreeBSD':
@ -919,28 +922,19 @@ def _windows_platform_data():
os_release = platform.release() os_release = platform.release()
info = salt.utils.win_osinfo.get_os_version_info() info = salt.utils.win_osinfo.get_os_version_info()
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function
# started reporting the Desktop version instead of the Server version on
# Server versions of Windows, so we need to look those up
# Check for Python >=2.7.12 or >=3.5.2
ver = pythonversion()['pythonversion']
if ((six.PY2 and
salt.utils.compare_versions(ver, '>=', [2, 7, 12, 'final', 0]))
or
(six.PY3 and
salt.utils.compare_versions(ver, '>=', [3, 5, 2, 'final', 0]))):
# (Product Type 1 is Desktop, Everything else is Server)
if info['ProductType'] > 1:
server = {'Vista': '2008Server', server = {'Vista': '2008Server',
'7': '2008ServerR2', '7': '2008ServerR2',
'8': '2012Server', '8': '2012Server',
'8.1': '2012ServerR2', '8.1': '2012ServerR2',
'10': '2016Server'} '10': '2016Server'}
os_release = server.get(os_release,
'Grain not found. Update lookup table ' # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function
'in the `_windows_platform_data` ' # started reporting the Desktop version instead of the Server version on
'function in `grains\\core.py`') # Server versions of Windows, so we need to look those up
# So, if you find a Server Platform that's a key in the server
# dictionary, then lookup the actual Server Release.
if info['ProductType'] > 1 and os_release in server:
os_release = server[os_release]
service_pack = None service_pack = None
if info['ServicePackMajor'] > 0: if info['ServicePackMajor'] > 0:
@ -1047,6 +1041,7 @@ _OS_NAME_MAP = {
'synology': 'Synology', 'synology': 'Synology',
'nilrt': 'NILinuxRT', 'nilrt': 'NILinuxRT',
'manjaro': 'Manjaro', 'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'antergos': 'Antergos', 'antergos': 'Antergos',
'sles': 'SUSE', 'sles': 'SUSE',
'slesexpand': 'RES', 'slesexpand': 'RES',
@ -1687,12 +1682,14 @@ def ip_fqdn():
ret[key] = [] ret[key] = []
else: else:
try: try:
start_time = datetime.datetime.utcnow()
info = socket.getaddrinfo(_fqdn, None, socket_type) info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info)) ret[key] = list(set(item[4][0] for item in info))
except socket.error: except socket.error:
if __opts__['__role'] == 'master': timediff = datetime.datetime.utcnow() - start_time
log.warning('Unable to find IPv{0} record for "{1}" causing a 10 second timeout when rendering grains. ' if timediff.seconds > 5 and __opts__['__role'] == 'master':
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn)) log.warning('Unable to find IPv{0} record for "{1}" causing a {2} second timeout when rendering grains. '
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn, timediff))
ret[key] = [] ret[key] = []
return ret return ret

View File

@ -182,7 +182,7 @@ def minion_mods(
generated modules in __context__ generated modules in __context__
:param dict utils: Utility functions which should be made available to :param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dir` in Salt modules in __utils__. See `utils_dirs` in
salt.config for additional information about salt.config for additional information about
configuration. configuration.
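A hedged sketch of supplying utility functions when building the execution-module loader by hand (the minion config path is an assumption):

.. code-block:: python

    # Load custom utility modules first, then hand them to minion_mods so the
    # resulting execution modules can reach them via __utils__.
    import salt.config
    import salt.loader

    opts = salt.config.minion_config('/etc/salt/minion')
    utils = salt.loader.utils(opts)
    mods = salt.loader.minion_mods(opts, utils=utils)
    print(mods['test.ping']())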

View File

@ -1339,7 +1339,7 @@ class AESFuncs(object):
load['id'], load['id'],
load.get('saltenv', load.get('env')), load.get('saltenv', load.get('env')),
ext=load.get('ext'), ext=load.get('ext'),
pillar=load.get('pillar_override', {}), pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv')) pillarenv=load.get('pillarenv'))
data = pillar.compile_pillar(pillar_dirs=pillar_dirs) data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
self.fs_.update_opts() self.fs_.update_opts()

View File

@ -1263,7 +1263,7 @@ class Minion(MinionBase):
ret = yield channel.send(load, timeout=timeout) ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret) raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True): def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
''' '''
Fire an event on the master, or drop message if unable to send. Fire an event on the master, or drop message if unable to send.
''' '''
@ -1282,10 +1282,6 @@ class Minion(MinionBase):
else: else:
return return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync: if sync:
try: try:
self._send_req_sync(load, timeout) self._send_req_sync(load, timeout)
@ -1296,6 +1292,12 @@ class Minion(MinionBase):
log.info('fire_master failed: {0}'.format(traceback.format_exc())) log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False return False
else: else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler): with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True return True
@ -2216,13 +2218,15 @@ class Minion(MinionBase):
if ping_interval > 0 and self.connected: if ping_interval > 0 and self.connected:
def ping_master(): def ping_master():
try: try:
if not self._fire_master('ping', 'minion_ping'): def ping_timeout_handler(*_):
if not self.opts.get('auth_safemode', True): if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**') log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5) delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay)) log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread # regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE) os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception: except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
@ -2237,7 +2241,7 @@ class Minion(MinionBase):
except Exception: except Exception:
log.critical('The beacon errored: ', exc_info=True) log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected: if beacons and self.connected:
self._fire_master(events=beacons) self._fire_master(events=beacons, sync=False)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop) self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)

View File

@ -446,11 +446,15 @@ def config(name, config, edit=True):
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]" salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
''' '''
configs = []
for entry in config: for entry in config:
key = next(six.iterkeys(entry)) key = next(six.iterkeys(entry))
configs = _parse_config(entry[key], key) configs.append(_parse_config(entry[key], key))
# Join the parsed entries into a single newline-separated config text
configstext = "\n".join(configs)
if edit: if edit:
with salt.utils.fopen(name, 'w') as configfile: with salt.utils.fopen(name, 'w') as configfile:
configfile.write('# This file is managed by Salt.\n') configfile.write('# This file is managed by Salt.\n')
configfile.write(configs) configfile.write(configstext)
return configs return configstext
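A small illustration of why accumulating and joining matters here; the parsed entries are placeholders:

.. code-block:: python

    # Before the fix each loop iteration rebound ``configs``, so only the last
    # parsed entry survived; appending and joining keeps them all.
    parsed_entries = ['Listen 22', 'MaxClients 20']  # hypothetical _parse_config output

    configs = []
    for entry in parsed_entries:
        configs.append(entry)
    configstext = '\n'.join(configs)
    assert configstext == 'Listen 22\nMaxClients 20'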

View File

@ -135,19 +135,6 @@ def _reconstruct_ppa_name(owner_name, ppa_name):
return 'ppa:{0}/{1}'.format(owner_name, ppa_name) return 'ppa:{0}/{1}'.format(owner_name, ppa_name)
def _get_repo(**kwargs):
'''
Check the kwargs for either 'fromrepo' or 'repo' and return the value.
'fromrepo' takes precedence over 'repo'.
'''
for key in ('fromrepo', 'repo'):
try:
return kwargs[key]
except KeyError:
pass
return ''
def _check_apt(): def _check_apt():
''' '''
Abort if python-apt is not installed Abort if python-apt is not installed
@ -242,18 +229,11 @@ def latest_version(*names, **kwargs):
''' '''
refresh = salt.utils.is_true(kwargs.pop('refresh', True)) refresh = salt.utils.is_true(kwargs.pop('refresh', True))
show_installed = salt.utils.is_true(kwargs.pop('show_installed', False)) show_installed = salt.utils.is_true(kwargs.pop('show_installed', False))
if 'repo' in kwargs: if 'repo' in kwargs:
# Remember to kill _get_repo() too when removing this warning. raise SaltInvocationError(
salt.utils.warn_until( 'The \'repo\' argument is invalid, use \'fromrepo\' instead'
'Hydrogen',
'The \'repo\' argument to apt.latest_version is deprecated, and '
'will be removed in Salt {version}. Please use \'fromrepo\' '
'instead.'
) )
fromrepo = _get_repo(**kwargs) fromrepo = kwargs.pop('fromrepo', None)
kwargs.pop('fromrepo', None)
kwargs.pop('repo', None)
cache_valid_time = kwargs.pop('cache_valid_time', 0) cache_valid_time = kwargs.pop('cache_valid_time', 0)
if len(names) == 0: if len(names) == 0:
@ -1380,9 +1360,10 @@ def _get_upgradable(dist_upgrade=True, **kwargs):
cmd.append('dist-upgrade') cmd.append('dist-upgrade')
else: else:
cmd.append('upgrade') cmd.append('upgrade')
fromrepo = _get_repo(**kwargs) try:
if fromrepo: cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])])
cmd.extend(['-o', 'APT::Default-Release={0}'.format(fromrepo)]) except KeyError:
pass
call = __salt__['cmd.run_all'](cmd, call = __salt__['cmd.run_all'](cmd,
python_shell=False, python_shell=False,

View File

@ -1892,7 +1892,7 @@ def list_policy_versions(policy_name,
return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions') return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions')
except boto.exception.BotoServerError as e: except boto.exception.BotoServerError as e:
log.debug(e) log.debug(e)
msg = 'Failed to list {0} policy vesions.' msg = 'Failed to list {0} policy versions.'
log.error(msg.format(policy_name)) log.error(msg.format(policy_name))
return [] return []

View File

@ -211,7 +211,7 @@ def _gather_pillar(pillarenv, pillar_override):
__grains__, __grains__,
__opts__['id'], __opts__['id'],
__opts__['environment'], __opts__['environment'],
pillar=pillar_override, pillar_override=pillar_override,
pillarenv=pillarenv pillarenv=pillarenv
) )
ret = pillar.compile_pillar() ret = pillar.compile_pillar()
@ -526,10 +526,25 @@ def _run(cmd,
try: try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs) proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
except (OSError, IOError) as exc: except (OSError, IOError) as exc:
raise CommandExecutionError( msg = (
'Unable to run command \'{0}\' with the context \'{1}\', ' 'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: {2}'.format(cmd, kwargs, exc) 'reason: '.format(
cmd if _check_loglevel(output_loglevel) is not None
else 'REDACTED',
kwargs
) )
)
try:
if exc.filename is None:
msg += 'command not found'
else:
msg += '{0}: {1}'.format(exc, exc.filename)
except AttributeError:
# Both IOError and OSError have the filename attribute, so this
# is a precaution in case the exception classes in the previous
# try/except are changed.
msg += 'unknown'
raise CommandExecutionError(msg)
try: try:
proc.run() proc.run()

View File

@ -1972,6 +1972,8 @@ def acl_create(consul_url=None, **kwargs):
Create a new ACL token. Create a new ACL token.
:param consul_url: The Consul server URL. :param consul_url: The Consul server URL.
:param id: Unique identifier for the ACL to create;
leave it blank to let the Consul server generate one.
:param name: Meaningful indicator of the ACL's purpose. :param name: Meaningful indicator of the ACL's purpose.
:param type: Type is either client or management. A management :param type: Type is either client or management. A management
token is comparable to a root user and has the token is comparable to a root user and has the
@ -2002,6 +2004,9 @@ def acl_create(consul_url=None, **kwargs):
else: else:
raise SaltInvocationError('Required argument "name" is missing.') raise SaltInvocationError('Required argument "name" is missing.')
if 'id' in kwargs:
data['ID'] = kwargs['id']
if 'type' in kwargs: if 'type' in kwargs:
data['Type'] = kwargs['type'] data['Type'] = kwargs['type']
@ -2120,7 +2125,7 @@ def acl_delete(consul_url=None, **kwargs):
ret['res'] = False ret['res'] = False
return ret return ret
function = 'acl/delete/{0}'.format(kwargs['id']) function = 'acl/destroy/{0}'.format(kwargs['id'])
res = _query(consul_url=consul_url, res = _query(consul_url=consul_url,
data=data, data=data,
method='PUT', method='PUT',

View File

@ -48,7 +48,7 @@ def _gather_pillar(pillarenv, pillar_override):
__grains__, __grains__,
__opts__['id'], __opts__['id'],
__opts__['environment'], __opts__['environment'],
pillar=pillar_override, pillar_override=pillar_override,
pillarenv=pillarenv pillarenv=pillarenv
) )
ret = pillar.compile_pillar() ret = pillar.compile_pillar()
@ -57,7 +57,36 @@ def _gather_pillar(pillarenv, pillar_override):
return ret return ret
def recv(dest, chunk, append=False, compressed=True, mode=None): def recv(files, dest):
'''
Used with salt-cp, pass the files dict, and the destination.
This function receives files copied from the master by salt-cp in non-chunked mode.
It does not work via the CLI.
'''
ret = {}
for path, data in six.iteritems(files):
if os.path.basename(path) == os.path.basename(dest) \
and not os.path.isdir(dest):
final = dest
elif os.path.isdir(dest):
final = os.path.join(dest, os.path.basename(path))
elif os.path.isdir(os.path.dirname(dest)):
final = dest
else:
return 'Destination unavailable'
try:
with salt.utils.fopen(final, 'w+') as fp_:
fp_.write(data)
ret[final] = True
except IOError:
ret[final] = False
return ret
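A hedged sketch of how the non-chunked ``salt-cp`` path invokes this function from the master side; the target, source file, and contents are placeholders:

.. code-block:: python

    # run_oldstyle in salt-cp builds exactly this kind of call: a dict of
    # {source_path: contents} plus the destination path.
    import salt.client

    local = salt.client.LocalClient()
    files = {'/tmp/motd': 'Hello from the master\n'}
    ret = local.cmd('*', 'cp.recv', [files, '/etc/motd'])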
def recv_chunked(dest, chunk, append=False, compressed=True, mode=None):
''' '''
This function receives files copied to the minion using ``salt-cp`` and is This function receives files copied to the minion using ``salt-cp`` and is
not intended to be used directly on the CLI. not intended to be used directly on the CLI.

View File

@ -60,17 +60,22 @@ def __virtual__():
return (False, 'The debbuild module could not be loaded: unsupported OS family') return (False, 'The debbuild module could not be loaded: unsupported OS family')
def _check_repo_sign_utils_support(): def _check_repo_sign_utils_support(name):
util_name = 'debsign' '''
if salt.utils.which(util_name): Check for specified command name in search path
'''
if salt.utils.which(name):
return True return True
else: else:
raise CommandExecutionError( raise CommandExecutionError(
'utility \'{0}\' needs to be installed'.format(util_name) 'utility \'{0}\' needs to be installed or made available in search path'.format(name)
) )
def _check_repo_gpg_phrase_utils_support(): def _check_repo_gpg_phrase_utils_support():
'''
Check for /usr/lib/gnupg2/gpg-preset-passphrase is installed
'''
util_name = '/usr/lib/gnupg2/gpg-preset-passphrase' util_name = '/usr/lib/gnupg2/gpg-preset-passphrase'
if __salt__['file.file_exists'](util_name): if __salt__['file.file_exists'](util_name):
return True return True
@ -170,8 +175,8 @@ def _get_repo_dists_env(env):
'ORIGIN': ('O', 'Origin', 'SaltStack'), 'ORIGIN': ('O', 'Origin', 'SaltStack'),
'LABEL': ('O', 'Label', 'salt_debian'), 'LABEL': ('O', 'Label', 'salt_debian'),
'SUITE': ('O', 'Suite', 'stable'), 'SUITE': ('O', 'Suite', 'stable'),
'VERSION': ('O', 'Version', '8.1'), 'VERSION': ('O', 'Version', '9.0'),
'CODENAME': ('M', 'Codename', 'jessie'), 'CODENAME': ('M', 'Codename', 'stretch'),
'ARCHS': ('M', 'Architectures', 'i386 amd64 source'), 'ARCHS': ('M', 'Architectures', 'i386 amd64 source'),
'COMPONENTS': ('M', 'Components', 'main'), 'COMPONENTS': ('M', 'Components', 'main'),
'DESCRIPTION': ('O', 'Description', 'SaltStack debian package repo'), 'DESCRIPTION': ('O', 'Description', 'SaltStack debian package repo'),
@ -205,7 +210,7 @@ def _get_repo_dists_env(env):
else: else:
env_dists += '{0}: {1}\n'.format(key, value) env_dists += '{0}: {1}\n'.format(key, value)
## ensure mandatories are included # ensure mandatories are included
env_keys = list(env.keys()) env_keys = list(env.keys())
for key in env_keys: for key in env_keys:
if key in dflts_keys and dflts_dict[key][0] == 'M' and key not in env_man_seen: if key in dflts_keys and dflts_dict[key][0] == 'M' and key not in env_man_seen:
@ -578,7 +583,8 @@ def make_repo(repodir,
with salt.utils.fopen(repoconfopts, 'w') as fow: with salt.utils.fopen(repoconfopts, 'w') as fow:
fow.write('{0}'.format(repocfg_opts)) fow.write('{0}'.format(repocfg_opts))
local_fingerprint = None local_keygrip_to_use = None
local_key_fingerprint = None
local_keyid = None local_keyid = None
phrase = '' phrase = ''
@ -587,17 +593,14 @@ def make_repo(repodir,
gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome) gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome)
gpg_tty_info_dict = {} gpg_tty_info_dict = {}
# test if using older than gnupg 2.1, env file exists # if using older than gnupg 2.1, then env file exists
older_gnupg = __salt__['file.file_exists'](gpg_info_file) older_gnupg = __salt__['file.file_exists'](gpg_info_file)
# interval of 0.125 is really too fast on some systems
interval = 0.5
if keyid is not None: if keyid is not None:
with salt.utils.fopen(repoconfdist, 'a') as fow: with salt.utils.fopen(repoconfdist, 'a') as fow:
fow.write('SignWith: {0}\n'.format(keyid)) fow.write('SignWith: {0}\n'.format(keyid))
## import_keys # import_keys
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None)) pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None)) pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
@ -621,21 +624,37 @@ def make_repo(repodir,
local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome) local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
for gpg_key in local_keys: for gpg_key in local_keys:
if keyid == gpg_key['keyid'][8:]: if keyid == gpg_key['keyid'][8:]:
local_fingerprint = gpg_key['fingerprint'] local_keygrip_to_use = gpg_key['fingerprint']
local_key_fingerprint = gpg_key['fingerprint']
local_keyid = gpg_key['keyid'] local_keyid = gpg_key['keyid']
break break
if not older_gnupg:
_check_repo_sign_utils_support('gpg2')
cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.which('gpg2'))
local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas)
local_keys2 = iter(local_keys2_keygrip.splitlines())
try:
for line in local_keys2:
if line.startswith('sec'):
line_fingerprint = next(local_keys2).lstrip().rstrip()
if local_key_fingerprint == line_fingerprint:
lkeygrip = next(local_keys2).split('=')
local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
break
except StopIteration:
raise SaltInvocationError(
'unable to find keygrip associated with fingerprint \'{0}\' for keyid \'{1}\''
.format(local_key_fingerprint, local_keyid)
)
if local_keyid is None: if local_keyid is None:
raise SaltInvocationError( raise SaltInvocationError(
'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\'' 'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
.format(keyid, gnupghome) .format(keyid, gnupghome)
) )
_check_repo_sign_utils_support() _check_repo_sign_utils_support('debsign')
if use_passphrase:
_check_repo_gpg_phrase_utils_support()
phrase = __salt__['pillar.get']('gpg_passphrase')
if older_gnupg: if older_gnupg:
with salt.utils.fopen(gpg_info_file, 'r') as fow: with salt.utils.fopen(gpg_info_file, 'r') as fow:
@ -656,10 +675,30 @@ def make_repo(repodir,
__salt__['environ.setenv'](gpg_tty_info_dict) __salt__['environ.setenv'](gpg_tty_info_dict)
break break
## sign_it_here if use_passphrase:
for file in os.listdir(repodir): _check_repo_gpg_phrase_utils_support()
if file.endswith('.dsc'): phrase = __salt__['pillar.get']('gpg_passphrase')
abs_file = os.path.join(repodir, file) cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_keygrip_to_use)
__salt__['cmd.run'](cmd, runas=runas)
for debfile in os.listdir(repodir):
abs_file = os.path.join(repodir, debfile)
if debfile.endswith('.changes'):
os.remove(abs_file)
if debfile.endswith('.dsc'):
# sign_it_here
if older_gnupg:
if local_keyid is not None:
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
else:
# interval of 0.125 is really too fast on some systems
interval = 0.5
if local_keyid is not None:
number_retries = timeout / interval number_retries = timeout / interval
times_looped = 0 times_looped = 0
error_msg = 'Failed to debsign file {0}'.format(abs_file) error_msg = 'Failed to debsign file {0}'.format(abs_file)
@ -702,27 +741,6 @@ def make_repo(repodir,
finally: finally:
proc.close(terminate=True, kill=True) proc.close(terminate=True, kill=True)
if use_passphrase:
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --forget {0}'.format(local_fingerprint)
__salt__['cmd.run'](cmd, runas=runas)
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_fingerprint)
__salt__['cmd.run'](cmd, runas=runas)
for debfile in os.listdir(repodir):
abs_file = os.path.join(repodir, debfile)
if debfile.endswith('.changes'):
os.remove(abs_file)
if debfile.endswith('.dsc'):
if older_gnupg:
if local_keyid is not None:
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
else:
number_retries = timeout / interval number_retries = timeout / interval
times_looped = 0 times_looped = 0
error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file) error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file)
@ -747,8 +765,7 @@ def make_repo(repodir,
if times_looped > number_retries: if times_looped > number_retries:
raise SaltInvocationError( raise SaltInvocationError(
'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops' 'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
.format(abs_file, int(times_looped * interval))
) )
time.sleep(interval) time.sleep(interval)
@ -770,8 +787,4 @@ def make_repo(repodir,
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(codename, abs_file) cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(codename, abs_file)
res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True) res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True)
if use_passphrase and local_keyid is not None:
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --forget {0}'.format(local_fingerprint)
res = __salt__['cmd.run_all'](cmd, runas=runas)
return res return res
View File
@ -22,6 +22,10 @@ import salt.utils.decorators as decorators
from salt.utils.decorators import depends from salt.utils.decorators import depends
from salt.exceptions import CommandExecutionError from salt.exceptions import CommandExecutionError
__func_alias__ = {
'format_': 'format'
}
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
HAS_HDPARM = salt.utils.which('hdparm') is not None HAS_HDPARM = salt.utils.which('hdparm') is not None
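``__func_alias__`` is the standard loader hook for exposing a module function under a name that would otherwise shadow a Python builtin, which is how ``disk.format_`` becomes callable as ``disk.format``. A minimal sketch of the idiom as a custom execution module (path and contents are hypothetical):

.. code-block:: python

    # /srv/salt/_modules/mydisk.py -- hypothetical custom module
    __func_alias__ = {
        'format_': 'format'    # Python function name -> exposed name
    }

    def format_(device):
        '''Invoked as ``salt '*' mydisk.format /dev/sdb1`` once synced.'''
        return 'would format {0}'.format(device)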
View File
@ -1,6 +1,10 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
''' '''
Compendium of generic DNS utilities Compendium of generic DNS utilities.
.. note::
Some functions in the ``dnsutil`` execution module depend on ``dig``.
''' '''
from __future__ import absolute_import from __future__ import absolute_import
@ -232,7 +236,7 @@ def check_ip(ip_addr):
.. code-block:: bash .. code-block:: bash
salt ns1 dig.check_ip 127.0.0.1 salt ns1 dnsutil.check_ip 127.0.0.1
''' '''
if _has_dig(): if _has_dig():
return __salt__['dig.check_ip'](ip_addr) return __salt__['dig.check_ip'](ip_addr)
@ -242,7 +246,7 @@ def check_ip(ip_addr):
def A(host, nameserver=None): def A(host, nameserver=None):
''' '''
Return the A record(s) for `host`. Return the A record(s) for ``host``.
Always returns a list. Always returns a list.
@ -267,7 +271,7 @@ def A(host, nameserver=None):
def AAAA(host, nameserver=None): def AAAA(host, nameserver=None):
''' '''
Return the AAAA record(s) for `host`. Return the AAAA record(s) for ``host``.
Always returns a list. Always returns a list.
@ -302,7 +306,7 @@ def NS(domain, resolve=True, nameserver=None):
.. code-block:: bash .. code-block:: bash
salt ns1 dig.NS google.com salt ns1 dnsutil.NS google.com
''' '''
if _has_dig(): if _has_dig():
@ -323,7 +327,7 @@ def SPF(domain, record='SPF', nameserver=None):
.. code-block:: bash .. code-block:: bash
salt ns1 dig.SPF google.com salt ns1 dnsutil.SPF google.com
''' '''
if _has_dig(): if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver) return __salt__['dig.SPF'](domain, record, nameserver)
@ -346,7 +350,7 @@ def MX(domain, resolve=False, nameserver=None):
.. code-block:: bash .. code-block:: bash
salt ns1 dig.MX google.com salt ns1 dnsutil.MX google.com
''' '''
if _has_dig(): if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver) return __salt__['dig.MX'](domain, resolve, nameserver)
View File
@ -1899,6 +1899,10 @@ def login(*registries):
cmd = ['docker', 'login', '-u', username, '-p', password] cmd = ['docker', 'login', '-u', username, '-p', password]
if registry.lower() != 'hub': if registry.lower() != 'hub':
cmd.append(registry) cmd.append(registry)
log.debug(
'Attempting to login to docker registry \'%s\' as user \'%s\'',
registry, username
)
login_cmd = __salt__['cmd.run_all']( login_cmd = __salt__['cmd.run_all'](
cmd, cmd,
python_shell=False, python_shell=False,
@ -4399,7 +4403,7 @@ def save(name,
ret['Size_Human'] = _size_fmt(ret['Size']) ret['Size_Human'] = _size_fmt(ret['Size'])
# Process push # Process push
if kwargs.get(push, False): if kwargs.get('push', False):
ret['Push'] = __salt__['cp.push'](path) ret['Push'] = __salt__['cp.push'](path)
return ret return ret
@ -5617,7 +5621,7 @@ def _gather_pillar(pillarenv, pillar_override, **grains):
# Not sure if these two are correct # Not sure if these two are correct
__opts__['id'], __opts__['id'],
__opts__['environment'], __opts__['environment'],
pillar=pillar_override, pillar_override=pillar_override,
pillarenv=pillarenv pillarenv=pillarenv
) )
ret = pillar.compile_pillar() ret = pillar.compile_pillar()
View File
@ -710,18 +710,21 @@ def check_hash(path, file_hash):
hash hash
The hash to check against the file specified in the ``path`` argument. The hash to check against the file specified in the ``path`` argument.
For versions 2016.11.4 and newer, the hash can be specified without an
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>:<hash_value>`` (e.g. in the format ``<hash_type>=<hash_value>`` (e.g.
``md5:e138491e9d5b97023cea823fe17bac22``). ``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
''' '''
path = os.path.expanduser(path) path = os.path.expanduser(path)
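The docstring fix above also clarifies the accepted argument shapes: either a bare hash, or a ``hash_type=hash_value`` pair on older releases. A rough sketch of how such an argument can be taken apart (the length-based fallback is an assumption for illustration, not a claim about file.py's exact lookup table):

.. code-block:: python

    def split_hash_arg(file_hash):
        '''Split "type=value"; fall back to guessing the type by length.'''
        if '=' in file_hash:
            hash_type, hash_value = file_hash.split('=', 1)
        else:
            guess = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}
            hash_type = guess.get(len(file_hash))
            hash_value = file_hash
        return hash_type, hash_value

    print(split_hash_arg('md5=e138491e9d5b97023cea823fe17bac22'))
    print(split_hash_arg('e138491e9d5b97023cea823fe17bac22'))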
@ -1884,6 +1887,7 @@ def replace(path,
show_changes=True, show_changes=True,
ignore_if_missing=False, ignore_if_missing=False,
preserve_inode=True, preserve_inode=True,
backslash_literal=False,
): ):
''' '''
.. versionadded:: 0.17.0 .. versionadded:: 0.17.0
@ -1984,6 +1988,14 @@ def replace(path,
filename. Hard links will then share an inode with the backup, instead filename. Hard links will then share an inode with the backup, instead
(if using ``backup`` to create a backup copy). (if using ``backup`` to create a backup copy).
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
If an equal sign (``=``) appears in an argument to a Salt command it is If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the processing can be bypassed in order to pass an equal sign through to the
@ -2080,7 +2092,10 @@ def replace(path,
if re.search(cpattern, r_data): if re.search(cpattern, r_data):
return True # `with` block handles file closure return True # `with` block handles file closure
else: else:
result, nrepl = re.subn(cpattern, repl, r_data, count) result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
# found anything? (even if no change) # found anything? (even if no change)
if nrepl > 0: if nrepl > 0:
@ -2138,8 +2153,10 @@ def replace(path,
r_data = mmap.mmap(r_file.fileno(), r_data = mmap.mmap(r_file.fileno(),
0, 0,
access=mmap.ACCESS_READ) access=mmap.ACCESS_READ)
result, nrepl = re.subn(cpattern, repl, result, nrepl = re.subn(cpattern,
r_data, count) repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
try: try:
w_file.write(salt.utils.to_str(result)) w_file.write(salt.utils.to_str(result))
except (OSError, IOError) as exc: except (OSError, IOError) as exc:
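``backslash_literal`` works by doubling every backslash in ``repl`` before it reaches ``re.subn``, so sequences such as ``\t`` stop being interpreted as replacement-string escapes. A quick standalone demonstration of the difference:

.. code-block:: python

    import re

    line = 'path = C:/tmp'
    repl = r'C:\temp'   # the user wants this backslash kept verbatim

    plain = re.subn('C:/tmp', repl, line)[0]
    literal = re.subn('C:/tmp', repl.replace('\\', '\\\\'), line)[0]

    print(repr(plain))    # 'path = C:\temp'   -- the \t collapsed into a tab
    print(repr(literal))  # 'path = C:\\temp'  -- the backslash survived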
View File
@ -24,6 +24,8 @@ import salt.utils.kickstart
import salt.syspaths import salt.syspaths
from salt.exceptions import SaltInvocationError from salt.exceptions import SaltInvocationError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -325,6 +327,8 @@ def _bootstrap_yum(
''' '''
if pkgs is None: if pkgs is None:
pkgs = [] pkgs = []
elif isinstance(pkgs, six.string_types):
pkgs = pkgs.split(',')
default_pkgs = ('yum', 'centos-release', 'iputils') default_pkgs = ('yum', 'centos-release', 'iputils')
for pkg in default_pkgs: for pkg in default_pkgs:
@ -333,6 +337,8 @@ def _bootstrap_yum(
if exclude_pkgs is None: if exclude_pkgs is None:
exclude_pkgs = [] exclude_pkgs = []
elif isinstance(exclude_pkgs, six.string_types):
exclude_pkgs = exclude_pkgs.split(',')
for pkg in exclude_pkgs: for pkg in exclude_pkgs:
pkgs.remove(pkg) pkgs.remove(pkg)
@ -393,15 +399,27 @@ def _bootstrap_deb(
if repo_url is None: if repo_url is None:
repo_url = 'http://ftp.debian.org/debian/' repo_url = 'http://ftp.debian.org/debian/'
if not salt.utils.which('debootstrap'):
log.error('Required tool debootstrap is not installed.')
return False
if isinstance(pkgs, (list, tuple)):
pkgs = ','.join(pkgs)
if isinstance(exclude_pkgs, (list, tuple)):
exclude_pkgs = ','.join(exclude_pkgs)
deb_args = [ deb_args = [
'debootstrap', 'debootstrap',
'--foreign', '--foreign',
'--arch', '--arch',
_cmd_quote(arch), _cmd_quote(arch)]
'--include',
] + pkgs + [ if pkgs:
'--exclude', deb_args += ['--include', _cmd_quote(pkgs)]
] + exclude_pkgs + [ if exclude_pkgs:
deb_args += ['--exclude', _cmd_quote(exclude_pkgs)]
deb_args += [
_cmd_quote(flavor), _cmd_quote(flavor),
_cmd_quote(root), _cmd_quote(root),
_cmd_quote(repo_url), _cmd_quote(repo_url),
@ -469,6 +487,8 @@ def _bootstrap_pacman(
if pkgs is None: if pkgs is None:
pkgs = [] pkgs = []
elif isinstance(pkgs, six.string_types):
pkgs = pkgs.split(',')
default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub') default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub')
for pkg in default_pkgs: for pkg in default_pkgs:
@ -477,6 +497,8 @@ def _bootstrap_pacman(
if exclude_pkgs is None: if exclude_pkgs is None:
exclude_pkgs = [] exclude_pkgs = []
elif isinstance(exclude_pkgs, six.string_types):
exclude_pkgs = exclude_pkgs.split(',')
for pkg in exclude_pkgs: for pkg in exclude_pkgs:
pkgs.remove(pkg) pkgs.remove(pkg)
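Two related changes land in the bootstrap helpers: package lists may now arrive as comma-separated strings, and ``--include``/``--exclude`` are only emitted when there is something to pass. A simplified sketch of that argument assembly (helper names are illustrative and shell quoting is omitted):

.. code-block:: python

    def normalize_pkgs(pkgs):
        '''Accept None, a comma-separated string, or an iterable of names.'''
        if pkgs is None:
            return []
        if isinstance(pkgs, str):       # six.string_types in the real module
            return pkgs.split(',')
        return list(pkgs)

    def debootstrap_args(arch, flavor, root, repo_url, pkgs=None, exclude_pkgs=None):
        args = ['debootstrap', '--foreign', '--arch', arch]
        include = normalize_pkgs(pkgs)
        exclude = normalize_pkgs(exclude_pkgs)
        if include:
            args += ['--include', ','.join(include)]
        if exclude:
            args += ['--exclude', ','.join(exclude)]
        return args + [flavor, root, repo_url]

    print(debootstrap_args('amd64', 'stretch', '/tmp/root',
                           'http://ftp.debian.org/debian/', pkgs='vim,curl'))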
View File
@ -170,13 +170,24 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
identity = [identity] identity = [identity]
# try each of the identities, independently # try each of the identities, independently
tmp_identity_file = None
for id_file in identity: for id_file in identity:
if 'salt://' in id_file: if 'salt://' in id_file:
with salt.utils.files.set_umask(0o077):
tmp_identity_file = salt.utils.mkstemp()
_id_file = id_file _id_file = id_file
id_file = __salt__['cp.cache_file'](id_file, saltenv) id_file = __salt__['cp.get_file'](id_file,
tmp_identity_file,
saltenv)
if not id_file: if not id_file:
log.error('identity {0} does not exist.'.format(_id_file)) log.error('identity {0} does not exist.'.format(_id_file))
__salt__['file.remove'](tmp_identity_file)
continue continue
else:
if user:
os.chown(id_file,
__salt__['file.user_to_uid'](user),
-1)
else: else:
if not __salt__['file.file_exists'](id_file): if not __salt__['file.file_exists'](id_file):
missing_keys.append(id_file) missing_keys.append(id_file)
@ -247,6 +258,11 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
if not salt.utils.is_windows() and 'GIT_SSH' in env: if not salt.utils.is_windows() and 'GIT_SSH' in env:
os.remove(env['GIT_SSH']) os.remove(env['GIT_SSH'])
# Cleanup the temporary identify file
if tmp_identity_file and os.path.exists(tmp_identity_file):
log.debug('Removing identify file {0}'.format(tmp_identity_file))
__salt__['file.remove'](tmp_identity_file)
# If the command was successful, no need to try additional IDs # If the command was successful, no need to try additional IDs
if result['retcode'] == 0: if result['retcode'] == 0:
return result return result
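The identity handling now copies a ``salt://`` key into a minion-local temporary file created under a 0077 umask, optionally chowns it to the run-as user, and always removes it afterwards. A rough standalone analogue of that lifecycle (this is not the module's code, just the pattern):

.. code-block:: python

    import contextlib
    import os
    import tempfile

    @contextlib.contextmanager
    def private_identity_file(key_data):
        '''Write key material to a temp file nobody else can read, then clean up.'''
        old_umask = os.umask(0o077)        # stand-in for salt.utils.files.set_umask
        try:
            fd, path = tempfile.mkstemp()
        finally:
            os.umask(old_umask)
        try:
            with os.fdopen(fd, 'w') as handle:
                handle.write(key_data)
            yield path                     # e.g. hand to an ssh -i wrapper
        finally:
            if os.path.exists(path):
                os.remove(path)            # never leave the key lying around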
View File
@ -35,7 +35,7 @@ import salt.utils
# Import 3rd-party libs # Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin # pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.exceptions import SaltInvocationError from salt.exceptions import CommandExecutionError, SaltInvocationError
# pylint: enable=import-error,no-name-in-module # pylint: enable=import-error,no-name-in-module
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -87,6 +87,19 @@ def _connect():
password=jenkins_password) password=jenkins_password)
def _retrieve_config_xml(config_xml, saltenv):
'''
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
'''
ret = __salt__['cp.cache_file'](config_xml, saltenv)
if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
return ret
def get_version(): def get_version():
''' '''
Return version of Jenkins Return version of Jenkins
@ -144,7 +157,7 @@ def job_exists(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if server.job_exists(name): if server.job_exists(name):
@ -168,12 +181,12 @@ def get_job_info(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exist.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
job_info = server.get_job_info(name) job_info = server.get_job_info(name)
if job_info: if job_info:
@ -197,17 +210,19 @@ def build_job(name=None, parameters=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exist.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
try: try:
server.build_job(name, parameters) server.build_job(name, parameters)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error building job \'{0}\': {1}'.format(name, err)
)
return True return True
@ -232,15 +247,15 @@ def create_job(name=None,
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
if job_exists(name): if job_exists(name):
raise SaltInvocationError('Job `{0}` already exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' already exists'.format(name))
if not config_xml: if not config_xml:
config_xml = jenkins.EMPTY_CONFIG_XML config_xml = jenkins.EMPTY_CONFIG_XML
else: else:
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv) config_xml_file = _retrieve_config_xml(config_xml, saltenv)
with salt.utils.fopen(config_xml_file) as _fp: with salt.utils.fopen(config_xml_file) as _fp:
config_xml = _fp.read() config_xml = _fp.read()
@ -249,7 +264,9 @@ def create_job(name=None,
try: try:
server.create_job(name, config_xml) server.create_job(name, config_xml)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error creating job \'{0}\': {1}'.format(name, err)
)
return config_xml return config_xml
@ -274,12 +291,12 @@ def update_job(name=None,
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
if not config_xml: if not config_xml:
config_xml = jenkins.EMPTY_CONFIG_XML config_xml = jenkins.EMPTY_CONFIG_XML
else: else:
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv) config_xml_file = _retrieve_config_xml(config_xml, saltenv)
with salt.utils.fopen(config_xml_file) as _fp: with salt.utils.fopen(config_xml_file) as _fp:
config_xml = _fp.read() config_xml = _fp.read()
@ -288,7 +305,9 @@ def update_job(name=None,
try: try:
server.reconfig_job(name, config_xml) server.reconfig_job(name, config_xml)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error updating job \'{0}\': {1}'.format(name, err)
)
return config_xml return config_xml
@ -307,17 +326,19 @@ def delete_job(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
try: try:
server.delete_job(name) server.delete_job(name)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error deleting job \'{0}\': {1}'.format(name, err)
)
return True return True
@ -336,17 +357,19 @@ def enable_job(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
try: try:
server.enable_job(name) server.enable_job(name)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error enabling job \'{0}\': {1}'.format(name, err)
)
return True return True
@ -366,17 +389,19 @@ def disable_job(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
try: try:
server.disable_job(name) server.disable_job(name)
except jenkins.JenkinsException as err: except jenkins.JenkinsException as err:
raise SaltInvocationError('Something went wrong {0}.'.format(err)) raise CommandExecutionError(
'Encountered error disabling job \'{0}\': {1}'.format(name, err)
)
return True return True
@ -396,12 +421,12 @@ def job_status(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
return server.get_job_info('empty')['buildable'] return server.get_job_info('empty')['buildable']
@ -422,12 +447,12 @@ def get_job_config(name=None):
''' '''
if not name: if not name:
raise SaltInvocationError('Required parameter `name` is missing.') raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect() server = _connect()
if not job_exists(name): if not job_exists(name):
raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
job_info = server.get_job_config(name) job_info = server.get_job_config(name)
return job_info return job_info
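Taken together, the jenkins changes apply a broader Salt convention: ``SaltInvocationError`` when the caller's arguments are wrong, ``CommandExecutionError`` when the work itself fails. A compact illustration of the split (the function and its backend stub are hypothetical):

.. code-block:: python

    from salt.exceptions import CommandExecutionError, SaltInvocationError

    def _backend_delete(name):
        raise RuntimeError('backend unavailable')   # stand-in for the real call

    def delete_thing(name=None):
        if not name:
            # argument problem -> invocation error
            raise SaltInvocationError('Required parameter \'name\' is missing')
        try:
            _backend_delete(name)
        except RuntimeError as err:
            # runtime problem -> execution error
            raise CommandExecutionError(
                'Encountered error deleting \'{0}\': {1}'.format(name, err)
            )
        return True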
View File
@ -262,7 +262,7 @@ def create_keytab(name, keytab, enctypes=None):
.. code-block:: bash .. code-block:: bash
salt 'kdc.example.com' host/host1.example.com host1.example.com.keytab salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab
''' '''
ret = {} ret = {}
View File
@ -216,7 +216,7 @@ def align_check(device, part_type, partition):
'Invalid partition passed to partition.align_check' 'Invalid partition passed to partition.align_check'
) )
cmd = 'parted -m -s {0} align-check {1} {2}'.format( cmd = 'parted -m {0} align-check {1} {2}'.format(
device, part_type, partition device, part_type, partition
) )
out = __salt__['cmd.run'](cmd).splitlines() out = __salt__['cmd.run'](cmd).splitlines()
View File
@ -110,7 +110,7 @@ def get(key,
default = '' default = ''
opt_merge_lists = __opts__.get('pillar_merge_lists', False) if \ opt_merge_lists = __opts__.get('pillar_merge_lists', False) if \
merge_nested_lists is None else merge_nested_lists merge_nested_lists is None else merge_nested_lists
pillar_dict = __pillar__ if saltenv is None else items(saltenv=saltenv) pillar_dict = __pillar__ if saltenv is None else items(pillarenv=saltenv)
if merge: if merge:
if isinstance(default, dict): if isinstance(default, dict):
@ -203,7 +203,7 @@ def items(*args, **kwargs):
__opts__, __opts__,
__grains__, __grains__,
__opts__['id'], __opts__['id'],
pillar=kwargs.get('pillar'), pillar_override=kwargs.get('pillar'),
pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv']) pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv'])
return pillar.compile_pillar() return pillar.compile_pillar()
@ -411,7 +411,7 @@ def ext(external, pillar=None):
__opts__['id'], __opts__['id'],
__opts__['environment'], __opts__['environment'],
ext=external, ext=external,
pillar=pillar) pillar_override=pillar)
ret = pillar_obj.compile_pillar() ret = pillar_obj.compile_pillar()
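The keyword rename from ``pillar`` to ``pillar_override`` recurs in several modules in this commit. Mirroring the call shape visible in the hunks above, inside an execution module where the dunders are injected, a pillar compile with override data would look roughly like this (the override dict is example data, and this is only a sketch of the call seen in the diff):

.. code-block:: python

    import salt.pillar

    pillar_obj = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['environment'],
        pillar_override={'example_key': 'example_value'},  # merged over compiled pillar
        pillarenv=__opts__.get('pillarenv'))
    ret = pillar_obj.compile_pillar()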
View File
@ -345,9 +345,10 @@ def summary():
def plugin_sync(): def plugin_sync():
''' '''
Runs a plugin synch between the puppet master and agent Runs a plugin sync between the puppet master and agent
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
salt '*' puppet.plugin_sync salt '*' puppet.plugin_sync
View File
@ -72,6 +72,7 @@ def _safe_output(line):
''' '''
return not any([ return not any([
line.startswith('Listing') and line.endswith('...'), line.startswith('Listing') and line.endswith('...'),
line.startswith('Listing') and '\t' not in line,
'...done' in line, '...done' in line,
line.startswith('WARNING:') line.startswith('WARNING:')
]) ])
@ -471,11 +472,11 @@ def set_user_tags(name, tags, runas=None):
if runas is None: if runas is None:
runas = salt.utils.get_user() runas = salt.utils.get_user()
if tags and isinstance(tags, (list, tuple)): if not isinstance(tags, (list, tuple)):
tags = ' '.join(tags) tags = [tags]
res = __salt__['cmd.run']( res = __salt__['cmd.run'](
['rabbitmqctl', 'set_user_tags', name, tags], ['rabbitmqctl', 'set_user_tags', name] + list(tags),
runas=runas, runas=runas,
python_shell=False) python_shell=False)
msg = "Tag(s) set" msg = "Tag(s) set"
View File
@ -828,7 +828,7 @@ def _parse_network_settings(opts, current):
_raise_error_network('hostname', ['server1.example.com']) _raise_error_network('hostname', ['server1.example.com'])
if 'nozeroconf' in opts: if 'nozeroconf' in opts:
nozeroconf = salt.utils.dequote(opts['nozerconf']) nozeroconf = salt.utils.dequote(opts['nozeroconf'])
if nozeroconf in valid: if nozeroconf in valid:
if nozeroconf in _CONFIG_TRUE: if nozeroconf in _CONFIG_TRUE:
result['nozeroconf'] = true_val result['nozeroconf'] = true_val
View File
@ -76,7 +76,8 @@ def _get_top_file_envs():
return __context__['saltutil._top_file_envs'] return __context__['saltutil._top_file_envs']
except KeyError: except KeyError:
try: try:
st_ = salt.state.HighState(__opts__) st_ = salt.state.HighState(__opts__,
initial_pillar=__pillar__)
top = st_.get_top() top = st_.get_top()
if top: if top:
envs = list(st_.top_matches(top).keys()) or 'base' envs = list(st_.top_matches(top).keys()) or 'base'
@ -186,10 +187,14 @@ def sync_beacons(saltenv=None, refresh=True):
Sync beacons from ``salt://_beacons`` to the minion Sync beacons from ``salt://_beacons`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for beacons to sync. If no top files are
found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available beacons on the minion. This refresh If ``True``, refresh the available beacons on the minion. This refresh
will be performed even if no new beacons are synced. Set to ``False`` will be performed even if no new beacons are synced. Set to ``False``
@ -215,10 +220,14 @@ def sync_sdb(saltenv=None):
Sync sdb modules from ``salt://_sdb`` to the minion Sync sdb modules from ``salt://_sdb`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for sdb modules to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : False refresh : False
This argument has no affect and is included for consistency with the This argument has no affect and is included for consistency with the
other sync functions. other sync functions.
@ -241,10 +250,14 @@ def sync_modules(saltenv=None, refresh=True):
Sync execution modules from ``salt://_modules`` to the minion Sync execution modules from ``salt://_modules`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for execution modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new execution modules are This refresh will be performed even if no new execution modules are
@ -287,10 +300,14 @@ def sync_states(saltenv=None, refresh=True):
Sync state modules from ``salt://_states`` to the minion Sync state modules from ``salt://_states`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for state modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available states on the minion. This refresh If ``True``, refresh the available states on the minion. This refresh
will be performed even if no new state modules are synced. Set to will be performed even if no new state modules are synced. Set to
@ -349,10 +366,14 @@ def sync_grains(saltenv=None, refresh=True):
Sync grains modules from ``salt://_grains`` to the minion Sync grains modules from ``salt://_grains`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for grains modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules and recompile If ``True``, refresh the available execution modules and recompile
pillar data for the minion. This refresh will be performed even if no pillar data for the minion. This refresh will be performed even if no
@ -380,10 +401,14 @@ def sync_renderers(saltenv=None, refresh=True):
Sync renderers from ``salt://_renderers`` to the minion Sync renderers from ``salt://_renderers`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for renderers to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new renderers are synced. This refresh will be performed even if no new renderers are synced.
@ -410,10 +435,14 @@ def sync_returners(saltenv=None, refresh=True):
Sync beacons from ``salt://_returners`` to the minion Sync beacons from ``salt://_returners`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for returners to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new returners are synced. Set This refresh will be performed even if no new returners are synced. Set
@ -438,10 +467,14 @@ def sync_proxymodules(saltenv=None, refresh=False):
Sync proxy modules from ``salt://_proxy`` to the minion Sync proxy modules from ``salt://_proxy`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for proxy modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new proxy modules are synced. This refresh will be performed even if no new proxy modules are synced.
@ -467,10 +500,14 @@ def sync_engines(saltenv=None, refresh=False):
Sync engine modules from ``salt://_engines`` to the minion Sync engine modules from ``salt://_engines`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for engines to sync. If no top files are
found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new engine modules are synced. This refresh will be performed even if no new engine modules are synced.
@ -493,10 +530,14 @@ def sync_output(saltenv=None, refresh=True):
''' '''
Sync outputters from ``salt://_output`` to the minion Sync outputters from ``salt://_output`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for outputters to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new outputters are synced. This refresh will be performed even if no new outputters are synced.
@ -524,10 +565,14 @@ def sync_utils(saltenv=None, refresh=True):
Sync utility modules from ``salt://_utils`` to the minion Sync utility modules from ``salt://_utils`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for utility modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new utility modules are This refresh will be performed even if no new utility modules are
@ -553,10 +598,14 @@ def sync_log_handlers(saltenv=None, refresh=True):
Sync log handlers from ``salt://_log_handlers`` to the minion Sync log handlers from ``salt://_log_handlers`` to the minion
saltenv : base saltenv
The fileserver environment from which to sync. To sync from more than The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list. one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for log handlers to sync. If no top files
are found, then the ``base`` environment will be synced.
refresh : True refresh : True
If ``True``, refresh the available execution modules on the minion. If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new log handlers are synced. This refresh will be performed even if no new log handlers are synced.
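All of the ``sync_*`` docstrings now describe the same resolution order, which is worth stating once in code form: an explicit ``saltenv`` (possibly comma-separated) wins, otherwise the environments named in the top files are used, otherwise ``base``. A hedged sketch of that precedence (helper name is illustrative):

.. code-block:: python

    def resolve_sync_envs(saltenv, top_file_envs):
        '''Pick the environments a sync function will walk.'''
        if saltenv is not None:
            return saltenv.split(',')       # explicit, comma-separated allowed
        if top_file_envs:
            return list(top_file_envs)      # whatever the top files reference
        return ['base']                     # final fallback

    print(resolve_sync_envs('base,dev', None))
    print(resolve_sync_envs(None, ['prod']))
    print(resolve_sync_envs(None, None))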
View File
@ -285,7 +285,7 @@ def install_semod(module_path):
salt '*' selinux.install_semod [salt://]path/to/module.pp salt '*' selinux.install_semod [salt://]path/to/module.pp
.. versionadded:: develop .. versionadded:: 2016.11.6
''' '''
if module_path.find('salt://') == 0: if module_path.find('salt://') == 0:
module_path = __salt__['cp.cache_file'](module_path) module_path = __salt__['cp.cache_file'](module_path)
@ -303,7 +303,7 @@ def remove_semod(module):
salt '*' selinux.remove_semod module_name salt '*' selinux.remove_semod module_name
.. versionadded:: develop .. versionadded:: 2016.11.6
''' '''
cmd = 'semodule -r {0}'.format(module) cmd = 'semodule -r {0}'.format(module)
return not __salt__['cmd.retcode'](cmd) return not __salt__['cmd.retcode'](cmd)
View File
@ -252,16 +252,38 @@ def _check_queue(queue, kwargs):
return conflict return conflict
def _get_opts(localconfig=None): def _get_opts(**kwargs):
''' '''
Return a copy of the opts for use, optionally load a local config on top Return a copy of the opts for use, optionally load a local config on top
''' '''
opts = copy.deepcopy(__opts__) opts = copy.deepcopy(__opts__)
if localconfig:
opts = salt.config.minion_config(localconfig, defaults=opts) if 'localconfig' in kwargs:
return salt.config.minion_config(kwargs['localconfig'], defaults=opts)
if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
if saltenv is not None and not isinstance(saltenv, six.string_types):
opts['environment'] = str(kwargs['saltenv'])
else:
opts['environment'] = kwargs['saltenv']
if 'pillarenv' in kwargs:
pillarenv = kwargs['pillarenv']
if pillarenv is not None and not isinstance(pillarenv, six.string_types):
opts['pillarenv'] = str(kwargs['pillarenv'])
else:
opts['pillarenv'] = kwargs['pillarenv']
return opts return opts
def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None
def low(data, queue=False, **kwargs): def low(data, queue=False, **kwargs):
''' '''
Execute a single low data call Execute a single low data call
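``_get_opts`` now accepts the full kwargs and overlays ``saltenv``/``pillarenv`` onto a deep copy of the minion opts, while ``_get_initial_pillar`` lets salt-call reuse its already-compiled in-memory pillar. A simplified, standalone model of the opts overlay (keys and coercion follow the hunk above; the real function also handles ``localconfig``):

.. code-block:: python

    import copy

    def get_opts(base_opts, **kwargs):
        '''Copy the opts, then apply CLI-style saltenv/pillarenv overrides.'''
        opts = copy.deepcopy(base_opts)
        for kwarg, opt_key in (('saltenv', 'environment'), ('pillarenv', 'pillarenv')):
            if kwarg in kwargs:
                value = kwargs[kwarg]
                # non-string values are coerced to str, None passes through
                opts[opt_key] = str(value) if value is not None else None
        return opts

    print(get_opts({'environment': None, 'pillarenv': None}, saltenv='dev'))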
@ -325,24 +347,32 @@ def high(data, test=None, queue=False, **kwargs):
conflict = _check_queue(queue, kwargs) conflict = _check_queue(queue, kwargs)
if conflict is not None: if conflict is not None:
return conflict return conflict
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar') pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
try: try:
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc, proxy=__proxy__, st_ = salt.state.State(opts,
context=__context__) pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc) st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
ret = st_.call_high(data) ret = st_.call_high(data)
_set_retcode(ret) _set_retcode(ret)
@ -371,15 +401,15 @@ def template(tem, queue=False, **kwargs):
) )
kwargs.pop('env') kwargs.pop('env')
if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
else:
saltenv = ''
conflict = _check_queue(queue, kwargs) conflict = _check_queue(queue, kwargs)
if conflict is not None: if conflict is not None:
return conflict return conflict
st_ = salt.state.HighState(__opts__, context=__context__)
opts = _get_opts(**kwargs)
st_ = salt.state.HighState(opts,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -388,7 +418,11 @@ def template(tem, queue=False, **kwargs):
if not tem.endswith('.sls'): if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem) tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem, saltenv, '', None, local=True) high_state, errors = st_.render_state(tem,
kwargs.get('saltenv', ''),
'',
None,
local=True)
if errors: if errors:
__context__['retcode'] = 1 __context__['retcode'] = 1
return errors return errors
@ -410,10 +444,15 @@ def template_str(tem, queue=False, **kwargs):
conflict = _check_queue(queue, kwargs) conflict = _check_queue(queue, kwargs)
if conflict is not None: if conflict is not None:
return conflict return conflict
opts = _get_opts(**kwargs)
try: try:
st_ = salt.state.State(__opts__, proxy=__proxy__) st_ = salt.state.State(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.State(__opts__) st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
ret = st_.call_template_str(tem) ret = st_.call_template_str(tem)
_set_retcode(ret) _set_retcode(ret)
return ret return ret
@ -688,9 +727,7 @@ def run_request(name='default', **kwargs):
return {} return {}
def highstate(test=None, def highstate(test=None, queue=False, **kwargs):
queue=False,
**kwargs):
''' '''
Retrieve the state data from the salt master for this minion and execute it Retrieve the state data from the salt master for this minion and execute it
@ -732,7 +769,7 @@ def highstate(test=None,
states to be run with their own custom minion configuration, including states to be run with their own custom minion configuration, including
different pillars, file_roots, etc. different pillars, file_roots, etc.
mock: mock
The mock option allows for the state run to execute without actually The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run. the requisite ordering as well as fully validate the state run.
@ -765,7 +802,7 @@ def highstate(test=None,
return conflict return conflict
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
@ -778,36 +815,32 @@ def highstate(test=None,
) )
kwargs.pop('env') kwargs.pop('env')
if 'saltenv' in kwargs: pillar_override = kwargs.get('pillar')
opts['environment'] = kwargs['saltenv']
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
try: try:
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
pillar, pillar_override,
kwargs.get('__pub_jid'), kwargs.get('__pub_jid'),
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
proxy=__proxy__, proxy=__proxy__,
context=__context__, context=__context__,
mocked=kwargs.get('mock', False)) mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
pillar, pillar_override,
kwargs.get('__pub_jid'), kwargs.get('__pub_jid'),
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False)) mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -844,13 +877,7 @@ def highstate(test=None,
return ret return ret
def sls(mods, def sls(mods, test=None, exclude=None, queue=False, **kwargs):
saltenv=None,
test=None,
exclude=None,
queue=False,
pillarenv=None,
**kwargs):
''' '''
Execute the states in one or more SLS files Execute the states in one or more SLS files
@ -894,7 +921,7 @@ def sls(mods,
multiple state runs can safely be run at the same time. Do *not* multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization. use this flag for performance optimization.
saltenv : None saltenv
Specify a salt fileserver environment to be used when applying states Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0 .. versionchanged:: 0.17.0
@ -946,16 +973,6 @@ def sls(mods,
) )
kwargs.pop('env') kwargs.pop('env')
if saltenv is None:
if __opts__.get('environment', None):
saltenv = __opts__['environment']
else:
saltenv = 'base'
if not pillarenv:
if __opts__.get('pillarenv', None):
pillarenv = __opts__['pillarenv']
# Modification to __opts__ lost after this if-else # Modification to __opts__ lost after this if-else
if queue: if queue:
_wait(kwargs.get('__pub_jid')) _wait(kwargs.get('__pub_jid'))
@ -965,10 +982,6 @@ def sls(mods,
__context__['retcode'] = 1 __context__['retcode'] = 1
return conflict return conflict
# Ensure desired environment
__opts__['environment'] = saltenv
__opts__['pillarenv'] = pillarenv
if isinstance(mods, list): if isinstance(mods, list):
disabled = _disabled(mods) disabled = _disabled(mods)
else: else:
@ -976,20 +989,28 @@ def sls(mods,
if disabled: if disabled:
for state in disabled: for state in disabled:
log.debug('Salt state {0} run is disabled. To re-enable, run state.enable {0}'.format(state)) log.debug(
'Salt state %s is disabled. To re-enable, run '
'state.enable %s', state, state
)
__context__['retcode'] = 1 __context__['retcode'] = 1
return disabled return disabled
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar') # Since this is running a specific SLS file (or files), fall back to the
# 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
@ -1000,20 +1021,23 @@ def sls(mods,
__opts__['cachedir'], __opts__['cachedir'],
'{0}.cache.p'.format(kwargs.get('cache_name', 'highstate')) '{0}.cache.p'.format(kwargs.get('cache_name', 'highstate'))
) )
try: try:
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
pillar, pillar_override,
kwargs.get('__pub_jid'), kwargs.get('__pub_jid'),
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
proxy=__proxy__, proxy=__proxy__,
context=__context__, context=__context__,
mocked=kwargs.get('mock', False)) mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
pillar, pillar_override,
kwargs.get('__pub_jid'), kwargs.get('__pub_jid'),
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False)) mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1036,7 +1060,7 @@ def sls(mods,
st_.push_active() st_.push_active()
ret = {} ret = {}
try: try:
high_, errors = st_.render_highstate({saltenv: mods}) high_, errors = st_.render_highstate({opts['environment']: mods})
if errors: if errors:
__context__['retcode'] = 1 __context__['retcode'] = 1
@ -1086,11 +1110,7 @@ def sls(mods,
return ret return ret
def top(topfn, def top(topfn, test=None, queue=False, **kwargs):
test=None,
queue=False,
saltenv=None,
**kwargs):
''' '''
Execute a specific top file instead of the default. This is useful to apply Execute a specific top file instead of the default. This is useful to apply
configurations from a different environment (for example, dev or prod), without configurations from a different environment (for example, dev or prod), without
@ -1108,20 +1128,24 @@ def top(topfn,
if conflict is not None: if conflict is not None:
return conflict return conflict
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar') pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc, context=__context__) st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:'] err = ['Pillar failed to render with the following messages:']
@ -1132,8 +1156,8 @@ def top(topfn,
st_.opts['state_top'] = salt.utils.url.create(topfn) st_.opts['state_top'] = salt.utils.url.create(topfn)
ret = {} ret = {}
orchestration_jid = kwargs.get('orchestration_jid') orchestration_jid = kwargs.get('orchestration_jid')
if saltenv: if 'saltenv' in kwargs:
st_.opts['state_top_saltenv'] = saltenv st_.opts['state_top_saltenv'] = kwargs['saltenv']
try: try:
snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_highstate( ret = st_.call_highstate(
@ -1167,17 +1191,22 @@ def show_highstate(queue=False, **kwargs):
conflict = _check_queue(queue, kwargs) conflict = _check_queue(queue, kwargs)
if conflict is not None: if conflict is not None:
return conflict return conflict
pillar = kwargs.get('pillar') pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc) opts = _get_opts(**kwargs)
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1207,7 +1236,11 @@ def show_lowstate(queue=False, **kwargs):
if conflict is not None: if conflict is not None:
assert False assert False
return conflict return conflict
st_ = salt.state.HighState(__opts__)
opts = _get_opts(**kwargs)
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1222,13 +1255,7 @@ def show_lowstate(queue=False, **kwargs):
return ret return ret
def sls_id( def sls_id(id_, mods, test=None, queue=False, **kwargs):
id_,
mods,
saltenv='base',
test=None,
queue=False,
**kwargs):
''' '''
Call a single ID from the named module(s) and handle all requisites Call a single ID from the named module(s) and handle all requisites
@ -1246,14 +1273,21 @@ def sls_id(
if conflict is not None: if conflict is not None:
return conflict return conflict
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv'] # Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
try: try:
st_ = salt.state.HighState(opts, proxy=__proxy__) st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.HighState(opts) st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1265,7 +1299,7 @@ def sls_id(
split_mods = mods.split(',') split_mods = mods.split(',')
st_.push_active() st_.push_active()
try: try:
high_, errors = st_.render_highstate({saltenv: split_mods}) high_, errors = st_.render_highstate({opts['environment']: split_mods})
finally: finally:
st_.pop_active() st_.pop_active()
errors += st_.state.verify_high(high_) errors += st_.state.verify_high(high_)
@ -1285,16 +1319,12 @@ def sls_id(
if not ret: if not ret:
raise SaltInvocationError( raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv ' 'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, saltenv) '\'{2}\''.format(id_, mods, opts['environment'])
) )
return ret return ret
def show_low_sls(mods, def show_low_sls(mods, test=None, queue=False, **kwargs):
saltenv='base',
test=None,
queue=False,
**kwargs):
''' '''
Display the low data from a specific sls. The default environment is Display the low data from a specific sls. The default environment is
``base``, use ``saltenv`` to specify a different environment. ``base``, use ``saltenv`` to specify a different environment.
@ -1304,6 +1334,7 @@ def show_low_sls(mods,
.. code-block:: bash .. code-block:: bash
salt '*' state.show_low_sls foo salt '*' state.show_low_sls foo
salt '*' state.show_low_sls foo saltenv=dev
''' '''
if 'env' in kwargs: if 'env' in kwargs:
salt.utils.warn_until( salt.utils.warn_until(
@ -1318,11 +1349,15 @@ def show_low_sls(mods,
if conflict is not None: if conflict is not None:
return conflict return conflict
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv'] # Since this is dealing with a specific SLS file (or files), fall back to
st_ = salt.state.HighState(opts) # the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1333,7 +1368,7 @@ def show_low_sls(mods,
mods = mods.split(',') mods = mods.split(',')
st_.push_active() st_.push_active()
try: try:
high_, errors = st_.render_highstate({saltenv: mods}) high_, errors = st_.render_highstate({opts['environment']: mods})
finally: finally:
st_.pop_active() st_.pop_active()
errors += st_.state.verify_high(high_) errors += st_.state.verify_high(high_)
@ -1347,7 +1382,7 @@ def show_low_sls(mods,
return ret return ret
def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs): def show_sls(mods, test=None, queue=False, **kwargs):
''' '''
Display the state data from a specific sls or list of sls files on the Display the state data from a specific sls or list of sls files on the
master. The default environment is ``base``, use ``saltenv`` to specify a master. The default environment is ``base``, use ``saltenv`` to specify a
@ -1377,24 +1412,29 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
if conflict is not None: if conflict is not None:
return conflict return conflict
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar') # Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
if 'pillarenv' in kwargs: st_ = salt.state.HighState(opts,
opts['pillarenv'] = kwargs['pillarenv'] pillar_override,
pillar_enc=pillar_enc,
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1405,7 +1445,7 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
mods = mods.split(',') mods = mods.split(',')
st_.push_active() st_.push_active()
try: try:
high_, errors = st_.render_highstate({saltenv: mods}) high_, errors = st_.render_highstate({opts['environment']: mods})
finally: finally:
st_.pop_active() st_.pop_active()
errors += st_.state.verify_high(high_) errors += st_.state.verify_high(high_)
@ -1428,8 +1468,6 @@ def show_top(queue=False, **kwargs):
salt '*' state.show_top salt '*' state.show_top
''' '''
opts = copy.deepcopy(__opts__)
if 'env' in kwargs: if 'env' in kwargs:
salt.utils.warn_until( salt.utils.warn_until(
'Oxygen', 'Oxygen',
@ -1439,12 +1477,13 @@ def show_top(queue=False, **kwargs):
) )
kwargs.pop('env') kwargs.pop('env')
if 'saltenv' in kwargs:
opts['environment'] = kwargs['saltenv']
conflict = _check_queue(queue, kwargs) conflict = _check_queue(queue, kwargs)
if conflict is not None: if conflict is not None:
return conflict return conflict
st_ = salt.state.HighState(opts)
opts = _get_opts(**kwargs)
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5 __context__['retcode'] = 5
@ -1490,23 +1529,30 @@ def single(fun, name, test=None, queue=False, **kwargs):
'__id__': name, '__id__': name,
'name': name}) 'name': name})
orig_test = __opts__.get('test', None) orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig')) opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs) opts['test'] = _get_test_value(test, **kwargs)
pillar = kwargs.get('pillar') pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc') pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \ if pillar_enc is None \
and pillar is not None \ and pillar_override is not None \
and not isinstance(pillar, dict): and not isinstance(pillar_override, dict):
raise SaltInvocationError( raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.' 'is specified.'
) )
try: try:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc, proxy=__proxy__) st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError: except NameError:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc) st_ = salt.state.State(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
err = st_.verify_data(kwargs) err = st_.verify_data(kwargs)
if err: if err:
__context__['retcode'] = 1 __context__['retcode'] = 1
@ -1549,7 +1595,11 @@ def clear_cache():
return ret return ret
def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs): def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
''' '''
Execute a packaged state run, the packaged state run will exist in a Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state tarball available locally. This packaged state
@ -1587,10 +1637,10 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
pillar_json = os.path.join(root, 'pillar.json') pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json): if os.path.isfile(pillar_json):
with salt.utils.fopen(pillar_json, 'r') as fp_: with salt.utils.fopen(pillar_json, 'r') as fp_:
pillar = json.load(fp_) pillar_override = json.load(fp_)
else: else:
pillar = None pillar_override = None
popts = _get_opts(kwargs.get('localconfig')) popts = _get_opts(**kwargs)
popts['fileclient'] = 'local' popts['fileclient'] = 'local'
popts['file_roots'] = {} popts['file_roots'] = {}
popts['test'] = _get_test_value(test, **kwargs) popts['test'] = _get_test_value(test, **kwargs)
@ -1600,7 +1650,7 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
if not os.path.isdir(full): if not os.path.isdir(full):
continue continue
popts['file_roots'][fn_] = [full] popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar=pillar) st_ = salt.state.State(popts, pillar_override=pillar_override)
snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy')) snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_chunks(lowstate) ret = st_.call_chunks(lowstate)
try: try:
View File
@ -214,7 +214,7 @@ def uptime():
raise CommandExecutionError("File {ut_path} was not found.".format(ut_path=ut_path)) raise CommandExecutionError("File {ut_path} was not found.".format(ut_path=ut_path))
seconds = int(float(salt.utils.fopen(ut_path).read().split()[0])) seconds = int(float(salt.utils.fopen(ut_path).read().split()[0]))
elif salt.utils.is_sunos(): elif salt.utils.is_sunos():
# note: some flavors/vesions report the host uptime inside a zone # note: some flavors/versions report the host uptime inside a zone
# https://support.oracle.com/epmos/faces/BugDisplay?id=15611584 # https://support.oracle.com/epmos/faces/BugDisplay?id=15611584
res = __salt__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time') res = __salt__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time')
if res['retcode'] > 0: if res['retcode'] > 0:
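For the SunOS branch above, the boot time reported by kstat is subtracted from the current time to get uptime. A hedged sketch of that arithmetic (the sample kstat line shape is an assumption):

    import time

    def uptime_from_kstat(kstat_line):
        '''Turn one 'kstat -p unix:0:system_misc:boot_time' output line into uptime seconds.'''
        # 'kstat -p' prints '<statistic>\t<value>'; the value is the boot time
        # as a Unix timestamp
        boot_time = int(kstat_line.strip().split()[-1])
        return int(time.time()) - boot_time

    # uptime_from_kstat('unix:0:system_misc:boot_time\t1503970000')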
View File
@ -242,6 +242,8 @@ def _copy_function(module_name, name=None):
elif hasattr(mod, '__call__'): elif hasattr(mod, '__call__'):
mod_sig = inspect.getargspec(mod.__call__) mod_sig = inspect.getargspec(mod.__call__)
parameters = mod_sig.args parameters = mod_sig.args
log.debug('Parameters accepted by module {0}: {1}'.format(module_name,
parameters))
additional_args = {} additional_args = {}
for arg in set(parameters).intersection(set(methods.keys())): for arg in set(parameters).intersection(set(methods.keys())):
additional_args[arg] = methods.pop(arg) additional_args[arg] = methods.pop(arg)
@ -251,12 +253,15 @@ def _copy_function(module_name, name=None):
else: else:
modinstance = mod() modinstance = mod()
except TypeError: except TypeError:
modinstance = None log.exception('Module failed to instantiate')
methods = {} raise
valid_methods = {}
log.debug('Called methods are: {0}'.format(methods))
for meth_name in methods: for meth_name in methods:
if not meth_name.startswith('_'): if not meth_name.startswith('_'):
methods[meth_name] = methods[meth_name] valid_methods[meth_name] = methods[meth_name]
for meth, arg in methods.items(): log.debug('Valid methods are: {0}'.format(valid_methods))
for meth, arg in valid_methods.items():
result = _get_method_result(mod, modinstance, meth, arg) result = _get_method_result(mod, modinstance, meth, arg)
assertion_result = _apply_assertion(arg, result) assertion_result = _apply_assertion(arg, result)
if not assertion_result: if not assertion_result:
@ -285,13 +290,17 @@ def _register_functions():
functions, and then register them in the module namespace so that they functions, and then register them in the module namespace so that they
can be called via salt. can be called via salt.
""" """
for module_ in modules.__all__: try:
modules_ = [_to_snake_case(module_) for module_ in modules.__all__]
except AttributeError:
modules_ = [module_ for module_ in modules.modules]
for mod_name in modules_:
mod_name = _to_snake_case(module_) mod_name = _to_snake_case(module_)
mod_func = _copy_function(mod_name, str(mod_name)) mod_func = _copy_function(mod_name, str(mod_name))
mod_func.__doc__ = _build_doc(module_) mod_func.__doc__ = _build_doc(mod_name)
__all__.append(mod_name) __all__.append(mod_name)
globals()[mod_name] = mod_func globals()[mod_name] = mod_func
if TESTINFRA_PRESENT: if TESTINFRA_PRESENT:
_register_functions() _register_functions()
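_register_functions above normalizes Testinfra module names with _to_snake_case before exposing them under salt. A small illustrative sketch, assuming a conventional CamelCase-to-snake_case conversion (this helper is not copied from the module):

    import re

    def to_snake_case(name):
        '''For example, 'SystemInfo' -> 'system_info'.'''
        partial = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partial).lower()

    # to_snake_case('PipPackage') -> 'pip_package'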
View File
@ -42,9 +42,10 @@ def get_cert_serial(cert_file):
salt '*' certutil.get_cert_serial <certificate name> salt '*' certutil.get_cert_serial <certificate name>
''' '''
cmd = "certutil.exe -verify {0}".format(cert_file) cmd = "certutil.exe -silent -verify {0}".format(cert_file)
out = __salt__['cmd.run'](cmd) out = __salt__['cmd.run'](cmd)
matches = re.search(r"Serial: (.*)", out) # match serial number by paragraph to work with multiple languages
matches = re.search(r":\s*(\w*)\r\n\r\n", out)
if matches is not None: if matches is not None:
return matches.groups()[0].strip() return matches.groups()[0].strip()
else: else:
@ -66,7 +67,8 @@ def get_stored_cert_serials(store):
''' '''
cmd = "certutil.exe -store {0}".format(store) cmd = "certutil.exe -store {0}".format(store)
out = __salt__['cmd.run'](cmd) out = __salt__['cmd.run'](cmd)
matches = re.findall(r"Serial Number: (.*)\r", out) # match serial numbers by header position to work with multiple languages
matches = re.findall(r"={16}\r\n.*:\s*(\w*)\r\n", out)
return matches return matches
View File
@ -2832,6 +2832,7 @@ def _findOptionValueInSeceditFile(option):
_reader = codecs.open(_tfile, 'r', encoding='utf-16') _reader = codecs.open(_tfile, 'r', encoding='utf-16')
_secdata = _reader.readlines() _secdata = _reader.readlines()
_reader.close() _reader.close()
if __salt__['file.file_exists'](_tfile):
_ret = __salt__['file.remove'](_tfile) _ret = __salt__['file.remove'](_tfile)
for _line in _secdata: for _line in _secdata:
if _line.startswith(option): if _line.startswith(option):
@ -2853,7 +2854,9 @@ def _importSeceditConfig(infdata):
_tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
'salt-secedit-config-{0}.inf'.format(_d)) 'salt-secedit-config-{0}.inf'.format(_d))
# make sure our temp files don't already exist # make sure our temp files don't already exist
if __salt__['file.file_exists'](_tSdbfile):
_ret = __salt__['file.remove'](_tSdbfile) _ret = __salt__['file.remove'](_tSdbfile)
if __salt__['file.file_exists'](_tInfFile):
_ret = __salt__['file.remove'](_tInfFile) _ret = __salt__['file.remove'](_tInfFile)
# add the inf data to the file, win_file sure could use the write() function # add the inf data to the file, win_file sure could use the write() function
_ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.touch'](_tInfFile)
@ -2861,7 +2864,9 @@ def _importSeceditConfig(infdata):
# run secedit to make the change # run secedit to make the change
_ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile))
# cleanup our temp files # cleanup our temp files
if __salt__['file.file_exists'](_tSdbfile):
_ret = __salt__['file.remove'](_tSdbfile) _ret = __salt__['file.remove'](_tSdbfile)
if __salt__['file.file_exists'](_tInfFile):
_ret = __salt__['file.remove'](_tInfFile) _ret = __salt__['file.remove'](_tInfFile)
return True return True
except Exception as e: except Exception as e:
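Both secedit helpers above now guard their temp-file cleanup so that file.remove is only called when the file actually exists. The same idea in plain Python, with os-level calls standing in for the __salt__['file.*'] wrappers:

    import os

    def remove_if_exists(path):
        '''Delete path only if present; report whether anything was removed.'''
        if os.path.isfile(path):
            os.remove(path)
            return True
        return False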
@ -3510,15 +3515,23 @@ def _checkAllAdmxPolicies(policy_class,
not_configured_policies.remove(policy_item) not_configured_policies.remove(policy_item)
for not_configured_policy in not_configured_policies: for not_configured_policy in not_configured_policies:
policy_vals[not_configured_policy.attrib['name']] = 'Not Configured' not_configured_policy_namespace = not_configured_policy.nsmap[not_configured_policy.prefix]
if not_configured_policy_namespace not in policy_vals:
policy_vals[not_configured_policy_namespace] = {}
policy_vals[not_configured_policy_namespace][not_configured_policy.attrib['name']] = 'Not Configured'
if return_full_policy_names: if return_full_policy_names:
full_names[not_configured_policy.attrib['name']] = _getFullPolicyName( if not_configured_policy_namespace not in full_names:
full_names[not_configured_policy_namespace] = {}
full_names[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _getFullPolicyName(
not_configured_policy, not_configured_policy,
not_configured_policy.attrib['name'], not_configured_policy.attrib['name'],
return_full_policy_names, return_full_policy_names,
adml_policy_resources) adml_policy_resources)
log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name'])) log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name']))
hierarchy[not_configured_policy.attrib['name']] = _build_parent_list(not_configured_policy, if not_configured_policy_namespace not in hierarchy:
hierarchy[not_configured_policy_namespace] = {}
hierarchy[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _build_parent_list(
not_configured_policy,
admx_policy_definitions, admx_policy_definitions,
return_full_policy_names, return_full_policy_names,
adml_policy_resources) adml_policy_resources)
@ -3526,6 +3539,7 @@ def _checkAllAdmxPolicies(policy_class,
this_key = None this_key = None
this_valuename = None this_valuename = None
this_policyname = None this_policyname = None
this_policynamespace = None
this_policy_setting = 'Not Configured' this_policy_setting = 'Not Configured'
element_only_enabled_disabled = True element_only_enabled_disabled = True
explicit_enable_disable_value_setting = False explicit_enable_disable_value_setting = False
@ -3544,6 +3558,7 @@ def _checkAllAdmxPolicies(policy_class,
log.error('policy item {0} does not have the required "name" ' log.error('policy item {0} does not have the required "name" '
'attribute'.format(admx_policy.attrib)) 'attribute'.format(admx_policy.attrib))
break break
this_policynamespace = admx_policy.nsmap[admx_policy.prefix]
if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True explicit_enable_disable_value_setting = True
@ -3555,7 +3570,9 @@ def _checkAllAdmxPolicies(policy_class,
policy_filedata): policy_filedata):
this_policy_setting = 'Enabled' this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname)) log.debug('{0} is enabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True explicit_enable_disable_value_setting = True
@ -3567,21 +3584,27 @@ def _checkAllAdmxPolicies(policy_class,
policy_filedata): policy_filedata):
this_policy_setting = 'Disabled' this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname)) log.debug('{0} is disabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_filedata): if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Enabled' this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname)) log.debug('{0} is enabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata): if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Disabled' this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname)) log.debug('{0} is disabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if not explicit_enable_disable_value_setting and this_valuename: if not explicit_enable_disable_value_setting and this_valuename:
# the policy has a key/valuename but no explicit enabled/Disabled # the policy has a key/valuename but no explicit enabled/Disabled
@ -3594,7 +3617,9 @@ def _checkAllAdmxPolicies(policy_class,
policy_filedata): policy_filedata):
this_policy_setting = 'Enabled' this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname)) log.debug('{0} is enabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key, elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key,
this_valuename, this_valuename,
'REG_DWORD', 'REG_DWORD',
@ -3603,7 +3628,9 @@ def _checkAllAdmxPolicies(policy_class,
policy_filedata): policy_filedata):
this_policy_setting = 'Disabled' this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname)) log.debug('{0} is disabled'.format(this_policyname))
policy_vals[this_policyname] = this_policy_setting if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ELEMENTS_XPATH(admx_policy): if ELEMENTS_XPATH(admx_policy):
if element_only_enabled_disabled or this_policy_setting == 'Enabled': if element_only_enabled_disabled or this_policy_setting == 'Enabled':
@ -3801,65 +3828,84 @@ def _checkAllAdmxPolicies(policy_class,
and len(configured_elements.keys()) == len(required_elements.keys()): and len(configured_elements.keys()) == len(required_elements.keys()):
if policy_disabled_elements == len(required_elements.keys()): if policy_disabled_elements == len(required_elements.keys()):
log.debug('{0} is disabled by all enum elements'.format(this_policyname)) log.debug('{0} is disabled by all enum elements'.format(this_policyname))
policy_vals[this_policyname] = 'Disabled' if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = 'Disabled'
else: else:
policy_vals[this_policyname] = configured_elements if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = configured_elements
log.debug('{0} is enabled by enum elements'.format(this_policyname)) log.debug('{0} is enabled by enum elements'.format(this_policyname))
else: else:
if this_policy_setting == 'Enabled': if this_policy_setting == 'Enabled':
policy_vals[this_policyname] = configured_elements if this_policynamespace not in policy_vals:
if return_full_policy_names and this_policyname in policy_vals: policy_vals[this_policynamespace] = {}
full_names[this_policyname] = _getFullPolicyName( policy_vals[this_policynamespace][this_policyname] = configured_elements
if return_full_policy_names and this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]:
if this_policynamespace not in full_names:
full_names[this_policynamespace] = {}
full_names[this_policynamespace][this_policyname] = _getFullPolicyName(
admx_policy, admx_policy,
admx_policy.attrib['name'], admx_policy.attrib['name'],
return_full_policy_names, return_full_policy_names,
adml_policy_resources) adml_policy_resources)
if this_policyname in policy_vals: if this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]:
hierarchy[this_policyname] = _build_parent_list(admx_policy, if this_policynamespace not in hierarchy:
hierarchy[this_policynamespace] = {}
hierarchy[this_policynamespace][this_policyname] = _build_parent_list(admx_policy,
admx_policy_definitions, admx_policy_definitions,
return_full_policy_names, return_full_policy_names,
adml_policy_resources) adml_policy_resources)
if policy_vals and return_full_policy_names and not hierarchical_return: if policy_vals and return_full_policy_names and not hierarchical_return:
unpathed_dict = {} unpathed_dict = {}
pathed_dict = {} pathed_dict = {}
for policy_item in policy_vals.keys(): for policy_namespace in list(policy_vals):
if full_names[policy_item] in policy_vals: for policy_item in list(policy_vals[policy_namespace]):
if full_names[policy_namespace][policy_item] in policy_vals[policy_namespace]:
# add this item with the path'd full name # add this item with the path'd full name
full_path_list = hierarchy[policy_item] full_path_list = hierarchy[policy_namespace][policy_item]
full_path_list.reverse() full_path_list.reverse()
full_path_list.append(full_names[policy_item]) full_path_list.append(full_names[policy_namespace][policy_item])
policy_vals['\\'.join(full_path_list)] = policy_vals.pop(policy_item) policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(policy_item)
pathed_dict[full_names[policy_item]] = True pathed_dict[full_names[policy_namespace][policy_item]] = True
else: else:
policy_vals[full_names[policy_item]] = policy_vals.pop(policy_item) policy_vals[policy_namespace][full_names[policy_namespace][policy_item]] = policy_vals[policy_namespace].pop(policy_item)
unpathed_dict[full_names[policy_item]] = policy_item if policy_namespace not in unpathed_dict:
unpathed_dict[policy_namespace] = {}
unpathed_dict[policy_namespace][full_names[policy_namespace][policy_item]] = policy_item
# go back and remove any "unpathed" policies that need a full path # go back and remove any "unpathed" policies that need a full path
for path_needed in unpathed_dict.keys(): for path_needed in unpathed_dict[policy_namespace]:
# remove the item with the same full name and re-add it w/a path'd version # remove the item with the same full name and re-add it w/a path'd version
full_path_list = hierarchy[unpathed_dict[path_needed]] full_path_list = hierarchy[policy_namespace][unpathed_dict[policy_namespace][path_needed]]
full_path_list.reverse() full_path_list.reverse()
full_path_list.append(path_needed) full_path_list.append(path_needed)
log.debug('full_path_list == {0}'.format(full_path_list)) log.debug('full_path_list == {0}'.format(full_path_list))
policy_vals['\\'.join(full_path_list)] = policy_vals.pop(path_needed) policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(path_needed)
for policy_namespace in list(policy_vals):
if policy_vals[policy_namespace] == {}:
policy_vals.pop(policy_namespace)
if policy_vals and hierarchical_return: if policy_vals and hierarchical_return:
if hierarchy: if hierarchy:
for hierarchy_item in hierarchy.keys(): for policy_namespace in hierarchy:
if hierarchy_item in policy_vals: for hierarchy_item in hierarchy[policy_namespace]:
if hierarchy_item in policy_vals[policy_namespace]:
tdict = {} tdict = {}
first_item = True first_item = True
for item in hierarchy[hierarchy_item]: for item in hierarchy[policy_namespace][hierarchy_item]:
newdict = {} newdict = {}
if first_item: if first_item:
h_policy_name = hierarchy_item h_policy_name = hierarchy_item
if return_full_policy_names: if return_full_policy_names:
h_policy_name = full_names[hierarchy_item] h_policy_name = full_names[policy_namespace][hierarchy_item]
newdict[item] = {h_policy_name: policy_vals.pop(hierarchy_item)} newdict[item] = {h_policy_name: policy_vals[policy_namespace].pop(hierarchy_item)}
first_item = False first_item = False
else: else:
newdict[item] = tdict newdict[item] = tdict
tdict = newdict tdict = newdict
if tdict: if tdict:
policy_vals = dictupdate.update(policy_vals, tdict) policy_vals = dictupdate.update(policy_vals, tdict)
if policy_namespace in policy_vals and policy_vals[policy_namespace] == {}:
policy_vals.pop(policy_namespace)
policy_vals = { policy_vals = {
module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: { module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: {
'Administrative Templates': policy_vals 'Administrative Templates': policy_vals
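Throughout _checkAllAdmxPolicies the results are now keyed by ADMX namespace first, giving {namespace: {policy_name: setting}} rather than a flat mapping. A condensed sketch of that bookkeeping (record_setting and the example namespace are illustrative, not from the module):

    def record_setting(policy_vals, namespace, name, setting):
        '''Store a policy setting under its ADMX namespace.'''
        # setdefault collapses the repeated 'if namespace not in policy_vals' guard
        policy_vals.setdefault(namespace, {})[name] = setting
        return policy_vals

    # record_setting({}, 'Microsoft.Policies.Example', 'SomePolicy', 'Enabled')
    # -> {'Microsoft.Policies.Example': {'SomePolicy': 'Enabled'}}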
@ -3989,18 +4035,17 @@ def _write_regpol_data(data_to_write,
gpt_extension_guid: admx registry extension guid for the class gpt_extension_guid: admx registry extension guid for the class
''' '''
try: try:
if data_to_write:
reg_pol_header = u'\u5250\u6765\x01\x00' reg_pol_header = u'\u5250\u6765\x01\x00'
if not os.path.exists(policy_file_path): if not os.path.exists(policy_file_path):
ret = __salt__['file.makedirs'](policy_file_path) ret = __salt__['file.makedirs'](policy_file_path)
with open(policy_file_path, 'wb') as pol_file: with salt.utils.fopen(policy_file_path, 'wb') as pol_file:
if not data_to_write.startswith(reg_pol_header): if not data_to_write.startswith(reg_pol_header):
pol_file.write(reg_pol_header.encode('utf-16-le')) pol_file.write(reg_pol_header.encode('utf-16-le'))
pol_file.write(data_to_write.encode('utf-16-le')) pol_file.write(data_to_write.encode('utf-16-le'))
try: try:
gpt_ini_data = '' gpt_ini_data = ''
if os.path.exists(gpt_ini_path): if os.path.exists(gpt_ini_path):
with open(gpt_ini_path, 'rb') as gpt_file: with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file:
gpt_ini_data = gpt_file.read() gpt_ini_data = gpt_file.read()
if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data):
gpt_ini_data = '[General]\r\n' + gpt_ini_data gpt_ini_data = '[General]\r\n' + gpt_ini_data
@ -4055,7 +4100,7 @@ def _write_regpol_data(data_to_write,
int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16),
gpt_ini_data[general_location.end():]) gpt_ini_data[general_location.end():])
if gpt_ini_data: if gpt_ini_data:
with open(gpt_ini_path, 'wb') as gpt_file: with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file:
gpt_file.write(gpt_ini_data) gpt_file.write(gpt_ini_data)
except Exception as e: except Exception as e:
msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format(
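_write_regpol_data above writes Registry.pol content as UTF-16 LE behind the 'PReg' signature and now opens the files with salt.utils.fopen. A reduced sketch of just the header handling (path creation, gpt.ini updates, and error handling omitted):

    import salt.utils

    # UTF-16 code units for the 'PReg' magic plus version 1
    REG_POL_HEADER = u'\u5250\u6765\x01\x00'

    def write_regpol(policy_file_path, data_to_write):
        with salt.utils.fopen(policy_file_path, 'wb') as pol_file:
            if not data_to_write.startswith(REG_POL_HEADER):
                pol_file.write(REG_POL_HEADER.encode('utf-16-le'))
            pol_file.write(data_to_write.encode('utf-16-le'))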
@ -4124,6 +4169,7 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False):
def _writeAdminTemplateRegPolFile(admtemplate_data, def _writeAdminTemplateRegPolFile(admtemplate_data,
admtemplate_namespace_data,
admx_policy_definitions=None, admx_policy_definitions=None,
adml_policy_resources=None, adml_policy_resources=None,
display_language='en-US', display_language='en-US',
@ -4140,7 +4186,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
existing_data = '' existing_data = ''
base_policy_settings = {} base_policy_settings = {}
policy_data = _policy_info() policy_data = _policy_info()
policySearchXpath = etree.XPath('//*[@*[local-name() = "id"] = $id or @*[local-name() = "name"] = $id]') policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]'
try: try:
if admx_policy_definitions is None or adml_policy_resources is None: if admx_policy_definitions is None or adml_policy_resources is None:
admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions(
@ -4152,21 +4198,25 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
hierarchical_return=False, hierarchical_return=False,
return_not_configured=False) return_not_configured=False)
log.debug('preparing to loop through policies requested to be configured') log.debug('preparing to loop through policies requested to be configured')
for adm_policy in admtemplate_data.keys(): for adm_namespace in admtemplate_data:
if str(admtemplate_data[adm_policy]).lower() == 'not configured': for adm_policy in admtemplate_data[adm_namespace]:
if adm_policy in base_policy_settings: if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured':
base_policy_settings.pop(adm_policy) if adm_policy in base_policy_settings[adm_namespace]:
base_policy_settings[adm_namespace].pop(adm_policy)
else: else:
log.debug('adding {0} to base_policy_settings'.format(adm_policy)) log.debug('adding {0} to base_policy_settings'.format(adm_policy))
base_policy_settings[adm_policy] = admtemplate_data[adm_policy] if adm_namespace not in base_policy_settings:
for admPolicy in base_policy_settings.keys(): base_policy_settings[adm_namespace] = {}
base_policy_settings[adm_namespace][adm_policy] = admtemplate_data[adm_namespace][adm_policy]
for adm_namespace in base_policy_settings:
for admPolicy in base_policy_settings[adm_namespace]:
log.debug('working on admPolicy {0}'.format(admPolicy)) log.debug('working on admPolicy {0}'.format(admPolicy))
explicit_enable_disable_value_setting = False explicit_enable_disable_value_setting = False
this_key = None this_key = None
this_valuename = None this_valuename = None
if str(base_policy_settings[admPolicy]).lower() == 'disabled': if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled':
log.debug('time to disable {0}'.format(admPolicy)) log.debug('time to disable {0}'.format(admPolicy))
this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
if this_policy: if this_policy:
this_policy = this_policy[0] this_policy = this_policy[0]
if 'class' in this_policy.attrib: if 'class' in this_policy.attrib:
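The enable/disable paths above now resolve policies with a namespace-aware XPath instead of the old local-name() search. An illustrative lxml example of the same lookup against a made-up ADMX fragment (the namespace URI is a placeholder):

    from lxml import etree

    xml = b'''<policyDefinitions xmlns="urn:example:policies">
      <policy id="ExamplePolicy" name="ExamplePolicy" class="Machine"/>
    </policyDefinitions>'''
    root = etree.fromstring(xml)

    search = '//ns1:*[@id = "{0}" or @name = "{0}"]'
    hits = root.xpath(search.format('ExamplePolicy'),
                      namespaces={'ns1': 'urn:example:policies'})
    # hits[0].attrib['class'] == 'Machine'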
@ -4226,7 +4276,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
# WARNING: no OOB adm files use true/falseList items # WARNING: no OOB adm files use true/falseList items
# this has not been fully vetted # this has not been fully vetted
temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH}
for this_list in temp_dict.keys(): for this_list in temp_dict:
disabled_list_strings = _checkListItem( disabled_list_strings = _checkListItem(
child_item, child_item,
admPolicy, admPolicy,
@ -4277,7 +4327,8 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
log.error(msg.format(this_policy.attrib)) log.error(msg.format(this_policy.attrib))
else: else:
log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) log.debug('time to enable and set the policy "{0}"'.format(admPolicy))
this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
log.debug('found this_policy == {0}'.format(this_policy))
if this_policy: if this_policy:
this_policy = this_policy[0] this_policy = this_policy[0]
if 'class' in this_policy.attrib: if 'class' in this_policy.attrib:
@ -4334,11 +4385,11 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
child_key = child_item.attrib['key'] child_key = child_item.attrib['key']
if 'valueName' in child_item.attrib: if 'valueName' in child_item.attrib:
child_valuename = child_item.attrib['valueName'] child_valuename = child_item.attrib['valueName']
if child_item.attrib['id'] in base_policy_settings[admPolicy]: if child_item.attrib['id'] in base_policy_settings[adm_namespace][admPolicy]:
if etree.QName(child_item).localname == 'boolean' and ( if etree.QName(child_item).localname == 'boolean' and (
TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)):
list_strings = [] list_strings = []
if base_policy_settings[admPolicy][child_item.attrib['id']]: if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]:
list_strings = _checkListItem(child_item, list_strings = _checkListItem(child_item,
admPolicy, admPolicy,
child_key, child_key,
@ -4356,10 +4407,10 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
existing_data = _policyFileReplaceOrAppendList( existing_data = _policyFileReplaceOrAppendList(
list_strings, list_strings,
existing_data) existing_data)
if etree.QName(child_item).localname == 'boolean' and ( elif etree.QName(child_item).localname == 'boolean' and (
TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)):
value_string = '' value_string = ''
if base_policy_settings[admPolicy][child_item.attrib['id']]: if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]:
value_string = _checkValueItemParent(child_item, value_string = _checkValueItemParent(child_item,
admPolicy, admPolicy,
child_key, child_key,
@ -4380,7 +4431,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
existing_data = _policyFileReplaceOrAppend( existing_data = _policyFileReplaceOrAppend(
value_string, value_string,
existing_data) existing_data)
if etree.QName(child_item).localname == 'boolean' \ elif etree.QName(child_item).localname == 'boolean' \
or etree.QName(child_item).localname == 'decimal' \ or etree.QName(child_item).localname == 'decimal' \
or etree.QName(child_item).localname == 'text' \ or etree.QName(child_item).localname == 'text' \
or etree.QName(child_item).localname == 'longDecimal' \ or etree.QName(child_item).localname == 'longDecimal' \
@ -4392,7 +4443,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
this_policy, this_policy,
elements_item, elements_item,
check_deleted=False, check_deleted=False,
this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']])
msg = 'I have enabled value string of {0}' msg = 'I have enabled value string of {0}'
log.debug(msg.format([enabled_value_string])) log.debug(msg.format([enabled_value_string]))
existing_data = _policyFileReplaceOrAppend( existing_data = _policyFileReplaceOrAppend(
@ -4400,7 +4451,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
existing_data) existing_data)
elif etree.QName(child_item).localname == 'enum': elif etree.QName(child_item).localname == 'enum':
for enum_item in child_item.getchildren(): for enum_item in child_item.getchildren():
if base_policy_settings[admPolicy][child_item.attrib['id']] == \ if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']] == \
_getAdmlDisplayName(adml_policy_resources, _getAdmlDisplayName(adml_policy_resources,
enum_item.attrib['displayName'] enum_item.attrib['displayName']
).strip(): ).strip():
@ -4437,7 +4488,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
this_policy, this_policy,
elements_item, elements_item,
check_deleted=False, check_deleted=False,
this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']])
msg = 'I have enabled value string of {0}' msg = 'I have enabled value string of {0}'
log.debug(msg.format([enabled_value_string])) log.debug(msg.format([enabled_value_string]))
existing_data = _policyFileReplaceOrAppend( existing_data = _policyFileReplaceOrAppend(
@ -4559,6 +4610,7 @@ def _lookup_admin_template(policy_name,
if admx_policy_definitions is None or adml_policy_resources is None: if admx_policy_definitions is None or adml_policy_resources is None:
admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions(
display_language=adml_language) display_language=adml_language)
admx_search_results = []
admx_search_results = ADMX_SEARCH_XPATH(admx_policy_definitions, admx_search_results = ADMX_SEARCH_XPATH(admx_policy_definitions,
policy_name=policy_name, policy_name=policy_name,
registry_class=policy_class) registry_class=policy_class)
@ -4598,11 +4650,42 @@ def _lookup_admin_template(policy_name,
if adml_search_results: if adml_search_results:
multiple_adml_entries = False multiple_adml_entries = False
suggested_policies = '' suggested_policies = ''
adml_to_remove = []
if len(adml_search_results) > 1: if len(adml_search_results) > 1:
multiple_adml_entries = True multiple_adml_entries = True
for adml_search_result in adml_search_results: for adml_search_result in adml_search_results:
if not getattr(adml_search_result, 'text', '').strip() == policy_name: if not getattr(adml_search_result, 'text', '').strip() == policy_name:
adml_search_results.remove(adml_search_result) adml_to_remove.append(adml_search_result)
else:
if hierarchy:
display_name_searchval = '$({0}.{1})'.format(
adml_search_result.tag.split('}')[1],
adml_search_result.attrib['id'])
#policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format(
policy_search_string = '//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]'.format(
adml_search_result.prefix,
display_name_searchval,
policy_class)
admx_results = []
admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap)
for search_result in admx_search_results:
log.debug('policy_name == {0}'.format(policy_name))
this_hierarchy = _build_parent_list(search_result,
admx_policy_definitions,
True,
adml_policy_resources)
this_hierarchy.reverse()
if hierarchy != this_hierarchy:
adml_to_remove.append(adml_search_result)
else:
admx_results.append(search_result)
if len(admx_results) == 1:
admx_search_results = admx_results
for adml in adml_to_remove:
if adml in adml_search_results:
adml_search_results.remove(adml)
if len(adml_search_results) == 1 and multiple_adml_entries:
multiple_adml_entries = False
for adml_search_result in adml_search_results: for adml_search_result in adml_search_results:
dmsg = 'found an ADML entry matching the string! {0} -- {1}' dmsg = 'found an ADML entry matching the string! {0} -- {1}'
log.debug(dmsg.format(adml_search_result.tag, log.debug(dmsg.format(adml_search_result.tag,
@ -4611,6 +4694,7 @@ def _lookup_admin_template(policy_name,
adml_search_result.tag.split('}')[1], adml_search_result.tag.split('}')[1],
adml_search_result.attrib['id']) adml_search_result.attrib['id'])
log.debug('searching for displayName == {0}'.format(display_name_searchval)) log.debug('searching for displayName == {0}'.format(display_name_searchval))
if not admx_search_results:
admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH(
admx_policy_definitions, admx_policy_definitions,
display_name=display_name_searchval, display_name=display_name_searchval,
@ -4626,6 +4710,7 @@ def _lookup_admin_template(policy_name,
True, True,
adml_policy_resources) adml_policy_resources)
this_hierarchy.reverse() this_hierarchy.reverse()
log.debug('testing {0} == {1}'.format(hierarchy, this_hierarchy))
if hierarchy == this_hierarchy: if hierarchy == this_hierarchy:
found = True found = True
else: else:
@ -5084,6 +5169,7 @@ def set_(computer_policy=None, user_policy=None,
if policies[p_class]: if policies[p_class]:
for policy_name in policies[p_class].keys(): for policy_name in policies[p_class].keys():
_pol = None _pol = None
policy_namespace = None
policy_key_name = policy_name policy_key_name = policy_name
if policy_name in _policydata.policies[p_class]['policies']: if policy_name in _policydata.policies[p_class]['policies']:
_pol = _policydata.policies[p_class]['policies'][policy_name] _pol = _policydata.policies[p_class]['policies'][policy_name]
@ -5133,16 +5219,19 @@ def set_(computer_policy=None, user_policy=None,
adml_policy_resources=admlPolicyResources) adml_policy_resources=admlPolicyResources)
if success: if success:
policy_name = the_policy.attrib['name'] policy_name = the_policy.attrib['name']
_admTemplateData[policy_name] = _value policy_namespace = the_policy.nsmap[the_policy.prefix]
if policy_namespace not in _admTemplateData:
_admTemplateData[policy_namespace] = {}
_admTemplateData[policy_namespace][policy_name] = _value
else: else:
raise SaltInvocationError(msg) raise SaltInvocationError(msg)
if policy_name in _admTemplateData and the_policy is not None: if policy_namespace and policy_name in _admTemplateData[policy_namespace] and the_policy is not None:
log.debug('setting == {0}'.format(_admTemplateData[policy_name]).lower()) log.debug('setting == {0}'.format(_admTemplateData[policy_namespace][policy_name]).lower())
log.debug('{0}'.format(str(_admTemplateData[policy_name]).lower())) log.debug('{0}'.format(str(_admTemplateData[policy_namespace][policy_name]).lower()))
if str(_admTemplateData[policy_name]).lower() != 'disabled' \ if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \
and str(_admTemplateData[policy_name]).lower() != 'not configured': and str(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured':
if ELEMENTS_XPATH(the_policy): if ELEMENTS_XPATH(the_policy):
if isinstance(_admTemplateData[policy_name], dict): if isinstance(_admTemplateData[policy_namespace][policy_name], dict):
for elements_item in ELEMENTS_XPATH(the_policy): for elements_item in ELEMENTS_XPATH(the_policy):
for child_item in elements_item.getchildren(): for child_item in elements_item.getchildren():
# check each element # check each element
@ -5153,9 +5242,9 @@ def set_(computer_policy=None, user_policy=None,
True, True,
admlPolicyResources) admlPolicyResources)
log.debug('id attribute == "{0}" this_element_name == "{1}"'.format(child_item.attrib['id'], this_element_name)) log.debug('id attribute == "{0}" this_element_name == "{1}"'.format(child_item.attrib['id'], this_element_name))
if this_element_name in _admTemplateData[policy_name]: if this_element_name in _admTemplateData[policy_namespace][policy_name]:
temp_element_name = this_element_name temp_element_name = this_element_name
elif child_item.attrib['id'] in _admTemplateData[policy_name]: elif child_item.attrib['id'] in _admTemplateData[policy_namespace][policy_name]:
temp_element_name = child_item.attrib['id'] temp_element_name = child_item.attrib['id']
else: else:
msg = ('Element "{0}" must be included' msg = ('Element "{0}" must be included'
@ -5163,12 +5252,12 @@ def set_(computer_policy=None, user_policy=None,
raise SaltInvocationError(msg.format(this_element_name, policy_name)) raise SaltInvocationError(msg.format(this_element_name, policy_name))
if 'required' in child_item.attrib \ if 'required' in child_item.attrib \
and child_item.attrib['required'].lower() == 'true': and child_item.attrib['required'].lower() == 'true':
if not _admTemplateData[policy_name][temp_element_name]: if not _admTemplateData[policy_namespace][policy_name][temp_element_name]:
msg = 'Element "{0}" requires a value to be specified' msg = 'Element "{0}" requires a value to be specified'
raise SaltInvocationError(msg.format(temp_element_name)) raise SaltInvocationError(msg.format(temp_element_name))
if etree.QName(child_item).localname == 'boolean': if etree.QName(child_item).localname == 'boolean':
if not isinstance( if not isinstance(
_admTemplateData[policy_name][temp_element_name], _admTemplateData[policy_namespace][policy_name][temp_element_name],
bool): bool):
msg = 'Element {0} requires a boolean True or False' msg = 'Element {0} requires a boolean True or False'
raise SaltInvocationError(msg.format(temp_element_name)) raise SaltInvocationError(msg.format(temp_element_name))
@ -5180,9 +5269,9 @@ def set_(computer_policy=None, user_policy=None,
min_val = int(child_item.attrib['minValue']) min_val = int(child_item.attrib['minValue'])
if 'maxValue' in child_item.attrib: if 'maxValue' in child_item.attrib:
max_val = int(child_item.attrib['maxValue']) max_val = int(child_item.attrib['maxValue'])
if int(_admTemplateData[policy_name][temp_element_name]) \ if int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \
< min_val or \ < min_val or \
int(_admTemplateData[policy_name][temp_element_name]) \ int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \
> max_val: > max_val:
msg = 'Element "{0}" value must be between {1} and {2}' msg = 'Element "{0}" value must be between {1} and {2}'
raise SaltInvocationError(msg.format(temp_element_name, raise SaltInvocationError(msg.format(temp_element_name,
@ -5192,7 +5281,7 @@ def set_(computer_policy=None, user_policy=None,
# make sure the value is in the enumeration # make sure the value is in the enumeration
found = False found = False
for enum_item in child_item.getchildren(): for enum_item in child_item.getchildren():
if _admTemplateData[policy_name][temp_element_name] == \ if _admTemplateData[policy_namespace][policy_name][temp_element_name] == \
_getAdmlDisplayName( _getAdmlDisplayName(
admlPolicyResources, admlPolicyResources,
enum_item.attrib['displayName']).strip(): enum_item.attrib['displayName']).strip():
@ -5206,33 +5295,33 @@ def set_(computer_policy=None, user_policy=None,
and child_item.attrib['explicitValue'].lower() == \ and child_item.attrib['explicitValue'].lower() == \
'true': 'true':
if not isinstance( if not isinstance(
_admTemplateData[policy_name][temp_element_name], _admTemplateData[policy_namespace][policy_name][temp_element_name],
dict): dict):
msg = ('Each list item of element "{0}" ' msg = ('Each list item of element "{0}" '
'requires a dict value') 'requires a dict value')
msg = msg.format(temp_element_name) msg = msg.format(temp_element_name)
raise SaltInvocationError(msg) raise SaltInvocationError(msg)
elif not isinstance( elif not isinstance(
_admTemplateData[policy_name][temp_element_name], _admTemplateData[policy_namespace][policy_name][temp_element_name],
list): list):
msg = 'Element "{0}" requires a list value' msg = 'Element "{0}" requires a list value'
msg = msg.format(temp_element_name) msg = msg.format(temp_element_name)
raise SaltInvocationError(msg) raise SaltInvocationError(msg)
elif etree.QName(child_item).localname == 'multiText': elif etree.QName(child_item).localname == 'multiText':
if not isinstance( if not isinstance(
_admTemplateData[policy_name][temp_element_name], _admTemplateData[policy_namespace][policy_name][temp_element_name],
list): list):
msg = 'Element "{0}" requires a list value' msg = 'Element "{0}" requires a list value'
msg = msg.format(temp_element_name) msg = msg.format(temp_element_name)
raise SaltInvocationError(msg) raise SaltInvocationError(msg)
_admTemplateData[policy_name][child_item.attrib['id']] = \ _admTemplateData[policy_namespace][policy_name][child_item.attrib['id']] = \
_admTemplateData[policy_name].pop(temp_element_name) _admTemplateData[policy_namespace][policy_name].pop(temp_element_name)
else: else:
msg = 'The policy "{0}" has elements which must be configured' msg = 'The policy "{0}" has elements which must be configured'
msg = msg.format(policy_name) msg = msg.format(policy_name)
raise SaltInvocationError(msg) raise SaltInvocationError(msg)
else: else:
if str(_admTemplateData[policy_name]).lower() != 'enabled': if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'enabled':
msg = ('The policy {0} must either be "Enabled", ' msg = ('The policy {0} must either be "Enabled", '
'"Disabled", or "Not Configured"') '"Disabled", or "Not Configured"')
msg = msg.format(policy_name) msg = msg.format(policy_name)
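The checks above walk a nested ``_admTemplateData`` structure keyed by policy namespace, then policy name, then element id. A minimal sketch of the shape that passes this validation, using hypothetical namespace, policy, and element names rather than real Administrative Template identifiers:

.. code-block:: python

    # Hypothetical example data; the namespace, policy names, and element ids
    # below are placeholders, not real ADMX identifiers.
    _admTemplateData = {
        'Vendor.Policies.Example': {
            # A policy with configurable elements
            'Example Policy With Elements': {
                'ExampleBooleanElement': True,              # 'boolean' element -> bool
                'ExampleDecimalElement': 15,                # 'decimal' element -> int within minValue/maxValue
                'ExampleListElement': ['first', 'second'],  # 'list'/'multiText' element -> list
            },
            # A policy without elements must be a plain state string
            'Example Policy Without Elements': 'Enabled',
        },
    }

When a ``list`` element declares ``explicitValue='true'``, each item is expected to be a dict instead of a bare string, as the validation above enforces.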
View File
@ -861,42 +861,79 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
r''' r'''
Install the passed package(s) on the system using winrepo Install the passed package(s) on the system using winrepo
:param name: Args:
The name of a single package, or a comma-separated list of packages to
install. (no spaces after the commas)
:type name: str, list, or None
:param bool refresh: Boolean value representing whether or not to refresh name (str):
the winrepo db The name of a single package, or a comma-separated list of packages
to install. (no spaces after the commas)
:param pkgs: A list of packages to install from a software repository. refresh (bool):
All packages listed under ``pkgs`` will be installed via a single Boolean value representing whether or not to refresh the winrepo db
pkgs (list):
A list of packages to install from a software repository. All
packages listed under ``pkgs`` will be installed via a single
command. command.
:type pkgs: list or None You can specify a version by passing the item as a dict:
*Keyword Arguments (kwargs)* CLI Example:
:param str version: .. code-block:: bash
The specific version to install. If omitted, the latest version will be
installed. If passed with multiple install, the version will apply to
all packages. Recommended for single installation only.
:param str cache_file: # will install the latest version of foo and bar
salt '*' pkg.install pkgs='["foo", "bar"]'
# will install the latest version of foo and version 1.2.3 of bar
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]'
Kwargs:
version (str):
The specific version to install. If omitted, the latest version will
be installed. Recommended for use when installing a single package.
If passed with a list of packages in the ``pkgs`` parameter, the
version will be ignored.
CLI Example:
.. code-block:: bash
# Version is ignored
salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3
If passed with a comma-separated list in the ``name`` parameter, the
version will apply to all packages in the list.
CLI Example:
.. code-block:: bash
# Version 1.2.3 will apply to packages foo and bar
salt '*' pkg.install foo,bar version=1.2.3
cache_file (str):
A single file to copy down for use with the installer. Copied to the A single file to copy down for use with the installer. Copied to the
same location as the installer. Use this over ``cache_dir`` if there same location as the installer. Use this over ``cache_dir`` if there
are many files in the directory and you only need a specific file and are many files in the directory and you only need a specific file
don't want to cache additional files that may reside in the installer and don't want to cache additional files that may reside in the
directory. Only applies to files on ``salt://`` installer directory. Only applies to files on ``salt://``
:param bool cache_dir: cache_dir (bool):
True will copy the contents of the installer directory. This is useful True will copy the contents of the installer directory. This is
for installations that are not a single file. Only applies to useful for installations that are not a single file. Only applies to
directories on ``salt://`` directories on ``salt://``
:param str saltenv: Salt environment. Default 'base' extra_install_flags (str):
Additional install flags that will be appended to the
``install_flags`` defined in the software definition file. Only
applies when single package is passed.
:param bool report_reboot_exit_codes: saltenv (str):
Salt environment. Default 'base'
report_reboot_exit_codes (bool):
If the installer exits with a recognized exit code indicating that If the installer exits with a recognized exit code indicating that
a reboot is required, the module function a reboot is required, the module function
@ -909,8 +946,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
:return: Return a dict containing the new package names and versions:: Returns:
:rtype: dict dict: Return a dict containing the new package names and versions:
If the package is installed by ``pkg.install``: If the package is installed by ``pkg.install``:
@ -989,13 +1026,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# "sources" argument # "sources" argument
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0] pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]
if len(pkg_params) > 1:
if kwargs.get('extra_install_flags') is not None:
log.warning('\'extra_install_flags\' argument will be ignored for '
'multiple package targets')
# Windows expects an Options dictionary containing 'version'
for pkg in pkg_params:
pkg_params[pkg] = {'version': pkg_params[pkg]}
if pkg_params is None or len(pkg_params) == 0: if pkg_params is None or len(pkg_params) == 0:
log.error('No package definition found') log.error('No package definition found')
return {} return {}
if not pkgs and len(pkg_params) == 1: if not pkgs and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a # Only use the 'version' param if a single item was passed to the 'name'
# comma-separated list # parameter
pkg_params = { pkg_params = {
name: { name: {
'version': kwargs.get('version'), 'version': kwargs.get('version'),
@ -1020,10 +1066,15 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name) ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name)
continue continue
# Get the version number passed or the latest available # Get the version number passed or the latest available (must be a string)
version_num = '' version_num = ''
if options: if options:
version_num = options.get('version', False) version_num = options.get('version', '')
# Using the salt cmdline with version=5.3 might be interpreted
# as a float; it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)
if not version_num: if not version_num:
version_num = _get_latest_pkg_version(pkginfo) version_num = _get_latest_pkg_version(pkginfo)
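A rough, self-contained illustration of what the two new blocks do: wrapping each target in the Options-style dict the Windows code expects, and coercing a version that the CLI/YAML layer may have parsed as a float back into a string so it can match the string keys in the package definition. The package names and versions here are placeholders:

.. code-block:: python

    import six  # within Salt this is salt.ext.six

    # Hypothetical output of pkg_resource.parse_targets for
    # pkgs=['foo', {'bar': 1.2}] -- note the float version
    pkg_params = {'foo': None, 'bar': 1.2}

    # Multiple targets: wrap each value in an Options dict
    pkg_params = {pkg: {'version': ver} for pkg, ver in pkg_params.items()}

    for pkg_name, options in pkg_params.items():
        version_num = options.get('version', '')
        # 1.2 (float) would never equal the string key '1.2' in the package
        # definition, so coerce anything that is not already a string
        # (None stays None and later falls back to the latest version)
        if not isinstance(version_num, six.string_types) and version_num is not None:
            version_num = str(version_num)
        print(pkg_name, repr(version_num))  # foo None / bar '1.2'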
@ -1160,11 +1211,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
#Compute msiexec string #Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))
# Install the software # Build cmd and arguments
# Check Use Scheduler Option # cmd and arguments must be separated for use with the task scheduler
if pkginfo[version_num].get('use_scheduler', False):
# Build Scheduled Task Parameters
if use_msiexec: if use_msiexec:
cmd = msiexec cmd = msiexec
arguments = ['/i', cached_pkg] arguments = ['/i', cached_pkg]
@ -1175,6 +1223,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
cmd = cached_pkg cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags) arguments = salt.utils.shlex_split(install_flags)
# Install the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):
# Create Scheduled Task # Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software', __salt__['task.create_task'](name='update-salt-software',
user_name='System', user_name='System',
@ -1188,21 +1240,43 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
start_time='01:00', start_time='01:00',
ac_only=False, ac_only=False,
stop_if_on_batteries=False) stop_if_on_batteries=False)
# Run Scheduled Task # Run Scheduled Task
# Special handling for installing salt
if pkg_name in ['salt-minion', 'salt-minion-py3']:
ret[pkg_name] = {'install status': 'task started'}
if not __salt__['task.run'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
else:
# Make sure the task is running, try for 5 secs
from time import time
t_end = time() + 5
while time() < t_end:
task_running = __salt__['task.status'](
'update-salt-software') == 'Running'
if task_running:
break
if not task_running:
log.error(
'Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
# All other packages run with task scheduler
else:
if not __salt__['task.run_wait'](name='update-salt-software'): if not __salt__['task.run_wait'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name)) log.error('Failed to install {0}'.format(pkg_name))
log.error('Scheduled Task failed to run') log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'} ret[pkg_name] = {'install status': 'failed'}
else: else:
# Build the install command
cmd = [] # Combine cmd and arguments
if use_msiexec: cmd = [cmd].extend(arguments)
cmd.extend([msiexec, '/i', cached_pkg])
if pkginfo[version_num].get('allusers', True):
cmd.append('ALLUSERS="1"')
else:
cmd.append(cached_pkg)
cmd.extend(salt.utils.shlex_split(install_flags))
# Launch the command # Launch the command
result = __salt__['cmd.run_all'](cmd, result = __salt__['cmd.run_all'](cmd,
cache_path, cache_path,
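The scheduler branch above polls the task for roughly five seconds before declaring failure, while the direct branch combines the executable with its argument list. A standalone sketch of both patterns, with plain callables standing in for the ``__salt__`` functions (these helpers are illustrative, not Salt APIs):

.. code-block:: python

    import time


    def wait_for_running(get_status, task_name, timeout=5):
        """Poll get_status(task_name) until it reports 'Running' or the timeout expires."""
        t_end = time.time() + timeout
        while time.time() < t_end:
            if get_status(task_name) == 'Running':
                return True
        return False


    def build_command(cmd, arguments):
        """Combine the executable and its arguments into a single command list."""
        full_cmd = [cmd]            # list.extend() returns None, so build the list first
        full_cmd.extend(arguments)  # ...then extend it in a separate statement
        return full_cmd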
@ -1355,6 +1429,11 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
continue continue
if version_num is not None: if version_num is not None:
# Using the salt cmdline with version=5.3 might be interpreted
# as a float; it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)
if version_num not in pkginfo and 'latest' in pkginfo: if version_num not in pkginfo and 'latest' in pkginfo:
version_num = 'latest' version_num = 'latest'
elif 'latest' in pkginfo: elif 'latest' in pkginfo:
View File
@ -1,9 +1,18 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
''' '''
Microsoft certificate management via the Pki PowerShell module. Microsoft certificate management via the PKI Client PowerShell module.
https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient
The PKI Client PowerShell module is only available on Windows 8+ and Windows
Server 2012+.
https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx
:platform: Windows :platform: Windows
:depends:
- PowerShell 4
- PKI Client Module (Windows 8+ / Windows Server 2012+)
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
''' '''
# Import python libs # Import python libs
@ -29,11 +38,17 @@ __virtualname__ = 'win_pki'
def __virtual__(): def __virtual__():
''' '''
Only works on Windows systems with the PKI PowerShell module installed. Requires Windows
Requires Windows 8+ / Windows Server 2012+
Requires PowerShell
Requires the PKI Client PowerShell module to be installed.
''' '''
if not salt.utils.is_windows(): if not salt.utils.is_windows():
return False, 'Only available on Windows Systems' return False, 'Only available on Windows Systems'
if salt.utils.version_cmp(__grains__['osversion'], '6.2.9200') == -1:
return False, 'Only available on Windows 8+ / Windows Server 2012 +'
if not __salt__['cmd.shell_info']('powershell')['installed']: if not __salt__['cmd.shell_info']('powershell')['installed']:
return False, 'Powershell not available' return False, 'Powershell not available'
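The ``== -1`` test above rejects any OS build older than 6.2.9200 (Windows 8 / Server 2012); a tiny illustration with a placeholder grain value, assuming ``salt.utils.version_cmp`` follows the usual three-way comparison convention:

.. code-block:: python

    import salt.utils

    osversion = '6.1.7601'  # placeholder: a Windows 7 SP1 style build string

    # -1 is read here as "first version is older"; 0/1 are assumed to mean equal/newer
    if salt.utils.version_cmp(osversion, '6.2.9200') == -1:
        print('Older than Windows 8 / Server 2012 - win_pki stays unloaded')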
View File
@ -1259,7 +1259,7 @@ def status(name, location='\\'):
task_service = win32com.client.Dispatch("Schedule.Service") task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect() task_service.Connect()
# get the folder to delete the folder from # get the folder where the task is defined
task_folder = task_service.GetFolder(location) task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name) task = task_folder.GetTask(name)
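The task object returned by ``GetTask`` exposes a numeric ``State`` property, which is presumably what ``status`` maps to strings such as ``'Running'``. A rough sketch of that lookup; the state-name mapping follows the Task Scheduler documentation and should be treated as an assumption here, not as code taken from this module:

.. code-block:: python

    import win32com.client

    TASK_STATES = {
        0: 'Unknown',
        1: 'Disabled',
        2: 'Queued',
        3: 'Ready',
        4: 'Running',
    }


    def task_state(name, location='\\'):
        """Return the state string of a scheduled task in the given folder."""
        service = win32com.client.Dispatch('Schedule.Service')
        service.Connect()
        folder = service.GetFolder(location)  # folder where the task is defined
        task = folder.GetTask(name)
        return TASK_STATES.get(task.State, 'Unknown')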
View File
@ -945,12 +945,26 @@ def set_wu_settings(level=None,
Change Windows Update settings. If no parameters are passed, the current Change Windows Update settings. If no parameters are passed, the current
value will be returned. value will be returned.
Supported:
- Windows Vista / Server 2008
- Windows 7 / Server 2008R2
- Windows 8 / Server 2012
- Windows 8.1 / Server 2012R2
.. note::
Microsoft began using the Unified Update Platform (UUP) starting with
Windows 10 / Server 2016. The Windows Update settings have changed and
the ability to 'Save' Windows Update settings has been removed. Windows
Update settings are read-only. See MSDN documentation:
https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx
:param int level: :param int level:
Number from 1 to 4 indicating the update level: Number from 1 to 4 indicating the update level:
1. Never check for updates 1. Never check for updates
2. Check for updates but let me choose whether to download and install them 2. Check for updates but let me choose whether to download and install them
3. Download updates but let me choose whether to install them 3. Download updates but let me choose whether to install them
4. Install updates automatically 4. Install updates automatically
:param bool recommended: :param bool recommended:
Boolean value that indicates whether to include optional or recommended Boolean value that indicates whether to include optional or recommended
updates when a search for updates and installation of updates is updates when a search for updates and installation of updates is
@ -993,8 +1007,28 @@ def set_wu_settings(level=None,
salt '*' win_wua.set_wu_settings level=4 recommended=True featured=False salt '*' win_wua.set_wu_settings level=4 recommended=True featured=False
""" """
ret = {} # The AutomaticUpdateSettings.Save() method used in this function does not
ret['Success'] = True # work on Windows 10 / Server 2016. It is called throughout this function
# like this:
#
# obj_au = win32com.client.Dispatch('Microsoft.Update.AutoUpdate')
# obj_au_settings = obj_au.Settings
# obj_au_settings.Save()
#
# The `Save()` method reports success but doesn't actually change anything.
# Windows Update settings are read-only in Windows 10 / Server 2016. There's
# a little blurb on MSDN that mentions this, but gives no alternative for
# changing these settings in Windows 10 / Server 2016.
#
# https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx
#
# Apparently the Windows Update framework in Windows Vista - Windows 8.1 has
# been changed quite a bit in Windows 10 / Server 2016. It is now called the
# Unified Update Platform (UUP). I haven't found an API or a Powershell
# cmdlet for working with the UUP. Perhaps there will be something
# forthcoming. The `win_lgpo` module might be an option for changing the
# Windows Update settings using local group policy.
ret = {'Success': True}
# Initialize the PyCom system # Initialize the PyCom system
pythoncom.CoInitialize() pythoncom.CoInitialize()
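Reading the Automatic Updates settings still works on the platforms where ``Save()`` is effectively a no-op; a minimal read-only sketch against the same COM object used in this function. The property names follow the WUA documentation and are assumptions here, not taken from this module:

.. code-block:: python

    import pythoncom
    import win32com.client

    pythoncom.CoInitialize()
    try:
        obj_au = win32com.client.Dispatch('Microsoft.Update.AutoUpdate')
        settings = obj_au.Settings
        # NotificationLevel 1-4 lines up with the 'level' values documented above
        print('Level:', settings.NotificationLevel)
        print('Include recommended updates:', settings.IncludeRecommendedUpdates)
    finally:
        pythoncom.CoUninitialize()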
View File
@ -330,10 +330,14 @@ def _parse_subject(subject):
for nid_name, nid_num in six.iteritems(subject.nid): for nid_name, nid_num in six.iteritems(subject.nid):
if nid_num in nids: if nid_num in nids:
continue continue
try:
val = getattr(subject, nid_name) val = getattr(subject, nid_name)
if val: if val:
ret[nid_name] = val ret[nid_name] = val
nids.append(nid_num) nids.append(nid_num)
except TypeError as e:
if e.args and e.args[0] == 'No string argument provided':
pass
return ret return ret
View File
@ -35,8 +35,10 @@ try:
import yum import yum
HAS_YUM = True HAS_YUM = True
except ImportError: except ImportError:
from salt.ext.six.moves import configparser
HAS_YUM = False HAS_YUM = False
from salt.ext.six.moves import configparser
# pylint: enable=import-error,redefined-builtin # pylint: enable=import-error,redefined-builtin
# Import salt libs # Import salt libs
@ -180,6 +182,15 @@ def _check_versionlock():
Ensure that the appropriate versionlock plugin is present Ensure that the appropriate versionlock plugin is present
''' '''
if _yum() == 'dnf': if _yum() == 'dnf':
if int(__grains__.get('osmajorrelease')) >= 26:
if six.PY3:
vl_plugin = 'python3-dnf-plugin-versionlock'
else:
vl_plugin = 'python2-dnf-plugin-versionlock'
else:
if six.PY3:
vl_plugin = 'python3-dnf-plugins-extras-versionlock'
else:
vl_plugin = 'python-dnf-plugins-extras-versionlock' vl_plugin = 'python-dnf-plugins-extras-versionlock'
else: else:
vl_plugin = 'yum-versionlock' \ vl_plugin = 'yum-versionlock' \
@ -897,6 +908,11 @@ def refresh_db(**kwargs):
clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache'] clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache']
update_cmd = [_yum(), '--quiet', 'check-update'] update_cmd = [_yum(), '--quiet', 'check-update']
if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7':
# This feature is disabled because it is not used by Salt and takes a long time with large repos like EPEL
update_cmd.append('--setopt=autocheck_running_kernel=false')
for args in (repo_arg, exclude_arg, branch_arg): for args in (repo_arg, exclude_arg, branch_arg):
if args: if args:
clean_cmd.extend(args) clean_cmd.extend(args)
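On a RedHat 7 family system the refresh therefore ends up issuing a check-update command along these lines (a sketch of the assembled argument list, not output captured from Salt):

.. code-block:: python

    update_cmd = ['yum', '--quiet', 'check-update']

    # RHEL 7 only: the running-kernel check is skipped because Salt does not
    # use its result and it is slow against large repos such as EPEL
    update_cmd.append('--setopt=autocheck_running_kernel=false')

    print(' '.join(update_cmd))
    # yum --quiet check-update --setopt=autocheck_running_kernel=false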
@ -2350,8 +2366,9 @@ def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
if stanza == repo: if stanza == repo:
continue continue
comments = '' comments = ''
if 'comments' in filerepos[stanza]: if 'comments' in six.iterkeys(filerepos[stanza]):
comments = '\n'.join(filerepos[stanza]['comments']) comments = salt.utils.pkg.rpm.combine_comments(
filerepos[stanza]['comments'])
del filerepos[stanza]['comments'] del filerepos[stanza]['comments']
content += '\n[{0}]'.format(stanza) content += '\n[{0}]'.format(stanza)
for line in filerepos[stanza]: for line in filerepos[stanza]:
@ -2486,7 +2503,8 @@ def mod_repo(repo, basedir=None, **kwargs):
for stanza in six.iterkeys(filerepos): for stanza in six.iterkeys(filerepos):
comments = '' comments = ''
if 'comments' in six.iterkeys(filerepos[stanza]): if 'comments' in six.iterkeys(filerepos[stanza]):
comments = '\n'.join(filerepos[stanza]['comments']) comments = salt.utils.pkg.rpm.combine_comments(
filerepos[stanza]['comments'])
del filerepos[stanza]['comments'] del filerepos[stanza]['comments']
content += '\n[{0}]'.format(stanza) content += '\n[{0}]'.format(stanza)
for line in six.iterkeys(filerepos[stanza]): for line in six.iterkeys(filerepos[stanza]):
@ -2508,41 +2526,32 @@ def _parse_repo_file(filename):
''' '''
Turn a single repo file into a dict Turn a single repo file into a dict
''' '''
repos = {} parsed = configparser.ConfigParser()
header = '' config = {}
repo = ''
with salt.utils.fopen(filename, 'r') as rfile:
for line in rfile:
if line.startswith('['):
repo = line.strip().replace('[', '').replace(']', '')
repos[repo] = {}
# Even though these are essentially uselss, I want to allow the
# user to maintain their own comments, etc
if not line:
if not repo:
header += line
if line.startswith('#'):
if not repo:
header += line
else:
if 'comments' not in repos[repo]:
repos[repo]['comments'] = []
repos[repo]['comments'].append(line.strip())
continue
# These are the actual configuration lines that matter
if '=' in line:
try: try:
comps = line.strip().split('=') parsed.read(filename)
repos[repo][comps[0].strip()] = '='.join(comps[1:]) except configparser.MissingSectionHeaderError as err:
except KeyError:
log.error( log.error(
'Failed to parse line in %s, offending line was ' 'Failed to parse file {0}, error: {1}'.format(filename, err.message)
'\'%s\'', filename, line.rstrip()
) )
return ('', {})
return (header, repos) for section in parsed._sections:
section_dict = dict(parsed._sections[section])
section_dict.pop('__name__')
config[section] = section_dict
# Try to extract leading comments
headers = ''
with salt.utils.fopen(filename, 'r') as rawfile:
for line in rawfile:
if line.strip().startswith('#'):
headers += '{0}\n'.format(line.strip())
else:
break
return (headers, config)
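A self-contained sketch of the configparser-based approach this rewrite switches to: parse the INI-style ``.repo`` file into a dict of sections and collect any leading ``#`` comments as the header. It uses the public ``sections()``/``items()`` API rather than the private ``_sections`` attribute the module reads, but returns the same ``(header, config)`` shape:

.. code-block:: python

    import configparser  # Salt imports this through salt.ext.six.moves for py2/py3 compat


    def parse_repo_file(filename):
        """Return (header_comments, {section: {option: value}}) for a .repo file."""
        parsed = configparser.ConfigParser()
        config = {}
        try:
            parsed.read(filename)
        except configparser.MissingSectionHeaderError:
            return '', {}

        for section in parsed.sections():
            config[section] = dict(parsed.items(section))

        # Leading comments are whatever '#' lines appear before the first [section]
        headers = ''
        with open(filename) as repo_file:
            for line in repo_file:
                if line.strip().startswith('#'):
                    headers += '{0}\n'.format(line.strip())
                else:
                    break
        return headers, config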
def file_list(*packages): def file_list(*packages):
Some files were not shown because too many files have changed in this diff.