Merge branch 'develop' into pr-35533
This commit is contained in:
commit
c1245acaad
.github/stale.yml
@ -1,8 +1,8 @@
|
||||
# Probot Stale configuration file
|
||||
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
# 1225 is approximately 3 years and 4 months
|
||||
daysUntilStale: 1225
|
||||
# 1200 is approximately 3 years and 3 months
|
||||
daysUntilStale: 1200
|
||||
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
|
conf/master
@ -414,6 +414,20 @@
|
||||
# will cause minion to throw an exception and drop the message.
|
||||
# sign_pub_messages: False
|
||||
|
||||
# Signature verification on messages published from minions
|
||||
# This requires that minions cryptographically sign the messages they
|
||||
# publish to the master. If minions are not signing, then log this information
|
||||
# at loglevel 'INFO' and drop the message without acting on it.
|
||||
# require_minion_sign_messages: False
|
||||
|
||||
# The below will drop messages when their signatures do not validate.
|
||||
# Note that when this option is False but `require_minion_sign_messages` is True
|
||||
# minions MUST sign their messages but the validity of their signatures
|
||||
# is ignored.
|
||||
# These two config options exist so a Salt infrastructure can be moved
|
||||
# to signing minion messages gradually.
|
||||
# drop_messages_signature_fail: False
|
||||
|
||||
# Use TLS/SSL encrypted connection between master and minion.
|
||||
# Can be set to a dictionary containing keyword arguments corresponding to Python's
|
||||
# 'ssl.wrap_socket' method.
|
||||
|
@ -309,3 +309,5 @@ def setup(app):
|
||||
indextemplate="pair: %s; conf/proxy")
|
||||
app.add_crossref_type(directivename="conf_log", rolename="conf_log",
|
||||
indextemplate="pair: %s; conf/logging")
|
||||
app.add_crossref_type(directivename="jinja_ref", rolename="jinja_ref",
|
||||
indextemplate="pair: %s; jinja filters")
|
||||
|
@ -17,6 +17,9 @@ The configuration file for the salt-master is located at
|
||||
configuration file is located at :file:`/usr/local/etc/salt`. The available
|
||||
options are as follows:
|
||||
|
||||
|
||||
.. _primary-master-configuration:
|
||||
|
||||
Primary Master Configuration
|
||||
============================
|
||||
|
||||
@ -852,6 +855,9 @@ what you are doing! Transports are explained in :ref:`Salt Transports
|
||||
ret_port: 4606
|
||||
zeromq: []
|
||||
|
||||
|
||||
.. _salt-ssh-configuration:
|
||||
|
||||
Salt-SSH Configuration
|
||||
======================
|
||||
|
||||
@ -929,6 +935,8 @@ Default: None
|
||||
Identical as `thin_extra_mods`, only applied to the Salt Minimal.
|
||||
|
||||
|
||||
.. _master-security-settings:
|
||||
|
||||
Master Security Settings
|
||||
========================
|
||||
|
||||
@ -1122,7 +1130,7 @@ from the eauth driver each time.
|
||||
.. conf_master:: eauth_acl_module
|
||||
|
||||
``eauth_acl_module``
|
||||
---------------------
|
||||
--------------------
|
||||
|
||||
Default: ``''``
|
||||
|
||||
@ -1219,7 +1227,6 @@ signature. The :conf_master:`master_pubkey_signature` must also be set for this.
|
||||
|
||||
master_use_pubkey_signature: True
|
||||
|
||||
|
||||
.. conf_master:: rotate_aes_key
|
||||
|
||||
``rotate_aes_key``
|
||||
@ -1236,7 +1243,6 @@ Do not disable this unless it is absolutely clear what this does.
|
||||
|
||||
rotate_aes_key: True
|
||||
|
||||
|
||||
.. conf_master:: ssl
|
||||
|
||||
``ssl``
|
||||
@ -1265,7 +1271,7 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23
|
||||
.. conf_master:: allow_minion_key_revoke
|
||||
|
||||
``allow_minion_key_revoke``
|
||||
------------------
|
||||
---------------------------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
@ -1278,6 +1284,9 @@ the master will drop the request and the minion's key will remain accepted.
|
||||
|
||||
rotate_aes_key: True
|
||||
|
||||
|
||||
.. _master-module-management:
|
||||
|
||||
Master Module Management
|
||||
========================
|
||||
|
||||
@ -1310,6 +1319,8 @@ the Salt master.
|
||||
cython_enable: False
|
||||
|
||||
|
||||
.. _master-state-system-settings:
|
||||
|
||||
Master State System Settings
|
||||
============================
|
||||
|
||||
@ -1626,6 +1637,9 @@ If set to ``True``, runner jobs will be saved to job cache (defined by
|
||||
|
||||
runner_returns: True
|
||||
|
||||
|
||||
.. _master-file-server-settings:
|
||||
|
||||
Master File Server Settings
|
||||
===========================
|
||||
|
||||
@ -3599,6 +3613,9 @@ can be utilized:
|
||||
|
||||
pillar_cache_backend: disk
|
||||
|
||||
|
||||
.. _syndic-server-settings:
|
||||
|
||||
Syndic Server Settings
|
||||
======================
|
||||
|
||||
@ -3748,6 +3765,8 @@ send events to all connected masters.
|
||||
syndic_forward_all_events: False
|
||||
|
||||
|
||||
.. _peer-publish-settings:
|
||||
|
||||
Peer Publish Settings
|
||||
=====================
|
||||
|
||||
@ -3862,7 +3881,6 @@ The level of messages to send to the console. See also :conf_log:`log_level`.
|
||||
|
||||
log_level: warning
|
||||
|
||||
|
||||
.. conf_master:: log_level_logfile
|
||||
|
||||
``log_level_logfile``
|
||||
@ -3878,7 +3896,6 @@ it will inherit the level set by :conf_log:`log_level` option.
|
||||
|
||||
log_level_logfile: warning
|
||||
|
||||
|
||||
.. conf_master:: log_datefmt
|
||||
|
||||
``log_datefmt``
|
||||
@ -3893,7 +3910,6 @@ The date and time format used in console log messages. See also
|
||||
|
||||
log_datefmt: '%H:%M:%S'
|
||||
|
||||
|
||||
.. conf_master:: log_datefmt_logfile
|
||||
|
||||
``log_datefmt_logfile``
|
||||
@ -3908,7 +3924,6 @@ The date and time format used in log file messages. See also
|
||||
|
||||
log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
|
||||
|
||||
|
||||
.. conf_master:: log_fmt_console
|
||||
|
||||
``log_fmt_console``
|
||||
@ -3941,7 +3956,6 @@ The format of the console logging messages. See also
|
||||
log_fmt_console: '%(colorlevel)s %(colormsg)s'
|
||||
log_fmt_console: '[%(levelname)-8s] %(message)s'
|
||||
|
||||
|
||||
.. conf_master:: log_fmt_logfile
|
||||
|
||||
``log_fmt_logfile``
|
||||
@ -3956,7 +3970,6 @@ The format of the log file logging messages. See also
|
||||
|
||||
log_fmt_logfile: '%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s] %(message)s'
|
||||
|
||||
|
||||
.. conf_master:: log_granular_levels
|
||||
|
||||
``log_granular_levels``
|
||||
@ -3967,6 +3980,9 @@ Default: ``{}``
|
||||
This can be used to control logging levels more specifically. See also
|
||||
:conf_log:`log_granular_levels`.
|
||||
|
||||
|
||||
.. _node-groups:
|
||||
|
||||
Node Groups
|
||||
===========
|
||||
|
||||
@ -3984,13 +4000,15 @@ A group consists of a group name and a compound target.
|
||||
group2: 'G@os:Debian and foo.domain.com'
|
||||
group3: 'G@os:Debian and N@group1'
|
||||
group4:
|
||||
- 'G@foo:bar'
|
||||
- 'or'
|
||||
- 'G@foo:baz'
|
||||
- 'G@foo:bar'
|
||||
- 'or'
|
||||
- 'G@foo:baz'
|
||||
|
||||
More information on using nodegroups can be found :ref:`here <targeting-nodegroups>`.
|
||||
|
||||
|
||||
.. _range-cluster-settings:
|
||||
|
||||
Range Cluster Settings
|
||||
======================
|
||||
|
||||
@ -4006,9 +4024,11 @@ https://github.com/ytoolshed/range/wiki/%22yamlfile%22-module-file-spec
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
range_server: range:80
|
||||
range_server: range:80
|
||||
|
||||
|
||||
.. _include-configuration:
|
||||
|
||||
Include Configuration
|
||||
=====================
|
||||
|
||||
@ -4030,7 +4050,6 @@ file.
|
||||
files are prefixed with an underscore. A common example of this is the
|
||||
``_schedule.conf`` file.
|
||||
|
||||
|
||||
.. conf_master:: include
|
||||
|
||||
``include``
|
||||
@ -4060,6 +4079,7 @@ option then the master will log a warning message.
|
||||
- master.d/*
|
||||
- /etc/roles/webserver
|
||||
|
||||
|
||||
.. _winrepo-master-config-opts:
|
||||
|
||||
Windows Software Repo Settings
|
||||
@ -4380,3 +4400,82 @@ configured both globally and for individual remotes.
|
||||
- '+refs/tags/*:refs/tags/*'
|
||||
- '+refs/pull/*/head:refs/remotes/origin/pr/*'
|
||||
- '+refs/pull/*/merge:refs/remotes/origin/merge/*'
|
||||
|
||||
|
||||
.. _configure-master-on-windows:
|
||||
|
||||
Configure Master on Windows
|
||||
===========================
|
||||
|
||||
The master on Windows requires no additional configuration. You can modify the
|
||||
master configuration by creating/editing the master config file located at
|
||||
``c:\salt\conf\master``. The same configuration options available on Linux are
|
||||
available in Windows, as long as they apply. For example, SSH options wouldn't
|
||||
apply in Windows. The main differences are the file paths. If you are familiar
|
||||
with common salt paths, the following table may be useful:
|
||||
|
||||
============= ========= =================
|
||||
Linux Paths Windows Paths
|
||||
============= ========= =================
|
||||
``/etc/salt`` ``<--->`` ``c:\salt\conf``
|
||||
``/`` ``<--->`` ``c:\salt``
|
||||
============= ========= =================
|
||||
|
||||
So, for example, the master config file in Linux is ``/etc/salt/master``. In
|
||||
Windows the master config file is ``c:\salt\conf\master``. The Linux path
|
||||
``/etc/salt`` becomes ``c:\salt\conf`` in Windows.
|
||||
|
||||
Common File Locations
|
||||
---------------------
|
||||
|
||||
====================================== =============================================
|
||||
Linux Paths Windows Paths
|
||||
====================================== =============================================
|
||||
``conf_file: /etc/salt/master`` ``conf_file: c:\salt\conf\master``
|
||||
``log_file: /var/log/salt/master`` ``log_file: c:\salt\var\log\salt\master``
|
||||
``pidfile: /var/run/salt-master.pid`` ``pidfile: c:\salt\var\run\salt-master.pid``
|
||||
====================================== =============================================
|
||||
|
||||
Common Directories
|
||||
------------------
|
||||
|
||||
====================================================== ============================================
|
||||
Linux Paths Windows Paths
|
||||
====================================================== ============================================
|
||||
``cachedir: /var/cache/salt/master`` ``cachedir: c:\salt\var\cache\salt\master``
|
||||
``extension_modules: /var/cache/salt/master/extmods`` ``c:\salt\var\cache\salt\master\extmods``
|
||||
``pki_dir: /etc/salt/pki/master`` ``pki_dir: c:\salt\conf\pki\master``
|
||||
``root_dir: /`` ``root_dir: c:\salt``
|
||||
``sock_dir: /var/run/salt/master`` ``sock_dir: c:\salt\var\run\salt\master``
|
||||
====================================================== ============================================
|
||||
|
||||
Roots
|
||||
-----
|
||||
|
||||
**file_roots**
|
||||
|
||||
================== =========================
|
||||
Linux Paths Windows Paths
|
||||
================== =========================
|
||||
``/srv/salt`` ``c:\salt\srv\salt``
|
||||
``/srv/spm/salt`` ``c:\salt\srv\spm\salt``
|
||||
================== =========================
|
||||
|
||||
**pillar_roots**
|
||||
|
||||
==================== ===========================
|
||||
Linux Paths Windows Paths
|
||||
==================== ===========================
|
||||
``/srv/pillar`` ``c:\salt\srv\pillar``
|
||||
``/srv/spm/pillar`` ``c:\salt\srv\spm\pillar``
|
||||
==================== ===========================
|
||||
|
||||
Win Repo Settings
|
||||
-----------------
|
||||
|
||||
========================================== =================================================
|
||||
Linux Paths Windows Paths
|
||||
========================================== =================================================
|
||||
``winrepo_dir: /srv/salt/win/repo`` ``winrepo_dir: c:\salt\srv\salt\win\repo``
|
||||
``winrepo_dir_ng: /srv/salt/win/repo-ng`` ``winrepo_dir_ng: c:\salt\srv\salt\win\repo-ng``
|
||||
========================================== =================================================
|
||||
|
@ -117,6 +117,8 @@ execution modules
|
||||
drac
|
||||
dracr
|
||||
drbd
|
||||
dummyproxy_package
|
||||
dummyproxy_service
|
||||
ebuild
|
||||
eix
|
||||
elasticsearch
|
||||
|
doc/ref/modules/all/salt.modules.dummyproxy_package.rst
@ -0,0 +1,6 @@
|
||||
salt.modules.dummyproxy_package module
|
||||
======================================
|
||||
|
||||
.. automodule:: salt.modules.dummyproxy_package
|
||||
:members:
|
||||
:undoc-members:
|
doc/ref/modules/all/salt.modules.dummyproxy_service.rst
@ -0,0 +1,6 @@
|
||||
salt.modules.dummyproxy_service module
|
||||
======================================
|
||||
|
||||
.. automodule:: salt.modules.dummyproxy_service
|
||||
:members:
|
||||
:undoc-members:
|
@ -17,6 +17,7 @@ pillar modules
|
||||
confidant
|
||||
consul_pillar
|
||||
csvpillar
|
||||
digicert
|
||||
django_orm
|
||||
ec2_pillar
|
||||
etcd_pillar
|
||||
|
doc/ref/pillar/all/salt.pillar.digicert.rst
@ -0,0 +1,6 @@
|
||||
salt.pillar.digicert module
|
||||
===========================
|
||||
|
||||
.. automodule:: salt.pillar.digicert
|
||||
:members:
|
||||
:undoc-members:
|
@ -12,6 +12,7 @@ proxy modules
|
||||
|
||||
chronos
|
||||
cisconso
|
||||
dummy
|
||||
esxi
|
||||
fx2
|
||||
junos
|
||||
|
doc/ref/proxy/all/salt.proxy.dummy.rst
@ -0,0 +1,6 @@
|
||||
salt.proxy.dummy module
|
||||
=======================
|
||||
|
||||
.. automodule:: salt.proxy.dummy
|
||||
:members:
|
||||
:undoc-members:
|
@ -16,6 +16,7 @@ runner modules
|
||||
cache
|
||||
cloud
|
||||
ddns
|
||||
digicertapi
|
||||
doc
|
||||
drac
|
||||
error
|
||||
|
doc/ref/runners/all/salt.runners.digicertapi.rst
@ -0,0 +1,6 @@
|
||||
salt.runners.digicertapi module
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.runners.digicertapi
|
||||
:members:
|
||||
:undoc-members:
|
@ -36,21 +36,14 @@ The 64bit installer has been tested on Windows 7 64bit and Windows Server
|
||||
Please file a bug report on our GitHub repo if issues for other platforms are
|
||||
found.
|
||||
|
||||
There are two installers available.
|
||||
There are installers available for Python 2 and Python 3.
|
||||
|
||||
============================================= =================================
|
||||
Filename Details
|
||||
============================================= =================================
|
||||
``Salt-Minion-<version>-<cpuarch>-Setup.exe`` Just the salt Minion
|
||||
``Salt-<version>-<cpuarch>-Setup.exe`` Salt Minion and Master
|
||||
============================================= =================================
|
||||
The installer will detect previous installations of Salt and ask if you would
|
||||
like to remove them. Clicking OK will remove the Salt binaries and related files
|
||||
but leave any existing config, cache, and PKI information.
|
||||
|
||||
When run, both installers will detect previous installations of Salt and ask if
|
||||
you would like to remove them. Clicking OK will remove the Salt binaries and
|
||||
related files but leave any existing config, cache, and PKI information.
|
||||
|
||||
Salt Minion Only Installation
|
||||
=============================
|
||||
Salt Minion Installation
|
||||
========================
|
||||
|
||||
After the Welcome and the License Agreement, the installer asks for two bits of
|
||||
information to configure the minion; the master hostname and the minion name.
|
||||
@ -83,18 +76,6 @@ be managed there or from the command line like any other Windows service.
|
||||
2008 x64 SP1 redistributable. Allow all Windows updates to run salt-minion
|
||||
smoothly.
|
||||
|
||||
Salt Minion/Master Installation
|
||||
===============================
|
||||
|
||||
This installer behaves the same as the Minion installer but adds an additional
|
||||
installer page. You will be prompted to choose to install the minion and master.
|
||||
The minion check box is checked by default. To also install the master, check
|
||||
the master checkbox. To install only the master, uncheck the minion checkbox. At
|
||||
least one item must be selected.
|
||||
|
||||
You will also be prompted on the final page to start the ``salt-master``
|
||||
service.
|
||||
|
||||
Installation Prerequisites
|
||||
--------------------------
|
||||
|
||||
@ -109,7 +90,7 @@ Silent Installer Options
|
||||
========================
|
||||
|
||||
The installer can be run silently by providing the ``/S`` option at the command
|
||||
line. Both installers also accept the following options for configuring the Salt
|
||||
line. The installer also accepts the following options for configuring the Salt
|
||||
Minion silently:
|
||||
|
||||
========================= =====================================================
|
||||
@ -126,19 +107,6 @@ Option Description
|
||||
``Automatic (Delayed Start)``
|
||||
========================= =====================================================
|
||||
|
||||
The Master/Minion installer also supports the following options:
|
||||
|
||||
========================= =====================================================
|
||||
Option Description
|
||||
========================= =====================================================
|
||||
``/start-master=`` Either a 1 or 0. '1' will start the salt-master
|
||||
service, '0' will not. Default is to start the
|
||||
service after installation.
|
||||
``/install-master`` Will install the master along with the minion.
|
||||
Default is to install minion only
|
||||
``/master-only`` Will only install the master.
|
||||
========================= =====================================================
|
||||
|
||||
.. note::
|
||||
``/start-service`` has been deprecated but will continue to function as
|
||||
expected for the time being.
|
||||
@ -147,24 +115,17 @@ Here are some examples of using the silent installer:
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
# install both minion and master
|
||||
# configure the minion and start both services
|
||||
# Install the Salt Minion
|
||||
# Configure the minion and start the service
|
||||
|
||||
Salt-2016.9.1-Setup-amd64.exe /S /master=yoursaltmaster /minion-name=yourminionname /install-master
|
||||
Salt-Minion-2017.7.1-Py2-AMD64-Setup.exe /S /master=yoursaltmaster /minion-name=yourminionname
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
# install only the master but don't start the salt-master service
|
||||
# Install the Salt Minion
|
||||
# Configure the minion but don't start the minion service
|
||||
|
||||
*-Setup-*.exe /S /master=yoursaltmaster /minion-name=yourminionname
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
# install the minion and master
|
||||
# configure the minion to talk to the local master
|
||||
# start both services
|
||||
|
||||
*-Setup-*.exe /S /master=yoursaltmaster /minion-name=yourminionname /start-minion=0
|
||||
Salt-Minion-2017.7.1-Py3-AMD64-Setup.exe /S /master=yoursaltmaster /minion-name=yourminionname /start-minion=0
|
||||
|
||||
|
||||
Running the Salt Minion on Windows as an Unprivileged User
|
||||
@ -268,21 +229,26 @@ code. They are located in the ``pkg\windows`` directory in the Salt repo
|
||||
Scripts:
|
||||
--------
|
||||
|
||||
================= ===========
|
||||
Script Description
|
||||
================= ===========
|
||||
``build_env.ps1`` A PowerShell script that sets up the build environment
|
||||
``build_pkg.bat`` A batch file that builds a Windows installer based on the
|
||||
contents of the ``C:\Python27`` directory
|
||||
``build.bat`` A batch file that fully automates the building of the Windows
|
||||
installer using the above two scripts
|
||||
================= ===========
|
||||
=================== ===========
|
||||
Script Description
|
||||
=================== ===========
|
||||
``build_env_2.ps1`` A PowerShell script that sets up a Python 2 build
|
||||
environment
|
||||
``build_env_3.ps1`` A PowerShell script that sets up a Python 3 build
|
||||
environment
|
||||
``build_pkg.bat`` A batch file that builds a Windows installer based on the
|
||||
contents of the ``C:\Python27`` directory
|
||||
``build.bat`` A batch file that fully automates the building of the
|
||||
Windows installer using the above two scripts
|
||||
=================== ===========
|
||||
|
||||
.. note::
|
||||
The ``build.bat`` and ``build_pkg.bat`` scripts both accept a single
|
||||
parameter to specify the version of Salt that will be displayed in the
|
||||
Windows installer. If no version is passed, the version will be determined
|
||||
using git.
|
||||
The ``build.bat`` and ``build_pkg.bat`` scripts both accept a parameter to
|
||||
specify the version of Salt that will be displayed in the Windows installer.
|
||||
If no version is passed, the version will be determined using git.
|
||||
|
||||
Both scripts also accept an additional parameter to specify the version of
|
||||
Python to use. The default is 2.
|
||||
|
||||
Prerequisite Software
|
||||
---------------------
|
||||
@ -316,7 +282,7 @@ Go into the ``salt`` directory and checkout the version of salt to work with
|
||||
.. code-block:: bat
|
||||
|
||||
cd salt
|
||||
git checkout 2016.3
|
||||
git checkout 2017.7.2
|
||||
|
||||
2. Setup the Python Environment
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@ -327,14 +293,14 @@ PowerShell script.
|
||||
.. code-block:: bat
|
||||
|
||||
cd pkg\windows
|
||||
powershell -file build_env.ps1
|
||||
powershell -file build_env_2.ps1
|
||||
|
||||
.. note::
|
||||
You can also do this from Explorer by navigating to the ``pkg\windows``
|
||||
directory, right clicking the **build_env.ps1** powershell script and
|
||||
directory, right clicking the **build_env_2.ps1** powershell script and
|
||||
selecting **Run with PowerShell**
|
||||
|
||||
This will download and install Python with all the dependencies needed to
|
||||
This will download and install Python 2 with all the dependencies needed to
|
||||
develop and build Salt.
|
||||
|
||||
.. note::
|
||||
@ -367,6 +333,10 @@ with ``pip``
|
||||
If ``pip`` is not recognized, you may need to restart your shell to get the
|
||||
updated path
|
||||
|
||||
.. note::
|
||||
If ``pip`` is still not recognized make sure that the Python Scripts folder
|
||||
is in the System ``%PATH%``. (``C:\Python2\Scripts``)
|
||||
|
||||
4. Setup Salt Configuration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -381,9 +351,9 @@ easiest way to set this up is to copy the contents of the
|
||||
md salt
|
||||
xcopy /s /e \Salt-Dev\salt\pkg\windows\buildenv\* \salt\
|
||||
|
||||
Now go into the ``C:\salt\conf`` directory and edit the file name ``minion`` (no
|
||||
extension). You need to configure the master and id parameters in this file.
|
||||
Edit the following lines:
|
||||
Now go into the ``C:\salt\conf`` directory and edit the minion config file named
|
||||
``minion`` (no extension). You need to configure the master and id parameters in
|
||||
this file. Edit the following lines:
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
@ -414,16 +384,20 @@ Navigate to the root ``salt`` directory and install Salt.
|
||||
-------------------------------
|
||||
|
||||
Navigate to the ``pkg\windows`` directory and run the ``build_pkg.bat``
|
||||
with the build version (2016.3) script.
|
||||
with the build version (2017.7.2) and the Python version as parameters.
|
||||
|
||||
.. code-block:: bat
|
||||
|
||||
cd pkg\windows
|
||||
build_pkg.bat 2016.3
|
||||
build_pkg.bat 2017.7.2 2
|
||||
^^^^^^^^ ^
|
||||
| |
|
||||
# build version -- |
|
||||
# python version ------
|
||||
|
||||
.. note::
|
||||
If no version is passed, the ``build_pkg.bat`` will guess the version number
|
||||
using git.
|
||||
using git. If the python version is not passed, the default is 2.
|
||||
|
||||
.. _create-windows-installer-easy:
|
||||
|
||||
@ -446,7 +420,7 @@ build.
|
||||
.. code-block:: bat
|
||||
|
||||
cd salt
|
||||
git checkout 2016.3
|
||||
git checkout 2017.7.2
|
||||
|
||||
Then navigate to ``pkg\windows`` and run the ``build.bat`` script with the
|
||||
version you're building.
|
||||
@ -454,10 +428,14 @@ version you're building.
|
||||
.. code-block:: bat
|
||||
|
||||
cd pkg\windows
|
||||
build.bat 2016.3
|
||||
build.bat 2017.7.2 3
|
||||
^^^^^^^^ ^
|
||||
| |
|
||||
# build version |
|
||||
# python version --
|
||||
|
||||
This will install everything needed to build a Windows installer for Salt. The
|
||||
binary will be in the ``salt\pkg\windows\installer`` directory.
|
||||
This will install everything needed to build a Windows installer for Salt using
|
||||
Python 3. The binary will be in the ``salt\pkg\windows\installer`` directory.
|
||||
|
||||
.. _test-salt-minion:
|
||||
|
||||
|
@ -1475,7 +1475,75 @@ Returns:
|
||||
|
||||
/usr/local/salt/virtualenv/bin/salt-master
|
||||
|
||||
.. jinja_ref:: jinja-in-files
|
||||
|
||||
Tests
|
||||
=====
|
||||
|
||||
Saltstack extends `builtin tests`_ with these custom tests:
|
||||
|
||||
.. _`builtin tests`: http://jinja.pocoo.org/docs/templates/#builtin-tests
|
||||
|
||||
.. jinja_ref:: equalto
|
||||
|
||||
``equalto``
|
||||
-----------
|
||||
|
||||
Tests the equality between two values.
|
||||
|
||||
Can be used in an ``if`` statement directly:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{% if 1 is equalto(1) %}
|
||||
< statements >
|
||||
{% endif %}
|
||||
|
||||
If clause evaluates to ``True``
|
||||
|
||||
or with the ``selectattr`` filter:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{{ [{'value': 1}, {'value': 2} , {'value': 3}] | selectattr('value', 'equalto', 3) | list }}
|
||||
|
||||
Returns:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[{'value': 3}]
|
||||
|
||||
.. jinja_ref:: match
|
||||
|
||||
``match``
|
||||
---------
|
||||
|
||||
Tests that a string matches the regex passed as an argument.
|
||||
|
||||
Can be used in an ``if`` statement directly:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{% if 'a' is match('[a-b]') %}
|
||||
< statements >
|
||||
{% endif %}
|
||||
|
||||
If clause evaluates to ``True``
|
||||
|
||||
or with the ``selectattr`` filter:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{{ [{'value': 'a'}, {'value': 'b'}, {'value': 'c'}] | selectattr('value', 'match', '[b-e]') | list }}
|
||||
|
||||
Returns:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[{'value': 'b'}, {'value': 'c'}]
|
||||
|
||||
|
||||
Test supports additional optional arguments: ``ignorecase``, ``multiline``
|
||||
|
||||
|
||||
Jinja in Files
|
||||
==============
|
||||
|
File diff suppressed because it is too large
@ -9,3 +9,18 @@ controls whether a minion can request that the master revoke its key. When True
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the master.
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
||||
|
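The interplay of `require_minion_sign_messages` and `drop_messages_signature_fail` is easiest to see as a small decision function. The sketch below is illustrative only: ``should_accept`` and ``verify`` are placeholder names, not Salt APIs; the real logic lives in the master's ``_return`` handler later in this diff.

.. code-block:: python

    def should_accept(load, opts, verify):
        '''Return True if the master should act on this minion payload.'''
        sig = load.get('sig')
        if opts.get('require_minion_sign_messages') and sig is None:
            # Signing is mandatory but the payload carries no signature.
            return False
        if sig is not None and not verify(load, sig):
            # Bad signature: drop only when the stricter option is set,
            # otherwise log and accept (lets signing be rolled out gradually).
            return not opts.get('drop_messages_signature_fail')
        return True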
@ -579,6 +579,11 @@ Beacons
|
||||
|
||||
- :mod:`salt.beacons.log <salt.beacons.log>`
|
||||
|
||||
Cache
|
||||
-----
|
||||
|
||||
- :mod:`salt.cache.redis_cache <salt.cache.redis_cache>`
|
||||
|
||||
Engines
|
||||
-------
|
||||
|
||||
@ -601,7 +606,6 @@ Execution modules
|
||||
- :mod:`salt.modules.icinga2 <salt.modules.icinga2>`
|
||||
- :mod:`salt.modules.logmod <salt.modules.logmod>`
|
||||
- :mod:`salt.modules.mattermost <salt.modules.mattermost>`
|
||||
- :mod:`salt.modules.mattermost <salt.modules.mattermost>`
|
||||
- :mod:`salt.modules.namecheap_dns <salt.modules.namecheap_dns>`
|
||||
- :mod:`salt.modules.namecheap_domains <salt.modules.namecheap_domains>`
|
||||
- :mod:`salt.modules.namecheap_ns <salt.modules.namecheap_ns>`
|
||||
|
@ -261,17 +261,6 @@ DownloadFileWithProgress $url $file
|
||||
# Install
|
||||
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install --no-index --find-links=$($ini['Settings']['DownloadDir']) $file " "pip install PyCrypto"
|
||||
|
||||
#==============================================================================
|
||||
# Download sitecustomize.py
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Download sitecustomize . . ."
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
$file = "sitecustomize.py"
|
||||
$url = "$($ini['Settings']['SaltRepo'])/$file"
|
||||
$file = "$($ini['Settings']['SitePkgs2Dir'])\$file"
|
||||
DownloadFileWithProgress $url $file
|
||||
|
||||
#==============================================================================
|
||||
# Copy DLLs to Python Directory
|
||||
#==============================================================================
|
||||
|
@ -2,4 +2,3 @@
|
||||
|
||||
lxml==3.6.0
|
||||
pypiwin32==219
|
||||
win-unicode-console==0.5
|
@ -78,7 +78,7 @@ class SSHHighState(salt.state.BaseHighState):
|
||||
'''
|
||||
return
|
||||
|
||||
def _ext_nodes(self):
|
||||
def _master_tops(self):
|
||||
'''
|
||||
Evaluate master_tops locally
|
||||
'''
|
||||
|
@ -989,6 +989,10 @@ def _format_instance_info_select(vm, selection):
|
||||
cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A"
|
||||
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A"
|
||||
vm_select_info['size'] = u"cpu: {0}\nram: {1}".format(cpu, ram)
|
||||
vm_select_info['size_dict'] = {
|
||||
'cpu': cpu,
|
||||
'memory': ram,
|
||||
}
|
||||
|
||||
if 'state' in selection:
|
||||
vm_select_info['state'] = str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A"
|
||||
@ -1176,6 +1180,10 @@ def _format_instance_info(vm):
|
||||
'id': str(vm['name']),
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A",
|
||||
'size': u"cpu: {0}\nram: {1}".format(cpu, ram),
|
||||
'size_dict': {
|
||||
'cpu': cpu,
|
||||
'memory': ram,
|
||||
},
|
||||
'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A",
|
||||
'private_ips': ip_addresses,
|
||||
'public_ips': [],
|
||||
@ -1580,6 +1588,10 @@ def list_nodes(kwargs=None, call=None):
|
||||
'id': vm["name"],
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A",
|
||||
'size': u"cpu: {0}\nram: {1}".format(cpu, ram),
|
||||
'size_dict': {
|
||||
'cpu': cpu,
|
||||
'memory': ram,
|
||||
},
|
||||
'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A",
|
||||
'private_ips': [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [],
|
||||
'public_ips': []
|
||||
|
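For readers skimming the cloud-provider hunks above: the change keeps the human-readable ``size`` string and adds a structured ``size_dict`` next to it. A rough sketch of one resulting node entry (the values are invented):

.. code-block:: python

    node = {
        'id': 'vm01',
        'size': u"cpu: {0}\nram: {1}".format(2, '4096 MB'),  # unchanged display string
        'size_dict': {                                       # new machine-readable form
            'cpu': 2,
            'memory': '4096 MB',
        },
        'state': 'poweredOn',
    }
    print(node['size_dict']['cpu'])  # -> 2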
@ -1049,6 +1049,19 @@ VALID_OPTS = {
|
||||
|
||||
# File chunk size for salt-cp
|
||||
'salt_cp_chunk_size': int,
|
||||
|
||||
# Require that the minion sign messages it posts to the master on the event
|
||||
# bus
|
||||
'minion_sign_messages': bool,
|
||||
|
||||
# Have master drop messages from minions for which their signatures do
|
||||
# not verify
|
||||
'drop_messages_signature_fail': bool,
|
||||
|
||||
# Require that payloads from minions have a 'sig' entry
|
||||
# (in other words, require that minions have 'minion_sign_messages'
|
||||
# turned on)
|
||||
'require_minion_sign_messages': bool,
|
||||
}
|
||||
|
||||
# default configurations
|
||||
@ -1315,6 +1328,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'salt_cp_chunk_size': 65536,
|
||||
'extmod_whitelist': {},
|
||||
'extmod_blacklist': {},
|
||||
'minion_sign_messages': False,
|
||||
}
|
||||
|
||||
DEFAULT_MASTER_OPTS = {
|
||||
@ -1615,6 +1629,8 @@ DEFAULT_MASTER_OPTS = {
|
||||
'django_auth_settings': '',
|
||||
'allow_minion_key_revoke': True,
|
||||
'salt_cp_chunk_size': 98304,
|
||||
'require_minion_sign_messages': False,
|
||||
'drop_messages_signature_fail': False,
|
||||
}
|
||||
|
||||
|
||||
|
@ -47,6 +47,7 @@ if not CDOME:
|
||||
# Import salt libs
|
||||
import salt.defaults.exitcodes
|
||||
import salt.utils
|
||||
import salt.utils.decorators
|
||||
import salt.payload
|
||||
import salt.transport.client
|
||||
import salt.transport.frame
|
||||
@ -138,13 +139,41 @@ def gen_keys(keydir, keyname, keysize, user=None):
|
||||
return priv
|
||||
|
||||
|
||||
@salt.utils.decorators.memoize
|
||||
def _get_key_with_evict(path, timestamp):
|
||||
'''
|
||||
Load a key from disk. `timestamp` above is intended to be the timestamp
|
||||
of the file's last modification. This fn is memoized so if it is called with the
|
||||
same path and timestamp (the file's last modified time) the second time
|
||||
the result is returned from the memoization cache. If the file gets modified
|
||||
then the params are different and the key is loaded from disk.
|
||||
'''
|
||||
log.debug('salt.crypt._get_key_with_evict: Loading private key')
|
||||
with salt.utils.fopen(path) as f:
|
||||
key = RSA.importKey(f.read())
|
||||
return key
|
||||
|
||||
|
||||
def _get_rsa_key(path):
|
||||
'''
|
||||
Read a key off the disk. Poor man's simple cache in effect here,
|
||||
we memoize the result of calling _get_key_with_evict. This means
|
||||
the first time _get_key_with_evict is called with a path and a timestamp
|
||||
the result is cached. If the file (the private key) does not change
|
||||
then its timestamp will not change and the next time the result is returned
|
||||
from the cache. If the key DOES change the next time _get_key_with_evict
|
||||
is called it is called with different parameters and the fn is run fully to
|
||||
retrieve the key from disk.
|
||||
'''
|
||||
log.debug('salt.crypt._get_rsa_key: Loading private key')
|
||||
return _get_key_with_evict(path, str(os.path.getmtime(path)))
|
||||
|
||||
|
||||
def sign_message(privkey_path, message):
|
||||
'''
|
||||
Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature.
|
||||
'''
|
||||
log.debug('salt.crypt.sign_message: Loading private key')
|
||||
with salt.utils.fopen(privkey_path) as f:
|
||||
key = RSA.importKey(f.read())
|
||||
key = _get_rsa_key(privkey_path)
|
||||
log.debug('salt.crypt.sign_message: Signing message.')
|
||||
signer = PKCS1_v1_5.new(key)
|
||||
return signer.sign(SHA.new(message))
|
||||
|
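The new ``_get_key_with_evict``/``_get_rsa_key`` pair caches a private key by file path plus mtime, so the key is re-read only when the file actually changes. A minimal self-contained sketch of the same idea, using a plain dict instead of ``salt.utils.decorators.memoize`` and returning raw file contents instead of an RSA object:

.. code-block:: python

    import os

    _cache = {}

    def get_with_evict(path, timestamp):
        # The cache key includes the mtime, so editing the file changes the
        # key and forces a fresh read on the next call.
        if (path, timestamp) not in _cache:
            with open(path) as fh_:
                _cache[(path, timestamp)] = fh_.read()
        return _cache[(path, timestamp)]

    def get_cached(path):
        return get_with_evict(path, str(os.path.getmtime(path)))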
@ -255,27 +255,12 @@ def access_keys(opts):
|
||||
acl_users.add(opts['user'])
|
||||
acl_users.add(salt.utils.get_user())
|
||||
if opts['client_acl_verify'] and HAS_PWD:
|
||||
log.profile('Beginning pwd.getpwall() call in masterarpi acess_keys function')
|
||||
log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function')
|
||||
for user in pwd.getpwall():
|
||||
users.append(user.pw_name)
|
||||
log.profile('End pwd.getpwall() call in masterarpi acess_keys function')
|
||||
log.profile('End pwd.getpwall() call in masterarpi access_keys function')
|
||||
for user in acl_users:
|
||||
log.info(
|
||||
'Preparing the {0} key for local communication'.format(
|
||||
user
|
||||
)
|
||||
)
|
||||
|
||||
if opts['client_acl_verify'] and HAS_PWD:
|
||||
if user not in users:
|
||||
try:
|
||||
log.profile('Beginning pwd.getpnam() call in masterarpi acess_keys function')
|
||||
user = pwd.getpwnam(user).pw_name
|
||||
log.profile('Beginning pwd.getpwnam() call in masterarpi acess_keys function')
|
||||
except KeyError:
|
||||
log.error('ACL user {0} is not available'.format(user))
|
||||
continue
|
||||
|
||||
log.info('Preparing the %s key for local communication', user)
|
||||
keys[user] = mk_key(opts, user)
|
||||
|
||||
# Check other users matching ACL patterns
|
||||
@ -536,10 +521,9 @@ class RemoteFuncs(object):
|
||||
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
|
||||
return mopts
|
||||
|
||||
def _ext_nodes(self, load, skip_verify=False):
|
||||
def _master_tops(self, load, skip_verify=False):
|
||||
'''
|
||||
Return the results from an external node classifier if one is
|
||||
specified
|
||||
Return the results from master_tops if configured
|
||||
'''
|
||||
if not skip_verify:
|
||||
if 'id' not in load:
|
||||
@ -772,6 +756,7 @@ class RemoteFuncs(object):
|
||||
# If the return data is invalid, just ignore it
|
||||
if any(key not in load for key in ('return', 'jid', 'id')):
|
||||
return False
|
||||
|
||||
if load['jid'] == 'req':
|
||||
# The minion is returning a standalone job, request a jobid
|
||||
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
|
||||
|
@ -312,9 +312,10 @@ def start(transport='zmq',
|
||||
if not certificate:
|
||||
log.critical('Please use a certificate, or disable the security.')
|
||||
return
|
||||
priv_key, verify_key = napalm_logs.utils.authenticate(certificate,
|
||||
address=auth_address,
|
||||
port=auth_port)
|
||||
auth = napalm_logs.utils.ClientAuth(certificate,
|
||||
address=auth_address,
|
||||
port=auth_port)
|
||||
|
||||
transport_recv_fun = _get_transport_recv(name=transport,
|
||||
address=address,
|
||||
port=port)
|
||||
@ -330,7 +331,7 @@ def start(transport='zmq',
|
||||
log.debug('Received from napalm-logs:')
|
||||
log.debug(raw_object)
|
||||
if not disable_security:
|
||||
dict_object = napalm_logs.utils.decrypt(raw_object, verify_key, priv_key)
|
||||
dict_object = auth.decrypt(raw_object)
|
||||
else:
|
||||
dict_object = napalm_logs.utils.unserialize(raw_object)
|
||||
try:
|
||||
|
@ -1001,7 +1001,7 @@ class LocalClient(Client):
|
||||
ret.append(saltenv)
|
||||
return ret
|
||||
|
||||
def ext_nodes(self):
|
||||
def master_tops(self):
|
||||
'''
|
||||
Originally returned information via the external_nodes subsystem.
|
||||
External_nodes was deprecated and removed in
|
||||
@ -1327,12 +1327,11 @@ class RemoteClient(Client):
|
||||
load = {'cmd': '_master_opts'}
|
||||
return self.channel.send(load)
|
||||
|
||||
def ext_nodes(self):
|
||||
def master_tops(self):
|
||||
'''
|
||||
Return the metadata derived from the external nodes system on the
|
||||
master.
|
||||
Return the metadata derived from the master_tops system
|
||||
'''
|
||||
load = {'cmd': '_ext_nodes',
|
||||
load = {'cmd': '_master_tops',
|
||||
'id': self.opts['id'],
|
||||
'opts': self.opts}
|
||||
if self.auth:
|
||||
|
@ -876,7 +876,7 @@ class FSChan(object):
|
||||
self.opts['__fs_update'] = True
|
||||
else:
|
||||
self.fs.update()
|
||||
self.cmd_stub = {'ext_nodes': {}}
|
||||
self.cmd_stub = {'master_tops': {}}
|
||||
|
||||
def send(self, load, tries=None, timeout=None, raw=False): # pylint: disable=unused-argument
|
||||
'''
|
||||
|
@ -538,7 +538,7 @@ def update():
|
||||
os.makedirs(env_cachedir)
|
||||
new_envs = envs(ignore_cache=True)
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
with salt.utils.fopen(env_cache, 'w+') as fp_:
|
||||
with salt.utils.fopen(env_cache, 'wb+') as fp_:
|
||||
fp_.write(serial.dumps(new_envs))
|
||||
log.trace('Wrote env cache data to {0}'.format(env_cache))
|
||||
|
||||
|
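The ``'w+'`` to ``'wb+'`` change matters because ``serial.dumps()`` produces msgpack bytes; on Python 3, writing bytes to a text-mode handle raises ``TypeError``. A standalone illustration (assumes the ``msgpack`` package is installed):

.. code-block:: python

    import msgpack

    data = msgpack.dumps({'envs': ['base', 'dev']})  # bytes

    try:
        with open('env_cache.p', 'w+') as fp_:
            fp_.write(data)      # TypeError on Python 3: write() wants str
    except TypeError as exc:
        print(exc)

    with open('env_cache.p', 'wb+') as fp_:
        fp_.write(data)          # binary mode accepts the serialized bytes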
@ -453,7 +453,7 @@ def update():
|
||||
os.makedirs(env_cachedir)
|
||||
new_envs = envs(ignore_cache=True)
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
with salt.utils.fopen(env_cache, 'w+') as fp_:
|
||||
with salt.utils.fopen(env_cache, 'wb+') as fp_:
|
||||
fp_.write(serial.dumps(new_envs))
|
||||
log.trace('Wrote env cache data to {0}'.format(env_cache))
|
||||
|
||||
|
@ -590,6 +590,43 @@ def grain_funcs(opts, proxy=None):
|
||||
)
|
||||
|
||||
|
||||
def _load_cached_grains(opts, cfn):
|
||||
'''
|
||||
Returns the grains cached in cfn, or None if the cache is too old or is
|
||||
corrupted.
|
||||
'''
|
||||
if not os.path.isfile(cfn):
|
||||
log.debug('Grains cache file does not exist.')
|
||||
return None
|
||||
|
||||
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
|
||||
if grains_cache_age > opts.get('grains_cache_expiration', 300):
|
||||
log.debug('Grains cache last modified {0} seconds ago and '
|
||||
'cache expiration is set to {1}. '
|
||||
'Grains cache expired. Refreshing.'.format(
|
||||
grains_cache_age,
|
||||
opts.get('grains_cache_expiration', 300)
|
||||
))
|
||||
return None
|
||||
|
||||
if opts.get('refresh_grains_cache', False):
|
||||
log.debug('refresh_grains_cache requested, Refreshing.')
|
||||
return None
|
||||
|
||||
log.debug('Retrieving grains from cache')
|
||||
try:
|
||||
serial = salt.payload.Serial(opts)
|
||||
with salt.utils.fopen(cfn, 'rb') as fp_:
|
||||
cached_grains = serial.load(fp_)
|
||||
if not cached_grains:
|
||||
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
|
||||
return None
|
||||
|
||||
return cached_grains
|
||||
except (IOError, OSError):
|
||||
return None
|
||||
|
||||
|
||||
def grains(opts, force_refresh=False, proxy=None):
|
||||
'''
|
||||
Return the functions for the dynamic grains and the values for the static
|
||||
@ -616,29 +653,10 @@ def grains(opts, force_refresh=False, proxy=None):
|
||||
opts['cachedir'],
|
||||
'grains.cache.p'
|
||||
)
|
||||
if not force_refresh:
|
||||
if opts.get('grains_cache', False):
|
||||
if os.path.isfile(cfn):
|
||||
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
|
||||
if opts.get('grains_cache_expiration', 300) >= grains_cache_age and not \
|
||||
opts.get('refresh_grains_cache', False) and not force_refresh:
|
||||
log.debug('Retrieving grains from cache')
|
||||
try:
|
||||
serial = salt.payload.Serial(opts)
|
||||
with salt.utils.fopen(cfn, 'rb') as fp_:
|
||||
cached_grains = serial.load(fp_)
|
||||
return cached_grains
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
else:
|
||||
log.debug('Grains cache last modified {0} seconds ago and '
|
||||
'cache expiration is set to {1}. '
|
||||
'Grains cache expired. Refreshing.'.format(
|
||||
grains_cache_age,
|
||||
opts.get('grains_cache_expiration', 300)
|
||||
))
|
||||
else:
|
||||
log.debug('Grains cache file does not exist.')
|
||||
if not force_refresh and opts.get('grains_cache', False):
|
||||
cached_grains = _load_cached_grains(opts, cfn)
|
||||
if cached_grains:
|
||||
return cached_grains
|
||||
else:
|
||||
log.debug('Grains refresh requested. Refreshing grains.')
|
||||
|
||||
|
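After this refactor every cache "miss" (file absent, cache older than ``grains_cache_expiration``, refresh requested, or unreadable/empty data) is reported the same way: ``_load_cached_grains`` returns ``None`` and the caller regenerates. A schematic caller, with ``regenerate`` standing in for the grains-building code:

.. code-block:: python

    def load_grains(opts, cfn, regenerate):
        cached = _load_cached_grains(opts, cfn)  # dict on a hit, None otherwise
        if cached:
            return cached
        return regenerate()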
@ -17,6 +17,7 @@ import signal
|
||||
import stat
|
||||
import logging
|
||||
import multiprocessing
|
||||
import salt.serializers.msgpack
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
@ -1109,11 +1110,13 @@ class AESFuncs(object):
|
||||
)
|
||||
)
|
||||
return False
|
||||
|
||||
if 'tok' in load:
|
||||
load.pop('tok')
|
||||
|
||||
return load
|
||||
|
||||
def _ext_nodes(self, load):
|
||||
def _master_tops(self, load):
|
||||
'''
|
||||
Return the results from an external node classifier if one is
|
||||
specified
|
||||
@ -1124,7 +1127,7 @@ class AESFuncs(object):
|
||||
load = self.__verify_load(load, ('id', 'tok'))
|
||||
if load is False:
|
||||
return {}
|
||||
return self.masterapi._ext_nodes(load, skip_verify=True)
|
||||
return self.masterapi._master_tops(load, skip_verify=True)
|
||||
|
||||
def _master_opts(self, load):
|
||||
'''
|
||||
@ -1396,6 +1399,24 @@ class AESFuncs(object):
|
||||
|
||||
:param dict load: The minion payload
|
||||
'''
|
||||
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
|
||||
log.critical('_return: Master is requiring minions to sign their messages, but there is no signature in this payload from {0}.'.format(load['id']))
|
||||
return False
|
||||
|
||||
if 'sig' in load:
|
||||
log.trace('Verifying signed event publish from minion')
|
||||
sig = load.pop('sig')
|
||||
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
|
||||
serialized_load = salt.serializers.msgpack.serialize(load)
|
||||
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
|
||||
log.info('Failed to verify event signature from minion {0}.'.format(load['id']))
|
||||
if self.opts['drop_messages_signature_fail']:
|
||||
log.critical('Drop_messages_signature_fail is enabled, dropping message from {0}'.format(load['id']))
|
||||
return False
|
||||
else:
|
||||
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
|
||||
load['sig'] = sig
|
||||
|
||||
try:
|
||||
salt.utils.job.store_job(
|
||||
self.opts, load, event=self.event, mminion=self.mminion)
|
||||
@ -1439,6 +1460,9 @@ class AESFuncs(object):
|
||||
ret['fun_args'] = load['arg']
|
||||
if 'out' in load:
|
||||
ret['out'] = load['out']
|
||||
if 'sig' in load:
|
||||
ret['sig'] = load['sig']
|
||||
|
||||
self._return(ret)
|
||||
|
||||
def minion_runner(self, clear_load):
|
||||
|
@ -20,6 +20,7 @@ import contextlib
|
||||
import multiprocessing
|
||||
from random import randint, shuffle
|
||||
from stat import S_IMODE
|
||||
import salt.serializers.msgpack
|
||||
|
||||
# Import Salt Libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
@ -1224,11 +1225,25 @@ class Minion(MinionBase):
|
||||
return functions, returners, errors, executors
|
||||
|
||||
def _send_req_sync(self, load, timeout):
|
||||
|
||||
if self.opts['minion_sign_messages']:
|
||||
log.trace('Signing event to be published onto the bus.')
|
||||
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
|
||||
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
|
||||
load['sig'] = sig
|
||||
|
||||
channel = salt.transport.Channel.factory(self.opts)
|
||||
return channel.send(load, timeout=timeout)
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def _send_req_async(self, load, timeout):
|
||||
|
||||
if self.opts['minion_sign_messages']:
|
||||
log.trace('Signing event to be published onto the bus.')
|
||||
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
|
||||
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
|
||||
load['sig'] = sig
|
||||
|
||||
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
|
||||
ret = yield channel.send(load, timeout=timeout)
|
||||
raise tornado.gen.Return(ret)
|
||||
|
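The minion-side hunks above sign the serialized payload with ``salt.crypt.sign_message`` before it is sent; the earlier crypt.py hunk shows this is PKCS#1 v1.5 over a SHA-1 digest via PyCrypto. A self-contained round trip with a throwaway key pair (assumes the ``pycrypto`` package; in Salt the private key is the minion's ``minion.pem`` and the master verifies with its cached copy of the minion's public key):

.. code-block:: python

    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA

    key = RSA.generate(2048)          # throwaway key pair for the demo
    pub = key.publickey()

    message = b'serialized minion payload'

    # Minion side: sign the payload with the private key.
    signature = PKCS1_v1_5.new(key).sign(SHA.new(message))

    # Master side: verify against the minion's public key before acting.
    print(PKCS1_v1_5.new(pub).verify(SHA.new(message), signature))  # True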
@ -13,6 +13,7 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import logging
|
||||
import glob
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -224,19 +225,42 @@ def force_reload(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, pass a signature to use to find
|
||||
the service via ps
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = _service_cmd(name, 'status')
|
||||
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = _service_cmd(service, 'status')
|
||||
results[service] = not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def _osrel():
|
||||
|
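Several service modules in this diff gain the same glob-expansion pattern in ``status()``: detect globbing characters, expand the name against ``get_all()`` with ``fnmatch``, and return a dict instead of a single bool when a glob was used. A stripped-down sketch with a hard-coded service list and a placeholder ``check()`` in place of the real return-code call:

.. code-block:: python

    import fnmatch
    import re

    ALL_SERVICES = ['salt-master', 'salt-minion', 'sshd']  # stand-in for get_all()

    def check(svc):
        return svc.startswith('salt')                      # placeholder for cmd.retcode

    def status(name):
        contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
        services = fnmatch.filter(ALL_SERVICES, name) if contains_globbing else [name]
        results = {svc: check(svc) for svc in services}
        return results if contains_globbing else results[name]

    print(status('salt*'))  # {'salt-master': True, 'salt-minion': True}
    print(status('sshd'))   # False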
@ -2481,9 +2481,6 @@ def create(image,
|
||||
except Exception:
|
||||
pull(image, client_timeout=client_timeout)
|
||||
|
||||
if name is not None and kwargs.get('hostname') is None:
|
||||
kwargs['hostname'] = name
|
||||
|
||||
kwargs, unused_kwargs = _get_create_kwargs(
|
||||
image=image,
|
||||
skip_translate=skip_translate,
|
||||
|
@ -592,9 +592,7 @@ def install(name=None,
|
||||
# Handle version kwarg for a single package target
|
||||
if pkgs is None and sources is None:
|
||||
version_num = kwargs.get('version')
|
||||
if version_num:
|
||||
pkg_params = {name: version_num}
|
||||
else:
|
||||
if not version_num:
|
||||
version_num = ''
|
||||
if slot is not None:
|
||||
version_num += ':{0}'.format(slot)
|
||||
|
@ -13,6 +13,8 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import logging
|
||||
import os
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
@ -473,24 +475,43 @@ def reload_(name, jail=None):
|
||||
|
||||
def status(name, sig=None, jail=None):
|
||||
'''
|
||||
Return the status for a service (True or False).
|
||||
|
||||
name
|
||||
Name of service
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: 2016.3.4
|
||||
|
||||
jail: optional jid or jail name
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = '{0} {1} onestatus'.format(_cmd(jail), name)
|
||||
return not __salt__['cmd.retcode'](cmd,
|
||||
python_shell=False,
|
||||
ignore_retcode=True)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = '{0} {1} onestatus'.format(_cmd(jail), service)
|
||||
results[service] = not __salt__['cmd.retcode'](cmd,
|
||||
python_shell=False,
|
||||
ignore_retcode=True)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
@ -13,6 +13,8 @@ to the correct service manager
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.systemd
|
||||
@ -236,9 +238,20 @@ def zap(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns the PID or an empty string if the
|
||||
service is running or not, pass a signature to use to find the service via
|
||||
ps
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -248,8 +261,19 @@ def status(name, sig=None):
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = _service_cmd(name, 'status')
|
||||
return not _ret_code(cmd)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = _service_cmd(service, 'status')
|
||||
results[service] = not _ret_code(cmd)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def enable(name, **kwargs):
|
||||
|
@ -501,7 +501,7 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
|
||||
|
||||
rule += after_jump
|
||||
|
||||
if full in ['True', 'true']:
|
||||
if full:
|
||||
if not table:
|
||||
return 'Error: Table needs to be specified'
|
||||
if not chain:
|
||||
|
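The iptables change from ``if full in ['True', 'true']:`` to ``if full:`` widens what counts as "enabled": the old test matched only those two literal strings, so a boolean ``True`` fell through, while the new test accepts any truthy value (including, note, the string ``'False'``). A quick comparison of the two checks:

.. code-block:: python

    for full in (True, 'True', 'true', 'False', False, None, ''):
        old = full in ['True', 'true']
        new = bool(full)
        print(repr(full), old, new)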
@ -16,6 +16,7 @@ from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import plistlib
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
# Import salt libs
|
||||
@ -212,33 +213,56 @@ def missing(job_label):
|
||||
return False if _service_by_name(job_label) else True
|
||||
|
||||
|
||||
def status(job_label, runas=None):
|
||||
def status(name, runas=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service via launchctl.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
runas (str): User to run launchctl commands
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service label>
|
||||
salt '*' service.status <service name>
|
||||
'''
|
||||
service = _service_by_name(job_label)
|
||||
|
||||
lookup_name = service['plist']['Label'] if service else job_label
|
||||
launchctl_data = _get_launchctl_data(lookup_name, runas=runas)
|
||||
|
||||
if launchctl_data:
|
||||
if BEFORE_YOSEMITE:
|
||||
if six.PY3:
|
||||
return 'PID' in plistlib.loads(launchctl_data)
|
||||
else:
|
||||
return 'PID' in dict(plistlib.readPlistFromString(launchctl_data))
|
||||
else:
|
||||
pattern = '"PID" = [0-9]+;'
|
||||
return True if re.search(pattern, launchctl_data) else False
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
return False
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
service_info = _service_by_name(service)
|
||||
|
||||
lookup_name = service_info['plist']['Label'] if service_info else service
|
||||
launchctl_data = _get_launchctl_data(lookup_name, runas=runas)
|
||||
|
||||
if launchctl_data:
|
||||
if BEFORE_YOSEMITE:
|
||||
if six.PY3:
|
||||
results[service] = 'PID' in plistlib.loads(launchctl_data)
|
||||
else:
|
||||
results[service] = 'PID' in dict(plistlib.readPlistFromString(launchctl_data))
|
||||
else:
|
||||
pattern = '"PID" = [0-9]+;'
|
||||
results[service] = True if re.search(pattern, launchctl_data) else False
|
||||
else:
|
||||
results[service] = False
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def stop(job_label, runas=None):
|
||||
|
@@ -354,6 +354,7 @@ def lvcreate(lvname,
             pv=None,
             thinvolume=False,
             thinpool=False,
             force=False,
             **kwargs):
    '''
    Create a new logical volume, with option for which physical volume to be used
@@ -428,6 +429,9 @@ def lvcreate(lvname,
    if extra_arguments:
        cmd.extend(extra_arguments)

    if force:
        cmd.append('--yes')

    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    lvdev = '/dev/{0}/{1}'.format(vgname, lvname)
    lvdata = lvdisplay(lvdev)

@ -13,6 +13,8 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import os
|
||||
import glob
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
__func_alias__ = {
|
||||
'reload_': 'reload'
|
||||
@ -103,19 +105,42 @@ def force_reload(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = '/etc/rc.d/{0} onestatus'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = '/etc/rc.d/{0} onestatus'.format(service)
|
||||
results[service] = not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def _get_svc(rcd, service_status):
|
||||
|
@ -12,6 +12,8 @@ The service module for OpenBSD
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import re
|
||||
import fnmatch
|
||||
import logging
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -93,19 +95,42 @@ def restart(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = '/etc/rc.d/{0} -f check'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = '/etc/rc.d/{0} -f check'.format(service)
|
||||
results[service] = not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def reload_(name):
|
||||
|
@ -528,15 +528,6 @@ def install(name=None,
|
||||
if pkg_params is None or len(pkg_params) == 0:
|
||||
return {}
|
||||
|
||||
version_num = kwargs.get('version')
|
||||
if version_num:
|
||||
if pkgs is None and sources is None:
|
||||
# Allow 'version' to work for single package target
|
||||
pkg_params = {name: version_num}
|
||||
else:
|
||||
log.warning('\'version\' parameter will be ignored for multiple '
|
||||
'package targets')
|
||||
|
||||
if 'root' in kwargs:
|
||||
pkg_params['-r'] = kwargs['root']
|
||||
|
||||
|
@ -7,6 +7,8 @@ from __future__ import absolute_import
|
||||
import salt.utils
|
||||
|
||||
import logging
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -115,11 +117,23 @@ def restart(name, sig=None):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service via rest_sample, returns a bool
|
||||
whether the service is running.
|
||||
Return the status for a service via rest_sample.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Not implemented
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -128,11 +142,21 @@ def status(name, sig=None):
|
||||
'''
|
||||
|
||||
proxy_fn = 'rest_sample.service_status'
|
||||
resp = __proxy__[proxy_fn](name)
|
||||
if resp['comment'] == 'stopped':
|
||||
return False
|
||||
if resp['comment'] == 'running':
|
||||
return True
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
resp = __proxy__[proxy_fn](service)
|
||||
if resp['comment'] == 'running':
|
||||
results[service] = True
|
||||
else:
|
||||
results[service] = False
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def running(name, sig=None):
|
||||
|
@ -15,6 +15,8 @@ import glob
|
||||
import logging
|
||||
import os
|
||||
import stat
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
@ -34,8 +36,7 @@ if salt.utils.which('initctl'):
|
||||
try:
|
||||
# Don't re-invent the wheel, import the helper functions from the
|
||||
# upstart module.
|
||||
from salt.modules.upstart \
|
||||
import _upstart_enable, _upstart_disable, _upstart_is_enabled
|
||||
from salt.modules.upstart import _upstart_enable, _upstart_disable, _upstart_is_enabled
|
||||
except Exception as exc:
|
||||
log.error('Unable to import helper functions from '
|
||||
'salt.modules.upstart: {0}'.format(exc))
|
||||
@ -470,22 +471,46 @@ def reload_(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if _service_is_upstart(name):
|
||||
cmd = 'status {0}'.format(name)
|
||||
return 'start/running' in __salt__['cmd.run'](cmd, python_shell=False)
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = '/sbin/service {0} status'.format(name)
|
||||
return __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=True) == 0
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
if _service_is_upstart(service):
|
||||
cmd = 'status {0}'.format(service)
|
||||
results[service] = 'start/running' in __salt__['cmd.run'](cmd, python_shell=False)
|
||||
else:
|
||||
cmd = '/sbin/service {0} status'.format(service)
|
||||
results[service] = __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=True) == 0
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def delete(name, **kwargs):
|
||||
|
@ -6,6 +6,8 @@ from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
__func_alias__ = {
|
||||
'reload_': 'reload'
|
||||
@ -135,9 +137,20 @@ def restart(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns the PID or an empty string if the
|
||||
service is running or not, pass a signature to use to find the service via
|
||||
ps
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to PID or empty
|
||||
string is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
string: PID if running, empty otherwise
|
||||
dict: Maps service name to PID if running, empty string otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -145,7 +158,20 @@ def status(name, sig=None):
|
||||
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
return __salt__['status.pid'](sig if sig else name)
|
||||
if sig:
|
||||
return __salt__['status.pid'](sig)
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
results[service] = __salt__['status.pid'](service)
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def reload_(name):
|
||||
|
@ -12,6 +12,8 @@ that use SMF also. (e.g. SmartOS)
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
__func_alias__ = {
|
||||
'reload_': 'reload'
|
||||
@ -226,8 +228,20 @@ def reload_(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Not implemented
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -235,12 +249,19 @@ def status(name, sig=None):
|
||||
|
||||
salt '*' service.status <service name>
|
||||
'''
|
||||
cmd = '/usr/bin/svcs -H -o STATE {0}'.format(name)
|
||||
line = __salt__['cmd.run'](cmd, python_shell=False)
|
||||
if line == 'online':
|
||||
return True
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
return False
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = '/usr/bin/svcs -H -o STATE {0}'.format(service)
|
||||
line = __salt__['cmd.run'](cmd, python_shell=False)
|
||||
results[service] = line == 'online'
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def enable(name, **kwargs):
|
||||
|
@ -7,6 +7,8 @@ Provide the service module for the proxy-minion SSH sample
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import salt.utils
|
||||
import fnmatch
|
||||
import re
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -60,7 +62,7 @@ def list_():
|
||||
|
||||
def start(name, sig=None):
|
||||
'''
|
||||
Start the specified service on the rest_sample
|
||||
Start the specified service on the ssh_sample
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -103,8 +105,20 @@ def restart(name, sig=None):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service via rest_sample, returns a bool
|
||||
whether the service is running.
|
||||
Return the status for a service via ssh_sample.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Not implemented
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -114,11 +128,21 @@ def status(name, sig=None):
|
||||
'''
|
||||
|
||||
proxy_fn = 'ssh_sample.service_status'
|
||||
resp = __proxy__[proxy_fn](name)
|
||||
if resp['comment'] == 'stopped':
|
||||
return False
|
||||
if resp['comment'] == 'running':
|
||||
return True
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
resp = __proxy__[proxy_fn](service)
|
||||
if resp['comment'] == 'running':
|
||||
results[service] = True
|
||||
else:
|
||||
results[service] = False
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def running(name, sig=None):
|
||||
|
@@ -380,7 +380,11 @@ def template(tem, queue=False, **kwargs):
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    st_ = salt.state.HighState(__opts__, context=__context__)
    try:
        st_ = salt.state.HighState(__opts__, context=__context__,
                                   proxy=__proxy__)
    except NameError:
        st_ = salt.state.HighState(__opts__, context=__context__)

    if not _check_pillar(kwargs, st_.opts['pillar']):
        __context__['retcode'] = 5
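The same HighState construction guard recurs in the top, show_highstate, show_lowstate, show_low_sls, show_sls and show_top hunks below. A minimal sketch of the idea, assuming __proxy__ is a loader-injected global that simply does not exist on regular minions:

import salt.state


def _get_highstate(opts, context):
    # __proxy__ is injected by the loader only on proxy minions; referencing
    # it elsewhere raises NameError, which selects the plain constructor.
    try:
        return salt.state.HighState(opts, context=context, proxy=__proxy__)
    except NameError:
        return salt.state.HighState(opts, context=context)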
@ -1172,8 +1176,13 @@ def top(topfn,
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
try:
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
|
||||
context=__context__, proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
|
||||
context=__context__)
|
||||
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc, context=__context__)
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
err = ['Pillar failed to render with the following messages:']
|
||||
@ -1229,7 +1238,11 @@ def show_highstate(queue=False, **kwargs):
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
|
||||
try:
|
||||
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc,
|
||||
proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc)
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -1259,7 +1272,10 @@ def show_lowstate(queue=False, **kwargs):
|
||||
if conflict is not None:
|
||||
assert False
|
||||
return conflict
|
||||
st_ = salt.state.HighState(__opts__)
|
||||
try:
|
||||
st_ = salt.state.HighState(__opts__, proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(__opts__)
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -1453,7 +1469,10 @@ def show_low_sls(mods,
|
||||
opts['environment'] = saltenv
|
||||
if pillarenv is not None:
|
||||
opts['pillarenv'] = pillarenv
|
||||
st_ = salt.state.HighState(opts)
|
||||
try:
|
||||
st_ = salt.state.HighState(opts, proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts)
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -1535,7 +1554,11 @@ def show_sls(mods, saltenv='base', test=None, queue=False, **kwargs):
|
||||
if 'pillarenv' in kwargs:
|
||||
opts['pillarenv'] = kwargs['pillarenv']
|
||||
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc)
|
||||
try:
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc,
|
||||
proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts, pillar, pillar_enc=pillar_enc)
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
@ -1585,7 +1608,10 @@ def show_top(queue=False, **kwargs):
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
if conflict is not None:
|
||||
return conflict
|
||||
st_ = salt.state.HighState(opts)
|
||||
try:
|
||||
st_ = salt.state.HighState(opts, proxy=__proxy__)
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts)
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
|
@ -16,6 +16,7 @@ import errno
|
||||
import glob
|
||||
import logging
|
||||
import os
|
||||
import fnmatch
|
||||
import re
|
||||
import shlex
|
||||
|
||||
@ -1017,19 +1018,41 @@ def force_reload(name, no_block=True, unmask=False, unmask_runtime=False):
|
||||
# established by Salt's service management states.
|
||||
def status(name, sig=None): # pylint: disable=unused-argument
|
||||
'''
|
||||
Return the status for a service via systemd, returns ``True`` if the
|
||||
service is running and ``False`` if it is not.
|
||||
Return the status for a service via systemd.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Not implemented
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
_check_for_unit_changes(name)
|
||||
return __salt__['cmd.retcode'](_systemctl_cmd('is-active', name),
|
||||
python_shell=False,
|
||||
ignore_retcode=True) == 0
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
_check_for_unit_changes(service)
|
||||
results[service] = __salt__['cmd.retcode'](_systemctl_cmd('is-active', service),
|
||||
python_shell=False,
|
||||
ignore_retcode=True) == 0
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
# **kwargs is required to maintain consistency with the API established by
|
||||
|
@ -426,28 +426,53 @@ def force_reload(name):
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service, returns a bool whether the service is
|
||||
running.
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
sig (str): Signature to use to find the service via ps
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name>
|
||||
salt '*' service.status <service name> [service signature]
|
||||
'''
|
||||
if sig:
|
||||
return bool(__salt__['status.pid'](sig))
|
||||
cmd = ['service', name, 'status']
|
||||
if _service_is_upstart(name):
|
||||
# decide result base on cmd output, thus ignore retcode,
|
||||
# which makes cmd output not at error lvl even when cmd fail.
|
||||
return 'start/running' in __salt__['cmd.run'](cmd, python_shell=False,
|
||||
ignore_retcode=True)
|
||||
# decide result base on retcode, thus ignore output (set quite)
|
||||
# because there is no way to avoid logging at error lvl when
|
||||
# service is not running - retcode != 0 (which is totally relevant).
|
||||
return not bool(__salt__['cmd.retcode'](cmd, python_shell=False,
|
||||
quite=True))
|
||||
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(get_all(), name)
|
||||
else:
|
||||
services = [name]
|
||||
results = {}
|
||||
for service in services:
|
||||
cmd = ['service', service, 'status']
|
||||
if _service_is_upstart(service):
|
||||
# decide result base on cmd output, thus ignore retcode,
|
||||
# which makes cmd output not at error lvl even when cmd fail.
|
||||
results[service] = 'start/running' in __salt__['cmd.run'](cmd, python_shell=False,
|
||||
ignore_retcode=True)
|
||||
else:
|
||||
# decide result base on retcode, thus ignore output (set quite)
|
||||
# because there is no way to avoid logging at error lvl when
|
||||
# service is not running - retcode != 0 (which is totally relevant).
|
||||
results[service] = not bool(__salt__['cmd.retcode'](cmd, python_shell=False,
|
||||
ignore_retcode=True,
|
||||
quite=True))
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def _get_service_exec():
|
||||
|
@ -10,6 +10,8 @@ from __future__ import absolute_import
|
||||
import salt.utils
|
||||
import time
|
||||
import logging
|
||||
import fnmatch
|
||||
import re
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
# Import 3rd party libs
|
||||
@ -528,7 +530,12 @@ def execute_salt_restart_task():
|
||||
|
||||
def status(name, sig=None):
|
||||
'''
|
||||
Return the status for a service
|
||||
Return the status for a service.
|
||||
If the name contains globbing, a dict mapping service name to True/False
|
||||
values is returned.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The service name can now be a glob (e.g. ``salt*``)
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to check
|
||||
@ -536,17 +543,27 @@ def status(name, sig=None):
|
||||
|
||||
Returns:
|
||||
bool: True if running, False otherwise
|
||||
dict: Maps service name to True if running, False otherwise
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' service.status <service name> [service signature]
|
||||
salt '*' service.status <service name>
|
||||
'''
|
||||
if info(name)['Status'] in ['Running', 'Stop Pending']:
|
||||
return True
|
||||
|
||||
return False
|
||||
results = {}
|
||||
all_services = get_all()
|
||||
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
|
||||
if contains_globbing:
|
||||
services = fnmatch.filter(all_services, name)
|
||||
else:
|
||||
services = [name]
|
||||
for service in services:
|
||||
results[service] = info(service)['Status'] in ['Running', 'Stop Pending']
|
||||
if contains_globbing:
|
||||
return results
|
||||
return results[name]
|
||||
|
||||
|
||||
def getsid(name):
|
||||
|
@ -1243,13 +1243,6 @@ def install(name=None,
|
||||
return {}
|
||||
|
||||
version_num = kwargs.get('version')
|
||||
if version_num:
|
||||
if pkgs is None and sources is None:
|
||||
# Allow "version" to work for single package target
|
||||
pkg_params = {name: version_num}
|
||||
else:
|
||||
log.warning('"version" parameter will be ignored for multiple '
|
||||
'package targets')
|
||||
|
||||
old = list_pkgs(versions_as_list=False) if not downloadonly else list_downloaded()
|
||||
# Use of __context__ means no duplicate work here, just accessing
|
||||
|
@ -1085,13 +1085,6 @@ def install(name=None,
|
||||
return {}
|
||||
|
||||
version_num = Wildcard(__zypper__)(name, version)
|
||||
if version_num:
|
||||
if pkgs is None and sources is None:
|
||||
# Allow "version" to work for single package target
|
||||
pkg_params = {name: version_num}
|
||||
else:
|
||||
log.warning("'version' parameter will be ignored for multiple package targets")
|
||||
|
||||
if pkg_type == 'repository':
|
||||
targets = []
|
||||
for param, version_num in six.iteritems(pkg_params):
|
||||
|
@@ -33,11 +33,18 @@ def ext_pillar(minion_id,
                           'libvirt',
                           minion_id)
    cacert = os.path.join(__opts__['pki_dir'],
                          'libvirt',
                          'cacert.pem')
                          'libvirt',
                          'cacert.pem')
    if not os.path.isdir(key_dir):
        # No keys have been generated
        gen_hyper_keys(minion_id)
        gen_hyper_keys(minion_id,
                       pillar.get('ext_pillar_virt.country', 'US'),
                       pillar.get('ext_pillar_virt.st', 'Utah'),
                       pillar.get('ext_pillar_virt.locality',
                                  'Salt Lake City'),
                       pillar.get('ext_pillar_virt.organization', 'Salted'),
                       pillar.get('ext_pillar_virt.expiration_days', '365')
                       )
    ret = {}
    for key in os.listdir(key_dir):
        if not key.endswith('.pem'):
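How these pillar keys get populated, in a short sketch: the virt.keys() state hunk further down prefixes every kwarg with 'ext_pillar_virt.' before calling pillar.ext, and this ext_pillar reads them back with pillar.get(), falling back to the old hard-coded defaults.

kwargs = {'country': 'US', 'expiration_days': '700'}   # as passed to virt.keys()
pillar_kwargs = {}
for key, value in kwargs.items():
    pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value

print(pillar_kwargs)
# -> {'ext_pillar_virt.country': 'US', 'ext_pillar_virt.expiration_days': '700'}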
@ -51,10 +58,11 @@ def ext_pillar(minion_id,
|
||||
|
||||
|
||||
def gen_hyper_keys(minion_id,
|
||||
country='US',
|
||||
state='Utah',
|
||||
locality='Salt Lake City',
|
||||
organization='Salted'):
|
||||
country='US',
|
||||
state='Utah',
|
||||
locality='Salt Lake City',
|
||||
organization='Salted',
|
||||
expiration_days='365'):
|
||||
'''
|
||||
Generate the keys to be used by libvirt hypervisors, this routine gens
|
||||
the keys and applies them to the pillar for the hypervisor minions
|
||||
@ -91,8 +99,9 @@ def gen_hyper_keys(minion_id,
|
||||
with salt.utils.fopen(srvinfo, 'w+') as fp_:
|
||||
infodat = ('organization = salted\ncn = {0}\ntls_www_server'
|
||||
'\nencryption_key\nsigning_key'
|
||||
'\ndigitalSignature').format(
|
||||
__grains__['fqdn'])
|
||||
'\ndigitalSignature\nexpiration_days = {1}'
|
||||
).format(
|
||||
__grains__['fqdn'], expiration_days)
|
||||
fp_.write(infodat)
|
||||
if not os.path.isfile(priv):
|
||||
subprocess.call(
|
||||
|
@@ -3118,7 +3118,7 @@ class BaseHighState(object):
                    matches[env_key] = []
                matches[env_key].append(inc_sls)
        _filter_matches(match, data, self.opts['nodegroups'])
        ext_matches = self._ext_nodes()
        ext_matches = self._master_tops()
        for saltenv in ext_matches:
            if saltenv in matches:
                matches[saltenv] = list(
@@ -3128,13 +3128,12 @@ class BaseHighState(object):
        # pylint: enable=cell-var-from-loop
        return matches

    def _ext_nodes(self):
    def _master_tops(self):
        '''
        Get results from an external node classifier.
        Override it if the execution of the external node classifier
        needs customization.
        Get results from the master_tops system. Override this function if the
        execution of the master_tops needs customization.
        '''
        return self.client.ext_nodes()
        return self.client.master_tops()

    def load_dynamic(self, matches):
        '''

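One consequence of the rename, sketched below under the assumption that third-party code subclassed BaseHighState to customize the old hook (the subclass here is hypothetical):

import salt.state


class CustomHighState(salt.state.BaseHighState):
    def _master_tops(self):
        # override the renamed hook instead of the old _ext_nodes()
        tops = super(CustomHighState, self)._master_tops()
        tops.setdefault('base', []).append('hardening')
        return tops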
@ -211,6 +211,7 @@ def lv_present(name,
|
||||
pv='',
|
||||
thinvolume=False,
|
||||
thinpool=False,
|
||||
force=False,
|
||||
**kwargs):
|
||||
'''
|
||||
Create a new logical volume
|
||||
@ -244,6 +245,12 @@ def lv_present(name,
|
||||
|
||||
thinpool
|
||||
Logical volume is a thin pool
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
force
|
||||
Assume yes to all prompts
|
||||
|
||||
'''
|
||||
ret = {'changes': {},
|
||||
'comment': '',
|
||||
@ -276,6 +283,7 @@ def lv_present(name,
|
||||
pv=pv,
|
||||
thinvolume=thinvolume,
|
||||
thinpool=thinpool,
|
||||
force=force,
|
||||
**kwargs)
|
||||
|
||||
if __salt__['lvm.lvdisplay'](lvpath):
|
||||
|
@ -14,8 +14,9 @@ for the generation and signing of certificates for systems running libvirt:
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import fnmatch
|
||||
import os
|
||||
from salt.ext import six
|
||||
|
||||
try:
|
||||
import libvirt # pylint: disable=import-error
|
||||
@ -42,7 +43,7 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def keys(name, basepath='/etc/pki'):
|
||||
def keys(name, basepath='/etc/pki', **kwargs):
|
||||
'''
|
||||
Manage libvirt keys.
|
||||
|
||||
@ -52,20 +53,57 @@ def keys(name, basepath='/etc/pki'):
|
||||
basepath
|
||||
Defaults to ``/etc/pki``, this is the root location used for libvirt
|
||||
keys on the hypervisor
|
||||
'''
|
||||
#libvirt.serverkey.pem
|
||||
#libvirt.servercert.pem
|
||||
#libvirt.clientkey.pem
|
||||
#libvirt.clientcert.pem
|
||||
#libvirt.cacert.pem
|
||||
|
||||
The following parameters are optional:
|
||||
|
||||
country
|
||||
The country that the certificate should use. Defaults to US.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
state
|
||||
The state that the certificate should use. Defaults to Utah.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
locality
|
||||
The locality that the certificate should use.
|
||||
Defaults to Salt Lake City.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
organization
|
||||
The organization that the certificate should use.
|
||||
Defaults to Salted.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
expiration_days
|
||||
The number of days that the certificate should be valid for.
|
||||
Defaults to 365 days (1 year)
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
'''
|
||||
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
|
||||
pillar = __salt__['pillar.ext']({'libvirt': '_'})
|
||||
|
||||
# Grab all kwargs to make them available as pillar values
|
||||
# rename them to something hopefully unique to avoid
|
||||
# overriding anything existing
|
||||
pillar_kwargs = {}
|
||||
for key, value in six.iteritems(kwargs):
|
||||
pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value
|
||||
|
||||
pillar = __salt__['pillar.ext']({'libvirt': '_'}, pillar_kwargs)
|
||||
paths = {
|
||||
'serverkey': os.path.join(basepath, 'libvirt', 'private', 'serverkey.pem'),
|
||||
'servercert': os.path.join(basepath, 'libvirt', 'servercert.pem'),
|
||||
'clientkey': os.path.join(basepath, 'libvirt', 'private', 'clientkey.pem'),
|
||||
'clientcert': os.path.join(basepath, 'libvirt', 'clientcert.pem'),
|
||||
'serverkey': os.path.join(basepath, 'libvirt',
|
||||
'private', 'serverkey.pem'),
|
||||
'servercert': os.path.join(basepath, 'libvirt',
|
||||
'servercert.pem'),
|
||||
'clientkey': os.path.join(basepath, 'libvirt',
|
||||
'private', 'clientkey.pem'),
|
||||
'clientcert': os.path.join(basepath, 'libvirt',
|
||||
'clientcert.pem'),
|
||||
'cacert': os.path.join(basepath, 'CA', 'cacert.pem')
|
||||
}
|
||||
|
||||
|
@@ -658,3 +658,28 @@ class JinjaFilter(object):


jinja_filter = JinjaFilter


class JinjaTest(object):
    '''
    This decorator is used to specify that a function is to be loaded as a
    Jinja test.
    '''
    salt_jinja_tests = {}

    def __init__(self, name=None):
        '''
        '''
        self.name = name

    def __call__(self, function):
        '''
        '''
        name = self.name or function.__name__
        if name not in self.salt_jinja_tests:
            log.debug('Marking "{0}" as a jinja test'.format(name))
            self.salt_jinja_tests[name] = function
        return function


jinja_test = JinjaTest

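A short sketch of how the decorator is meant to be used to register an extra Jinja test (the test itself is hypothetical; the real registrations added by this commit live in salt/utils/jinja.py further down):

from salt.utils.decorators import jinja_test


@jinja_test('divisible_by')
def test_divisible_by(value, divisor):
    '''Returns true if value divides evenly by divisor.'''
    return value % divisor == 0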
@@ -225,7 +225,7 @@ def translate_input(**kwargs):
    # format {'Type': log_driver, 'Config': log_opt}. So, we need to
    # construct this argument to be passed to the API from those two
    # arguments.
    if log_driver is not NOTSET and log_opt is not NOTSET:
    if log_driver is not NOTSET or log_opt is not NOTSET:
        kwargs['log_config'] = {
            'Type': log_driver if log_driver is not NOTSET else 'none',
            'Config': log_opt if log_opt is not NOTSET else {}

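Sketch of the behavioural difference: with the original ``and`` either argument supplied on its own was silently dropped, while with ``or`` the missing half now falls back to a default (NOTSET is a local stand-in for the module's sentinel):

NOTSET = object()   # stand-in for the module-level sentinel


def build_log_config(log_driver=NOTSET, log_opt=NOTSET):
    if log_driver is not NOTSET or log_opt is not NOTSET:
        return {
            'Type': log_driver if log_driver is not NOTSET else 'none',
            'Config': log_opt if log_opt is not NOTSET else {},
        }
    return None


print(build_log_config(log_driver='json-file'))
# -> {'Type': 'json-file', 'Config': {}}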
@@ -2213,8 +2213,7 @@ class GitBase(object):
        if refresh_env_cache:
            new_envs = self.envs(ignore_cache=True)
            serial = salt.payload.Serial(self.opts)
            mode = 'wb+' if six.PY3 else 'w+'
            with salt.utils.fopen(self.env_cache, mode) as fp_:
            with salt.utils.fopen(self.env_cache, 'wb+') as fp_:
                fp_.write(serial.dumps(new_envs))
                log.trace('Wrote env cache data to {0}'.format(self.env_cache))

@@ -31,7 +31,7 @@ import salt
import salt.fileclient
import salt.utils
import salt.utils.url
from salt.utils.decorators import jinja_filter
from salt.utils.decorators import jinja_filter, jinja_test
from salt.utils.odict import OrderedDict

log = logging.getLogger(__name__)
@@ -182,6 +182,26 @@ class PrintableDict(OrderedDict):
        return '{' + ', '.join(output) + '}'


# Additional tests
@jinja_test('match')
def test_match(txt, rgx, ignorecase=False, multiline=False):
    '''Returns true if a sequence of chars matches a pattern.'''
    flag = 0
    if ignorecase:
        flag |= re.I
    if multiline:
        flag |= re.M
    compiled_rgx = re.compile(rgx, flag)
    return True if compiled_rgx.match(txt) else False


@jinja_test('equalto')
def test_equalto(value, other):
    '''Returns true if two values are equal.'''
    return value == other


# Additional filters
@jinja_filter('skip')
def skip_filter(data):
    '''

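A sketch of how the new tests surface once an environment is primed from JinjaTest.salt_jinja_tests, as the salt/utils/templates.py hunk further down does (importing salt.utils.jinja is assumed to trigger the decorator registrations above):

import jinja2

import salt.utils.jinja  # noqa: F401  -- importing registers 'match' and 'equalto'
from salt.utils.decorators import JinjaTest

env = jinja2.Environment()
env.tests.update(JinjaTest.salt_jinja_tests)

tmpl = "{% if host is match('web[0-9]+') %}webserver{% endif %}"
print(env.from_string(tmpl).render(host='web01'))   # -> webserver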
@@ -324,6 +324,7 @@ from __future__ import absolute_import, with_statement
import os
import sys
import time
import copy
import signal
import datetime
import itertools
@@ -827,7 +828,7 @@ class Schedule(object):
            kwargs = {}
            if 'kwargs' in data:
                kwargs = data['kwargs']
                ret['fun_args'].append(data['kwargs'])
                ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
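Why the deepcopy matters here, in a small sketch: fun_args previously kept a live reference to the job's kwargs dict, so any later mutation rewrote the arguments already recorded for the return payload.

import copy

data = {'kwargs': {'refresh': True}}
ret = {'fun_args': []}

kwargs = data['kwargs']
ret['fun_args'].append(copy.deepcopy(kwargs))   # snapshot, not a reference
kwargs['refresh'] = False                       # later mutation of the job kwargs

print(ret['fun_args'])   # [{'refresh': True}] -- the recorded args are unchanged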
@ -884,9 +885,9 @@ class Schedule(object):
|
||||
ret['success'] = False
|
||||
ret['retcode'] = 254
|
||||
finally:
|
||||
try:
|
||||
# Only attempt to return data to the master
|
||||
# if the scheduled job is running on a minion.
|
||||
# Only attempt to return data to the master
|
||||
# if the scheduled job is running on a minion.
|
||||
if '__role' in self.opts and self.opts['__role'] == 'minion':
|
||||
if 'return_job' in data and not data['return_job']:
|
||||
pass
|
||||
else:
|
||||
@ -908,9 +909,13 @@ class Schedule(object):
|
||||
elif '__role' in self.opts and self.opts['__role'] == 'master':
|
||||
event = salt.utils.event.get_master_event(self.opts,
|
||||
self.opts['sock_dir'])
|
||||
event.fire_event(load, '__schedule_return')
|
||||
try:
|
||||
event.fire_event(load, '__schedule_return')
|
||||
except Exception as exc:
|
||||
log.exception("Unhandled exception firing event: {0}".format(exc))
|
||||
|
||||
log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
|
||||
log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
|
||||
try:
|
||||
os.unlink(proc_fn)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
|
||||
|
@ -32,7 +32,7 @@ from salt.exceptions import (
|
||||
import salt.utils.jinja
|
||||
import salt.utils.network
|
||||
from salt.utils.odict import OrderedDict
|
||||
from salt.utils.decorators import JinjaFilter
|
||||
from salt.utils.decorators import JinjaFilter, JinjaTest
|
||||
from salt import __path__ as saltpath
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -327,6 +327,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
|
||||
jinja_env = jinja2.Environment(undefined=jinja2.StrictUndefined,
|
||||
**env_args)
|
||||
|
||||
jinja_env.tests.update(JinjaTest.salt_jinja_tests)
|
||||
jinja_env.filters.update(JinjaFilter.salt_jinja_filters)
|
||||
|
||||
# globals
|
||||
|
@ -453,6 +453,43 @@ class TestDaemon(object):
|
||||
)
|
||||
sys.stdout.flush()
|
||||
|
||||
if self.parser.options.proxy:
|
||||
try:
|
||||
sys.stdout.write(
|
||||
' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors)
|
||||
)
|
||||
sys.stdout.flush()
|
||||
self.proxy_process = start_daemon(
|
||||
daemon_name='salt-proxy',
|
||||
daemon_id=self.master_opts['id'],
|
||||
daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
|
||||
daemon_cli_script_name='proxy',
|
||||
daemon_config=self.proxy_opts,
|
||||
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
|
||||
daemon_class=SaltProxy,
|
||||
bin_dir_path=SCRIPT_DIR,
|
||||
fail_hard=True,
|
||||
start_timeout=30)
|
||||
sys.stdout.write(
|
||||
'\r{0}\r'.format(
|
||||
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
||||
)
|
||||
)
|
||||
sys.stdout.write(
|
||||
' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors)
|
||||
)
|
||||
sys.stdout.flush()
|
||||
except (RuntimeWarning, RuntimeError):
|
||||
sys.stdout.write(
|
||||
'\r{0}\r'.format(
|
||||
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
||||
)
|
||||
)
|
||||
sys.stdout.write(
|
||||
' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors)
|
||||
)
|
||||
sys.stdout.flush()
|
||||
|
||||
def start_raet_daemons(self):
|
||||
'''
|
||||
Fire up the raet daemons!
|
||||
@ -653,6 +690,7 @@ class TestDaemon(object):
|
||||
* syndic
|
||||
* syndic_master
|
||||
* sub_minion
|
||||
* proxy
|
||||
'''
|
||||
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
|
||||
|
||||
@ -735,14 +773,14 @@ class TestDaemon(object):
|
||||
syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
|
||||
|
||||
# This proxy connects to master
|
||||
# proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
|
||||
# proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
|
||||
proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
|
||||
proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir-proxy', 'cache')
|
||||
# proxy_opts['user'] = running_tests_user
|
||||
# proxy_opts['config_dir'] = TMP_CONF_DIR
|
||||
# proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir')
|
||||
# proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
|
||||
# proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
|
||||
# proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
|
||||
proxy_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
|
||||
proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
|
||||
proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir-proxy', 'pki')
|
||||
proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
|
||||
proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
|
||||
|
||||
if transport == 'raet':
|
||||
master_opts['transport'] = 'raet'
|
||||
@ -758,6 +796,7 @@ class TestDaemon(object):
|
||||
minion_opts['transport'] = 'tcp'
|
||||
sub_minion_opts['transport'] = 'tcp'
|
||||
syndic_master_opts['transport'] = 'tcp'
|
||||
proxy_opts['transport'] = 'tcp'
|
||||
|
||||
# Set up config options that require internal data
|
||||
master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
|
||||
@ -815,14 +854,16 @@ class TestDaemon(object):
|
||||
sub_minion_opts[optname] = optname_path
|
||||
syndic_opts[optname] = optname_path
|
||||
syndic_master_opts[optname] = optname_path
|
||||
proxy_opts[optname] = optname_path
|
||||
|
||||
master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
|
||||
|
||||
for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts):
|
||||
for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
|
||||
if 'engines' not in conf:
|
||||
conf['engines'] = []
|
||||
conf['engines'].append({'salt_runtests': {}})
|
||||
@ -839,7 +880,7 @@ class TestDaemon(object):
|
||||
|
||||
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
|
||||
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
|
||||
if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
|
||||
if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
|
||||
# These have runtime computed values and will be handled
|
||||
# differently
|
||||
continue
|
||||
@ -855,7 +896,7 @@ class TestDaemon(object):
|
||||
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
|
||||
)
|
||||
|
||||
for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
|
||||
for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
|
||||
computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
|
||||
with salt.utils.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
|
||||
fp_.write(yaml.dump(computed_config, default_flow_style=False))
|
||||
@ -888,12 +929,14 @@ class TestDaemon(object):
|
||||
)
|
||||
sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
|
||||
syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
|
||||
proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
|
||||
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
|
||||
RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
|
||||
|
||||
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
|
||||
os.path.join(master_opts['pki_dir'], 'minions_pre'),
|
||||
@ -944,6 +987,7 @@ class TestDaemon(object):
|
||||
cls.sub_minion_opts = sub_minion_opts
|
||||
cls.syndic_opts = syndic_opts
|
||||
cls.syndic_master_opts = syndic_master_opts
|
||||
cls.proxy_opts = proxy_opts
|
||||
# <---- Verify Environment -----------------------------------------------------------------------------------
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
@ -952,8 +996,8 @@ class TestDaemon(object):
|
||||
'''
|
||||
self.sub_minion_process.terminate()
|
||||
self.minion_process.terminate()
|
||||
# if hasattr(self, 'proxy_process'):
|
||||
# self.proxy_process.terminate()
|
||||
if hasattr(self, 'proxy_process'):
|
||||
self.proxy_process.terminate()
|
||||
self.master_process.terminate()
|
||||
try:
|
||||
self.syndic_process.terminate()
|
||||
|
@ -14,6 +14,9 @@
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
|
||||
# Import salt libs
|
||||
import salt.ext.six as six
|
||||
|
||||
try:
|
||||
if sys.version_info >= (3,):
|
||||
# Python 3
|
||||
@ -105,82 +108,89 @@ if NO_MOCK is False:
|
||||
NO_MOCK_REASON = 'you need to upgrade your mock version to >= 0.8.0'
|
||||
|
||||
|
||||
if sys.version_info >= (3,):
|
||||
from mock import mock_open
|
||||
else:
|
||||
# backport mock_open from the python 3 unittest.mock library so that we can
|
||||
# mock read, readline, readlines, and file iteration properly
|
||||
# backport mock_open from the python 3 unittest.mock library so that we can
|
||||
# mock read, readline, readlines, and file iteration properly
|
||||
|
||||
file_spec = None
|
||||
file_spec = None
|
||||
|
||||
def _iterate_read_data(read_data):
|
||||
# Helper for mock_open:
|
||||
# Retrieve lines from read_data via a generator so that separate calls to
|
||||
# readline, read, and readlines are properly interleaved
|
||||
|
||||
def _iterate_read_data(read_data):
|
||||
# Helper for mock_open:
|
||||
# Retrieve lines from read_data via a generator so that separate calls to
|
||||
# readline, read, and readlines are properly interleaved
|
||||
if six.PY3 and isinstance(read_data, six.binary_type):
|
||||
data_as_list = ['{0}\n'.format(l.decode(__salt_system_encoding__)) for l in read_data.split(six.b('\n'))]
|
||||
else:
|
||||
data_as_list = ['{0}\n'.format(l) for l in read_data.split('\n')]
|
||||
|
||||
if data_as_list[-1] == '\n':
|
||||
# If the last line ended in a newline, the list comprehension will have an
|
||||
# extra entry that's just a newline. Remove this.
|
||||
data_as_list = data_as_list[:-1]
|
||||
else:
|
||||
# If there wasn't an extra newline by itself, then the file being
|
||||
# emulated doesn't have a newline to end the last line remove the
|
||||
# newline that our naive format() added
|
||||
data_as_list[-1] = data_as_list[-1][:-1]
|
||||
if data_as_list[-1] == '\n':
|
||||
# If the last line ended in a newline, the list comprehension will have an
|
||||
# extra entry that's just a newline. Remove this.
|
||||
data_as_list = data_as_list[:-1]
|
||||
else:
|
||||
# If there wasn't an extra newline by itself, then the file being
|
||||
# emulated doesn't have a newline to end the last line remove the
|
||||
# newline that our naive format() added
|
||||
data_as_list[-1] = data_as_list[-1][:-1]
|
||||
|
||||
for line in data_as_list:
|
||||
for line in data_as_list:
|
||||
yield line
|
||||
|
||||
|
||||
def mock_open(mock=None, read_data=''):
|
||||
"""
|
||||
A helper function to create a mock to replace the use of `open`. It works
|
||||
for `open` called directly or used as a context manager.
|
||||
|
||||
The `mock` argument is the mock object to configure. If `None` (the
|
||||
default) then a `MagicMock` will be created for you, with the API limited
|
||||
to methods or attributes available on standard file handles.
|
||||
|
||||
`read_data` is a string for the `read`, `readline`, and `readlines` methods of the
|
||||
file handle to return. This is an empty string by default.
|
||||
"""
|
||||
def _readlines_side_effect(*args, **kwargs):
|
||||
if handle.readlines.return_value is not None:
|
||||
return handle.readlines.return_value
|
||||
return list(_data)
|
||||
|
||||
def _read_side_effect(*args, **kwargs):
|
||||
if handle.read.return_value is not None:
|
||||
return handle.read.return_value
|
||||
return ''.join(_data)
|
||||
|
||||
def _readline_side_effect():
|
||||
if handle.readline.return_value is not None:
|
||||
while True:
|
||||
yield handle.readline.return_value
|
||||
for line in _data:
|
||||
yield line
|
||||
|
||||
def mock_open(mock=None, read_data=''):
|
||||
"""
|
||||
A helper function to create a mock to replace the use of `open`. It works
|
||||
for `open` called directly or used as a context manager.
|
||||
|
||||
The `mock` argument is the mock object to configure. If `None` (the
|
||||
default) then a `MagicMock` will be created for you, with the API limited
|
||||
to methods or attributes available on standard file handles.
|
||||
|
||||
`read_data` is a string for the `read`, `readline`, and `readlines` methods of the
|
||||
file handle to return. This is an empty string by default.
|
||||
"""
|
||||
def _readlines_side_effect(*args, **kwargs):
|
||||
if handle.readlines.return_value is not None:
|
||||
return handle.readlines.return_value
|
||||
return list(_data)
|
||||
|
||||
def _read_side_effect(*args, **kwargs):
|
||||
if handle.read.return_value is not None:
|
||||
return handle.read.return_value
|
||||
return ''.join(_data)
|
||||
|
||||
def _readline_side_effect():
|
||||
if handle.readline.return_value is not None:
|
||||
while True:
|
||||
yield handle.readline.return_value
|
||||
for line in _data:
|
||||
yield line
|
||||
|
||||
global file_spec
|
||||
if file_spec is None:
|
||||
global file_spec
|
||||
if file_spec is None:
|
||||
if six.PY3:
|
||||
import _io
|
||||
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
|
||||
else:
|
||||
file_spec = file # pylint: disable=undefined-variable
|
||||
|
||||
if mock is None:
|
||||
mock = MagicMock(name='open', spec=open)
|
||||
if mock is None:
|
||||
mock = MagicMock(name='open', spec=open)
|
||||
|
||||
handle = MagicMock(spec=file_spec)
|
||||
handle.__enter__.return_value = handle
|
||||
handle = MagicMock(spec=file_spec)
|
||||
handle.__enter__.return_value = handle
|
||||
|
||||
_data = _iterate_read_data(read_data)
|
||||
_data = _iterate_read_data(read_data)
|
||||
|
||||
handle.write.return_value = None
|
||||
handle.read.return_value = None
|
||||
handle.readline.return_value = None
|
||||
handle.readlines.return_value = None
|
||||
handle.write.return_value = None
|
||||
handle.read.return_value = None
|
||||
handle.readline.return_value = None
|
||||
handle.readlines.return_value = None
|
||||
|
||||
handle.read.side_effect = _read_side_effect
|
||||
handle.readline.side_effect = _readline_side_effect()
|
||||
handle.readlines.side_effect = _readlines_side_effect
|
||||
# This is salt specific and not in the upstream mock
|
||||
handle.read.side_effect = _read_side_effect
|
||||
handle.readline.side_effect = _readline_side_effect()
|
||||
handle.readlines.side_effect = _readlines_side_effect
|
||||
|
||||
mock.return_value = handle
|
||||
return mock
|
||||
mock.return_value = handle
|
||||
return mock
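A usage sketch for the backported helper (the import path assumes the test-support mock module shown above; adjust to wherever it lives locally):

import salt.utils
from mock import patch

from tests.support.mock import mock_open   # assumed path to the module above

with patch('salt.utils.fopen', mock_open(read_data='line 1\nline 2\n')):
    with salt.utils.fopen('/etc/hosts') as fp_:
        print(fp_.readline())   # -> 'line 1\n'
        print(fp_.read())       # -> 'line 2\n'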
|
||||
|
@ -57,6 +57,7 @@ TMP_CONF_DIR = os.path.join(TMP, 'config')
|
||||
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'sub-minion')
|
||||
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-minion')
|
||||
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-master')
|
||||
TMP_PROXY_CONF_DIR = os.path.join(TMP_CONF_DIR, 'proxy')
|
||||
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
|
||||
PILLAR_DIR = os.path.join(FILES, 'pillar')
|
||||
TMP_SCRIPT_DIR = os.path.join(TMP, 'scripts')
|
||||
|
@ -24,6 +24,7 @@ from pytestsalt.fixtures.daemons import SaltCall as PytestSaltCall
|
||||
from pytestsalt.fixtures.daemons import SaltMaster as PytestSaltMaster
|
||||
from pytestsalt.fixtures.daemons import SaltMinion as PytestSaltMinion
|
||||
from pytestsalt.fixtures.daemons import SaltSyndic as PytestSaltSyndic
|
||||
from pytestsalt.fixtures.daemons import SaltProxy as PytestSaltProxy
|
||||
|
||||
# Import tests support libs
|
||||
from tests.support.paths import ScriptPathMixin
|
||||
@ -87,6 +88,12 @@ class SaltRun(ScriptPathMixin, PytestSaltRun):
|
||||
super(SaltRun, self).__init__(None, *args, **kwargs)
|
||||
|
||||
|
||||
class SaltProxy(GetSaltRunFixtureMixin, PytestSaltProxy):
|
||||
'''
|
||||
Class which runs the salt-proxy daemon
|
||||
'''
|
||||
|
||||
|
||||
class SaltMinion(GetSaltRunFixtureMixin, PytestSaltMinion):
|
||||
'''
|
||||
Class which runs the salt-minion daemon
|
||||
|
@ -206,6 +206,7 @@ RUNTIME_VARS = RuntimeVars(
|
||||
TMP_CONF_DIR=paths.TMP_CONF_DIR,
|
||||
TMP_CONF_MASTER_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'master.d'),
|
||||
TMP_CONF_MINION_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'minion.d'),
|
||||
TMP_CONF_PROXY_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'proxy.d'),
|
||||
TMP_CONF_CLOUD_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'cloud.conf.d'),
|
||||
TMP_CONF_CLOUD_PROFILE_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'cloud.profiles.d'),
|
||||
TMP_CONF_CLOUD_PROVIDER_INCLUDES=os.path.join(paths.TMP_CONF_DIR, 'cloud.providers.d'),
|
||||
|
@ -254,6 +254,21 @@ class LinuxLVMTestCase(TestCase, LoaderModuleMockMixin):
|
||||
self.assertDictEqual(linux_lvm.lvcreate(None, None, None, 1),
|
||||
{'Output from lvcreate': 'A'})
|
||||
|
||||
def test_lvcreate_with_force(self):
|
||||
'''
|
||||
Test create a new logical volume, with option
|
||||
for which physical volume to be used
|
||||
'''
|
||||
mock = MagicMock(return_value='A\nB')
|
||||
with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
|
||||
with patch.object(linux_lvm, 'lvdisplay', return_value={}):
|
||||
self.assertDictEqual(linux_lvm.lvcreate(None,
|
||||
None,
|
||||
None,
|
||||
1,
|
||||
force=True),
|
||||
{'Output from lvcreate': 'A'})
|
||||
|
||||
def test_vgremove(self):
|
||||
'''
|
||||
Tests to remove an LVM volume group
|
||||
|
@ -184,6 +184,7 @@ class WinServiceTestCase(TestCase, LoaderModuleMockMixin):
|
||||
with patch.dict(win_service.__salt__, {'task.run': mock_true}):
|
||||
self.assertTrue(win_service.execute_salt_restart_task())
|
||||
|
||||
@skipIf(not WINAPI, 'win32serviceutil not available')
|
||||
def test_status(self):
|
||||
'''
|
||||
Test to return the status for a service
|
||||
|
@ -72,3 +72,126 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
                        ret.update({'comment': comt, 'result': True,
                                    'changes': {'servercert': 'new'}})
                        self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)

    def test_keys_with_expiration_days(self):
        '''
        Test to manage libvirt keys.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            name = 'sunrise'

            ret = {'name': name,
                   'result': True,
                   'comment': '',
                   'changes': {}}

            mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
                                          {'libvirt.servercert.pem': 'A'}])
            with patch.dict(virt.__salt__, {'pillar.ext': mock}):
                comt = ('All keys are correct')
                ret.update({'comment': comt})
                self.assertDictEqual(virt.keys(name,
                                               basepath=self.pki_dir,
                                               expiration_days=700), ret)

                with patch.dict(virt.__opts__, {'test': True}):
                    comt = ('Libvirt keys are set to be updated')
                    ret.update({'comment': comt, 'result': None})
                    self.assertDictEqual(virt.keys(name,
                                                   basepath=self.pki_dir,
                                                   expiration_days=700), ret)

                with patch.dict(virt.__opts__, {'test': False}):
                    with patch.object(salt.utils, 'fopen', MagicMock(mock_open())):
                        comt = ('Updated libvirt certs and keys')
                        ret.update({'comment': comt, 'result': True,
                                    'changes': {'servercert': 'new'}})
                        self.assertDictEqual(virt.keys(name,
                                                       basepath=self.pki_dir,
                                                       expiration_days=700), ret)

    def test_keys_with_state(self):
        '''
        Test to manage libvirt keys.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            name = 'sunrise'

            ret = {'name': name,
                   'result': True,
                   'comment': '',
                   'changes': {}}

            mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
                                          {'libvirt.servercert.pem': 'A'}])
            with patch.dict(virt.__salt__, {'pillar.ext': mock}):
                comt = ('All keys are correct')
                ret.update({'comment': comt})
                self.assertDictEqual(virt.keys(name,
                                               basepath=self.pki_dir,
                                               st='California'), ret)

                with patch.dict(virt.__opts__, {'test': True}):
                    comt = ('Libvirt keys are set to be updated')
                    ret.update({'comment': comt, 'result': None})
                    self.assertDictEqual(virt.keys(name,
                                                   basepath=self.pki_dir,
                                                   st='California'), ret)

                with patch.dict(virt.__opts__, {'test': False}):
                    with patch.object(salt.utils, 'fopen', MagicMock(mock_open())):
                        comt = ('Updated libvirt certs and keys')
                        ret.update({'comment': comt, 'result': True,
                                    'changes': {'servercert': 'new'}})
                        self.assertDictEqual(virt.keys(name,
                                                       basepath=self.pki_dir,
                                                       st='California'), ret)

    def test_keys_with_all_options(self):
        '''
        Test to manage libvirt keys.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            name = 'sunrise'

            ret = {'name': name,
                   'result': True,
                   'comment': '',
                   'changes': {}}

            mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
                                          {'libvirt.servercert.pem': 'A'}])
            with patch.dict(virt.__salt__, {'pillar.ext': mock}):
                comt = ('All keys are correct')
                ret.update({'comment': comt})
                self.assertDictEqual(virt.keys(name,
                                               basepath=self.pki_dir,
                                               country='USA',
                                               st='California',
                                               locality='Los_Angeles',
                                               organization='SaltStack',
                                               expiration_days=700), ret)

                with patch.dict(virt.__opts__, {'test': True}):
                    comt = ('Libvirt keys are set to be updated')
                    ret.update({'comment': comt, 'result': None})
                    self.assertDictEqual(virt.keys(name,
                                                   basepath=self.pki_dir,
                                                   country='USA',
                                                   st='California',
                                                   locality='Los_Angeles',
                                                   organization='SaltStack',
                                                   expiration_days=700), ret)

                with patch.dict(virt.__opts__, {'test': False}):
                    with patch.object(salt.utils, 'fopen', MagicMock(mock_open())):
                        comt = ('Updated libvirt certs and keys')
                        ret.update({'comment': comt, 'result': True,
                                    'changes': {'servercert': 'new'}})
                        self.assertDictEqual(virt.keys(name,
                                                       basepath=self.pki_dir,
                                                       country='USA',
                                                       st='California',
                                                       locality='Los_Angeles',
                                                       organization='SaltStack',
                                                       expiration_days=700), ret)
@ -149,6 +149,28 @@ class LvmTestCase(TestCase, LoaderModuleMockMixin):
            with patch.dict(lvm.__opts__, {'test': True}):
                self.assertDictEqual(lvm.lv_present(name), ret)

    def test_lv_present_with_force(self):
        '''
        Test to create a new logical volume with force=True
        '''
        name = '/dev/sda5'

        comt = ('Logical Volume {0} already present'.format(name))

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': comt}

        mock = MagicMock(side_effect=[True, False])
        with patch.dict(lvm.__salt__, {'lvm.lvdisplay': mock}):
            self.assertDictEqual(lvm.lv_present(name, force=True), ret)

            comt = ('Logical Volume {0} is set to be created'.format(name))
            ret.update({'comment': comt, 'result': None})
            with patch.dict(lvm.__opts__, {'test': True}):
                self.assertDictEqual(lvm.lv_present(name, force=True), ret)

    # 'lv_absent' function tests: 1

    def test_lv_absent(self):
@ -522,24 +522,36 @@ class TestCustomExtensions(TestCase):
        unique = set(dataset)
        env = Environment(extensions=[SerializerExtension])
        env.filters.update(JinjaFilter.salt_jinja_filters)
        rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
        self.assertEqual(rendered, u"{0}".format(unique))
        if six.PY3:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '")
            self.assertEqual(rendered, list(unique))
        else:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
            self.assertEqual(rendered, u"{0}".format(unique))

    def test_unique_tuple(self):
        dataset = ('foo', 'foo', 'bar')
        unique = set(dataset)
        env = Environment(extensions=[SerializerExtension])
        env.filters.update(JinjaFilter.salt_jinja_filters)
        rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
        self.assertEqual(rendered, u"{0}".format(unique))
        if six.PY3:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '")
            self.assertEqual(rendered, list(unique))
        else:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
            self.assertEqual(rendered, u"{0}".format(unique))

    def test_unique_list(self):
        dataset = ['foo', 'foo', 'bar']
        unique = ['foo', 'bar']
        env = Environment(extensions=[SerializerExtension])
        env.filters.update(JinjaFilter.salt_jinja_filters)
        rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
        self.assertEqual(rendered, u"{0}".format(unique))
        if six.PY3:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'[]").split("', '")
            self.assertEqual(rendered, unique)
        else:
            rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
            self.assertEqual(rendered, u"{0}".format(unique))

    def test_serialize_json(self):
        dataset = {
@ -97,8 +97,9 @@ class CryptTestCase(TestCase):
        salt.utils.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True)

    def test_sign_message(self):
        with patch('salt.utils.fopen', mock_open(read_data=PRIVKEY_DATA)):
            self.assertEqual(SIG, crypt.sign_message('/keydir/keyname.pem', MSG))
        key = Crypto.PublicKey.RSA.importKey(PRIVKEY_DATA)
        with patch('salt.crypt._get_rsa_key', return_value=key):
            self.assertEqual(SIG, salt.crypt.sign_message('/keydir/keyname.pem', MSG))

    def test_verify_signature(self):
        with patch('salt.utils.fopen', mock_open(read_data=PUBKEY_DATA)):
@ -984,6 +984,28 @@ class TranslateInputTestCase(TestCase):
            expected
        )

        # Ensure passing either `log_driver` or `log_opt` works
        self.assertEqual(
            docker_utils.translate_input(
                log_driver='foo'
            ),
            (
                {'log_config': {'Type': 'foo',
                                'Config': {}}},
                {}, []
            )
        )
        self.assertEqual(
            docker_utils.translate_input(
                log_opt={'foo': 'bar', 'baz': 'qux'}
            ),
            (
                {'log_config': {'Type': 'none',
                                'Config': {'foo': 'bar', 'baz': 'qux'}}},
                {}, []
            )
        )

    @assert_key_equals_value
    def test_lxc_conf(self):
        '''