Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 17:09:03 +00:00)

Commit 01535b1ac6: Merge remote-tracking branch 'upstream/develop' into issue_42162_route53boto

.github/stale.yml (4 changed lines, vendored)
@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
# 1200 is approximately 3 years and 3 months
daysUntilStale: 1200
# 1175 is approximately 3 years and 2 months
daysUntilStale: 1175

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
@@ -72,12 +72,12 @@ MOCK_MODULES = [
    'Crypto.Signature',
    'Crypto.Signature.PKCS1_v1_5',
    'M2Crypto',
    'msgpack',
    'yaml',
    'yaml.constructor',
    'yaml.nodes',
    'yaml.parser',
    'yaml.scanner',
    'salt.utils.yamlloader',
    'zmq',
    'zmq.eventloop',
    'zmq.eventloop.ioloop',
@@ -126,7 +126,6 @@ MOCK_MODULES = [
    'ClusterShell',
    'ClusterShell.NodeSet',
    'django',
    'docker',
    'libvirt',
    'MySQLdb',
    'MySQLdb.cursors',
@@ -176,7 +175,7 @@ MOCK_MODULES = [

for mod_name in MOCK_MODULES:
    if mod_name == 'psutil':
        mock = Mock(mapping={'total': 0, 'version_info': (0, 6,0)})  # Otherwise it will crash Sphinx
        mock = Mock(mapping={'total': 0})  # Otherwise it will crash Sphinx
    else:
        mock = Mock()
    sys.modules[mod_name] = mock
@@ -190,6 +190,8 @@ PATH using a :mod:`file.symlink <salt.states.file.symlink>` state.
      file.symlink:
        - target: /usr/local/bin/foo

.. _which-version:

Can I run different versions of Salt on my Master and Minion?
-------------------------------------------------------------
@@ -248,6 +248,9 @@ each of Salt's module types such as ``runners``, ``output``, ``wheel``,

    extension_modules: /root/salt_extmods

.. conf_master:: extmod_whitelist
.. conf_master:: extmod_blacklist

``extmod_whitelist/extmod_blacklist``
-------------------------------------
@@ -1352,6 +1352,9 @@ below.
    providers:
      service: systemd

.. conf_minion:: extmod_whitelist
.. conf_minion:: extmod_blacklist

``extmod_whitelist/extmod_blacklist``
-------------------------------------
@@ -74,6 +74,10 @@ state modules
    dellchassis
    disk
    docker
    docker_container
    docker_image
    docker_network
    docker_volume
    drac
    elasticsearch
    elasticsearch_index
@@ -78,6 +78,7 @@ parameters are discussed in more detail below.
    # RHEL -> ec2-user
    # CentOS -> ec2-user
    # Ubuntu -> ubuntu
    # Debian -> admin
    #
    ssh_username: ec2-user
@@ -238,6 +238,20 @@ iface_name
Optional. The name to apply to the VM's network interface. If not supplied, the
value will be set to ``<VM name>-iface0``.

dns_servers
-----------
Optional. A **list** of the DNS servers to configure for the network interface
(will be set on the VM by the DHCP of the VNET).

.. code-block:: yaml

    my-azurearm-profile:
      provider: azurearm-provider
      network: mynetwork
      dns_servers:
        - 10.1.1.4
        - 10.1.1.5

availability_set
----------------
Optional. If set, the VM will be added to the specified availability set.
@@ -371,7 +371,6 @@ both.
    compute_name: cloudServersOpenStack
    protocol: ipv4
    compute_region: DFW
    protocol: ipv4
    user: myuser
    tenant: 5555555
    password: mypass
@@ -48,6 +48,15 @@ from saltstack.com:

.. __: https://repo.saltstack.com/windows/

.. _new-pywinrm:

Self Signed Certificates with WinRM
===================================

Salt-Cloud can use versions of ``pywinrm<=0.1.1`` or ``pywinrm>=0.2.1``.

For versions greater than `0.2.1`, ``winrm_verify_ssl`` needs to be set to
`False` if the certificate is self signed and not verifiable.

Firewall Settings
=================
@@ -179,7 +188,8 @@ The default Windows user is `Administrator`, and the default Windows password
is blank.

If WinRM is to be used ``use_winrm`` needs to be set to `True`. ``winrm_port``
can be used to specify a custom port (must be HTTPS listener).
can be used to specify a custom port (must be HTTPS listener). And
``winrm_verify_ssl`` can be set to `False` to use a self signed certificate.

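To tie these options together, here is a minimal, illustrative cloud profile (the profile and provider names are placeholders, not part of this change) that connects over WinRM on the default HTTPS port and skips certificate validation for a self signed certificate:

.. code-block:: yaml

    my-windows-profile:
      provider: my-ec2-provider
      use_winrm: True
      winrm_port: 5986
      winrm_verify_ssl: False
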
Auto-Generated Passwords on EC2
@@ -34,6 +34,17 @@ The following salt-cloud drivers have known issues running with Python 3. These
Users using the `C` locale are advised to switch to a UTF-aware locale to ensure proper functionality with Salt with Python 3.


Remember to update the Salt Master first
========================================
Salt's policy has always been that when upgrading, the minion should never be
on a newer version than the master. Specifically with this update, because of
changes in the fileclient, the 2017.7 minion requires a 2017.7 master.

Backwards compatibility is still maintained, so older minions can still be used.

More information can be found in the :ref:`Salt FAQ<which-version>`


States Added for Management of systemd Unit Masking
===================================================
@@ -573,6 +584,38 @@ Using the new ``roster_order`` configuration syntax it's now possible to compose
of grains, pillar and mine data and even Salt SDB URLs.
The new release is also fully IPv4 and IPv6 enabled and even has support for CIDR ranges.

Salt-SSH Default Options
========================

Defaults for rosters can now be set, so that they don't have to be set on every
entry in a roster or specified from the commandline.

The new option is :ref:`roster_defaults<roster-defaults>` and is specified in
the master config file.

.. code-block:: yaml

    roster_defaults:
      user: daniel
      sudo: True
      priv: /root/.ssh/id_rsa
      tty: True

Blacklist or Whitelist Extmod Sync
==================================

The modules that are synced to minions can now be limited.

The following configuration options have been added for the master:

- :conf_master:`extmod_whitelist`
- :conf_master:`extmod_blacklist`

and for the minion:

- :conf_minion:`extmod_whitelist`
- :conf_minion:`extmod_blacklist`

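As a rough sketch of how such a limit might be expressed in the master config (the module names below are placeholders, and the exact schema should be checked against the ``extmod_whitelist`` documentation):

.. code-block:: yaml

    extmod_whitelist:
      modules:
        - custom_module
      states:
        - custom_state
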
Additional Features
===================

@@ -49,6 +49,12 @@ environments (i.e. ``saltenvs``) have been added:
ignore all tags and use branches only, and also to keep SHAs from being made
available as saltenvs.

Salt Cloud and Newer PyWinRM Versions
-------------------------------------

Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self
signed certificates. See :ref:`Here<new-pywinrm>` for more information.

Configuration Option Deprecations
---------------------------------
@@ -61,6 +61,8 @@ The information which can be stored in a roster ``target`` is the following:
    cmd_umask: # umask to enforce for the salt-call command. Should be in
               # octal (so for 0o077 in YAML you would do 0077, or 63)

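For illustration, a roster entry carrying this key might look like the following (the target name, host, and values are invented for this example):

.. code-block:: yaml

    managed-host:
      host: 10.0.0.5
      user: root
      cmd_umask: 0077
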
.. _roster_defaults:

Target Defaults
---------------

@@ -71,10 +73,10 @@ not need to be passed with commandline arguments.
.. code-block:: yaml

    roster_defaults:
      user: daniel
      sudo: True
      priv: /root/.ssh/id_rsa
      tty: True
      user: daniel
      sudo: True
      priv: /root/.ssh/id_rsa
      tty: True

thin_dir
--------
@@ -141,7 +141,7 @@ packages:
- 2015.8.0 and later minions: https://github.com/saltstack/salt-winrepo-ng
- Earlier releases: https://github.com/saltstack/salt-winrepo

By default, these repositories are mirrored to ``/srv/salt/win/repo_ng``
By default, these repositories are mirrored to ``/srv/salt/win/repo-ng``
and ``/srv/salt/win/repo``.

This location can be changed in the master config file by setting the
@ -67,10 +67,13 @@ If not Exist "%PyDir%\python.exe" (
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
Set "CurrDir=%cd%"
|
||||
Set "BinDir=%cd%\buildenv\bin"
|
||||
Set "InsDir=%cd%\installer"
|
||||
Set "PreDir=%cd%\prereqs"
|
||||
Set "CurDir=%~dp0"
|
||||
Set "BldDir=%CurDir%\buildenv"
|
||||
Set "BinDir=%CurDir%\buildenv\bin"
|
||||
Set "CnfDir=%CurDir%\buildenv\conf"
|
||||
Set "InsDir=%CurDir%\installer"
|
||||
Set "PreDir=%CurDir%\prereqs"
|
||||
for /f "delims=" %%a in ('git rev-parse --show-toplevel') do @set "SrcDir=%%a"
|
||||
|
||||
:: Find the NSIS Installer
|
||||
If Exist "C:\Program Files\NSIS\" (
|
||||
@ -101,6 +104,15 @@ If Exist "%BinDir%\" (
|
||||
xcopy /E /Q "%PyDir%" "%BinDir%\"
|
||||
@echo.
|
||||
|
||||
:: Copy the default master and minion configs to buildenv\conf
|
||||
@echo Copying configs to buildenv\conf...
|
||||
@echo ----------------------------------------------------------------------
|
||||
@echo xcopy /E /Q "%SrcDir%\conf\master" "%CnfDir%\"
|
||||
xcopy /Q "%SrcDir%\conf\master" "%CnfDir%\"
|
||||
@echo xcopy /E /Q "%SrcDir%\conf\minion" "%CnfDir%\"
|
||||
xcopy /Q "%SrcDir%\conf\minion" "%CnfDir%\"
|
||||
@echo.
|
||||
|
||||
@echo Copying VCRedist to Prerequisites
|
||||
@echo ----------------------------------------------------------------------
|
||||
:: Make sure the "prereq" directory exists
|
||||
@ -127,12 +139,12 @@ If Defined ProgramFiles(x86) (
|
||||
:: Remove the fixed path in .exe files
|
||||
@echo Removing fixed path from .exe files
|
||||
@echo ----------------------------------------------------------------------
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\easy_install.exe"
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\easy_install-%PyVerMajor%.%PyVerMinor%.exe"
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\pip.exe"
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\pip%PyVerMajor%.%PyVerMinor%.exe"
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\pip%PyVerMajor%.exe"
|
||||
"%PyDir%\python" "%CurrDir%\portable.py" -f "%BinDir%\Scripts\wheel.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\easy_install.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\easy_install-%PyVerMajor%.%PyVerMinor%.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\pip.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\pip%PyVerMajor%.%PyVerMinor%.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\pip%PyVerMajor%.exe"
|
||||
"%PyDir%\python" "%CurDir%\portable.py" -f "%BinDir%\Scripts\wheel.exe"
|
||||
@echo.
|
||||
|
||||
@echo Cleaning up unused files and directories...
|
||||
@ -534,12 +546,6 @@ If Exist "%BinDir%\Lib\site-packages\salt\states\zpool.py"^
|
||||
:: Remove Unneeded Components
|
||||
If Exist "%BinDir%\Lib\site-packages\salt\cloud"^
|
||||
rd /S /Q "%BinDir%\Lib\site-packages\salt\cloud" 1>nul
|
||||
If Exist "%BinDir%\Scripts\salt-key*"^
|
||||
del /Q "%BinDir%\Scripts\salt-key*" 1>nul
|
||||
If Exist "%BinDir%\Scripts\salt-master*"^
|
||||
del /Q "%BinDir%\Scripts\salt-master*" 1>nul
|
||||
If Exist "%BinDir%\Scripts\salt-run*"^
|
||||
del /Q "%BinDir%\Scripts\salt-run*" 1>nul
|
||||
If Exist "%BinDir%\Scripts\salt-unity*"^
|
||||
del /Q "%BinDir%\Scripts\salt-unity*" 1>nul
|
||||
|
||||
@ -547,6 +553,36 @@ If Exist "%BinDir%\Scripts\salt-unity*"^
|
||||
|
||||
@echo Building the installer...
|
||||
@echo ----------------------------------------------------------------------
|
||||
:: Make the Master installer if the nullsoft script exists
|
||||
If Exist "%InsDir%\Salt-Setup.nsi"^
|
||||
makensis.exe /DSaltVersion=%Version% /DPythonVersion=%Python% "%InsDir%\Salt-Setup.nsi"
|
||||
|
||||
:: Remove files not needed for Salt Minion
|
||||
:: salt
|
||||
:: salt has to be removed individually (can't wildcard it)
|
||||
If Exist "%BinDir%\Scripts\salt"^
|
||||
del /Q "%BinDir%\Scripts\salt" 1>nul
|
||||
If Exist "%BinDir%\Scripts\salt.exe"^
|
||||
del /Q "%BinDir%\Scripts\salt.exe" 1>nul
|
||||
If Exist "%BldDir%\salt.bat"^
|
||||
del /Q "%BldDir%\salt.bat" 1>nul
|
||||
:: salt-key
|
||||
If Exist "%BinDir%\Scripts\salt-key*"^
|
||||
del /Q "%BinDir%\Scripts\salt-key*" 1>nul
|
||||
If Exist "%BldDir%\salt-key.bat"^
|
||||
del /Q "%BldDir%\salt-key.bat" 1>nul
|
||||
:: salt-master
|
||||
If Exist "%BinDir%\Scripts\salt-master*"^
|
||||
del /Q "%BinDir%\Scripts\salt-master*" 1>nul
|
||||
If Exist "%BldDir%\salt-master.bat"^
|
||||
del /Q "%BldDir%\salt-master.bat" 1>nul
|
||||
:: salt-run
|
||||
If Exist "%BinDir%\Scripts\salt-run*"^
|
||||
del /Q "%BinDir%\Scripts\salt-run*" 1>nul
|
||||
If Exist "%BldDir%\salt-run.bat"^
|
||||
del /Q "%BldDir%\salt-run.bat" 1>nul
|
||||
|
||||
:: Make the Salt Minion Installer
|
||||
makensis.exe /DSaltVersion=%Version% /DPythonVersion=%Python% "%InsDir%\Salt-Minion-Setup.nsi"
|
||||
@echo.
|
||||
|
||||
|
@ -1,402 +0,0 @@
|
||||
##### Primary configuration settings #####
|
||||
##########################################
|
||||
|
||||
ipc_mode: tcp
|
||||
|
||||
# Per default the minion will automatically include all config files
|
||||
# from minion.d/*.conf (minion.d is a directory in the same directory
|
||||
# as the main minion config file).
|
||||
#default_include: minion.d/*.conf
|
||||
|
||||
# Set the location of the salt master server, if the master server cannot be
|
||||
# resolved, then the minion will fail to start.
|
||||
# test
|
||||
#master: salt
|
||||
|
||||
# Set the number of seconds to wait before attempting to resolve
|
||||
# the master hostname if name resolution fails. Defaults to 30 seconds.
|
||||
# Set to zero if the minion should shutdown and not retry.
|
||||
# retry_dns: 30
|
||||
|
||||
# Set the port used by the master reply and authentication server
|
||||
#master_port: 4506
|
||||
|
||||
# The user to run salt
|
||||
#user: root
|
||||
|
||||
# Specify the location of the daemon process ID file
|
||||
#pidfile: /var/run/salt-minion.pid
|
||||
|
||||
# The root directory prepended to these options: pki_dir, cachedir, log_file,
|
||||
# sock_dir, pidfile.
|
||||
root_dir: c:\salt
|
||||
|
||||
# The directory to store the pki information in
|
||||
#pki_dir: /etc/salt/pki/minion
|
||||
pki_dir: /conf/pki/minion
|
||||
|
||||
# Explicitly declare the id for this minion to use, if left commented the id
|
||||
# will be the hostname as returned by the python call: socket.getfqdn()
|
||||
# Since salt uses detached ids it is possible to run multiple minions on the
|
||||
# same machine but with different ids, this can be useful for salt compute
|
||||
# clusters.
|
||||
#id:
|
||||
|
||||
# Append a domain to a hostname in the event that it does not exist. This is
|
||||
# useful for systems where socket.getfqdn() does not actually result in a
|
||||
# FQDN (for instance, Solaris).
|
||||
#append_domain:
|
||||
|
||||
# Custom static grains for this minion can be specified here and used in SLS
|
||||
# files just like all other grains. This example sets 4 custom grains, with
|
||||
# the 'roles' grain having two values that can be matched against:
|
||||
#grains:
|
||||
# roles:
|
||||
# - webserver
|
||||
# - memcache
|
||||
# deployment: datacenter4
|
||||
# cabinet: 13
|
||||
# cab_u: 14-15
|
||||
|
||||
# Where cache data goes
|
||||
#cachedir: /var/cache/salt/minion
|
||||
|
||||
# Verify and set permissions on configuration directories at startup
|
||||
#verify_env: True
|
||||
|
||||
# The minion can locally cache the return data from jobs sent to it, this
|
||||
# can be a good way to keep track of jobs the minion has executed
|
||||
# (on the minion side). By default this feature is disabled, to enable
|
||||
# set cache_jobs to True
|
||||
#cache_jobs: False
|
||||
|
||||
# set the directory used to hold unix sockets
|
||||
#sock_dir: /var/run/salt/minion
|
||||
|
||||
# Backup files that are replaced by file.managed and file.recurse under
|
||||
# 'cachedir'/file_backups relative to their original location and appended
|
||||
# with a timestamp. The only valid setting is "minion". Disabled by default.
|
||||
#
|
||||
# Alternatively this can be specified for each file in state files:
|
||||
#
|
||||
# /etc/ssh/sshd_config:
|
||||
# file.managed:
|
||||
# - source: salt://ssh/sshd_config
|
||||
# - backup: minion
|
||||
#
|
||||
#backup_mode: minion
|
||||
|
||||
# When waiting for a master to accept the minion's public key, salt will
|
||||
# continuously attempt to reconnect until successful. This is the time, in
|
||||
# seconds, between those reconnection attempts.
|
||||
#acceptance_wait_time: 10
|
||||
|
||||
# If this is set, the time between reconnection attempts will increase by
|
||||
# acceptance_wait_time seconds per iteration, up to this maximum. If this
|
||||
# is not set, the time between reconnection attempts will stay constant.
|
||||
#acceptance_wait_time_max: None
|
||||
|
||||
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
|
||||
# process communications. Set ipc_mode to 'tcp' on such systems
|
||||
#ipc_mode: ipc
|
||||
#
|
||||
# Overwrite the default tcp ports used by the minion when in tcp mode
|
||||
#tcp_pub_port: 4510
|
||||
#tcp_pull_port: 4511
|
||||
|
||||
# The minion can include configuration from other files. To enable this,
|
||||
# pass a list of paths to this option. The paths can be either relative or
|
||||
# absolute; if relative, they are considered to be relative to the directory
|
||||
# the main minion configuration file lives in (this file). Paths can make use
|
||||
# of shell-style globbing. If no files are matched by a path passed to this
|
||||
# option then the minion will log a warning message.
|
||||
#
|
||||
#
|
||||
# Include a config file from some other path:
|
||||
# include: /etc/salt/extra_config
|
||||
#
|
||||
# Include config from several files and directories:
|
||||
# include:
|
||||
# - /etc/salt/extra_config
|
||||
# - /etc/roles/webserver
|
||||
|
||||
##### Minion module management #####
|
||||
##########################################
|
||||
# Disable specific modules. This allows the admin to limit the level of
|
||||
# access the master has to the minion
|
||||
#disable_modules: [cmd,test]
|
||||
#disable_returners: []
|
||||
#
|
||||
# Modules can be loaded from arbitrary paths. This enables the easy deployment
|
||||
# of third party modules. Modules for returners and minions can be loaded.
|
||||
# Specify a list of extra directories to search for minion modules and
|
||||
# returners. These paths must be fully qualified!
|
||||
#module_dirs: []
|
||||
#returner_dirs: []
|
||||
#states_dirs: []
|
||||
#render_dirs: []
|
||||
#
|
||||
# A module provider can be statically overwritten or extended for the minion
|
||||
# via the providers option, in this case the default module will be
|
||||
# overwritten by the specified module. In this example the pkg module will
|
||||
# be provided by the yumpkg5 module instead of the system default.
|
||||
#
|
||||
# providers:
|
||||
# pkg: yumpkg5
|
||||
#
|
||||
# Enable Cython modules searching and loading. (Default: False)
|
||||
#cython_enable: False
|
||||
#
|
||||
|
||||
##### State Management Settings #####
|
||||
###########################################
|
||||
# The state management system executes all of the state templates on the minion
|
||||
# to enable more granular control of system state management. The type of
|
||||
# template and serialization used for state management needs to be configured
|
||||
# on the minion, the default renderer is yaml_jinja. This is a yaml file
|
||||
# rendered from a jinja template, the available options are:
|
||||
# yaml_jinja
|
||||
# yaml_mako
|
||||
# yaml_wempy
|
||||
# json_jinja
|
||||
# json_mako
|
||||
# json_wempy
|
||||
#
|
||||
#renderer: yaml_jinja
|
||||
#
|
||||
# The failhard option tells the minions to stop immediately after the first
|
||||
# failure detected in the state execution, defaults to False
|
||||
#failhard: False
|
||||
#
|
||||
# autoload_dynamic_modules Turns on automatic loading of modules found in the
|
||||
# environments on the master. This is turned on by default, to turn of
|
||||
# autoloading modules when states run set this value to False
|
||||
#autoload_dynamic_modules: True
|
||||
#
|
||||
# clean_dynamic_modules keeps the dynamic modules on the minion in sync with
|
||||
# the dynamic modules on the master, this means that if a dynamic module is
|
||||
# not on the master it will be deleted from the minion. By default this is
|
||||
# enabled and can be disabled by changing this value to False
|
||||
#clean_dynamic_modules: True
|
||||
#
|
||||
# Normally the minion is not isolated to any single environment on the master
|
||||
# when running states, but the environment can be isolated on the minion side
|
||||
# by statically setting it. Remember that the recommended way to manage
|
||||
# environments is to isolate via the top file.
|
||||
#environment: None
|
||||
#
|
||||
# If using the local file directory, then the state top file name needs to be
|
||||
# defined, by default this is top.sls.
|
||||
#state_top: top.sls
|
||||
#
|
||||
# Run states when the minion daemon starts. To enable, set startup_states to:
|
||||
# 'highstate' -- Execute state.highstate
|
||||
# 'sls' -- Read in the sls_list option and execute the named sls files
|
||||
# 'top' -- Read top_file option and execute based on that file on the Master
|
||||
#startup_states: ''
|
||||
#
|
||||
# list of states to run when the minion starts up if startup_states is 'sls'
|
||||
#sls_list:
|
||||
# - edit.vim
|
||||
# - hyper
|
||||
#
|
||||
# top file to execute if startup_states is 'top'
|
||||
#top_file: ''
|
||||
|
||||
##### File Directory Settings #####
|
||||
##########################################
|
||||
# The Salt Minion can redirect all file server operations to a local directory,
|
||||
# this allows for the same state tree that is on the master to be used if
|
||||
# copied completely onto the minion. This is a literal copy of the settings on
|
||||
# the master but used to reference a local directory on the minion.
|
||||
|
||||
# Set the file client, the client defaults to looking on the master server for
|
||||
# files, but can be directed to look at the local file directory setting
|
||||
# defined below by setting it to local.
|
||||
#file_client: remote
|
||||
|
||||
# The file directory works on environments passed to the minion, each environment
|
||||
# can have multiple root directories, the subdirectories in the multiple file
|
||||
# roots cannot match, otherwise the downloaded files will not be able to be
|
||||
# reliably ensured. A base environment is required to house the top file.
|
||||
# Example:
|
||||
# file_roots:
|
||||
# base:
|
||||
# - /srv/salt/
|
||||
# dev:
|
||||
# - /srv/salt/dev/services
|
||||
# - /srv/salt/dev/states
|
||||
# prod:
|
||||
# - /srv/salt/prod/services
|
||||
# - /srv/salt/prod/states
|
||||
#
|
||||
# Default:
|
||||
#file_roots:
|
||||
# base:
|
||||
# - /srv/salt
|
||||
|
||||
# The hash_type is the hash to use when discovering the hash of a file in
|
||||
# the minion directory, the default is md5, but sha1, sha224, sha256, sha384
|
||||
# and sha512 are also supported.
|
||||
#hash_type: md5
|
||||
|
||||
# The Salt pillar is searched for locally if file_client is set to local. If
|
||||
# this is the case, and pillar data is defined, then the pillar_roots need to
|
||||
# also be configured on the minion:
|
||||
#pillar_roots:
|
||||
# base:
|
||||
# - /srv/pillar
|
||||
|
||||
###### Security settings #####
|
||||
###########################################
|
||||
# Enable "open mode", this mode still maintains encryption, but turns off
|
||||
# authentication, this is only intended for highly secure environments or for
|
||||
# the situation where your keys end up in a bad state. If you run in open mode
|
||||
# you do so at your own risk!
|
||||
#open_mode: False
|
||||
|
||||
# Enable permissive access to the salt keys. This allows you to run the
|
||||
# master or minion as root, but have a non-root group be given access to
|
||||
# your pki_dir. To make the access explicit, root must belong to the group
|
||||
# you've given access to. This is potentially quite insecure.
|
||||
#permissive_pki_access: False
|
||||
|
||||
# The state_verbose and state_output settings can be used to change the way
|
||||
# state system data is printed to the display. By default all data is printed.
|
||||
# The state_verbose setting can be set to True or False, when set to False
|
||||
# all data that has a result of True and no changes will be suppressed.
|
||||
#state_verbose: True
|
||||
#
|
||||
# The state_output setting changes if the output is the full multi line
|
||||
# output for each changed state if set to 'full', but if set to 'terse'
|
||||
# the output will be shortened to a single line.
|
||||
#state_output: full
|
||||
#
|
||||
# Fingerprint of the master public key to double verify the master is valid,
|
||||
# the master fingerprint can be found by running "salt-key -F master" on the
|
||||
# salt master.
|
||||
#master_finger: ''
|
||||
|
||||
###### Thread settings #####
|
||||
###########################################
|
||||
# Disable multiprocessing support, by default when a minion receives a
|
||||
# publication a new process is spawned and the command is executed therein.
|
||||
# multiprocessing: True
|
||||
|
||||
###### Logging settings #####
|
||||
###########################################
|
||||
# The location of the minion log file.
|
||||
# This can be a path for the log file, or, this can be, since 0.11.0, a system
|
||||
# logger address, for example:
|
||||
# tcp://localhost:514/LOG_USER
|
||||
# tcp://localhost/LOG_DAEMON
|
||||
# udp://localhost:5145/LOG_KERN
|
||||
# udp://localhost
|
||||
# file:///dev/log
|
||||
# file:///dev/log/LOG_SYSLOG
|
||||
# file:///dev/log/LOG_DAEMON
|
||||
#
|
||||
# The above examples are self explanatory, but:
|
||||
# <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
|
||||
#
|
||||
# Make sure you have a properly configured syslog or you won't get any warnings
|
||||
#
|
||||
#log_file: /var/log/salt/minion
|
||||
#
|
||||
#
|
||||
# The level of messages to send to the console.
|
||||
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
|
||||
# Default: 'warning'
|
||||
#log_level: warning
|
||||
#
|
||||
# The level of messages to send to the log file.
|
||||
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
|
||||
# Default: 'warning'
|
||||
#log_level_logfile:
|
||||
#
|
||||
# The date and time format used in log messages. Allowed date/time formatting
|
||||
# can be seen on http://docs.python.org/library/time.html#time.strftime
|
||||
#log_datefmt: '%H:%M:%S'
|
||||
#log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
|
||||
#
|
||||
# The format of the console logging messages. Allowed formatting options can
|
||||
# be seen on http://docs.python.org/library/logging.html#logrecord-attributes
|
||||
#log_fmt_console: '[%(levelname)-8s] %(message)s'
|
||||
#log_fmt_logfile: '%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s] %(message)s'
|
||||
#
|
||||
# Logger levels can be used to tweak specific loggers logging levels.
|
||||
# For example, if you want to have the salt library at the 'warning' level,
|
||||
# but you still wish to have 'salt.modules' at the 'debug' level:
|
||||
# log_granular_levels: {
|
||||
# 'salt': 'warning',
|
||||
# 'salt.modules': 'debug'
|
||||
# }
|
||||
#
|
||||
#log_granular_levels: {}
|
||||
|
||||
###### Module configuration #####
|
||||
###########################################
|
||||
# Salt allows for modules to be passed arbitrary configuration data, any data
|
||||
# passed here in valid yaml format will be passed on to the salt minion modules
|
||||
# for use. It is STRONGLY recommended that a naming convention be used in which
|
||||
# the module name is followed by a . and then the value. Also, all top level
|
||||
# data must be applied via the yaml dict construct, some examples:
|
||||
#
|
||||
# You can specify that all modules should run in test mode:
|
||||
#test: True
|
||||
#
|
||||
# A simple value for the test module:
|
||||
#test.foo: foo
|
||||
#
|
||||
# A list for the test module:
|
||||
#test.bar: [baz,quo]
|
||||
#
|
||||
# A dict for the test module:
|
||||
#test.baz: {spam: sausage, cheese: bread}
|
||||
|
||||
|
||||
###### Update settings ######
|
||||
###########################################
|
||||
# Using the features in Esky, a salt minion can both run as a frozen app and
|
||||
# be updated on the fly. These options control how the update process
|
||||
# (saltutil.update()) behaves.
|
||||
#
|
||||
# The url for finding and downloading updates. Disabled by default.
|
||||
#update_url: False
|
||||
#
|
||||
# The list of services to restart after a successful update. Empty by default.
|
||||
#update_restart_services: []
|
||||
|
||||
|
||||
###### Keepalive settings ######
|
||||
############################################
|
||||
# ZeroMQ now includes support for configuring SO_KEEPALIVE if supported by
|
||||
# the OS. If connections between the minion and the master pass through
|
||||
# a state tracking device such as a firewall or VPN gateway, there is
|
||||
# the risk that it could tear down the connection the master and minion
|
||||
# without informing either party that their connection has been taken away.
|
||||
# Enabling TCP Keepalives prevents this from happening.
|
||||
#
|
||||
# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
|
||||
# or leave to the OS defaults (-1), on Linux, typically disabled. Default True, enabled.
|
||||
#tcp_keepalive: True
|
||||
#
|
||||
# How long before the first keepalive should be sent in seconds. Default 300
|
||||
# to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds
|
||||
# on Linux see /proc/sys/net/ipv4/tcp_keepalive_time.
|
||||
#tcp_keepalive_idle: 300
|
||||
#
|
||||
# How many lost probes are needed to consider the connection lost. Default -1
|
||||
# to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes.
|
||||
#tcp_keepalive_cnt: -1
|
||||
#
|
||||
# How often, in seconds, to send keepalives after the first one. Default -1 to
|
||||
# use OS defaults, typically 75 seconds on Linux, see
|
||||
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
|
||||
#tcp_keepalive_intvl: -1
|
||||
|
||||
|
||||
###### Windows Software settings ######
|
||||
############################################
|
||||
# Location of the repository cache file on the master
|
||||
# win_repo_cachefile: 'salt://win/repo/winrepo.p'
|
pkg/windows/buildenv/salt.bat (new file, 13 lines)
@@ -0,0 +1,13 @@
@ echo off
:: Script for starting the Salt CLI
:: Accepts all parameters that Salt CLI accepts

:: Define Variables
Set SaltDir=%~dp0
Set SaltDir=%SaltDir:~0,-1%
Set Python=%SaltDir%\bin\python.exe
Set Script=%SaltDir%\bin\Scripts\salt

:: Launch Script
"%Python%" "%Script%" %*
salt/cache/__init__.py (1 changed line, vendored)
@@ -77,6 +77,7 @@ class Cache(object):
        self.serial = Serial(opts)
        self._modules = None
        self._kwargs = kwargs
        self._kwargs['cachedir'] = self.cachedir

    def __lazy_init(self):
        self._modules = salt.loader.cache(self.opts, self.serial)
@ -103,6 +103,7 @@ try:
|
||||
from azure.mgmt.network.models import (
|
||||
IPAllocationMethod,
|
||||
NetworkInterface,
|
||||
NetworkInterfaceDnsSettings,
|
||||
NetworkInterfaceIPConfiguration,
|
||||
NetworkSecurityGroup,
|
||||
PublicIPAddress,
|
||||
@ -911,6 +912,17 @@ def create_interface(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
)
|
||||
]
|
||||
|
||||
dns_settings = None
|
||||
if kwargs.get('dns_servers') is not None:
|
||||
if isinstance(kwargs['dns_servers'], list):
|
||||
dns_settings = NetworkInterfaceDnsSettings(
|
||||
dns_servers=kwargs['dns_servers'],
|
||||
applied_dns_servers=kwargs['dns_servers'],
|
||||
internal_dns_name_label=None,
|
||||
internal_fqdn=None,
|
||||
internal_domain_name_suffix=None,
|
||||
)
|
||||
|
||||
network_security_group = None
|
||||
if kwargs.get('security_group') is not None:
|
||||
network_security_group = netconn.network_security_groups.get(
|
||||
@ -922,6 +934,7 @@ def create_interface(call=None, kwargs=None): # pylint: disable=unused-argument
|
||||
location=kwargs['location'],
|
||||
network_security_group=network_security_group,
|
||||
ip_configurations=ip_configurations,
|
||||
dns_settings=dns_settings,
|
||||
)
|
||||
|
||||
poller = netconn.network_interfaces.create_or_update(
|
||||
@ -1289,6 +1302,15 @@ def destroy(name, conn=None, call=None, kwargs=None): # pylint: disable=unused-
|
||||
'-a or --action.'
|
||||
)
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
||||
global compconn # pylint: disable=global-statement,invalid-name
|
||||
if not compconn:
|
||||
compconn = get_conn()
|
||||
@ -1382,6 +1404,15 @@ def destroy(name, conn=None, call=None, kwargs=None): # pylint: disable=unused-
|
||||
)
|
||||
)
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -2328,6 +2328,9 @@ def wait_for_instance(
|
||||
use_winrm = config.get_cloud_config_value(
|
||||
'use_winrm', vm_, __opts__, default=False
|
||||
)
|
||||
winrm_verify_ssl = config.get_cloud_config_value(
|
||||
'winrm_verify_ssl', vm_, __opts__, default=True
|
||||
)
|
||||
|
||||
if win_passwd and win_passwd == 'auto':
|
||||
log.debug('Waiting for auto-generated Windows EC2 password')
|
||||
@ -2399,7 +2402,8 @@ def wait_for_instance(
|
||||
winrm_port,
|
||||
username,
|
||||
win_passwd,
|
||||
timeout=ssh_connect_timeout):
|
||||
timeout=ssh_connect_timeout,
|
||||
verify=winrm_verify_ssl):
|
||||
raise SaltCloudSystemExit(
|
||||
'Failed to authenticate against remote windows host'
|
||||
)
|
||||
|
@ -22,6 +22,7 @@ import logging
|
||||
import locale
|
||||
import uuid
|
||||
from errno import EACCES, EPERM
|
||||
import datetime
|
||||
|
||||
__proxyenabled__ = ['*']
|
||||
__FQDN__ = None
|
||||
@ -1790,12 +1791,14 @@ def ip_fqdn():
|
||||
ret[key] = []
|
||||
else:
|
||||
try:
|
||||
start_time = datetime.datetime.utcnow()
|
||||
info = socket.getaddrinfo(_fqdn, None, socket_type)
|
||||
ret[key] = list(set(item[4][0] for item in info))
|
||||
except socket.error:
|
||||
if __opts__['__role'] == 'master':
|
||||
log.warning('Unable to find IPv{0} record for "{1}" causing a 10 second timeout when rendering grains. '
|
||||
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn))
|
||||
timediff = datetime.datetime.utcnow() - start_time
|
||||
if timediff.seconds > 5 and __opts__['__role'] == 'master':
|
||||
log.warning('Unable to find IPv{0} record for "{1}" causing a {2} second timeout when rendering grains. '
|
||||
'Set the dns or /etc/hosts for IPv{0} to clear this.'.format(ipv_num, _fqdn, timediff))
|
||||
ret[key] = []
|
||||
|
||||
return ret
|
||||
|
@ -1370,12 +1370,21 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
(importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
|
||||
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
|
||||
]
|
||||
file_finder = importlib.machinery.FileFinder(fpath, *loader_details)
|
||||
file_finder = importlib.machinery.FileFinder(
|
||||
fpath_dirname,
|
||||
*loader_details
|
||||
)
|
||||
spec = file_finder.find_spec(mod_namespace)
|
||||
if spec is None:
|
||||
raise ImportError()
|
||||
mod = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(mod)
|
||||
# TODO: Get rid of load_module in favor of
|
||||
# exec_module below. load_module is deprecated, but
|
||||
# loading using exec_module has been causing odd things
|
||||
# with the magic dunders we pack into the loaded
|
||||
# modules, most notably with salt-ssh's __opts__.
|
||||
mod = spec.loader.load_module()
|
||||
# mod = importlib.util.module_from_spec(spec)
|
||||
# spec.loader.exec_module(mod)
|
||||
# pylint: enable=no-member
|
||||
sys.modules[mod_namespace] = mod
|
||||
else:
|
||||
@ -1392,8 +1401,14 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
)
|
||||
if spec is None:
|
||||
raise ImportError()
|
||||
mod = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(mod)
|
||||
# TODO: Get rid of load_module in favor of
|
||||
# exec_module below. load_module is deprecated, but
|
||||
# loading using exec_module has been causing odd things
|
||||
# with the magic dunders we pack into the loaded
|
||||
# modules, most notably with salt-ssh's __opts__.
|
||||
mod = spec.loader.load_module()
|
||||
#mod = importlib.util.module_from_spec(spec)
|
||||
#spec.loader.exec_module(mod)
|
||||
# pylint: enable=no-member
|
||||
sys.modules[mod_namespace] = mod
|
||||
else:
|
||||
|
@ -543,10 +543,25 @@ def _run(cmd,
|
||||
try:
|
||||
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError(
|
||||
msg = (
|
||||
'Unable to run command \'{0}\' with the context \'{1}\', '
|
||||
'reason: {2}'.format(cmd, kwargs, exc)
|
||||
'reason: '.format(
|
||||
cmd if _check_loglevel(output_loglevel) is not None
|
||||
else 'REDACTED',
|
||||
kwargs
|
||||
)
|
||||
)
|
||||
try:
|
||||
if exc.filename is None:
|
||||
msg += 'command not found'
|
||||
else:
|
||||
msg += '{0}: {1}'.format(exc, exc.filename)
|
||||
except AttributeError:
|
||||
# Both IOError and OSError have the filename attribute, so this
|
||||
# is a precaution in case the exception classes in the previous
|
||||
# try/except are changed.
|
||||
msg += 'unknown'
|
||||
raise CommandExecutionError(msg)
|
||||
|
||||
try:
|
||||
proc.run()
|
||||
|
@ -60,17 +60,22 @@ def __virtual__():
|
||||
return (False, 'The debbuild module could not be loaded: unsupported OS family')
|
||||
|
||||
|
||||
def _check_repo_sign_utils_support():
|
||||
util_name = 'debsign'
|
||||
if salt.utils.which(util_name):
|
||||
def _check_repo_sign_utils_support(name):
|
||||
'''
|
||||
Check for specified command name in search path
|
||||
'''
|
||||
if salt.utils.which(name):
|
||||
return True
|
||||
else:
|
||||
raise CommandExecutionError(
|
||||
'utility \'{0}\' needs to be installed'.format(util_name)
|
||||
'utility \'{0}\' needs to be installed or made available in search path'.format(name)
|
||||
)
|
||||
|
||||
|
||||
def _check_repo_gpg_phrase_utils_support():
|
||||
'''
|
||||
Check for /usr/lib/gnupg2/gpg-preset-passphrase is installed
|
||||
'''
|
||||
util_name = '/usr/lib/gnupg2/gpg-preset-passphrase'
|
||||
if __salt__['file.file_exists'](util_name):
|
||||
return True
|
||||
@ -170,8 +175,8 @@ def _get_repo_dists_env(env):
|
||||
'ORIGIN': ('O', 'Origin', 'SaltStack'),
|
||||
'LABEL': ('O', 'Label', 'salt_debian'),
|
||||
'SUITE': ('O', 'Suite', 'stable'),
|
||||
'VERSION': ('O', 'Version', '8.1'),
|
||||
'CODENAME': ('M', 'Codename', 'jessie'),
|
||||
'VERSION': ('O', 'Version', '9.0'),
|
||||
'CODENAME': ('M', 'Codename', 'stretch'),
|
||||
'ARCHS': ('M', 'Architectures', 'i386 amd64 source'),
|
||||
'COMPONENTS': ('M', 'Components', 'main'),
|
||||
'DESCRIPTION': ('O', 'Description', 'SaltStack debian package repo'),
|
||||
@ -205,7 +210,7 @@ def _get_repo_dists_env(env):
|
||||
else:
|
||||
env_dists += '{0}: {1}\n'.format(key, value)
|
||||
|
||||
## ensure mandatories are included
|
||||
# ensure mandatories are included
|
||||
env_keys = list(env.keys())
|
||||
for key in env_keys:
|
||||
if key in dflts_keys and dflts_dict[key][0] == 'M' and key not in env_man_seen:
|
||||
@ -312,7 +317,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
|
||||
for src in sources:
|
||||
_get_src(tree_base, src, saltenv)
|
||||
|
||||
#.dsc then assumes sources already build
|
||||
# .dsc then assumes sources already build
|
||||
if spec_pathfile.endswith('.dsc'):
|
||||
for efile in os.listdir(tree_base):
|
||||
full = os.path.join(tree_base, efile)
|
||||
@ -578,7 +583,8 @@ def make_repo(repodir,
|
||||
with salt.utils.fopen(repoconfopts, 'w') as fow:
|
||||
fow.write('{0}'.format(repocfg_opts))
|
||||
|
||||
local_fingerprint = None
|
||||
local_keygrip_to_use = None
|
||||
local_key_fingerprint = None
|
||||
local_keyid = None
|
||||
phrase = ''
|
||||
|
||||
@ -587,17 +593,14 @@ def make_repo(repodir,
|
||||
gpg_tty_info_file = '{0}/gpg-tty-info-salt'.format(gnupghome)
|
||||
gpg_tty_info_dict = {}
|
||||
|
||||
# test if using older than gnupg 2.1, env file exists
|
||||
# if using older than gnupg 2.1, then env file exists
|
||||
older_gnupg = __salt__['file.file_exists'](gpg_info_file)
|
||||
|
||||
# interval of 0.125 is really too fast on some systems
|
||||
interval = 0.5
|
||||
|
||||
if keyid is not None:
|
||||
with salt.utils.fopen(repoconfdist, 'a') as fow:
|
||||
fow.write('SignWith: {0}\n'.format(keyid))
|
||||
|
||||
## import_keys
|
||||
# import_keys
|
||||
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
|
||||
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
|
||||
|
||||
@ -621,21 +624,37 @@ def make_repo(repodir,
|
||||
local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
|
||||
for gpg_key in local_keys:
|
||||
if keyid == gpg_key['keyid'][8:]:
|
||||
local_fingerprint = gpg_key['fingerprint']
|
||||
local_keygrip_to_use = gpg_key['fingerprint']
|
||||
local_key_fingerprint = gpg_key['fingerprint']
|
||||
local_keyid = gpg_key['keyid']
|
||||
break
|
||||
|
||||
if not older_gnupg:
|
||||
_check_repo_sign_utils_support('gpg2')
|
||||
cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.which('gpg2'))
|
||||
local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas)
|
||||
local_keys2 = iter(local_keys2_keygrip.splitlines())
|
||||
try:
|
||||
for line in local_keys2:
|
||||
if line.startswith('sec'):
|
||||
line_fingerprint = next(local_keys2).lstrip().rstrip()
|
||||
if local_key_fingerprint == line_fingerprint:
|
||||
lkeygrip = next(local_keys2).split('=')
|
||||
local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
|
||||
break
|
||||
except StopIteration:
|
||||
raise SaltInvocationError(
|
||||
'unable to find keygrip associated with fingerprint \'{0}\' for keyid \'{1}\''
|
||||
.format(local_key_fingerprint, local_keyid)
|
||||
)
|
||||
|
||||
if local_keyid is None:
|
||||
raise SaltInvocationError(
|
||||
'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
|
||||
.format(keyid, gnupghome)
|
||||
)
|
||||
|
||||
_check_repo_sign_utils_support()
|
||||
|
||||
if use_passphrase:
|
||||
_check_repo_gpg_phrase_utils_support()
|
||||
phrase = __salt__['pillar.get']('gpg_passphrase')
|
||||
_check_repo_sign_utils_support('debsign')
|
||||
|
||||
if older_gnupg:
|
||||
with salt.utils.fopen(gpg_info_file, 'r') as fow:
|
||||
@ -656,10 +675,30 @@ def make_repo(repodir,
|
||||
__salt__['environ.setenv'](gpg_tty_info_dict)
|
||||
break
|
||||
|
||||
## sign_it_here
|
||||
for file in os.listdir(repodir):
|
||||
if file.endswith('.dsc'):
|
||||
abs_file = os.path.join(repodir, file)
|
||||
if use_passphrase:
|
||||
_check_repo_gpg_phrase_utils_support()
|
||||
phrase = __salt__['pillar.get']('gpg_passphrase')
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_keygrip_to_use)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
for debfile in os.listdir(repodir):
|
||||
abs_file = os.path.join(repodir, debfile)
|
||||
if debfile.endswith('.changes'):
|
||||
os.remove(abs_file)
|
||||
|
||||
if debfile.endswith('.dsc'):
|
||||
# sign_it_here
|
||||
if older_gnupg:
|
||||
if local_keyid is not None:
|
||||
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
else:
|
||||
# interval of 0.125 is really too fast on some systems
|
||||
interval = 0.5
|
||||
if local_keyid is not None:
|
||||
number_retries = timeout / interval
|
||||
times_looped = 0
|
||||
error_msg = 'Failed to debsign file {0}'.format(abs_file)
|
||||
@ -702,27 +741,6 @@ def make_repo(repodir,
|
||||
finally:
|
||||
proc.close(terminate=True, kill=True)
|
||||
|
||||
if use_passphrase:
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --forget {0}'.format(local_fingerprint)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --verbose --preset --passphrase "{0}" {1}'.format(phrase, local_fingerprint)
|
||||
__salt__['cmd.run'](cmd, runas=runas)
|
||||
|
||||
for debfile in os.listdir(repodir):
|
||||
abs_file = os.path.join(repodir, debfile)
|
||||
if debfile.endswith('.changes'):
|
||||
os.remove(abs_file)
|
||||
|
||||
if debfile.endswith('.dsc'):
|
||||
if older_gnupg:
|
||||
if local_keyid is not None:
|
||||
cmd = 'debsign --re-sign -k {0} {1}'.format(keyid, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedsc {0} {1}'.format(codename, abs_file)
|
||||
__salt__['cmd.run'](cmd, cwd=repodir, use_vt=True)
|
||||
else:
|
||||
number_retries = timeout / interval
|
||||
times_looped = 0
|
||||
error_msg = 'Failed to reprepro includedsc file {0}'.format(abs_file)
|
||||
@ -747,8 +765,7 @@ def make_repo(repodir,
|
||||
|
||||
if times_looped > number_retries:
|
||||
raise SaltInvocationError(
|
||||
'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'
|
||||
.format(abs_file, int(times_looped * interval))
|
||||
'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
|
||||
)
|
||||
time.sleep(interval)
|
||||
|
||||
@ -770,8 +787,4 @@ def make_repo(repodir,
|
||||
cmd = 'reprepro --ignore=wrongdistribution --component=main -Vb . includedeb {0} {1}'.format(codename, abs_file)
|
||||
res = __salt__['cmd.run_all'](cmd, cwd=repodir, use_vt=True)
|
||||
|
||||
if use_passphrase and local_keyid is not None:
|
||||
cmd = '/usr/lib/gnupg2/gpg-preset-passphrase --forget {0}'.format(local_fingerprint)
|
||||
res = __salt__['cmd.run_all'](cmd, runas=runas)
|
||||
|
||||
return res
|
||||
|
@ -1056,7 +1056,7 @@ def verify(text=None,
|
||||
|
||||
signature
|
||||
Specify the filename of a detached signature.
|
||||
.. versionadded:: Nitrogen
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@ -3,10 +3,13 @@
|
||||
Manage Linux kernel packages on APT-based systems
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import functools
|
||||
import logging
|
||||
import re
|
||||
|
||||
# Import 3rd-party libs
|
||||
# Import Salt libs
|
||||
import salt.ext.six as six
|
||||
|
||||
try:
|
||||
from salt.utils.versions import LooseVersion as _LooseVersion
|
||||
from salt.ext.six.moves import filter # pylint: disable=import-error,redefined-builtin
|
||||
@ -73,7 +76,11 @@ def list_installed():
|
||||
return []
|
||||
|
||||
prefix_len = len(_package_prefix()) + 1
|
||||
return sorted([pkg[prefix_len:] for pkg in result], cmp=_cmp_version)
|
||||
|
||||
if six.PY2:
|
||||
return sorted([pkg[prefix_len:] for pkg in result], cmp=_cmp_version)
|
||||
else:
|
||||
return sorted([pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version))
|
||||
|
||||
|
||||
def latest_available():
|
||||
|
@ -3,9 +3,12 @@
|
||||
Manage Linux kernel packages on YUM-based systems
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import functools
|
||||
import logging
|
||||
|
||||
# Import 3rd-party libs
|
||||
# Import Salt libs
|
||||
import salt.ext.six as six
|
||||
|
||||
try:
|
||||
from salt.utils.versions import LooseVersion as _LooseVersion
|
||||
HAS_REQUIRED_LIBS = True
|
||||
@ -65,7 +68,10 @@ def list_installed():
|
||||
if result is None:
|
||||
return []
|
||||
|
||||
return sorted(result, cmp=_cmp_version)
|
||||
if six.PY2:
|
||||
return sorted(result, cmp=_cmp_version)
|
||||
else:
|
||||
return sorted(result, key=functools.cmp_to_key(_cmp_version))
|
||||
|
||||
|
||||
def latest_available():
|
||||
|
@ -856,7 +856,7 @@ def _parse_network_settings(opts, current):
|
||||
_raise_error_network('hostname', ['server1.example.com'])
|
||||
|
||||
if 'nozeroconf' in opts:
|
||||
nozeroconf = salt.utils.dequote(opts['nozerconf'])
|
||||
nozeroconf = salt.utils.dequote(opts['nozeroconf'])
|
||||
if nozeroconf in valid:
|
||||
if nozeroconf in _CONFIG_TRUE:
|
||||
result['nozeroconf'] = true_val
|
||||
|
@ -128,15 +128,6 @@ def list_(show_all=False,
|
||||
continue
|
||||
|
||||
if '_seconds' in schedule[job]:
|
||||
# if _seconds is greater than zero
|
||||
# then include the original back in seconds.
|
||||
# otherwise remove seconds from the listing as the
|
||||
# original item didn't include it.
|
||||
if schedule[job]['_seconds'] > 0:
|
||||
schedule[job]['seconds'] = schedule[job]['_seconds']
|
||||
elif 'seconds' in schedule[job]:
|
||||
del schedule[job]['seconds']
|
||||
|
||||
# remove _seconds from the listing
|
||||
del schedule[job]['_seconds']
|
||||
|
||||
|
@ -300,7 +300,7 @@ def install_semod(module_path):
|
||||
|
||||
salt '*' selinux.install_semod [salt://]path/to/module.pp
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
if module_path.find('salt://') == 0:
|
||||
module_path = __salt__['cp.cache_file'](module_path)
|
||||
@ -318,7 +318,7 @@ def remove_semod(module):
|
||||
|
||||
salt '*' selinux.remove_semod module_name
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
cmd = 'semodule -r {0}'.format(module)
|
||||
return not __salt__['cmd.retcode'](cmd)
|
||||
|
@ -258,12 +258,22 @@ def _get_opts(**kwargs):
|
||||
Return a copy of the opts for use, optionally load a local config on top
|
||||
'''
|
||||
opts = copy.deepcopy(__opts__)
|
||||
|
||||
if 'localconfig' in kwargs:
|
||||
opts = salt.config.minion_config(kwargs['localconfig'], defaults=opts)
|
||||
else:
|
||||
if 'saltenv' in kwargs:
|
||||
return salt.config.minion_config(kwargs['localconfig'], defaults=opts)
|
||||
|
||||
if 'saltenv' in kwargs:
|
||||
saltenv = kwargs['saltenv']
|
||||
if not isinstance(saltenv, six.string_types):
|
||||
opts['environment'] = str(kwargs['saltenv'])
|
||||
else:
|
||||
opts['environment'] = kwargs['saltenv']
|
||||
if 'pillarenv' in kwargs:
|
||||
|
||||
if 'pillarenv' in kwargs:
|
||||
pillarenv = kwargs['pillarenv']
|
||||
if not isinstance(pillarenv, six.string_types):
|
||||
opts['pillarenv'] = str(kwargs['pillarenv'])
|
||||
else:
|
||||
opts['pillarenv'] = kwargs['pillarenv']
|
||||
|
||||
return opts
|
||||
|
@ -811,13 +811,27 @@ def modify(name,
|
||||
return changes
|
||||
|
||||
|
||||
def enable(name, **kwargs):
|
||||
def enable(name, start_type='auto', start_delayed=False, **kwargs):
|
||||
'''
|
||||
Enable the named service to start at boot
|
||||
|
||||
Args:
|
||||
name (str): The name of the service to enable.
|
||||
|
||||
start_type (str): Specifies the service start type. Valid options are as
|
||||
follows:
|
||||
|
||||
- boot: Device driver that is loaded by the boot loader
|
||||
- system: Device driver that is started during kernel initialization
|
||||
- auto: Service that automatically starts
|
||||
- manual: Service must be started manually
|
||||
- disabled: Service cannot be started
|
||||
|
||||
start_delayed (bool): Set the service to Auto(Delayed Start). Only valid
|
||||
if the start_type is set to ``Auto``. If service_type is not passed,
|
||||
but the service is already set to ``Auto``, then the flag will be
|
||||
set.
|
||||
|
||||
Returns:
|
||||
bool: ``True`` if successful, ``False`` otherwise
|
||||
|
||||
@ -827,8 +841,13 @@ def enable(name, **kwargs):
|
||||
|
||||
salt '*' service.enable <service name>
|
||||
'''
|
||||
modify(name, start_type='Auto')
|
||||
return info(name)['StartType'] == 'Auto'
|
||||
|
||||
modify(name, start_type=start_type, start_delayed=start_delayed)
|
||||
svcstat = info(name)
|
||||
if start_type.lower() == 'auto':
|
||||
return svcstat['StartType'].lower() == start_type.lower() and svcstat['StartTypeDelayed'] == start_delayed
|
||||
else:
|
||||
return svcstat['StartType'].lower() == start_type.lower()
|
||||
|
||||
|
||||
def disable(name, **kwargs):
|
||||
|
@ -28,7 +28,6 @@ from salt.template import compile_template
|
||||
from salt.utils.dictupdate import merge
|
||||
from salt.utils.odict import OrderedDict
|
||||
from salt.version import __version__
|
||||
from salt.utils.locales import decode_recursively
|
||||
|
||||
# Import 3rd-party libs
|
||||
import salt.ext.six as six
|
||||
@ -169,7 +168,7 @@ class RemotePillar(object):
|
||||
'{1}'.format(type(ret_pillar).__name__, ret_pillar)
|
||||
)
|
||||
return {}
|
||||
return decode_recursively(ret_pillar)
|
||||
return ret_pillar
|
||||
|
||||
|
||||
class PillarCache(object):
|
||||
|
@ -677,8 +677,17 @@ class State(object):
|
||||
except AttributeError:
|
||||
pillar_enc = str(pillar_enc).lower()
|
||||
self._pillar_enc = pillar_enc
|
||||
self.opts['pillar'] = initial_pillar if initial_pillar is not None \
|
||||
else self._gather_pillar()
|
||||
if initial_pillar is not None:
|
||||
self.opts['pillar'] = initial_pillar
|
||||
if self._pillar_override:
|
||||
self.opts['pillar'] = salt.utils.dictupdate.merge(
|
||||
self.opts['pillar'],
|
||||
self._pillar_override,
|
||||
self.opts.get('pillar_source_merging_strategy', 'smart'),
|
||||
self.opts.get('renderer', 'yaml'),
|
||||
self.opts.get('pillar_merge_lists', False))
|
||||
else:
|
||||
self.opts['pillar'] = self._gather_pillar()
|
||||
self.state_con = context or {}
|
||||
self.load_modules()
|
||||
self.active = set()
|
||||
|
@ -262,7 +262,7 @@ def module_install(name):
|
||||
name
|
||||
Path to file with module to install
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
@ -283,7 +283,7 @@ def module_remove(name):
|
||||
name
|
||||
The name of the module to remove
|
||||
|
||||
.. versionadded:: develop
|
||||
.. versionadded:: 2016.11.6
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
|
@ -3497,3 +3497,21 @@ def dequote(val):
|
||||
if is_quoted(val):
|
||||
return val[1:-1]
|
||||
return val
|
||||
|
||||
|
||||
def mkstemp(*args, **kwargs):
|
||||
'''
|
||||
Helper function which does exactly what `tempfile.mkstemp()` does but
|
||||
accepts another argument, `close_fd`, which, by default, is true and closes
|
||||
the fd before returning the file path. Something commonly done throughout
|
||||
Salt's code.
|
||||
'''
|
||||
if 'prefix' not in kwargs:
|
||||
kwargs['prefix'] = '__salt.tmp.'
|
||||
close_fd = kwargs.pop('close_fd', True)
|
||||
fd_, fpath = tempfile.mkstemp(*args, **kwargs)
|
||||
if close_fd is False:
|
||||
return (fd_, fpath)
|
||||
os.close(fd_)
|
||||
del fd_
|
||||
return fpath
|
||||
|
@ -500,7 +500,10 @@ def bootstrap(vm_, opts):
'winrm_port', vm_, opts, default=5986
)
deploy_kwargs['winrm_use_ssl'] = salt.config.get_cloud_config_value(
'winrm_use_ssl', vm_, opts, default=True
'winrm_use_ssl', vm_, opts, default=True
)
deploy_kwargs['winrm_verify_ssl'] = salt.config.get_cloud_config_value(
'winrm_verify_ssl', vm_, opts, default=True
)
# Store what was used to the deploy the VM
@ -826,7 +829,7 @@ def wait_for_winexesvc(host, port, username, password, timeout=900):
)
def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True):
def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True, verify=True):
'''
Wait until WinRM connection can be established.
'''
@ -836,14 +839,20 @@ def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True):
host, port
)
)
transport = 'ssl'
if not use_ssl:
transport = 'plaintext'
trycount = 0
while True:
trycount += 1
try:
transport = 'ssl'
if not use_ssl:
transport = 'plaintext'
s = winrm.Session(host, auth=(username, password), transport=transport)
winrm_kwargs = {'target': host,
'auth': (username, password),
'transport': transport}
if not verify:
log.debug("SSL validation for WinRM disabled.")
winrm_kwargs['server_cert_validation'] = 'ignore'
s = winrm.Session(**winrm_kwargs)
if hasattr(s.protocol, 'set_timeout'):
s.protocol.set_timeout(15)
log.trace('WinRM endpoint url: {0}'.format(s.url))
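``wait_for_winrm`` now assembles the ``winrm.Session`` arguments as a dict so certificate validation can be switched off when ``verify`` is ``False``. A minimal sketch of that kwargs assembly, assuming the ``pywinrm`` package provides ``winrm.Session`` (the host and credentials below are placeholders, and no connection is attempted):

    def build_winrm_kwargs(host, username, password, use_ssl=True, verify=True):
        # Mirror the assembly above: choose the transport, then optionally
        # tell pywinrm to skip server certificate validation.
        winrm_kwargs = {'target': host,
                        'auth': (username, password),
                        'transport': 'ssl' if use_ssl else 'plaintext'}
        if not verify:
            winrm_kwargs['server_cert_validation'] = 'ignore'
        return winrm_kwargs

    kwargs = build_winrm_kwargs('203.0.113.10', 'Administrator', 'hunter2', verify=False)
    print(kwargs)  # would then be passed as winrm.Session(**kwargs)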
@ -991,6 +1000,7 @@ def deploy_windows(host,
use_winrm=False,
winrm_port=5986,
winrm_use_ssl=True,
winrm_verify_ssl=True,
**kwargs):
'''
Copy the install files to a remote Windows box, and execute them
@ -1017,7 +1027,8 @@ def deploy_windows(host,
if HAS_WINRM and use_winrm:
winrm_session = wait_for_winrm(host=host, port=winrm_port,
username=username, password=password,
timeout=port_timeout * 60, use_ssl=winrm_use_ssl)
timeout=port_timeout * 60, use_ssl=winrm_use_ssl,
verify=winrm_verify_ssl)
if winrm_session is not None:
service_available = True
else:
@ -9,7 +9,6 @@ import logging
import os
import shutil
import subprocess
import tempfile
import time
# Import salt libs
@ -22,9 +21,9 @@ from salt.ext import six
log = logging.getLogger(__name__)
TEMPFILE_PREFIX = '__salt.tmp.'
REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3')
VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS
TEMPFILE_PREFIX = '__salt.tmp.'
def guess_archive_type(name):
@ -44,20 +43,10 @@ def guess_archive_type(name):
def mkstemp(*args, **kwargs):
'''
Helper function which does exactly what `tempfile.mkstemp()` does but
accepts another argument, `close_fd`, which, by default, is true and closes
the fd before returning the file path. Something commonly done throughout
Salt's code.
Should eventually reside here, but for now point back at old location in
salt.utils
'''
if 'prefix' not in kwargs:
kwargs['prefix'] = TEMPFILE_PREFIX
close_fd = kwargs.pop('close_fd', True)
fd_, fpath = tempfile.mkstemp(*args, **kwargs)
if close_fd is False:
return (fd_, fpath)
os.close(fd_)
del fd_
return fpath
return salt.utils.mkstemp(*args, **kwargs)
def recursive_copy(source, dest):
@ -115,17 +115,3 @@ def normalize_locale(loc):
comps['codeset'] = comps['codeset'].lower().replace('-', '')
comps['charmap'] = ''
return join_locale(comps)
def decode_recursively(object_):
if isinstance(object_, list):
return [decode_recursively(o) for o in object_]
if isinstance(object_, tuple):
return tuple([decode_recursively(o) for o in object_])
if isinstance(object_, dict):
return dict([(decode_recursively(key), decode_recursively(value))
for key, value in salt.ext.six.iteritems(object_)])
elif isinstance(object_, six.string_types):
return sdecode(object_)
else:
return object_
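The removed ``decode_recursively`` walked a nested structure and decoded every string it found; with its only caller in ``RemotePillar`` gone, the helper goes too. For reference, a plain-Python approximation of the same idea without the ``six``/``sdecode`` helpers:

    def decode_recursively(obj, encoding='utf-8'):
        # Decode bytes and recurse into lists, tuples and dicts; leave
        # everything else untouched.
        if isinstance(obj, list):
            return [decode_recursively(o, encoding) for o in obj]
        if isinstance(obj, tuple):
            return tuple(decode_recursively(o, encoding) for o in obj)
        if isinstance(obj, dict):
            return {decode_recursively(k, encoding): decode_recursively(v, encoding)
                    for k, v in obj.items()}
        if isinstance(obj, bytes):
            return obj.decode(encoding)
        return obj

    print(decode_recursively({b'role': [b'web', 'db'], 'count': 2}))
    # {'role': ['web', 'db'], 'count': 2}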
@ -53,7 +53,10 @@ def get_invalid_docs():
'cp.recv',
'glance.warn_until',
'ipset.long_range',
'libcloud_compute.get_driver',
'libcloud_dns.get_driver',
'libcloud_loadbalancer.get_driver',
'libcloud_storage.get_driver',
'log.critical',
'log.debug',
'log.error',
@ -0,0 +1,2 @@
ping -c 2 {{ pillar['myhost'] }}:
cmd.run
@ -5,6 +5,7 @@ Test the grains module
# Import python libs
from __future__ import absolute_import
import logging
import os
import time
@ -13,6 +14,8 @@ from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
log = logging.getLogger(__name__)
class TestModulesGrains(ModuleCase):
'''
@ -110,11 +113,12 @@ class TestModulesGrains(ModuleCase):
'''
test to ensure some core grains are returned
'''
grains = ['os', 'os_family', 'osmajorrelease', 'osrelease', 'osfullname', 'id']
grains = ('os', 'os_family', 'osmajorrelease', 'osrelease', 'osfullname', 'id')
os = self.run_function('grains.get', ['os'])
for grain in grains:
get_grain = self.run_function('grains.get', [grain])
log.debug('Value of \'%s\' grain: \'%s\'', grain, get_grain)
if os == 'Arch' and grain in ['osmajorrelease']:
self.assertEqual(get_grain, '')
continue
@ -426,6 +426,21 @@ class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin
# Restore umask
os.umask(current_umask)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_42116_cli_pillar_override(self):
ret = self.run_call(
'state.apply issue-42116-cli-pillar-override '
'pillar=\'{"myhost": "localhost"}\''
)
for line in ret:
line = line.lstrip()
if line == 'Comment: Command "ping -c 2 localhost" run':
# Successful test
break
else:
log.debug('salt-call output:\n\n%s', '\n'.join(ret))
self.fail('CLI pillar override not found in pillar data')
def tearDown(self):
'''
Teardown method to remove installed packages
@ -306,6 +306,16 @@ class TestCase(_TestCase):
)
# return _TestCase.failIfAlmostEqual(self, *args, **kwargs)
@staticmethod
def assert_called_once(mock):
'''
mock.assert_called_once only exists in PY3 in 3.6 and newer
'''
try:
mock.assert_called_once()
except AttributeError:
log.warning('assert_called_once invoked, but not available')
if six.PY2:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
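The ``assert_called_once`` shim exists because, as the docstring notes, ``Mock.assert_called_once`` is only available from Python 3.6 onward; on older versions the attribute lookup raises ``AttributeError``, which the shim turns into a warning. A small standalone sketch of the same guard using ``unittest.mock`` (the logger name and the mock are illustrative, not the test suite's fixtures):

    import logging
    from unittest.mock import MagicMock

    log = logging.getLogger(__name__)

    def assert_called_once(mock):
        # Use the real assertion where the mock library provides it;
        # otherwise just log, matching the shim above.
        try:
            mock.assert_called_once()
        except AttributeError:
            log.warning('assert_called_once invoked, but not available')

    reboot = MagicMock()
    reboot()
    assert_called_once(reboot)  # passes; a second reboot() call would fail on 3.6+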
@ -105,7 +105,7 @@ class GCETestCase(TestCase, LoaderModuleMockMixin):
get_deps = gce.get_dependencies()
self.assertEqual(get_deps, True)
if LooseVersion(mock_version) >= LooseVersion('2.0.0'):
p.assert_called_once()
self.assert_called_once(p)
def test_provider_matches(self):
"""
@ -127,7 +127,7 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin):
kwargs = {'read-ahead': 512, 'filesystem-read-ahead': 1024}
disk.tune('/dev/sda', **kwargs)
mock.assert_called_once()
self.assert_called_once(mock)
args, kwargs = mock.call_args
@ -161,7 +161,10 @@ class HaproxyConnTestCase(TestCase, LoaderModuleMockMixin):
'''
Test listing all frontends
'''
self.assertItemsEqual(haproxyconn.list_frontends(), ['frontend-alpha', 'frontend-beta', 'frontend-gamma'])
self.assertEqual(
sorted(haproxyconn.list_frontends()),
sorted(['frontend-alpha', 'frontend-beta', 'frontend-gamma'])
)
# 'show_backends' function tests: 1
@ -175,7 +178,10 @@ class HaproxyConnTestCase(TestCase, LoaderModuleMockMixin):
'''
Test listing of all backends
'''
self.assertItemsEqual(haproxyconn.list_backends(), ['backend-alpha', 'backend-beta', 'backend-gamma'])
self.assertEqual(
sorted(haproxyconn.list_backends()),
sorted(['backend-alpha', 'backend-beta', 'backend-gamma'])
)
def test_get_backend(self):
'''
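``assertItemsEqual`` is a Python 2-only name (Python 3 renamed it ``assertCountEqual``), so these tests now compare sorted copies instead, which is equivalent for simple lists of unique strings. A hedged standalone illustration of the replacement pattern (the frontend names are placeholders, not the real haproxyconn fixtures):

    import unittest

    class FrontendListTest(unittest.TestCase):
        def test_list_frontends(self):
            # Order returned by the service is not guaranteed, so compare
            # sorted copies rather than using assertItemsEqual.
            listed = ['frontend-beta', 'frontend-alpha', 'frontend-gamma']
            expected = ['frontend-alpha', 'frontend-beta', 'frontend-gamma']
            self.assertEqual(sorted(listed), sorted(expected))

    if __name__ == '__main__':
        unittest.main()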
@ -132,7 +132,7 @@ class KernelPkgTestCase(object):
self.assertEqual(result['latest_installed'], self.KERNEL_LIST[-1])
self.assertEqual(result['reboot_requested'], True)
self.assertEqual(result['reboot_required'], True)
self._kernelpkg.__salt__['system.reboot'].assert_called_once()
self.assert_called_once(self._kernelpkg.__salt__['system.reboot'])
def test_upgrade_needed_without_reboot(self):
'''
@ -38,9 +38,8 @@ class MdadmTestCase(TestCase, LoaderModuleMockMixin):
)
self.assertEqual('salt', ret)
# Only available in 3.6 and above on py3
if hasattr(mock, 'assert_called_once'):
mock.assert_called_once()
self.assert_called_once(mock)
args, kwargs = mock.call_args
# expected cmd is
# mdadm -C /dev/md0 -R -v --chunk 256 --force -l 5 -e default -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
@ -95,7 +95,8 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin):
'''
with patch.dict(network.__grains__, {'kernel': 'Linux'}):
with patch.object(network, '_netstat_linux', return_value='A'):
self.assertEqual(network.netstat(), 'A')
with patch.object(network, '_ss_linux', return_value='A'):
self.assertEqual(network.netstat(), 'A')
with patch.dict(network.__grains__, {'kernel': 'OpenBSD'}):
with patch.object(network, '_netstat_bsd', return_value='A'):
@ -340,10 +341,12 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(network.__grains__, {'kernel': 'Linux'}):
with patch.object(network, '_netstat_route_linux',
side_effect=['A', [{'addr_family': 'inet'}]]):
self.assertEqual(network.routes(None), 'A')
with patch.object(network, '_ip_route_linux',
side_effect=['A', [{'addr_family': 'inet'}]]):
self.assertEqual(network.routes(None), 'A')
self.assertListEqual(network.routes('inet'),
[{'addr_family': 'inet'}])
self.assertListEqual(network.routes('inet'),
[{'addr_family': 'inet'}])
def test_default_route(self):
'''
@ -91,8 +91,13 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
email = 'github.com'
empty_line = '\n'
comment_line = '# this is a comment \n'
# Write out the authorized key to a temporary file
temp_file = tempfile.NamedTemporaryFile(delete=False)
if salt.utils.is_windows():
temp_file = tempfile.NamedTemporaryFile(delete=False)
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
# Add comment
temp_file.write(comment_line)
# Add empty line for #41335
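On Python 3, ``tempfile.NamedTemporaryFile`` defaults to binary mode, so writing ``str`` comment lines fails; the test therefore opens the file in text mode (``mode='w+'``) everywhere except Windows. A small sketch of the difference (the contents are arbitrary and the binary write only works because bytes are passed):

    import tempfile

    # Binary mode (the default) only accepts bytes on Python 3.
    with tempfile.NamedTemporaryFile() as binary_file:
        binary_file.write(b'# this is a comment \n')

    # Text mode accepts str, which is what the test writes.
    with tempfile.NamedTemporaryFile(mode='w+') as text_file:
        text_file.write('# this is a comment \n')
        text_file.seek(0)
        print(text_file.read())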
@ -214,7 +214,8 @@ class WinServiceTestCase(TestCase, LoaderModuleMockMixin):
Test to enable the named service to start at boot
'''
mock_modify = MagicMock(return_value=True)
mock_info = MagicMock(return_value={'StartType': 'Auto'})
mock_info = MagicMock(return_value={'StartType': 'Auto',
'StartTypeDelayed': False})
with patch.object(win_service, 'modify', mock_modify):
with patch.object(win_service, 'info', mock_info):
self.assertTrue(win_service.enable('spongebob'))
@ -70,7 +70,7 @@ class KernelPkgTestCase(TestCase, LoaderModuleMockMixin):
self.assertTrue(ret['result'])
self.assertIsInstance(ret['changes'], dict)
self.assertIsInstance(ret['comment'], str)
kernelpkg.__salt__['kernelpkg.upgrade'].assert_called_once()
self.assert_called_once(kernelpkg.__salt__['kernelpkg.upgrade'])
with patch.dict(kernelpkg.__opts__, {'test': True}):
kernelpkg.__salt__['kernelpkg.upgrade'].reset_mock()
@ -118,7 +118,7 @@ class KernelPkgTestCase(TestCase, LoaderModuleMockMixin):
self.assertTrue(ret['result'])
self.assertIsInstance(ret['changes'], dict)
self.assertIsInstance(ret['comment'], str)
kernelpkg.__salt__['system.reboot'].assert_called_once()
self.assert_called_once(kernelpkg.__salt__['system.reboot'])
with patch.dict(kernelpkg.__opts__, {'test': True}):
kernelpkg.__salt__['system.reboot'].reset_mock()
@ -146,7 +146,17 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin):
del ret['__jid__']
with patch.dict(saltmod.__opts__, {'test': False}):
with patch.dict(saltmod.__salt__, {'saltutil.cmd': MagicMock(return_value=test_batch_return)}):
self.assertDictEqual(saltmod.state(name, tgt, highstate=True), ret)
state_run = saltmod.state(name, tgt, highstate=True)
# Test return without checking the comment contents. Comments are tested later.
comment = state_run.pop('comment')
ret.pop('comment')
self.assertDictEqual(state_run, ret)
# Check the comment contents in a non-order specific way (ordering fails sometimes on PY3)
self.assertIn('States ran successfully. No changes made to', comment)
for minion in ['minion1', 'minion2', 'minion3']:
self.assertIn(minion, comment)
# 'function' function tests: 1
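Because the order of minion names in the aggregated comment is not stable on Python 3, the test now pops the comment out of the return dict, compares the rest exactly, and checks the comment with substring assertions. A minimal sketch of that order-insensitive check (the comment text and minion names below are a made-up example):

    import unittest

    class CommentCheckTest(unittest.TestCase):
        def test_comment_mentions_every_minion(self):
            state_run = {'result': True,
                         'comment': 'States ran successfully. No changes made to minion3, minion1, minion2.'}
            expected = {'result': True}

            # Compare everything except the comment exactly...
            comment = state_run.pop('comment')
            self.assertDictEqual(state_run, expected)

            # ...then check the comment without depending on minion ordering.
            self.assertIn('States ran successfully. No changes made to', comment)
            for minion in ['minion1', 'minion2', 'minion3']:
                self.assertIn(minion, comment)

    if __name__ == '__main__':
        unittest.main()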