Merge branch '2017.7' into configparser-defaultsect

Nicole Thomas 2018-05-15 10:50:09 -04:00 committed by GitHub
commit a8f2ad977e
126 changed files with 6158 additions and 1925 deletions

.github/CODEOWNERS

@ -9,43 +9,45 @@
# See https://help.github.com/articles/about-codeowners/
# for more info about the CODEOWNERS file
# This file uses an fnmatch-style matching pattern.
# Team Boto
salt/**/*boto* @saltstack/team-boto
# Team Core
salt/auth/ @saltstack/team-core
salt/cache/ @saltstack/team-core
salt/cli/ @saltstack/team-core
salt/auth/* @saltstack/team-core
salt/cache/* @saltstack/team-core
salt/cli/* @saltstack/team-core
salt/client/* @saltstack/team-core
salt/config/* @saltstack/team-core
salt/daemons/ @saltstack/team-core
salt/pillar/ @saltstack/team-core
salt/daemons/* @saltstack/team-core
salt/pillar/* @saltstack/team-core
salt/loader.py @saltstack/team-core
salt/payload.py @saltstack/team-core
salt/**/master* @saltstack/team-core
salt/**/minion* @saltstack/team-core
# Team Cloud
salt/cloud/ @saltstack/team-cloud
salt/utils/openstack/ @saltstack/team-cloud
salt/cloud/* @saltstack/team-cloud
salt/utils/openstack/* @saltstack/team-cloud
salt/utils/aws.py @saltstack/team-cloud
salt/**/*cloud* @saltstack/team-cloud
# Team NetAPI
salt/cli/api.py @saltstack/team-netapi
salt/client/netapi.py @saltstack/team-netapi
salt/netapi/ @saltstack/team-netapi
salt/netapi/* @saltstack/team-netapi
# Team Network
salt/proxy/ @saltstack/team-proxy
salt/proxy/* @saltstack/team-proxy
# Team SPM
salt/cli/spm.py @saltstack/team-spm
salt/spm/ @saltstack/team-spm
salt/spm/* @saltstack/team-spm
# Team SSH
salt/cli/ssh.py @saltstack/team-ssh
salt/client/ssh/ @saltstack/team-ssh
salt/client/ssh/* @saltstack/team-ssh
salt/runners/ssh.py @saltstack/team-ssh
salt/**/thin.py @saltstack/team-ssh
@ -61,8 +63,12 @@ salt/**/*xfs* @saltstack/team-suse
salt/**/*zypper* @saltstack/team-suse
# Team Transport
salt/transport/ @saltstack/team-transport
salt/transport/* @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport
# Team Windows
salt/**/*win* @saltstack/team-windows
salt/*/*win* @saltstack/team-windows
salt/modules/reg.py @saltstack/team-windows
salt/states/reg.py @saltstack/team-windows
tests/*/*win* @saltstack/team-windows
tests/*/test_reg.py @saltstack/team-windows
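The pattern changes above follow from the fnmatch-style matching noted in the file header: a bare directory path only matches itself, while a trailing ``/*`` also matches files beneath it. A standalone sketch using Python's ``fnmatch`` module (illustrative only; GitHub's CODEOWNERS matcher is not literally Python's fnmatch):

.. code-block:: python

    import fnmatch

    # A bare directory path matches only the literal string, never the
    # files inside the directory; the trailing '/*' form does match them.
    print(fnmatch.fnmatch('salt/auth/ldap.py', 'salt/auth/'))   # False
    print(fnmatch.fnmatch('salt/auth/ldap.py', 'salt/auth/*'))  # True

    # The wildcard patterns left unchanged already match nested paths.
    print(fnmatch.fnmatch('salt/modules/boto_ec2.py', 'salt/**/*boto*'))  # True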


@ -1,5 +1,5 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% vagrant = system('gem list -i kitchen-vagrant 2>/dev/null >/dev/null') %>
<% version = '2017.7.4' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
@ -92,12 +92,9 @@ platforms:
- yum install -y upstart
provisioner:
salt_bootstrap_options: -P -p rsync -y -x python2.7 -X git v<%= version %> >/dev/null
- name: ubuntu-rolling
- name: ubuntu-18.04
driver_config:
image: ubuntu:rolling
run_command: /lib/systemd/systemd
provisioner:
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.sh
- name: ubuntu-16.04
driver_config:
run_command: /lib/systemd/systemd


@ -38,7 +38,10 @@ from __future__ import division
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
try:
from sphinx.util.compat import Directive
except ImportError:
from docutils.parsers.rst import Directive
CONTROL_HEIGHT = 30


@ -323,6 +323,7 @@ rst_prolog = """\
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
.. _`salt-slack`: https://saltstackcommunity.herokuapp.com/
.. |windownload| raw:: html
<p>Python2 x86: <a


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-API" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-API" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-api \- salt-api Command
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CALL" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-CALL" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CLOUD" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-CLOUD" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-cloud \- Salt Cloud Command
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CP" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-CP" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-KEY" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-KEY" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MASTER" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-MASTER" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MINION" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-MINION" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-PROXY" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-PROXY" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-proxy \- salt-proxy Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-RUN" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-RUN" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SSH" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-SSH" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-ssh \- salt-ssh Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SYNDIC" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-SYNDIC" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-UNITY" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT-UNITY" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt-unity \- salt-unity Command
.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SALT" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
salt \- salt
.

File diff suppressed because it is too large.


@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SPM" "1" "Mar 13, 2018" "2017.7.5" "Salt"
.TH "SPM" "1" "May 07, 2018" "2017.7.6" "Salt"
.SH NAME
spm \- Salt Package Manager Command
.


@ -186,7 +186,7 @@ execution modules
iwtools
jboss7
jboss7_cli
jenkins
jenkinsmod
junos
k8s
kapacitor


@ -1,5 +0,0 @@
salt.modules.jenkins module
===========================
.. automodule:: salt.modules.jenkins
:members:


@ -0,0 +1,5 @@
salt.modules.jenkinsmod module
==============================
.. automodule:: salt.modules.jenkinsmod
:members:


@ -60,7 +60,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
isolated into separate branches.
If you're working on a bug or documentation fix, create your branch from
the oldest release branch that contains the bug or requires the documentation
the oldest **supported** main release branch that contains the bug or requires the documentation
update. See :ref:`Which Salt Branch? <which-salt-branch>`.
.. code-block:: bash
@ -212,8 +212,11 @@ There are three different kinds of branches in use: develop, main release
branches, and dot release branches.
- All feature work should go into the ``develop`` branch.
- Bug fixes and documentation changes should go into the oldest supported
**main** release branch affected by the the bug or documentation change.
- Bug fixes and documentation changes should go into the oldest **supported
main** release branch affected by the bug or documentation change (you
can use the blame button in GitHub to figure out when the bug was introduced).
Supported releases are the last two releases. For example, if the latest release
is 2018.3, the last two releases are 2018.3 and 2017.7.
Main release branches are named after a year and month, such as
``2016.11`` and ``2017.7``.
- Hot fixes, as determined by SaltStack's release team, should be submitted
@ -247,7 +250,7 @@ Main Release Branches
=====================
The current release branch is the most recent stable release. Pull requests
containing bug fixes or documentation changes should be made against the main
containing bug fixes or documentation changes should be made against the oldest supported main
release branch that is affected.
The branch name will be a date-based name such as ``2016.11``.


@ -212,8 +212,9 @@ on GitHub.
repository in your own account on GitHub and notify a SaltStack employee
when it is ready. We will add you to the contributors team on the
`saltstack-formulas`_ organization and help you transfer the repository
over. Ping a SaltStack employee on IRC (``#salt`` on Freenode) or send an
email to the `salt-users`_ mailing list.
over. Ping a SaltStack employee on IRC (``#salt`` on Freenode), join the
``#formulas`` channel on the `salt-slack`_ or send an email to the
`salt-users`_ mailing list.
There are a lot of repositories in that organization! Team members can
manage which repositories they are subscribed to on GitHub's watching page:


@ -6,7 +6,7 @@ Debian GNU/Linux / Raspbian
Debian GNU/Linux distribution and some derivatives such as Raspbian already
have included Salt packages in their repositories. However, the current stable
release codenamed "Jessie" contains old outdated Salt release. It is
Debian release contains outdated Salt releases. It is
recommended to use SaltStack repository for Debian as described
:ref:`below <installation-debian-repo>`.
@ -33,11 +33,13 @@ Instructions are at https://repo.saltstack.com/#debian.
Installation from the Debian / Raspbian Official Repository
===========================================================
Stretch (Testing) and Sid (Unstable) distributions are already contain mostly
up-to-date Salt packages built by Debian Salt Team. You can install Salt
components directly from Debian.
The Debian distributions contain mostly old Salt packages
built by the Debian Salt Team. You can install Salt
components directly from Debian, but it is recommended to
use the instructions above for the packages from the official
Salt repository.
On Jessie (Stable) there is an option to install Salt minion from Stretch with
On Jessie there is an option to install Salt minion from Stretch with
`python-tornado` dependency from `jessie-backports` repositories.
To install fresh release of Salt minion on Jessie:
@ -79,7 +81,7 @@ To install fresh release of Salt minion on Jessie:
apt-get update
apt-get install python-zmq python-tornado/stretch salt-common/stretch
#. Install Salt minion package from Stretch:
#. Install Salt minion package from the latest Debian release:
.. code-block:: bash


@ -20,6 +20,9 @@ Statistics:
Changes:
This release includes a CVE Fix:
CVE-2017-7893: Compromised salt-minions can impersonate the salt-master. (Discovery credit: Frank Spierings)
- **PR** `#39855`_: (*Foxlik*) Use regular expression instead of split when replacing authorized_keys
@ *2017-03-22T18:28:32Z*


@ -1,8 +1,9 @@
========================================
Salt 2017.7.6 Release Notes
In Progress: Salt 2017.7.6 Release Notes
========================================
Version 2017.7.6 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Version 2017.7.6 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.
Option to Return to Previous Pillar Include Behavior
----------------------------------------------------


@ -600,15 +600,24 @@ repository to be served up from the Salt fileserver path
Mountpoints can also be configured on a :ref:`per-remote basis
<gitfs-per-remote-config>`.
Using gitfs in Masterless Mode
==============================
Since 2014.7.0, gitfs can be used in masterless mode. To do so, simply add the
gitfs configuration parameters (and set :conf_master:`fileserver_backend`) in
the *minion* config file instead of the master config file.
Using gitfs Alongside Other Backends
====================================
Sometimes it may make sense to use multiple backends; for instance, if ``sls``
files are stored in git but larger files are stored directly on the master.
The cascading lookup logic used for multiple remotes is also used with
multiple backends. If the ``fileserver_backend`` option contains
multiple backends:
The cascading lookup logic used for multiple remotes is also used with multiple
backends. If the :conf_master:`fileserver_backend` option contains multiple
backends:
.. code-block:: yaml
@ -620,7 +629,6 @@ Then the ``roots`` backend (the default backend of files in ``/srv/salt``) will
be searched first for the requested file; then, if it is not found on the
master, each configured git remote will be searched.
Branches, Environments, and Top Files
=====================================
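For the masterless-mode paragraph added above, the gitfs settings go in the minion config file rather than the master's. A minimal sketch of the relevant options expressed as a Python dict (values and the remote URL are illustrative; ``file_client: local`` is the usual masterless setting and is assumed here, not taken from this diff):

.. code-block:: python

    # Illustrative only: these options normally live in /etc/salt/minion
    # (YAML); shown as a Python dict for clarity.
    masterless_gitfs_opts = {
        'file_client': 'local',  # assumed typical masterless setting
        'fileserver_backend': ['gitfs', 'roots'],
        'gitfs_remotes': ['https://github.com/example/salt-states.git'],
    }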


@ -157,7 +157,7 @@ If (Test-Path "$($ini[$bitPaths]['VCforPythonDir'])\vcvarsall.bat") {
# Install Microsoft Visual C++ for Python2.7
Write-Output " - Installing $($ini['Prerequisites']['VCforPython']) . . ."
$file = "$($ini['Settings']['DownloadDir'])\$($ini['Prerequisites']['VCforPython'])"
$p = Start-Process msiexec.exe -ArgumentList "/i $file /qb ALLUSERS=1" -Wait -NoNewWindow -PassThru
$p = Start-Process msiexec.exe -ArgumentList "/i $file /quiet ALLUSERS=1" -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
DownloadFileWithProgress $url $file
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
$p = Start-Process msiexec -ArgumentList "/i $file /quiet ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -197,17 +197,17 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
} else {
$p = New-Item $Env:SALT_PIP_LOCAL_CACHE -ItemType Directory -Force # Ensure directory exists
if ( (Get-ChildItem $Env:SALT_PIP_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_pip.txt into empty local cache SALT_REQ_PIP $Env:SALT_PIP_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
#==============================================================================
@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================


@ -157,7 +157,7 @@ If (Test-Path "$($ini[$bitPaths]['VCppBuildToolsDir'])\vcbuildtools.bat") {
# Install Microsoft Visual C++ Build Tools
Write-Output " - Installing $($ini['Prerequisites']['VCppBuildTools']) . . ."
$file = "$($ini['Settings']['DownloadDir'])\$($ini['Prerequisites']['VCppBuildTools'])"
$p = Start-Process $file -ArgumentList '/Passive' -Wait -NoNewWindow -PassThru
$p = Start-Process $file -ArgumentList '/Quiet' -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
DownloadFileWithProgress $url $file
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
$p = Start-Process $file -ArgumentList "/Quiet InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -197,17 +197,17 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
} else {
$p = New-Item $Env:SALT_PIP_LOCAL_CACHE -ItemType Directory -Force # Ensure directory exists
if ( (Get-ChildItem $Env:SALT_PIP_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_pip.txt into empty local cache SALT_REQ_PIP $Env:SALT_PIP_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
#==============================================================================
@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================


@ -46,8 +46,6 @@ goto CheckPython3
echo Failed, please remove manually
)
goto eof
:CheckPython3
if exist "\Python35" goto RemovePython3
@ -59,13 +57,13 @@ goto eof
:: 64 bit
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
echo %0 :: - 3.5.3 64bit
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /quiet
)
:: 32 bit
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
echo %0 :: - 3.5.3 32bit
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /quiet
)
rem wipe the Python directory


@ -35,7 +35,7 @@ Function Get-Settings {
# Prerequisite software
$Prerequisites = @{
"NSIS" = "nsis-3.0b1-setup.exe"
"NSIS" = "nsis-3.03-setup.exe"
"VCforPython" = "VCForPython27.msi"
"VCppBuildTools" = "visualcppbuildtools_full.exe"
}
@ -59,21 +59,15 @@ Function Get-Settings {
# Filenames for 64 bit Windows
$64bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win_amd64.whl"
"Python2" = "python-2.7.13.amd64.msi"
"PyYAML2" = "PyYAML-3.11.win-amd64-py2.7.exe"
"Python3" = "python-3.5.3-amd64.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win_amd64.whl"
}
$ini.Add("64bitPrograms", $64bitPrograms)
# Filenames for 32 bit Windows
$32bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win32.whl"
"Python2" = "python-2.7.13.msi"
"PyYAML2" = "PyYAML-3.11.win32-py2.7.exe"
"Python3" = "python-3.5.3.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win32.whl"
}
$ini.Add("32bitPrograms", $32bitPrograms)


@ -0,0 +1,4 @@
-r base.txt
# Required by Tornado to handle thread-related functionality.
futures>=2.0


@ -0,0 +1 @@
-r base.txt


@ -1,5 +1,7 @@
Jinja2
msgpack-python>0.3,!=0.5.5
# This should be changed to msgpack-python for Packages
# msgpack-python>0.3,!=0.5.5
msgpack>=0.5,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0


@ -1,4 +1,4 @@
-r base.txt
-r base-py2.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -6,6 +6,6 @@ boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest
pytest>=3.5.0
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt


@ -1,4 +1,4 @@
-r base.txt
-r base-py3.txt
mock>=2.0.0
apache-libcloud>=0.14.0
@ -11,5 +11,5 @@ moto>=0.3.6
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
pytest
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt


@ -1,3 +1,3 @@
pytest
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir


@ -47,11 +47,6 @@ from salt.exceptions import (
# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
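Several files in this commit replace their local try/except ZMQ imports with a shared helper (``from salt.utils.zeromq import zmq``, as the later hunks show). A standalone sketch of that optional-import pattern (illustrative; not the helper's actual contents):

.. code-block:: python

    # Centralised optional import: do the try/except once and expose the
    # module object (or None) for callers to test.
    try:
        import zmq
    except ImportError:
        zmq = None

    # Callers then simply test truthiness instead of a per-module flag:
    #     if not zmq:
    #         ...fall back or refuse to load...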


@ -56,11 +56,7 @@ try:
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
from salt.utils.zeromq import zmq
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = '/var/tmp/.%%USER%%_%%FQDNUUID%%_salt'
@ -207,7 +203,7 @@ class SSH(object):
'''
def __init__(self, opts):
pull_sock = os.path.join(opts['sock_dir'], 'master_event_pull.ipc')
if os.path.isfile(pull_sock) and HAS_ZMQ:
if os.path.exists(pull_sock) and zmq:
self.event = salt.utils.event.get_event(
'master',
opts['sock_dir'],
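The check above is loosened from ``os.path.isfile`` to ``os.path.exists``, presumably because the master's IPC pull socket is a Unix domain socket rather than a regular file. A small POSIX-only illustration:

.. code-block:: python

    import os
    import socket
    import tempfile

    # A Unix domain socket exists on disk but is not a regular file,
    # so isfile() is False while exists() is True.
    path = os.path.join(tempfile.mkdtemp(), 'demo.ipc')
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind(path)
    print(os.path.isfile(path), os.path.exists(path))  # False True
    sock.close()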


@ -2044,9 +2044,10 @@ def list_input_endpoints(kwargs=None, conn=None, call=None):
ret = {}
for item in data:
if 'Role' not in item:
continue
for role in item['Role']:
if 'Role' in item:
role = item['Role']
if not isinstance(role, dict):
return ret
input_endpoint = role['ConfigurationSets']['ConfigurationSet'].get('InputEndpoints', {}).get('InputEndpoint')
if not input_endpoint:
continue
@ -2054,6 +2055,7 @@ def list_input_endpoints(kwargs=None, conn=None, call=None):
input_endpoint = [input_endpoint]
for endpoint in input_endpoint:
ret[endpoint['Name']] = endpoint
return ret
return ret


@ -9,7 +9,7 @@
#
# BUGS: https://github.com/saltstack/salt-bootstrap/issues
#
# COPYRIGHT: (c) 2012-2017 by the SaltStack Team, see AUTHORS.rst for more
# COPYRIGHT: (c) 2012-2018 by the SaltStack Team, see AUTHORS.rst for more
# details.
#
# LICENSE: Apache 2.0
@ -18,7 +18,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2017.12.13"
__ScriptVersion="2018.04.25"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -249,7 +249,6 @@ _CURL_ARGS=${BS_CURL_ARGS:-}
_FETCH_ARGS=${BS_FETCH_ARGS:-}
_GPG_ARGS=${BS_GPG_ARGS:-}
_WGET_ARGS=${BS_WGET_ARGS:-}
_ENABLE_EXTERNAL_ZMQ_REPOS=${BS_ENABLE_EXTERNAL_ZMQ_REPOS:-$BS_FALSE}
_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null}
_SALT_MINION_ID="null"
# _SIMPLIFY_VERSION is mostly used in Solaris based distributions
@ -299,13 +298,13 @@ __usage() {
Examples:
- ${__ScriptName}
- ${__ScriptName} stable
- ${__ScriptName} stable 2016.3
- ${__ScriptName} stable 2016.3.1
- ${__ScriptName} stable 2017.7
- ${__ScriptName} stable 2017.7.2
- ${__ScriptName} daily
- ${__ScriptName} testing
- ${__ScriptName} git
- ${__ScriptName} git 2016.3
- ${__ScriptName} git v2016.3.1
- ${__ScriptName} git 2017.7
- ${__ScriptName} git v2017.7.2
- ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358
Options:
@ -355,8 +354,6 @@ __usage() {
per -p flag. You're responsible for providing the proper package name.
-H Use the specified HTTP proxy for all download URLs (including https://).
For example: http://myproxy.example.com:3128
-Z Enable additional package repository for newer ZeroMQ
(only available for RHEL/CentOS/Fedora/Ubuntu based distributions)
-b Assume that dependencies are already installed and software sources are
set up. If git is selected, git tree is still checked out as dependency
step.
@ -395,7 +392,7 @@ __usage() {
tested with Centos 6 and is considered experimental. This will install the
ius repo on the box if disable repo is false. This must be used in conjunction
with -x <pythonversion>. For example:
sh bootstrap.sh -P -y -x python2.7 git v2016.11.3
sh bootstrap.sh -P -y -x python2.7 git v2017.7.2
The above will install python27 and install the git version of salt using the
python2.7 executable. This only works for git and pip installations.
@ -438,7 +435,6 @@ do
p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;;
d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;;
H ) _HTTP_PROXY="$OPTARG" ;;
Z ) _ENABLE_EXTERNAL_ZMQ_REPOS=$BS_TRUE ;;
b ) _NO_DEPS=$BS_TRUE ;;
f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;;
l ) _DISABLE_SSL=$BS_TRUE ;;
@ -593,14 +589,14 @@ elif [ "$ITYPE" = "stable" ]; then
if [ "$#" -eq 0 ];then
STABLE_REV="latest"
else
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7)$')" != "" ]; then
if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$')" != "" ]; then
STABLE_REV="$1"
shift
elif [ "$(echo "$1" | egrep '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then
STABLE_REV="archive/$1"
shift
else
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, latest, \$MAJOR.\$MINOR.\$PATCH)"
echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, latest, \$MAJOR.\$MINOR.\$PATCH)"
exit 1
fi
fi
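A rough Python rendering of the stable-version handling above, with 2018.3 added to the accepted major releases (the regular expressions mirror the egrep patterns in the script; this sketch is not part of the commit):

.. code-block:: python

    import re

    MAJOR_RE = r'^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3)$'
    POINT_RE = r'^([0-9]*\.[0-9]*\.[0-9]*)$'

    def stable_rev(version):
        # Major releases are used as-is; point releases map to archive/<ver>.
        if re.match(MAJOR_RE, version):
            return version
        if re.match(POINT_RE, version):
            return 'archive/' + version
        raise ValueError('Unknown stable version: %s' % version)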
@ -1331,10 +1327,10 @@ __check_dpkg_architecture() {
if [ "${error_msg}" != "" ]; then
echoerror "${error_msg}"
if [ "$ITYPE" != "git" ]; then
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.11.5."
echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2."
echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository."
echoerror "For example:"
echoerror " sh ${__ScriptName} -r -P git v2016.11.5"
echoerror " sh ${__ScriptName} -r -P git v2017.7.2"
fi
fi
@ -1372,16 +1368,10 @@ __ubuntu_codename_translation() {
DISTRO_CODENAME="trusty"
;;
"16")
if [ "$_april" ]; then
DISTRO_CODENAME="xenial"
else
DISTRO_CODENAME="yakkety"
fi
DISTRO_CODENAME="xenial"
;;
"17")
if [ "$_april" ]; then
DISTRO_CODENAME="zesty"
fi
DISTRO_CODENAME="artful"
;;
*)
DISTRO_CODENAME="trusty"
@ -1500,9 +1490,12 @@ __check_end_of_life_versions() {
# < 14.04
# = 14.10
# = 15.04, 15.10
# = 16.10
# = 17.04
if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
[ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
([ "$DISTRO_MAJOR_VERSION" -eq 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]) || \
([ "$DISTRO_MAJOR_VERSION" -lt 17 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://wiki.ubuntu.com/Releases"
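The updated end-of-life logic above now also rejects Ubuntu 16.10 and 17.04. A Python rendering of the same condition (illustrative only):

.. code-block:: python

    def ubuntu_is_eol(major, minor):
        # Mirrors the shell test above: < 14.04, 14.10, 15.x, 16.10 and
        # 17.04 are all end-of-life.
        return (major < 14
                or major == 15
                or (major == 17 and minor == 4)
                or (major < 17 and minor == 10))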
@ -1544,8 +1537,8 @@ __check_end_of_life_versions() {
;;
fedora)
# Fedora lower than 25 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 25 ]; then
# Fedora lower than 26 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://fedoraproject.org/wiki/Releases"
@ -1765,12 +1758,41 @@ __function_defined() {
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __wait_for_apt
# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before
# calling these again. This is useful when these process calls are part of
# a boot process, such as on AWS AMIs. This func will wait until the boot
# process is finished so the script doesn't exit on a locked proc.
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
echodebug "Checking if apt process is currently running."
# Timeout set at 15 minutes
WAIT_TIMEOUT=900
while ps -C apt,apt-get,aptitude,dpkg >/dev/null; do
sleep 1
WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))
# If timeout reaches 0, abort.
if [ "$WAIT_TIMEOUT" -eq 0 ]; then
echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
echoerror "Bootstrap script cannot proceed. Aborting."
return 1
fi
done
echodebug "No apt processes are currently running."
}
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __apt_get_install_noinput
# DESCRIPTION: (DRY) apt-get install with noinput options
# PARAMETERS: packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
__wait_for_apt
apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
} # ---------- end of function __apt_get_install_noinput ----------
@ -1780,6 +1802,7 @@ __apt_get_install_noinput() {
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
#----------------------------------------------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
__wait_for_apt
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
} # ---------- end of function __apt_get_upgrade_noinput ----------
@ -1790,6 +1813,7 @@ __apt_get_upgrade_noinput() {
# PARAMETERS: url
#----------------------------------------------------------------------------------------------------------------------
__apt_key_fetch() {
__wait_for_apt
url=$1
# shellcheck disable=SC2086
@ -2544,7 +2568,7 @@ __enable_universe_repository() {
__install_saltstack_ubuntu_repository() {
# Workaround for latest non-LTS ubuntu
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems."
UBUNTU_VERSION=16.04
UBUNTU_CODENAME="xenial"
@ -2556,8 +2580,8 @@ __install_saltstack_ubuntu_repository() {
__PACKAGES=''
# Install downloader backend for GPG keys fetching
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg2 dirmngr"
if [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg dirmngr"
else
__PACKAGES="${__PACKAGES} gnupg-curl"
fi
@ -2576,6 +2600,7 @@ __install_saltstack_ubuntu_repository() {
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2588,6 +2613,7 @@ install_ubuntu_deps() {
__enable_universe_repository || return 1
__wait_for_apt
apt-get update
fi
@ -2644,6 +2670,7 @@ install_ubuntu_stable_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -2664,6 +2691,7 @@ install_ubuntu_stable_deps() {
}
install_ubuntu_daily_deps() {
__wait_for_apt
install_ubuntu_stable_deps || return 1
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
@ -2681,6 +2709,7 @@ install_ubuntu_daily_deps() {
}
install_ubuntu_git_deps() {
__wait_for_apt
apt-get update
if ! __check_command_exists git; then
@ -2711,8 +2740,8 @@ install_ubuntu_git_deps() {
else
install_ubuntu_stable_deps || return 1
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-m2crypto python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -2791,7 +2820,7 @@ install_ubuntu_stable_post() {
/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/bin/systemctl daemon-reload
elif [ -f /etc/init.d/salt-$fname ]; then
update-rc.d salt-$fname defaults
@ -2817,7 +2846,7 @@ install_ubuntu_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
elif [ -f /sbin/initctl ]; then
_upstart_conf="/etc/init/salt-$fname.conf"
@ -2973,6 +3002,7 @@ __install_saltstack_debian_repository() {
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
__wait_for_apt
apt-get update
}
@ -2984,6 +3014,7 @@ install_debian_deps() {
# No user interaction, libc6 restart services for example
export DEBIAN_FRONTEND=noninteractive
__wait_for_apt
apt-get update
if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
@ -3030,9 +3061,9 @@ install_debian_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname"
__PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3071,8 +3102,9 @@ install_debian_8_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-systemd python-yaml python-zmq"
__PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3081,7 +3113,7 @@ install_debian_8_git_deps() {
__PIP_PACKAGES=''
if (__check_pip_allowed >/dev/null 2>&1); then
__PIP_PACKAGES='tornado'
__PIP_PACKAGES='tornado<5.0'
# Install development environment for building tornado Python module
__PACKAGES="${__PACKAGES} build-essential python-dev"
@ -3096,6 +3128,7 @@ install_debian_8_git_deps() {
/etc/apt/sources.list.d/backports.list
fi
__wait_for_apt
apt-get update || return 1
# python-tornado package should be installed from backports repo
@ -3135,8 +3168,8 @@ install_debian_9_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 lsb-release python-apt python-backports-abc python-crypto"
__PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests python-systemd"
__PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq"
__PACKAGES="${__PACKAGES} python-jinja2 python-m2crypto python-msgpack python-requests"
__PACKAGES="${__PACKAGES} python-systemd python-tornado python-yaml python-zmq"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
# Install python-libcloud if asked to
@ -3330,15 +3363,8 @@ install_debian_check_services() {
install_fedora_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
if [ "$_ENABLE_EXTERNAL_ZMQ_REPOS" -eq $BS_TRUE ]; then
__install_saltstack_copr_zeromq_repository || return 1
fi
__install_saltstack_copr_salt_repository || return 1
fi
__PACKAGES="PyYAML libyaml python-crypto python-jinja2 python-zmq python2-msgpack python2-requests"
__PACKAGES="libyaml m2crypto PyYAML python-crypto python-jinja2"
__PACKAGES="${__PACKAGES} python2-msgpack python2-requests python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
__PACKAGES="${__PACKAGES} yum-utils"
@ -3395,7 +3421,7 @@ install_fedora_stable_post() {
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3456,7 +3482,7 @@ install_fedora_git_post() {
[ $fname = "api" ] && continue
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
done
}
@ -3523,20 +3549,6 @@ __install_epel_repository() {
return 0
}
__install_saltstack_copr_zeromq_repository() {
echoinfo "Installing Zeromq >=4 and PyZMQ>=14 from SaltStack's COPR repository"
if [ ! -s /etc/yum.repos.d/saltstack-zeromq4.repo ]; then
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__fetch_url /etc/yum.repos.d/saltstack-zeromq4.repo \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/zeromq4/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/saltstack-zeromq4-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo" || return 1
fi
return 0
}
__install_saltstack_rhel_repository() {
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
@ -3550,7 +3562,7 @@ __install_saltstack_rhel_repository() {
gpg_key="SALTSTACK-GPG-KEY.pub"
repo_file="/etc/yum.repos.d/saltstack.repo"
if [ ! -s "$repo_file" ]; then
if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then
cat <<_eof > "$repo_file"
[saltstack]
name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever
@ -3564,26 +3576,10 @@ _eof
fetch_url="${HTTP_VAL}://${_REPO_URL}/yum/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/"
__rpm_import_gpg "${fetch_url}${gpg_key}" || return 1
fi
return 0
}
__install_saltstack_copr_salt_repository() {
echoinfo "Adding SaltStack's COPR repository"
if [ "${DISTRO_NAME_L}" = "fedora" ]; then
[ "$DISTRO_MAJOR_VERSION" -ge 22 ] && return 0
__REPOTYPE="${DISTRO_NAME_L}"
else
__REPOTYPE="epel"
fi
__REPO_FILENAME="saltstack-salt-${__REPOTYPE}-${DISTRO_MAJOR_VERSION}.repo"
if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then
__fetch_url "/etc/yum.repos.d/${__REPO_FILENAME}" \
"${HTTP_VAL}://copr.fedorainfracloud.org/coprs/saltstack/salt/repo/${__REPOTYPE}-${DISTRO_MAJOR_VERSION}/${__REPO_FILENAME}" || return 1
yum clean metadata || return 1
elif [ "$repo_rev" != "latest" ]; then
echowarn "saltstack.repo already exists, ignoring salt version argument."
echowarn "Use -F (forced overwrite) to install $repo_rev."
fi
return 0
@ -3688,7 +3684,8 @@ install_centos_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="python-crypto python-futures python-msgpack python-zmq python-jinja2 python-requests python-tornado"
__PACKAGES="m2crypto python-crypto python-futures python-jinja2 python-msgpack"
__PACKAGES="${__PACKAGES} python-requests python-tornado python-zmq"
if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then
__PACKAGES="${__PACKAGES} systemd-python"
@ -3705,7 +3702,12 @@ install_centos_git_deps() {
if [ "${_PY_EXE}" != "" ]; then
# If "-x" is defined, install dependencies with pip based on the Python version given.
_PIP_PACKAGES="jinja2 msgpack-python pycrypto PyYAML tornado zmq"
_PIP_PACKAGES="m2crypto jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq"
# install swig and openssl on cent6
if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then
__yum_install_noinput openssl-devel swig || return 1
fi
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
for SINGLE_PACKAGE in $_PIP_PACKAGES; do
@ -4275,7 +4277,7 @@ install_alpine_linux_stable_deps() {
install_alpine_linux_git_deps() {
install_alpine_linux_stable_deps || return 1
apk -U add python2 py-virtualenv py2-crypto py2-setuptools \
apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \
py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \
py2-zmq zeromq py2-requests || return 1
@ -4367,6 +4369,7 @@ install_alpine_linux_restart_daemons() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# Disable stdin to fix shell session hang on killing tee pipe
/sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1
@ -4382,6 +4385,7 @@ install_alpine_linux_check_services() {
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
__check_services_alpine salt-$fname || return 1
done
@ -4400,6 +4404,7 @@ daemons_running_alpine_linux() {
# Skip if not meant to be installed
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# shellcheck disable=SC2009
if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then
@ -4427,10 +4432,20 @@ install_amazon_linux_ami_deps() {
_USEAWS=$BS_FALSE
pkg_append="python"
repo_rev="$(echo "${STABLE_REV}" | sed 's|.*\/||g')"
if [ "$ITYPE" = "stable" ]; then
repo_rev="$STABLE_REV"
else
repo_rev="latest"
fi
if echo $repo_rev | egrep -q '^archive'; then
year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4)
else
year=$(echo "$repo_rev" | cut -c1-4)
fi
if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$' || \
[ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ]; then
[ "$year" -gt 2016 ]; then
_USEAWS=$BS_TRUE
pkg_append="python27"
fi
@ -4477,7 +4492,8 @@ _eof
# Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64
# which is already installed
__PACKAGES="${pkg_append}-PyYAML ${pkg_append}-crypto ${pkg_append}-msgpack ${pkg_append}-zmq ${pkg_append}-jinja2 ${pkg_append}-requests"
__PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML"
__PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq"
# shellcheck disable=SC2086
__yum_install_noinput ${__PACKAGES} || return 1
@ -4630,7 +4646,7 @@ install_arch_linux_git_deps() {
fi
pacman -R --noconfirm python2-distribute
pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \
python2-markupsafe python2-msgpack python2-psutil \
python2-m2crypto python2-markupsafe python2-msgpack python2-psutil \
python2-pyzmq zeromq python2-requests python2-systemd || return 1
__git_clone_and_checkout || return 1
@ -4704,7 +4720,7 @@ install_arch_linux_post() {
/usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4732,7 +4748,7 @@ install_arch_linux_git_post() {
/usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 &&
/usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1
)
sleep 0.1
sleep 1
/usr/bin/systemctl daemon-reload
continue
fi
@ -4885,9 +4901,9 @@ install_freebsd_9_stable_deps() {
__configure_freebsd_pkg_details || return 1
fi
# Now install swig
# Now install swig30
# shellcheck disable=SC2086
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig || return 1
/usr/local/sbin/pkg install ${FROM_FREEBSD} -y swig30 || return 1
# YAML module is used for generating custom master/minion configs
# shellcheck disable=SC2086
@ -4934,7 +4950,7 @@ install_freebsd_git_deps() {
# We're on the develop branch, install whichever tornado is on the requirements file
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")"
if [ "${__REQUIRED_TORNADO}" != "" ]; then
/usr/local/sbin/pkg install -y www/py-tornado || return 1
/usr/local/sbin/pkg install -y www/py-tornado4 || return 1
fi
fi
@ -5098,35 +5114,11 @@ install_freebsd_restart_daemons() {
# OpenBSD Install Functions
#
__choose_openbsd_mirror() {
OPENBSD_REPO=''
MINTIME=''
MIRROR_LIST=$(ftp -w 15 -Vao - 'https://ftp.openbsd.org/cgi-bin/ftplist.cgi?dbversion=1' | awk '/^http/ {print $1}')
for MIRROR in $MIRROR_LIST; do
MIRROR_HOST=$(echo "$MIRROR" | sed -e 's|.*//||' -e 's|+*/.*$||')
TIME=$(ping -c 1 -w 1 -q "$MIRROR_HOST" | awk -F/ '/round-trip/ { print $5 }')
[ -z "$TIME" ] && continue
echodebug "ping time for $MIRROR_HOST is $TIME"
if [ -z "$MINTIME" ]; then
FASTER_MIRROR=1
else
FASTER_MIRROR=$(echo "$TIME < $MINTIME" | bc)
fi
if [ "$FASTER_MIRROR" -eq 1 ]; then
MINTIME=$TIME
OPENBSD_REPO="$MIRROR"
fi
done
}
install_openbsd_deps() {
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
__choose_openbsd_mirror || return 1
echoinfo "setting package repository to $OPENBSD_REPO with ping time of $MINTIME"
[ -n "$OPENBSD_REPO" ] || return 1
echo "${OPENBSD_REPO}" >>/etc/installurl || return 1
OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD'
echoinfo "setting package repository to $OPENBSD_REPO"
echo "${OPENBSD_REPO}" >/etc/installurl || return 1
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
@ -5226,7 +5218,7 @@ install_openbsd_restart_daemons() {
# SmartOS Install Functions
#
install_smartos_deps() {
pkgin -y install zeromq py27-crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
pkgin -y install zeromq py27-crypto py27-m2crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
@ -5456,6 +5448,13 @@ __version_lte() {
}
__zypper() {
# Check if any zypper process is running before calling zypper again.
# This is useful when a zypper call is part of a boot process and will
# wait until the zypper process is finished, such as on AWS AMIs.
while pgrep -l zypper; do
sleep 1
done
zypper --non-interactive "${@}"; return $?
}
@ -5515,7 +5514,7 @@ install_opensuse_stable_deps() {
}
install_opensuse_git_deps() {
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! __check_command_exists update-ca-certificates; then
__zypper_install ca-certificates || return 1
fi
@ -5529,7 +5528,7 @@ install_opensuse_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES="libzmq5 python-Jinja2 python-msgpack-python python-pycrypto python-pyzmq python-xml"
__PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5594,7 +5593,7 @@ install_opensuse_stable_post() {
if [ -f /bin/systemctl ]; then
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
sleep 1
systemctl daemon-reload
continue
fi
@ -5723,6 +5722,12 @@ install_suse_12_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5825,6 +5830,11 @@ install_suse_11_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
# SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which
# we want to install, even with --non-interactive.
# Let's try to install the higher version first and then the lower one in case of failure
__zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086


@ -3571,6 +3571,8 @@ def apply_master_config(overrides=None, defaults=None):
if overrides:
opts.update(overrides)
opts['__cli'] = os.path.basename(sys.argv[0])
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
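The guard added above appears aimed at keeping the socket directory path short (deeply nested ``sock_dir`` values can push Unix-socket paths past the platform limit). A standalone sketch of the same comparison with made-up values:

.. code-block:: python

    import os

    # Illustrative values only; not Salt's actual defaults.
    cachedir = '/var/cache/salt/master'
    sock_dir = '/a/very/deeply/nested/custom/location/for/master/sockets'

    # Same guard as the hunk above: fall back to a directory under
    # cachedir when sock_dir is much longer than cachedir.
    if len(sock_dir) > len(cachedir) + 10:
        sock_dir = os.path.join(cachedir, '.salt-unix')
    print(sock_dir)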


@ -15,15 +15,12 @@ import errno
# Import ioflo libs
import ioflo.base.deeding
# Import third party libs
try:
import zmq
import salt.master
import salt.crypt
import salt.daemons.masterapi
import salt.payload
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
from salt.utils.zeromq import zmq
import salt.master
import salt.crypt
import salt.daemons.masterapi
import salt.payload
log = logging.getLogger(__name__)
@ -159,7 +156,7 @@ class SaltZmqPublisher(ioflo.base.deeding.Deed):
'''
Set up tracking value(s)
'''
if not HAS_ZMQ:
if not zmq:
return
self.created = False
self.serial = salt.payload.Serial(self.opts.value)


@ -94,14 +94,15 @@ class IRCClient(object):
self.allow_nicks = allow_nicks
self.disable_query = disable_query
self.io_loop = tornado.ioloop.IOLoop(make_current=False)
self.io_loop.make_current()
self._connect()
def _connect(self):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
if self.ssl is True:
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE}, io_loop=self.io_loop)
self._stream = tornado.iostream.SSLIOStream(_sock, ssl_options={'cert_reqs': ssl.CERT_NONE})
else:
self._stream = tornado.iostream.IOStream(_sock, io_loop=self.io_loop)
self._stream = tornado.iostream.IOStream(_sock)
self._stream.set_close_callback(self.on_closed)
self._stream.connect((self.host, self.port), self.on_connect)


@ -173,11 +173,7 @@ from __future__ import absolute_import
import logging
# Import third party libraries
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
from salt.utils.zeromq import zmq
try:
# pylint: disable=W0611
@ -209,7 +205,7 @@ def __virtual__():
'''
Load only if napalm-logs is installed.
'''
if not HAS_NAPALM_LOGS or not HAS_ZMQ:
if not HAS_NAPALM_LOGS or not zmq:
return (False, 'napalm_syslog could not be loaded. \
Please install napalm-logs library and ZeroMQ.')
return True


@ -81,6 +81,7 @@ def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
if all([ssl_crt, ssl_key]):
ssl_options = {"certfile": ssl_crt, "keyfile": ssl_key}
io_loop = tornado.ioloop.IOLoop(make_current=False)
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options, io_loop=io_loop)
io_loop.make_current()
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
http_server.listen(port, address=address)
io_loop.start()
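The Tornado-related hunks in this commit follow the same pattern, consistent with Tornado 5 dropping the ``io_loop`` keyword argument: create the loop, make it current, and let the server or stream pick it up implicitly. A minimal standalone sketch:

.. code-block:: python

    import tornado.httpserver
    import tornado.ioloop
    import tornado.web

    app = tornado.web.Application([])
    io_loop = tornado.ioloop.IOLoop(make_current=False)
    io_loop.make_current()
    # HTTPServer no longer accepts io_loop=; it uses the current loop.
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8080)
    # io_loop.start()  # left commented so the sketch does not block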


@ -56,7 +56,7 @@ def inet_pton(address_family, ip_string):
addr_size = ctypes.c_int(ctypes.sizeof(addr))
if WSAStringToAddressA(
ip_string,
ip_string.encode('ascii'),
address_family,
None,
ctypes.byref(addr),
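The ``encode('ascii')`` above matters because ``WSAStringToAddressA`` is the ANSI entry point and ctypes only accepts byte strings for ``c_char_p`` parameters on Python 3. A tiny, platform-independent illustration of the distinction:

.. code-block:: python

    import ctypes

    # c_char_p wraps byte strings; on Python 3 a str must be encoded first.
    ok = ctypes.c_char_p('192.168.0.1'.encode('ascii'))
    print(ok.value)  # b'192.168.0.1'

    try:
        ctypes.c_char_p('192.168.0.1')  # str, not bytes
    except TypeError as exc:
        print('rejected:', exc)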

View File

@ -675,13 +675,6 @@ def _virtual(osdata):
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
elif command == 'virt-what':
# if 'virt-what' returns nothing, it's either an undetected platform
# so we default just as virt-what to 'physical', otherwise use the
# platform detected/returned by virt-what
if output:
grains['virtual'] = output.lower()
break
elif command == 'prtdiag':
model = output.lower().split("\n")[0]
if 'vmware' in model:
@ -1113,6 +1106,7 @@ _OS_NAME_MAP = {
'synology': 'Synology',
'nilrt': 'NILinuxRT',
'nilrt-xfce': 'NILinuxRT-XFCE',
'poky': 'Poky',
'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'antergos': 'Antergos',
@ -1654,7 +1648,7 @@ def os_data():
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') == 'RedHat':
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
elif grains.get('os_family') == 'NILinuxRT':
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
if line.startswith('arch'):

View File

@ -91,7 +91,7 @@ def _retrieve_device_cache(proxy=None):
DEVICE_CACHE = proxy['napalm.get_device']()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
DEVICE_CACHE = salt.utils.napalm.get_device_opts(__opts__)
DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
return DEVICE_CACHE

View File

@ -103,6 +103,20 @@ class SysLogHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.SysLogHandler
'''
Syslog handler which properly handles exc_info on a per handler basis
'''
def handleError(self, record):
'''
Override the default error handling mechanism for py3
Deal with syslog os errors when the log file does not exist
'''
handled = False
if sys.stderr and sys.version_info >= (3, 5, 4):
t, v, tb = sys.exc_info()
if t.__name__ == 'FileNotFoundError':
sys.stderr.write('[WARNING ] The log_file does not exist. Logging not setup correctly or syslog service not started.\n')
handled = True
if not handled:
super(SysLogHandler, self).handleError(record)
class RotatingFileHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.RotatingFileHandler, NewStyleClassMixIn):
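For context on the override above: stdlib logging handlers catch exceptions inside ``emit()`` and route them to ``handleError()``, where ``sys.exc_info()`` is still populated, so the new method can downgrade a missing-syslog-socket ``FileNotFoundError`` to a one-line warning instead of the default traceback dump. A stripped-down sketch of the same mechanism with a deliberately failing handler (purely illustrative, not Salt's class):

.. code-block:: python

    import logging
    import sys

    class QuietOnMissingTarget(logging.Handler):
        def emit(self, record):
            try:
                raise FileNotFoundError('/dev/log')  # simulate a missing syslog socket
            except Exception:
                # Stdlib handlers hand emit() failures to handleError() like this.
                self.handleError(record)

        def handleError(self, record):
            t, v, tb = sys.exc_info()
            if t is FileNotFoundError:
                sys.stderr.write('[WARNING ] log target missing, record skipped\n')
                return
            super(QuietOnMissingTarget, self).handleError(record)

    log = logging.getLogger('demo')
    log.addHandler(QuietOnMissingTarget())
    log.error('produces only the short warning, not a traceback')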

View File

@ -28,21 +28,9 @@ except ImportError:
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import zmq
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
import tornado.gen # pylint: disable=F0401
# Import salt libs
@ -376,23 +364,13 @@ class Master(SMaster):
:param dict: The salt options
'''
if HAS_ZMQ:
# Warn if ZMQ < 3.2
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
@ -856,9 +834,8 @@ class MWorker(SignalHandlingMultiprocessingProcess):
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS()
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?

View File

@ -30,22 +30,10 @@ if six.PY3:
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_RANGE = False
try:
@ -567,8 +555,15 @@ class MinionBase(object):
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'{0}: check ownership/permissions. Error '
'message: {1}'.format(opts['master'], exc)
)
else:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
@ -612,7 +607,7 @@ class MinionBase(object):
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not HAS_ZMQ:
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
@ -649,10 +644,8 @@ class SMinion(MinionBase):
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
@ -798,9 +791,8 @@ class MinionManager(MinionBase):
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
@ -947,23 +939,14 @@ class Minion(MinionBase):
self.periodic_callbacks = {}
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
@ -1049,7 +1032,7 @@ class Minion(MinionBase):
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
@ -2243,13 +2226,15 @@ class Minion(MinionBase):
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2302,14 +2287,15 @@ class Minion(MinionBase):
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
@ -2365,7 +2351,7 @@ class Minion(MinionBase):
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
@ -2625,9 +2611,8 @@ class SyndicManager(MinionBase):
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
@ -2809,7 +2794,7 @@ class SyndicManager(MinionBase):
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1

View File

@ -544,6 +544,10 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
r = conn.associate_vpc_with_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
log.debug('VPC Association already exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
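Treating ``ConflictingDomainExists`` as success makes the call idempotent: the association already existing is the desired end state. Botocore exposes the service error code at ``e.response['Error']['Code']``, which both branches inspect. A self-contained sketch of that dispatch using a synthetic ``ClientError`` (assumes botocore is installed; no AWS call is made):

.. code-block:: python

    from botocore.exceptions import ClientError

    def associate():
        # Synthetic error for illustration; real code would call the Route53 API.
        raise ClientError(
            {'Error': {'Code': 'ConflictingDomainExists', 'Message': 'already associated'}},
            'AssociateVPCWithHostedZone')

    try:
        associate()
    except ClientError as e:
        code = e.response.get('Error', {}).get('Code')
        if code == 'ConflictingDomainExists':
            print('VPC association already exists -- treating as success')
        elif code == 'Throttling':
            print('throttled by AWS -- retry later')
        else:
            raise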

View File

@ -2302,10 +2302,10 @@ def create_route(route_table_id=None, destination_cidr_block=None,
'must be provided.')
if not _exactly_one((gateway_id, internet_gateway_name, instance_id, interface_id, vpc_peering_connection_id,
nat_gateway_id, nat_gateway_subnet_id, nat_gateway_subnet_name)):
nat_gateway_id, nat_gateway_subnet_id, nat_gateway_subnet_name, vpc_peering_connection_name)):
raise SaltInvocationError('Only one of gateway_id, internet_gateway_name, instance_id, '
'interface_id, vpc_peering_connection_id, nat_gateway_id, '
'nat_gateway_subnet_id or nat_gateway_subnet_name may be provided.')
'nat_gateway_subnet_id, nat_gateway_subnet_name or vpc_peering_connection_name may be provided.')
if destination_cidr_block is None:
raise SaltInvocationError('destination_cidr_block is required.')

View File

@ -28,6 +28,7 @@ import salt.utils
import salt.utils.files
import salt.utils.powershell
import salt.utils.timed_subprocess
import salt.utils.win_dacl
import salt.grains.extra
import salt.ext.six as six
from salt.utils import vt
@ -477,10 +478,18 @@ def _run(cmd,
env_runas = dict((sdecode(k), sdecode(v)) for k, v in six.iteritems(env_runas))
env_runas.update(env)
# Fix platforms like Solaris that don't set a USER env var in the
# user's default environment as obtained above.
if env_runas.get('USER') != runas:
env_runas['USER'] = runas
# Fix some corner cases where shelling out to get the user's
# environment returns the wrong home directory.
runas_home = os.path.expanduser('~{0}'.format(runas))
if env_runas.get('HOME') != runas_home:
env_runas['HOME'] = runas_home
env = env_runas
# Encode unicode kwargs to filesystem encoding to avoid a
# UnicodeEncodeError when the subprocess is invoked.
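The ``HOME`` correction above leans on ``os.path.expanduser('~<user>')``, which resolves another account's home from the password database rather than from the inherited environment, so it stays correct even when the shelled-out environment dump reports the wrong directory. For example:

.. code-block:: python

    import os

    runas = 'root'  # hypothetical target user, for illustration
    runas_home = os.path.expanduser('~{0}'.format(runas))
    # Typically '/root' on Linux; if the account is unknown, the literal
    # string '~root' is returned unchanged.
    print(runas_home)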
@ -2071,11 +2080,14 @@ def script(source,
)
kwargs.pop('__env__')
win_cwd = False
if salt.utils.is_windows() and runas and cwd is None:
# Create a temp working directory
cwd = tempfile.mkdtemp(dir=__opts__['cachedir'])
__salt__['win_dacl.add_ace'](
cwd, 'File', runas, 'READ&EXECUTE', 'ALLOW',
'FOLDER&SUBFOLDERS&FILES')
win_cwd = True
salt.utils.win_dacl.set_permissions(obj_name=cwd,
principal=runas,
permissions='full_control')
path = salt.utils.files.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
@ -2089,10 +2101,10 @@ def script(source,
saltenv,
**kwargs)
if not fn_:
if salt.utils.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
@ -2101,10 +2113,10 @@ def script(source,
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
if salt.utils.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
@ -2134,10 +2146,10 @@ def script(source,
bg=bg,
password=password,
**kwargs)
if salt.utils.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
return ret

View File

@ -453,8 +453,8 @@ def delval(key, destructive=False):
.. versionadded:: 0.17.0
Delete a grain value from the grains config file. This will just set the
grain value to `None`. To completely remove the grain run `grains.delkey`
of pass `destructive=True` to `grains.delval`.
grain value to ``None``. To completely remove the grain, run ``grains.delkey``
or pass ``destructive=True`` to ``grains.delval``.
key
The grain key from which to delete the value.

View File

@ -205,6 +205,14 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
To pass in jump options that don't take arguments, pass in an empty
string.
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms
of ``--protocol``, if ``--proto`` appears in an iptables command after
the appearance of ``-m policy``, it is interpreted as the ``--proto``
option of the policy extension (see the iptables-extensions(8) man
page).
CLI Examples:
.. code-block:: bash
@ -235,7 +243,6 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
salt '*' iptables.build_rule filter INPUT command=I position=3 \\
full=True match=state state=RELATED,ESTABLISHED jump=ACCEPT \\
family=ipv6
'''
if 'target' in kwargs:
kwargs['jump'] = kwargs.pop('target')
@ -249,7 +256,7 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
del kwargs[ignore]
rule = []
proto = False
protocol = False
bang_not_pat = re.compile(r'(!|not)\s?')
def maybe_add_negation(arg):
@ -273,12 +280,15 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
rule.append('{0}-o {1}'.format(maybe_add_negation('of'), kwargs['of']))
del kwargs['of']
for proto_arg in ('protocol', 'proto'):
if proto_arg in kwargs:
if not proto:
rule.append('{0}-p {1}'.format(maybe_add_negation(proto_arg), kwargs[proto_arg]))
proto = True
del kwargs[proto_arg]
if 'proto' in kwargs and kwargs.get('match') != 'policy':
kwargs['protocol'] = kwargs['proto']
del kwargs['proto']
# Handle the case 'proto' in kwargs and kwargs.get('match') == 'policy' below
if 'protocol' in kwargs:
if not protocol:
rule.append('{0}-p {1}'.format(maybe_add_negation('protocol'), kwargs['protocol']))
protocol = True
del kwargs['protocol']
if 'match' in kwargs:
match_value = kwargs['match']
@ -289,6 +299,9 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if 'name_' in kwargs and match.strip() in ('pknock', 'quota2', 'recent'):
rule.append('--name {0}'.format(kwargs['name_']))
del kwargs['name_']
if 'proto' in kwargs and kwargs.get('match') == 'policy':
rule.append('{0}--proto {1}'.format(maybe_add_negation('proto'), kwargs['proto']))
del kwargs['proto']
del kwargs['match']
if 'match-set' in kwargs:
@ -322,8 +335,8 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
if multiport_arg in kwargs:
if '-m multiport' not in rule:
rule.append('-m multiport')
if not proto:
return 'Error: proto must be specified'
if not protocol:
return 'Error: protocol must be specified'
mp_value = kwargs[multiport_arg]
if isinstance(mp_value, list):
@ -1033,9 +1046,9 @@ def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
def _parser():
'''
This function contains _all_ the options I could find in man 8 iptables,
listed in the first section that I found them in. They will not all be used
by all parts of the module; use them intelligently and appropriately.
This function attempts to list all the options documented in the
iptables(8) and iptables-extensions(8) man pages. They will not all be
used by all parts of the module; use them intelligently and appropriately.
'''
add_arg = None
if sys.version.startswith('2.6'):
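The ``proto``/``protocol`` reshuffle above follows from the note added to the docstring: the packet protocol has to be rendered as ``-p``/``--protocol`` before any match extensions, while a ``--proto`` that follows ``-m policy`` belongs to the policy extension and constrains the IPsec policy instead. Two hand-written rules for comparison, shown as plain strings rather than output of this module:

.. code-block:: python

    # '-p'/'--protocol' selects the packet protocol for the whole rule:
    plain_rule = 'iptables -A INPUT -p tcp --dport 22 -j ACCEPT'

    # After '-m policy', '--proto' is an option of the policy match (see
    # iptables-extensions(8)) and refers to the IPsec policy, not the packet:
    policy_rule = 'iptables -A INPUT -m policy --dir in --pol ipsec --proto esp -j ACCEPT'

    print(plain_rule)
    print(policy_rule)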

View File

@ -56,9 +56,9 @@ def __virtual__():
'''
Confirm this module is on a nilrt based system
'''
if __grains__.get('os_family', False) == 'NILinuxRT':
if os.path.isdir(OPKG_CONFDIR):
return __virtualname__
return (False, "Module opkg only works on nilrt based systems")
return False, "Module opkg only works on OpenEmbedded based systems"
def latest_version(*names, **kwargs):

View File

@ -77,6 +77,7 @@ of the 2015.5 branch:
from __future__ import absolute_import
# Import python libs
import json
import os
import re
import shutil
@ -114,43 +115,82 @@ def __virtual__():
return 'pip'
def _clear_context(bin_env=None):
'''
Remove the cached pip version
'''
contextkey = 'pip.version'
if bin_env is not None:
contextkey = '{0}.{1}'.format(contextkey, bin_env)
__context__.pop(contextkey, None)
def _get_pip_bin(bin_env):
'''
Locate the pip binary, either from `bin_env` as a virtualenv, as the
executable itself, or from searching conventional filesystem locations
'''
if not bin_env:
which_result = __salt__['cmd.which_bin'](
['pip{0}.{1}'.format(*sys.version_info[:2]),
'pip{0}'.format(sys.version_info[0]),
'pip', 'pip-python']
)
if salt.utils.is_windows() and six.PY2:
which_result = which_result.encode('string-escape')
if which_result is None:
raise CommandNotFoundError('Could not find a `pip` binary')
return which_result
logger.debug('pip: Using pip from currently-running Python')
return [os.path.normpath(sys.executable), '-m', 'pip']
# try to get python bin from virtualenv, bin_env
python_bin = 'python.exe' if salt.utils.is_windows() else 'python'
def _search_paths(*basedirs):
ret = []
for path in basedirs:
ret.extend([
os.path.join(path, python_bin),
os.path.join(path, 'bin', python_bin),
os.path.join(path, 'Scripts', python_bin)
])
return ret
# try to get pip bin from virtualenv, bin_env
if os.path.isdir(bin_env):
if salt.utils.is_windows():
if six.PY2:
pip_bin = os.path.join(
bin_env, 'Scripts', 'pip.exe').encode('string-escape')
else:
pip_bin = os.path.join(bin_env, 'Scripts', 'pip.exe')
else:
pip_bin = os.path.join(bin_env, 'bin', 'pip')
if os.path.isfile(pip_bin):
return pip_bin
msg = 'Could not find a `pip` binary in virtualenv {0}'.format(bin_env)
raise CommandNotFoundError(msg)
# bin_env is the pip binary
for bin_path in _search_paths(bin_env):
if os.path.isfile(bin_path):
if os.access(bin_path, os.X_OK):
logger.debug('pip: Found python binary: %s', bin_path)
return [os.path.normpath(bin_path), '-m', 'pip']
else:
logger.debug(
'pip: Found python binary by name but it is not '
'executable: %s', bin_path
)
raise CommandNotFoundError(
'Could not find a pip binary in virtualenv {0}'.format(bin_env)
)
# bin_env is the python or pip binary
elif os.access(bin_env, os.X_OK):
if os.path.isfile(bin_env) or os.path.islink(bin_env):
return bin_env
if os.path.isfile(bin_env):
# If the python binary was passed, return it
if 'python' in os.path.basename(bin_env):
return [os.path.normpath(bin_env), '-m', 'pip']
# Try to find the python binary relative to the location of the pip
# binary; in a virtual environment the two sit next to each other.
if 'pip' in os.path.basename(bin_env):
# Look in the same directory as the pip binary, and also its
# parent directories.
pip_dirname = os.path.dirname(bin_env)
pip_parent_dir = os.path.dirname(pip_dirname)
for bin_path in _search_paths(pip_dirname, pip_parent_dir):
if os.path.isfile(bin_path):
logger.debug('pip: Found python binary: %s', bin_path)
return [os.path.normpath(bin_path), '-m', 'pip']
# Couldn't find python, use the passed pip binary
# This has the limitation of being unable to update pip itself
return [os.path.normpath(bin_env)]
raise CommandExecutionError(
'Could not find a pip binary within {0}'.format(bin_env)
)
else:
raise CommandNotFoundError('Could not find a `pip` binary')
raise CommandNotFoundError(
'Access denied to {0}, could not find a pip binary'.format(bin_env)
)
def _get_cached_requirements(requirements, saltenv):
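``_get_pip_bin`` now prefers returning a ``[python, '-m', 'pip']`` argv list over a bare ``pip`` script, which pins the invocation to a specific interpreter (including a virtualenv's) and avoids stale wrapper scripts. A minimal sketch of driving pip that way with the standard library; the package name in the comment is hypothetical:

.. code-block:: python

    import subprocess
    import sys

    # Run pip as a module of an explicit interpreter instead of a 'pip' script.
    cmd = [sys.executable, '-m', 'pip', '--version']
    print(subprocess.check_output(cmd).decode().strip())

    # The same argv shape works for installs, e.g. (hypothetical package):
    # subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'example-pkg'])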
@ -267,15 +307,21 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
treq = tempfile.mkdtemp()
__salt__['file.chown'](treq, user, None)
# In Windows, just being owner of a file isn't enough. You also
# need permissions
if salt.utils.is_windows():
__utils__['win_dacl.set_permissions'](
obj_name=treq,
principal=user,
permissions='read_execute')
current_directory = None
if not current_directory:
current_directory = os.path.abspath(os.curdir)
logger.info('_process_requirements from directory,' +
'%s -- requirement: %s', cwd, requirement
)
logger.info('_process_requirements from directory, '
'%s -- requirement: %s', cwd, requirement)
if cwd is None:
r = requirement
@ -378,7 +424,6 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
global_options=None,
install_options=None,
user=None,
no_chown=False,
cwd=None,
pre_releases=False,
cert=None,
@ -391,7 +436,9 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None):
cache_dir=None,
no_binary=None,
**kwargs):
'''
Install packages with pip
@ -405,19 +452,21 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
Path to requirements
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
.. note::
If installing into a virtualenv, just use the path to the
virtualenv (e.g. ``/home/code/path/to/virtualenv/``)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
use_wheel
Prefer wheel archives (requires pip>=1.4)
no_use_wheel
Force to not use wheel archives (requires pip>=1.4)
Force to not use wheel archives (requires pip>=1.4,<10.0.0)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or one or more package names with commas between them
log
Log file where a complete (maximum verbosity) record will be kept
@ -508,12 +557,8 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown a requirements
file
cwd
Current working directory to run pip from
Directory from which to run pip
pre_releases
Include pre-releases in the available versions
@ -572,9 +617,14 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'install']
if 'no_chown' in kwargs:
salt.utils.warn_until(
'Flourine',
'The no_chown argument has been deprecated and is no longer used. '
'Its functionality was removed in Boron.')
kwargs.pop('no_chown')
cmd = _get_pip_bin(bin_env)
cmd.append('install')
cleanup_requirements, error = _process_requirements(
requirements=requirements,
@ -587,31 +637,49 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if error:
return error
cur_version = version(bin_env)
if use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
max_version = '9.0.3'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
logger.error(
('The --use-wheel option is only supported in pip {0} and '
'newer. The version of pip detected is {1}. This option '
'will be ignored.'.format(min_version, cur_version))
('The --use-wheel option is only supported in pip between {0} and '
'{1}. The version of pip detected is {2}. This option '
'will be ignored.'.format(min_version, max_version, cur_version))
)
else:
cmd.append('--use-wheel')
if no_use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
max_version = '9.0.3'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
logger.error(
('The --no-use-wheel option is only supported in pip {0} and '
('The --no-use-wheel option is only supported in pip between {0} and '
'{1}. The version of pip detected is {2}. This option '
'will be ignored.'.format(min_version, max_version, cur_version))
)
else:
cmd.append('--no-use-wheel')
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
logger.error(
('The --no-binary option is only supported in pip {0} and '
'newer. The version of pip detected is {1}. This option '
'will be ignored.'.format(min_version, cur_version))
)
else:
cmd.append('--no-use-wheel')
if isinstance(no_binary, list):
no_binary = ','.join(no_binary)
cmd.extend(['--no-binary', no_binary])
if log:
if os.path.isdir(log):
@ -675,8 +743,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if mirrors:
# https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216
pip_version = version(pip_bin)
if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='7.0.0'):
if salt.utils.compare_versions(ver1=cur_version, oper='>=', ver2='7.0.0'):
raise CommandExecutionError(
'pip >= 7.0.0 does not support mirror argument:'
' use index_url and/or extra_index_url instead'
@ -704,7 +771,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if download_cache or cache_dir:
cmd.extend(['--cache-dir' if salt.utils.compare_versions(
ver1=version(bin_env), oper='>=', ver2='6.0'
ver1=cur_version, oper='>=', ver2='6.0'
) else '--download-cache', download_cache or cache_dir])
if source:
@ -741,7 +808,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if pre_releases:
# Check the locally installed pip version
pip_version = version(pip_bin)
pip_version = cur_version
# From pip v1.4 the --pre flag is available
if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='1.4'):
@ -773,6 +840,11 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
# Put the commas back in while making sure the names are contained in
# quotes, this allows for proper version spec passing salt>=0.17.0
cmd.extend(['{0}'.format(p.replace(';', ',')) for p in pkgs])
elif not any([requirements, editable]):
# Starting with pip 10.0.0, if no packages are specified in the
# command, it returns a retcode 1. So instead of running the command,
# just return the output without running pip.
return {'retcode': 0, 'stdout': 'No packages to install.'}
if editable:
egg_match = re.compile(r'(?:#|#.*?&)egg=([^&]*)')
@ -817,6 +889,9 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if kwargs:
cmd_kwargs.update(kwargs)
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
@ -836,6 +911,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
python_shell=False,
**cmd_kwargs)
finally:
_clear_context(bin_env)
for tempdir in [cr for cr in cleanup_requirements if cr is not None]:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
@ -848,46 +924,42 @@ def uninstall(pkgs=None,
proxy=None,
timeout=None,
user=None,
no_chown=False,
cwd=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages with pip
Uninstall packages individually or from a pip requirements file. Uninstall
packages globally or from a virtualenv.
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to uninstall
requirements
path to requirements.
Path to requirements file
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown
a requirements file (needed if the requirements file refers to other
files via relative paths, as the copy-and-chown procedure does not
account for such files)
cwd
Current working directory to run pip from
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
@ -899,11 +971,9 @@ def uninstall(pkgs=None,
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'uninstall', '-y']
cmd = _get_pip_bin(bin_env)
cmd.extend(['uninstall', '-y'])
cleanup_requirements, error = _process_requirements(
requirements=requirements, cmd=cmd, saltenv=saltenv, user=user,
@ -962,6 +1032,7 @@ def uninstall(pkgs=None,
try:
return __salt__['cmd.run_all'](cmd, **cmd_kwargs)
finally:
_clear_context(bin_env)
for requirement in cleanup_requirements:
if requirement:
try:
@ -974,48 +1045,42 @@ def freeze(bin_env=None,
user=None,
cwd=None,
use_vt=False,
env_vars=None):
env_vars=None,
**kwargs):
'''
Return a list of installed packages either globally or in the specified
virtualenv
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
user
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
.. note::
If the version of pip available is older than 8.0.3, the list will not
include the packages pip, wheel, setuptools, or distribute even if they
are installed.
include the packages ``pip``, ``wheel``, ``setuptools``, or
``distribute`` even if they are installed.
CLI Example:
.. code-block:: bash
salt '*' pip.freeze /home/code/path/to/virtualenv/
.. versionchanged:: 2016.11.2
The packages pip, wheel, setuptools, and distribute are included if the
installed pip is new enough.
salt '*' pip.freeze bin_env=/home/code/path/to/virtualenv
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'freeze']
cmd = _get_pip_bin(bin_env)
cmd.append('freeze')
# Include pip, setuptools, distribute, wheel
min_version = '8.0.3'
cur_version = version(bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
if salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version):
logger.warning(
('The version of pip installed is {0}, which is older than {1}. '
'The packages pip, wheel, setuptools, and distribute will not be '
@ -1025,14 +1090,16 @@ def freeze(bin_env=None,
cmd.append('--all')
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if kwargs:
cmd_kwargs.update(**kwargs)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
raise CommandExecutionError(result['stderr'])
if result['retcode']:
raise CommandExecutionError(result['stderr'], info=result)
return result['stdout'].splitlines()
@ -1041,7 +1108,8 @@ def list_(prefix=None,
bin_env=None,
user=None,
cwd=None,
env_vars=None):
env_vars=None,
**kwargs):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@ -1049,28 +1117,27 @@ def list_(prefix=None,
.. note::
If the version of pip available is older than 8.0.3, the packages
wheel, setuptools, and distribute will not be reported by this function
even if they are installed. Unlike
:py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
reports the version of pip which is installed.
``wheel``, ``setuptools``, and ``distribute`` will not be reported by
this function even if they are installed. Unlike :py:func:`pip.freeze
<salt.modules.pip.freeze>`, this function always reports the version of
pip which is installed.
CLI Example:
.. code-block:: bash
salt '*' pip.list salt
.. versionchanged:: 2016.11.2
The packages wheel, setuptools, and distribute are included if the
installed pip is new enough.
'''
packages = {}
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
for line in freeze(bin_env=bin_env,
user=user,
cwd=cwd,
env_vars=env_vars,
**kwargs):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines
@ -1080,7 +1147,15 @@ def list_(prefix=None,
continue
elif line.startswith('-e'):
line = line.split('-e ')[1]
version_, name = line.split('#egg=')
if '#egg=' in line:
version_, name = line.split('#egg=')
else:
if len(line.split('===')) >= 2:
name = line.split('===')[0]
version_ = line.split('===')[1]
elif len(line.split('==')) >= 2:
name = line.split('==')[0]
version_ = line.split('==')[1]
elif len(line.split('===')) >= 2:
name = line.split('===')[0]
version_ = line.split('===')[1]
@ -1115,14 +1190,27 @@ def version(bin_env=None):
salt '*' pip.version
'''
pip_bin = _get_pip_bin(bin_env)
contextkey = 'pip.version'
if bin_env is not None:
contextkey = '{0}.{1}'.format(contextkey, bin_env)
if contextkey in __context__:
return __context__[contextkey]
cmd = _get_pip_bin(bin_env)[:]
cmd.append('--version')
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode']:
raise CommandNotFoundError('Could not find a `pip` binary')
output = __salt__['cmd.run'](
'{0} --version'.format(pip_bin), python_shell=False)
try:
return re.match(r'^pip (\S+)', output).group(1)
pip_version = re.match(r'^pip (\S+)', ret['stdout']).group(1)
except AttributeError:
return None
pip_version = None
__context__[contextkey] = pip_version
return pip_version
def list_upgrades(bin_env=None,
@ -1137,28 +1225,66 @@ def list_upgrades(bin_env=None,
salt '*' pip.list_upgrades
'''
pip_bin = _get_pip_bin(bin_env)
cmd = _get_pip_bin(bin_env)
cmd.extend(['list', '--outdated'])
cmd = [pip_bin, 'list', '--outdated']
pip_version = version(bin_env)
# Pip started supporting the ability to output json starting with 9.0.0
if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='9.0.0'):
cmd.extend(['--format', 'json'])
cmd_kwargs = dict(cwd=cwd, runas=user)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
logger.error(result['stderr'])
raise CommandExecutionError(result['stderr'])
if result['retcode']:
raise CommandExecutionError(result['stderr'], info=result)
packages = {}
for line in result['stdout'].splitlines():
match = re.search(r'(\S*)\s+\(.*Latest:\s+(.*)\)', line)
if match:
name, version_ = match.groups()
# Pip started supporting the ability to output json starting with 9.0.0
# Older versions will have to parse stdout
if salt.utils.compare_versions(ver1=pip_version, oper='<', ver2='9.0.0'):
# Pip versions < 8.0.0 had a different output format
# Sample data:
# pip (Current: 7.1.2 Latest: 10.0.1 [wheel])
# psutil (Current: 5.2.2 Latest: 5.4.5 [wheel])
# pyasn1 (Current: 0.2.3 Latest: 0.4.2 [wheel])
# pycparser (Current: 2.17 Latest: 2.18 [sdist])
if salt.utils.compare_versions(ver1=pip_version, oper='<', ver2='8.0.0'):
logger.debug('pip module: Old output format')
pat = re.compile(r'(\S*)\s+\(.*Latest:\s+(.*)\)')
# New output format for version 8.0.0+
# Sample data:
# pip (8.0.0) - Latest: 10.0.1 [wheel]
# psutil (5.2.2) - Latest: 5.4.5 [wheel]
# pyasn1 (0.2.3) - Latest: 0.4.2 [wheel]
# pycparser (2.17) - Latest: 2.18 [sdist]
else:
logger.error('Can\'t parse line \'{0}\''.format(line))
continue
packages[name] = version_
logger.debug('pip module: New output format')
pat = re.compile(r'(\S*)\s+\(.*\)\s+-\s+Latest:\s+(.*)')
for line in result['stdout'].splitlines():
match = pat.search(line)
if match:
name, version_ = match.groups()
else:
logger.error('Can\'t parse line \'{0}\''.format(line))
continue
packages[name] = version_
else:
logger.debug('pip module: JSON output format')
try:
pkgs = json.loads(result['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Invalid JSON', info=result)
for pkg in pkgs:
packages[pkg['name']] = '{0} [{1}]'.format(pkg['latest_version'],
pkg['latest_filetype'])
return packages
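``list_upgrades`` now branches on the detected pip version because ``--format json`` only exists from pip 9.0.0 and the plain-text layout itself changed at 8.0.0, hence the two regexes. A quick check of both patterns against the sample lines quoted in the comments above:

.. code-block:: python

    import re

    old_pat = re.compile(r'(\S*)\s+\(.*Latest:\s+(.*)\)')         # pip < 8.0.0
    new_pat = re.compile(r'(\S*)\s+\(.*\)\s+-\s+Latest:\s+(.*)')  # 8.0.0 <= pip < 9.0.0

    old_line = 'psutil (Current: 5.2.2 Latest: 5.4.5 [wheel])'
    new_line = 'psutil (5.2.2) - Latest: 5.4.5 [wheel]'

    print(old_pat.search(old_line).groups())  # ('psutil', '5.4.5 [wheel]')
    print(new_pat.search(new_line).groups())  # ('psutil', '5.4.5 [wheel]')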
@ -1187,7 +1313,11 @@ def upgrade(bin_env=None,
'''
.. versionadded:: 2015.5.0
Upgrades outdated pip packages
Upgrades outdated pip packages.
.. note::
On Windows, Salt cannot upgrade itself via pip, so the ``salt`` package
will be skipped
Returns a dict containing the changes.
@ -1205,16 +1335,19 @@ def upgrade(bin_env=None,
'result': True,
'comment': '',
}
pip_bin = _get_pip_bin(bin_env)
cmd = _get_pip_bin(bin_env)
cmd.extend(['install', '-U'])
old = list_(bin_env=bin_env, user=user, cwd=cwd)
cmd = [pip_bin, 'install', '-U']
cmd_kwargs = dict(cwd=cwd, use_vt=use_vt)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
errors = False
for pkg in list_upgrades(bin_env=bin_env, user=user, cwd=cwd):
if pkg == 'salt':
if salt.utils.is_windows():
continue
result = __salt__['cmd.run_all'](cmd + [pkg], **cmd_kwargs)
if result['retcode'] != 0:
errors = True
@ -1223,6 +1356,7 @@ def upgrade(bin_env=None,
if errors:
ret['result'] = False
_clear_context(bin_env)
new = list_(bin_env=bin_env, user=user, cwd=cwd)
ret['changes'] = salt.utils.compare_dicts(old, new)
@ -1246,9 +1380,10 @@ def list_all_versions(pkg,
The package to check
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
include_alpha
Include alpha versions in the list
@ -1263,7 +1398,7 @@ def list_all_versions(pkg,
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
CLI Example:
@ -1271,9 +1406,8 @@ def list_all_versions(pkg,
salt '*' pip.list_all_versions <package name>
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'install', '{0}==versions'.format(pkg)]
cmd = _get_pip_bin(bin_env)
cmd.extend(['install', '{0}==versions'.format(pkg)])
cmd_kwargs = dict(cwd=cwd, runas=user, output_loglevel='quiet', redirect_stderr=True)
if bin_env and os.path.isdir(bin_env):

View File

@ -48,6 +48,10 @@ def __virtual__():
Only work on select distros which still use Red Hat's /usr/bin/service for
management of either sysvinit or a hybrid sysvinit/upstart init system.
'''
# Disable when booted with systemd
if __utils__['systemd.booted'](__context__):
return (False, 'The rh_service execution module failed to load: this system was booted with systemd.')
# Enable on these platforms only.
enable = set((
'XenServer',
@ -97,15 +101,6 @@ def __virtual__():
'RedHat-based distros >= version 7 use systemd, will not '
'load rh_service.py as virtual \'service\''
)
if __grains__['os'] == 'Amazon':
if int(osrelease_major) in (2016, 2017):
return __virtualname__
else:
return (
False,
'Amazon Linux >= version 2 uses systemd. Will not '
'load rh_service.py as virtual \'service\''
)
return __virtualname__
return (False, 'Cannot load rh_service module: OS not in {0}'.format(enable))

View File

@ -280,7 +280,7 @@ def _get_opts(**kwargs):
def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
return __pillar__ if __opts__.get('__cli', None) == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None

View File

@ -939,7 +939,7 @@ def diskusage(*args):
elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):
ifile = __salt__['cmd.run']('mount -p').splitlines()
else:
ifile = []
raise CommandExecutionError('status.diskusage not yet supported on this platform')
for line in ifile:
comps = line.split()

View File

@ -56,7 +56,8 @@ def create(path,
upgrade=None,
user=None,
use_vt=False,
saltenv='base'):
saltenv='base',
**kwargs):
'''
Create a virtualenv
@ -102,6 +103,11 @@ def create(path,
user : None
Set ownership for the virtualenv
.. note::
On Windows you must also pass a ``password`` parameter. Additionally,
the user must have permissions to the location where the virtual
environment is being created
runas : None
Set ownership for the virtualenv
@ -161,7 +167,7 @@ def create(path,
# Unable to import?? Let's parse the version from the console
version_cmd = [venv_bin, '--version']
ret = __salt__['cmd.run_all'](
version_cmd, runas=user, python_shell=False
version_cmd, runas=user, python_shell=False, **kwargs
)
if ret['retcode'] > 0 or not ret['stdout'].strip():
raise CommandExecutionError(
@ -251,7 +257,7 @@ def create(path,
cmd.append(path)
# Let's create the virtualenv
ret = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
ret = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False, **kwargs)
if ret['retcode'] != 0:
# Something went wrong. Let's bail out now!
return ret

View File

@ -718,23 +718,24 @@ def set_lcm_config(config_mode=None,
'ApplyAndAutoCorrect'):
error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \
'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
SaltInvocationError(error)
return error
raise SaltInvocationError(error)
cmd += ' ConfigurationMode = "{0}";'.format(config_mode)
if config_mode_freq:
if not isinstance(config_mode_freq, int):
SaltInvocationError('config_mode_freq must be an integer')
return 'config_mode_freq must be an integer. Passed {0}'.\
format(config_mode_freq)
error = 'config_mode_freq must be an integer. Passed {0}'.format(
config_mode_freq
)
raise SaltInvocationError(error)
cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq)
if refresh_mode:
if refresh_mode not in ('Disabled', 'Push', 'Pull'):
SaltInvocationError('refresh_mode must be one of Disabled, Push, '
'or Pull')
raise SaltInvocationError(
'refresh_mode must be one of Disabled, Push, or Pull'
)
cmd += ' RefreshMode = "{0}";'.format(refresh_mode)
if refresh_freq:
if not isinstance(refresh_freq, int):
SaltInvocationError('refresh_freq must be an integer')
raise SaltInvocationError('refresh_freq must be an integer')
cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq)
if reboot_if_needed is not None:
if not isinstance(reboot_if_needed, bool):
@ -747,8 +748,10 @@ def set_lcm_config(config_mode=None,
if action_after_reboot:
if action_after_reboot not in ('ContinueConfiguration',
'StopConfiguration'):
SaltInvocationError('action_after_reboot must be one of '
'ContinueConfiguration or StopConfiguration')
raise SaltInvocationError(
'action_after_reboot must be one of '
'ContinueConfiguration or StopConfiguration'
)
cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot)
if certificate_id is not None:
if certificate_id == '':
@ -760,7 +763,7 @@ def set_lcm_config(config_mode=None,
cmd += ' ConfigurationID = "{0}";'.format(configuration_id)
if allow_module_overwrite is not None:
if not isinstance(allow_module_overwrite, bool):
SaltInvocationError('allow_module_overwrite must be a boolean value')
raise SaltInvocationError('allow_module_overwrite must be a boolean value')
if allow_module_overwrite:
allow_module_overwrite = '$true'
else:
@ -770,13 +773,14 @@ def set_lcm_config(config_mode=None,
if debug_mode is None:
debug_mode = 'None'
if debug_mode not in ('None', 'ForceModuleImport', 'All'):
SaltInvocationError('debug_mode must be one of None, '
'ForceModuleImport, ResourceScriptBreakAll, or '
'All')
raise SaltInvocationError(
'debug_mode must be one of None, ForceModuleImport, '
'ResourceScriptBreakAll, or All'
)
cmd += ' DebugMode = "{0}";'.format(debug_mode)
if status_retention_days:
if not isinstance(status_retention_days, int):
SaltInvocationError('status_retention_days must be an integer')
raise SaltInvocationError('status_retention_days must be an integer')
cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days)
cmd += ' }}};'
cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir)
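The win_dsc changes above all fix the same bug class: ``SaltInvocationError(error)`` on its own just constructs an exception object and discards it, so validation silently passed; only ``raise`` actually interrupts the caller. A two-case reminder with a stand-in exception class:

.. code-block:: python

    class ValidationError(Exception):
        pass

    # The old bug: constructing an exception without 'raise' is a no-op.
    ValidationError('config_mode_freq must be an integer')
    print('execution continued past the bad value')

    # The fix: raising actually aborts the caller.
    try:
        raise ValidationError('config_mode_freq must be an integer')
    except ValidationError as exc:
        print('caught:', exc)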

View File

@ -87,7 +87,7 @@ def _get_username(member):
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace')
'/', '\\')
def add(name, **kwargs):

View File

@ -3679,7 +3679,7 @@ def _checkAllAdmxPolicies(policy_class,
if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if DISABLED_LIST_XPATH(admx_policy):
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
@ -3689,14 +3689,14 @@ def _checkAllAdmxPolicies(policy_class,
ENABLED_VALUE_XPATH,
policy_filedata):
this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname))
log.debug('{0} is enabled by detected ENABLED_VALUE_XPATH'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if ENABLED_LIST_XPATH(admx_policy):
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
@ -3706,25 +3706,27 @@ def _checkAllAdmxPolicies(policy_class,
DISABLED_VALUE_XPATH,
policy_filedata):
this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname))
log.debug('{0} is disabled by detected DISABLED_VALUE_XPATH'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname))
log.debug('{0} is enabled by detected ENABLED_LIST_XPATH'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname))
log.debug('{0} is disabled by detected DISABLED_LIST_XPATH'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
@ -3739,7 +3741,7 @@ def _checkAllAdmxPolicies(policy_class,
'1')),
policy_filedata):
this_policy_setting = 'Enabled'
log.debug('{0} is enabled'.format(this_policyname))
log.debug('{0} is enabled by no explicit enable/disable list or value'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
@ -3750,7 +3752,7 @@ def _checkAllAdmxPolicies(policy_class,
check_deleted=True)),
policy_filedata):
this_policy_setting = 'Disabled'
log.debug('{0} is disabled'.format(this_policyname))
log.debug('{0} is disabled by no explicit enable/disable list or value'.format(this_policyname))
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting

View File

@ -128,6 +128,6 @@ def start():
raise SystemExit(1)
try:
tornado.ioloop.IOLoop.instance().start()
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
raise SystemExit(0)

View File

@ -205,14 +205,17 @@ import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.concurrent import Future
from zmq.eventloop import ioloop
import salt.ext.six as six
import tornado
# pylint: enable=import-error
TORNADO_50 = tornado.version_info >= (5,)
# instantiate the zmq IOLoop (specialized poller)
ioloop.install()
if not TORNADO_50:
import zmq.eventloop.ioloop
# instantiate the zmq IOLoop (specialized poller)
zmq.eventloop.ioloop.install()
# salt imports
import salt.ext.six as six
import salt.netapi
import salt.utils
import salt.utils.event
@ -289,7 +292,7 @@ class EventListener(object):
self.event.set_event_handler(self._handle_event_socket_recv)
def clean_timeout_futures(self, request):
def clean_by_request(self, request):
'''
Remove all futures that were waiting for request `request` since it is done waiting
'''
@ -426,6 +429,9 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
'runner_async': None, # empty, since we use the same client as `runner`
}
if not hasattr(self, 'ckminions'):
self.ckminions = salt.utils.minions.CkMinions(self.application.opts)
@property
def token(self):
'''
@ -481,7 +487,7 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
timeout a session
'''
# TODO: set a header or something??? so we know it was a timeout
self.application.event_listener.clean_timeout_futures(self)
self.application.event_listener.clean_by_request(self)
def on_finish(self):
'''
@ -918,7 +924,8 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
chunk['jid'] = salt.utils.jid.gen_jid()
# Subscribe returns from minions before firing a job
future_minion_map = self.subscribe_minion_returns(chunk['jid'], chunk['tgt'])
minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob')))
future_minion_map = self.subscribe_minion_returns(chunk['jid'], minions)
f_call = self._format_call_run_job_async(chunk)
# fire a job off
@ -937,9 +944,9 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
pass
raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.')
syndic_min_wait = None
# wait syndic a while to avoid missing published events
if self.application.opts['order_masters']:
syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])
yield tornado.gen.sleep(self.application.opts['syndic_wait'])
# To ensure job_not_running and all_return are terminated by each other, communicate using a future
is_finished = Future()
@ -949,10 +956,6 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
f_call['kwargs']['tgt_type'],
is_finished)
# if we have a min_wait, do that
if syndic_min_wait is not None:
yield syndic_min_wait
minion_returns_future = self.sanitize_minion_returns(future_minion_map, pub_data['minions'], is_finished)
yield job_not_running_future
@ -965,8 +968,7 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
tag = tagify([jid, 'ret', minion], 'job')
minion_future = self.application.event_listener.get_event(self,
tag=tag,
matcher=EventListener.exact_matcher,
timeout=self.application.opts['timeout'])
matcher=EventListener.exact_matcher)
future_minion_map[minion_future] = minion
return future_minion_map
@ -990,7 +992,7 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
chunk_ret = {}
while True:
f = yield Any(future_minion_map.keys() + [is_finished])
f = yield Any(list(future_minion_map.keys()) + [is_finished])
try:
# When finished entire routine, cleanup other futures and return result
if f is is_finished:
@ -1032,8 +1034,7 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
try:
event = self.application.event_listener.get_event(self,
tag=ping_tag,
timeout=self.application.opts['gather_job_timeout'],
)
timeout=self.application.opts['gather_job_timeout'])
f = yield Any([event, is_finished])
# When finished entire routine, cleanup other futures and return result
if f is is_finished:

View File

@ -50,66 +50,76 @@ possible to reference grains within the configuration.
to trick the master into returning secret data.
Use only the 'id' grain which is verified through the minion's key/cert.
Map Mode
--------
The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- filtering for:
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users (``mode: map``), where each user is a dictionary
containing the configured string or list attributes.
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users, where each user is a dictionary containing the
configured string or list attributes.
**Configuration:**
Configuration
*************
.. code-block:: yaml
salt-users:
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: map
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- cn
- displayName
- givenName
- sn
lists:
- memberOf
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: map
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- cn
- displayName
- givenName
- sn
lists:
- memberOf
**Result:**
search_order:
- salt-users
.. code-block:: yaml
Result
******
salt-users:
- cn: cn=johndoe,ou=users,dc=company,dc=tld
displayName: John Doe
givenName: John
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team01,ou=groups,dc=company
- cn: cn=janedoe,ou=users,dc=company,dc=tld
displayName: Jane Doe
givenName: Jane
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team02,ou=groups,dc=company
.. code-block:: python
List Mode
---------
TODO: see also ``_result_to_dict()`` documentation
{
'salt-users': [
{
'cn': 'cn=johndoe,ou=users,dc=company,dc=tld',
'displayName': 'John Doe'
'givenName': 'John'
'sn': 'Doe'
'memberOf': [
'cn=it-admins,ou=groups,dc=company,dc=tld',
'cn=team01,ou=groups,dc=company'
]
},
{
'cn': 'cn=janedoe,ou=users,dc=company,dc=tld',
'displayName': 'Jane Doe',
'givenName': 'Jane',
'sn': 'Doe',
'memberOf': [
'cn=it-admins,ou=groups,dc=company,dc=tld',
'cn=team02,ou=groups,dc=company'
]
}
]
}
'''
# Import python libs
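For reference, a source such as ``salt-users`` above is only consulted once the master's ``ext_pillar`` list points this module at the configuration file; a minimal sketch, where the file path is an assumption:

.. code-block:: yaml

    # /etc/salt/master (path is illustrative)
    ext_pillar:
      - pillar_ldap: /etc/salt/pillar_ldap.yaml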
@ -123,7 +133,7 @@ from salt.exceptions import SaltInvocationError
# Import third party libs
import yaml
from jinja2 import Environment, FileSystemLoader
import jinja2
try:
import ldap # pylint: disable=W0611
HAS_LDAP = True
@ -149,10 +159,9 @@ def _render_template(config_file):
Render config template, substituting grains where found.
'''
dirname, filename = os.path.split(config_file)
env = Environment(loader=FileSystemLoader(dirname))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(dirname))
template = env.get_template(filename)
config = template.render(__grains__)
return config
return template.render(__grains__)
def _config(name, conf):
@ -186,18 +195,18 @@ def _result_to_dict(data, result, conf, source):
For example, search result:
{ 'saltKeyValue': ['ntpserver=ntp.acme.local', 'foo=myfoo'],
'saltList': ['vhost=www.acme.net', 'vhost=www.acme.local' }
'saltList': ['vhost=www.acme.net', 'vhost=www.acme.local'] }
is written to the pillar data dictionary as:
{ 'ntpserver': 'ntp.acme.local', 'foo': 'myfoo',
'vhost': ['www.acme.net', 'www.acme.local' }
'vhost': ['www.acme.net', 'www.acme.local'] }
'''
attrs = _config('attrs', conf) or []
lists = _config('lists', conf) or []
# TODO:
# deprecate the default 'mode: split' and make the more
# straightforward 'mode: dict' the new default
# straightforward 'mode: map' the new default
mode = _config('mode', conf) or 'split'
if mode == 'map':
data[source] = []
@ -277,21 +286,45 @@ def ext_pillar(minion_id, # pylint: disable=W0613
'''
Execute LDAP searches and return the aggregated data
'''
if os.path.isfile(config_file):
try:
#open(config_file, 'r') as raw_config:
config = _render_template(config_file) or {}
opts = yaml.safe_load(config) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'Error parsing configuration file: {0} - {1}'
if salt.log.is_console_configured():
log.warning(msg.format(config_file, err))
else:
print(msg.format(config_file, err))
config_template = None
try:
config_template = _render_template(config_file)
except jinja2.exceptions.TemplateNotFound:
log.debug('pillar_ldap: missing configuration file %s', config_file)
except Exception:
log.debug('pillar_ldap: failed to render template for %s',
config_file, exc_info=True)
if not config_template:
# We don't have a config file
return {}
try:
opts = yaml.safe_load(config_template) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'
if salt.log.is_console_configured():
log.warning(msg.format(config_file, err))
else:
print(msg.format(config_file, err))
return {}
else:
log.debug('Missing configuration file: {0}'.format(config_file))
if not isinstance(opts, dict):
log.warning(
'pillar_ldap: %s is invalidly formatted, must be a YAML '
'dictionary. See the documentation for more information.',
config_file
)
return {}
if 'search_order' not in opts:
log.warning(
'pillar_ldap: search_order missing from configuration. See the '
'documentation for more information.'
)
return {}
data = {}
for source in opts['search_order']:

View File

@ -11,6 +11,7 @@
from __future__ import absolute_import
import datetime
import logging
import yaml
from yaml.constructor import ConstructorError
@ -22,6 +23,8 @@ from salt.utils.odict import OrderedDict
__all__ = ['deserialize', 'serialize', 'available']
log = logging.getLogger(__name__)
available = True
# prefer C bindings over python when available
@ -46,14 +49,17 @@ def deserialize(stream_or_string, **options):
try:
return yaml.load(stream_or_string, **options)
except ScannerError as error:
log.exception('Error encountered while deserializing')
err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
line_num = error.problem_mark.line + 1
raise DeserializationError(err_type,
line_num,
error.problem_mark.buffer)
except ConstructorError as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
except Exception as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
@ -74,6 +80,7 @@ def serialize(obj, **options):
return response[:-1]
return response
except Exception as error:
log.exception('Error encountered while serializing')
raise SerializationError(error)
@ -93,7 +100,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
Loader.add_multi_constructor('tag:yaml.org,2002:str', Loader.construct_yaml_str)
Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_multi_constructor(None, Loader.construct_undefined)
class Dumper(BaseDumper): # pylint: disable=W0232

View File

@ -150,14 +150,17 @@ def deserialize(stream_or_string, **options):
try:
return yaml.load(stream_or_string, **options)
except ScannerError as error:
log.exception('Error encountered while deserializing')
err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
line_num = error.problem_mark.line + 1
raise DeserializationError(err_type,
line_num,
error.problem_mark.buffer)
except ConstructorError as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
except Exception as error:
log.exception('Error encountered while deserializing')
raise DeserializationError(error)
@ -178,6 +181,7 @@ def serialize(obj, **options):
return response[:-1]
return response
except Exception as error:
log.exception('Error encountered while serializing')
raise SerializationError(error)
@ -322,7 +326,6 @@ Loader.add_multi_constructor('tag:yaml.org,2002:pairs', Loader.construct_yaml_pa
Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_multi_constructor(None, Loader.construct_undefined)
class SLSMap(OrderedDict):

View File

@ -3255,43 +3255,45 @@ class BaseHighState(object):
'Specified SLS {0} on local filesystem cannot '
'be found.'.format(sls)
)
state = None
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master or through a configured '
'fileserver'.format(sls, saltenv)
)
state = None
try:
state = compile_template(fn_,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv,
sls,
rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
else:
try:
state = compile_template(fn_,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv,
sls,
rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append(

View File

@ -17,7 +17,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -32,7 +32,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -48,7 +48,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -65,7 +65,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: '! 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -81,7 +81,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- connstate: NEW
- source: 'not 127.0.0.1'
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -94,7 +94,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -109,7 +109,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- dports:
- 80
- 443
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -122,7 +122,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -136,7 +136,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -148,7 +148,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -161,7 +161,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -174,7 +174,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- protocol: tcp
- sport: 1025:65535
- save: True
@ -183,6 +183,55 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
- chain: INPUT
- policy: ACCEPT
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms of
``--protocol``, if ``--proto`` appears in an iptables command after the
appearance of ``-m policy``, it is interpreted as the ``--proto`` option of
the policy extension (see the iptables-extensions(8) man page).
Example rules for IPSec policy:
.. code-block:: yaml
accept_esp_in:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 10.20.0.0/24
- destination: 10.10.0.0/24
- in-interface: eth0
- match: policy
- dir: in
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_in:
iptables.append:
- use:
- iptables: accept_esp_in
- chain: FORWARD
accept_esp_out:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: ACCEPT
- source: 10.10.0.0/24
- destination: 10.20.0.0/24
- out-interface: eth0
- match: policy
- dir: out
- pol: ipsec
- reqid: 1
- proto: esp
accept_esp_forward_out:
iptables.append:
- use:
- iptables: accept_esp_out
- chain: FORWARD
.. note::
Various functions of the ``iptables`` module use the ``--check`` option. If

View File

@ -56,10 +56,13 @@ def run_file(name,
grain=None,
key=None,
overwrite=True,
saltenv=None,
**connection_args):
'''
Execute an arbitrary query on the specified database
.. versionadded:: 2017.7.0
name
Used only as an ID
@ -84,13 +87,17 @@ def run_file(name,
overwrite:
The file or grain will be overwritten if it already exists (default)
.. versionadded:: 2017.7.0
saltenv:
The saltenv to pull the query_file from
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(database)}
if any([query_file.startswith(proto) for proto in ['http://', 'https://', 'salt://', 's3://', 'swift://']]):
query_file = __salt__['cp.cache_file'](query_file, saltenv=saltenv or __env__)
if not os.path.exists(query_file):
ret['comment'] = 'File {0} does not exist'.format(query_file)
ret['result'] = False
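Since remote ``query_file`` sources are now cached through ``cp.cache_file``, a state can reference the fileserver directly; a minimal sketch, where the state ID, database name, and file path are assumptions:

.. code-block:: yaml

    load_app_schema:
      mysql_query.run_file:
        - database: appdb
        - query_file: salt://mysql/schema.sql
        - saltenv: base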

View File

@ -216,6 +216,13 @@ def managed(name,
Debug mode. Will insert a new key, ``loaded_config``, under the output dictionary, containing the raw
result after the template was rendered.
.. note::
This argument cannot be used directly on the command line. Instead,
it can be passed through the ``pillar`` variable when executing one
of the :ref:`salt.modules.state.sls` or :ref:`salt.modules.state.apply`
functions (see an example below).
replace: False
Load and replace the configuration. Default: ``False`` (will apply load merge).
@ -266,7 +273,7 @@ def managed(name,
$ sudo salt 'juniper.device' state.sls router.config test=True
$ sudo salt -N all-routers state.sls router.config debug=True
$ sudo salt -N all-routers state.sls router.config pillar="{'debug': True}"
``router.config`` depends on the location of the SLS file (see above). Running this command will execute all
five steps from above. These examples are not meant to be used in a production environment; their sole purpose
@ -334,11 +341,11 @@ def managed(name,
# the user can override the flags with the equivalent CLI args
# which have higher precedence
test = __opts__.get('test', test)
debug = __opts__.get('debug', debug)
commit = __opts__.get('commit', commit)
replace = __opts__.get('replace', replace) # this might be a bit risky
skip_verify = __opts__.get('skip_verify', skip_verify)
test = __salt__['config.merge']('test', test)
debug = __salt__['config.merge']('debug', debug)
commit = __salt__['config.merge']('commit', commit)
replace = __salt__['config.merge']('replace', replace) # this might be a bit risky
skip_verify = __salt__['config.merge']('skip_verify', skip_verify)
config_update_ret = _update_config(template_name,
template_source=template_source,

View File

@ -41,14 +41,18 @@ except ImportError:
if HAS_PIP is True:
try:
import pip.req
from pip.req import InstallRequirement
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
# pip 10.0.0 moved the req module under pip._internal
try:
from pip._internal.req import InstallRequirement
except ImportError:
HAS_PIP = False
# Remove references to the loaded pip module above so reloading works
import sys
del pip
if 'pip' in sys.modules:
del sys.modules['pip']
try:
from pip.exceptions import InstallationError
@ -129,7 +133,7 @@ def _check_pkg_version_format(pkg):
logger.debug(
'Installed pip version: {0}'.format(pip.__version__)
)
install_req = pip.req.InstallRequirement.from_line(pkg)
install_req = InstallRequirement.from_line(pkg)
except AttributeError:
logger.debug('Installed pip version is lower than 1.2')
supported_vcs = ('git', 'svn', 'hg', 'bzr')
@ -137,12 +141,12 @@ def _check_pkg_version_format(pkg):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = pip.req.InstallRequirement.from_line(
install_req = InstallRequirement.from_line(
pkg.split('{0}+'.format(vcs))[-1]
)
break
else:
install_req = pip.req.InstallRequirement.from_line(pkg)
install_req = InstallRequirement.from_line(pkg)
except (ValueError, InstallationError) as exc:
ret['result'] = False
if not from_vcs and '=' in pkg and '==' not in pkg:
@ -180,24 +184,19 @@ def _check_pkg_version_format(pkg):
return ret
def _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars):
def _check_if_installed(prefix, state_pkg_name, version_spec, ignore_installed,
force_reinstall, upgrade, user, cwd, bin_env, env_vars,
**kwargs):
# result: None means the command failed to run
# result: True means the package is installed
# result: False means the package is not installed
ret = {'result': False, 'comment': None}
# Check if the requested package is already installed.
try:
pip_list = __salt__['pip.list'](prefix, bin_env=bin_env,
user=user, cwd=cwd,
env_vars=env_vars)
prefix_realname = _find_key(prefix, pip_list)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(state_pkg_name, err)
return ret
pip_list = __salt__['pip.list'](prefix, bin_env=bin_env,
user=user, cwd=cwd,
env_vars=env_vars, **kwargs)
prefix_realname = _find_key(prefix, pip_list)
# If the package was already installed, check
# the ignore_installed and force_reinstall flags
@ -309,7 +308,6 @@ def installed(name,
install_options=None,
global_options=None,
user=None,
no_chown=False,
cwd=None,
pre_releases=False,
cert=None,
@ -321,7 +319,9 @@ def installed(name,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None):
cache_dir=None,
no_binary=None,
**kwargs):
'''
Make sure the package is installed
@ -356,6 +356,25 @@ def installed(name,
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
@ -424,10 +443,6 @@ def installed(name,
no_install
Download and unpack all packages, but don't actually install them
no_chown
When user is given, do not attempt to copy and chown
a requirements file
no_cache_dir:
Disable the cache.
@ -570,6 +585,12 @@ def installed(name,
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if 'no_chown' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The no_chown argument has been deprecated and is no longer used. '
'Its functionality was removed in Boron.')
kwargs.pop('no_chown')
if pip_bin and not bin_env:
bin_env = pip_bin
@ -595,26 +616,45 @@ def installed(name,
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
max_version = '9.0.3'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
cur_version = __salt__['pip.version'](bin_env)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
max_version = '9.0.3'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
@ -683,7 +723,8 @@ def installed(name,
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars)
upgrade, user, cwd, bin_env, env_vars,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
@ -720,6 +761,14 @@ def installed(name,
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
@ -730,6 +779,7 @@ def installed(name,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
@ -754,7 +804,6 @@ def installed(name,
install_options=install_options,
global_options=global_options,
user=user,
no_chown=no_chown,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
@ -766,15 +815,11 @@ def installed(name,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir
no_cache_dir=no_cache_dir,
**kwargs
)
# Check the retcode for success, but don't fail if using pip1 and the package is
# already present. Pip1 returns a retcode of 1 (instead of 0 for pip2) if you run
# "pip install" without any arguments. See issue #21845.
if pip_install_call and \
(pip_install_call.get('retcode', 1) == 0 or pip_install_call.get('stdout', '').startswith(
'You must give at least one requirement to install')):
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
@ -782,6 +827,8 @@ def installed(name,
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
@ -825,7 +872,8 @@ def installed(name,
if prefix:
pipsearch = __salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars)
env_vars=env_vars,
**kwargs)
# If we didn't find the package in the system after
# installing it report it

View File

@ -153,6 +153,16 @@ def __virtual__():
return 'pkg.install' in __salt__
def _warn_virtual(virtual):
return [
'The following package(s) are "virtual package" names: {0}. These '
'will no longer be supported as of the Fluorine release. Please '
'update your SLS file(s) to use the actual package name.'.format(
', '.join(virtual)
)
]
def _get_comparison_spec(pkgver):
'''
Return a tuple containing the comparison operator and the version. If no
@ -529,9 +539,14 @@ def _find_install_targets(name=None,
was_refreshed = True
refresh = False
def _get_virtual(desired):
return [x for x in desired if cur_pkgs.get(x, []) == ['1']]
virtual_pkgs = []
if any((pkgs, sources)):
if pkgs:
desired = _repack_pkgs(pkgs)
desired = _repack_pkgs(pkgs, normalize=normalize)
elif sources:
desired = __salt__['pkg_resource.pack_sources'](
sources,
@ -546,6 +561,8 @@ def _find_install_targets(name=None,
'comment': 'Invalidly formatted \'{0}\' parameter. See '
'minion log.'.format('pkgs' if pkgs
else 'sources')}
virtual_pkgs = _get_virtual(desired)
to_unpurge = _find_unpurge_targets(desired)
else:
if salt.utils.is_windows():
@ -566,6 +583,7 @@ def _find_install_targets(name=None,
else:
desired = {name: version}
virtual_pkgs = _get_virtual(desired)
to_unpurge = _find_unpurge_targets(desired)
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
@ -582,22 +600,28 @@ def _find_install_targets(name=None,
and not reinstall \
and not pkg_verify:
# The package is installed and is the correct version
return {'name': name,
'changes': {},
'result': True,
'comment': 'Version {0} of package \'{1}\' is already '
'installed'.format(version, name)}
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Version {0} of package \'{1}\' is already '
'installed'.format(version, name)}
if virtual_pkgs:
ret['warnings'] = _warn_virtual(virtual_pkgs)
return ret
# if cver is not an empty string, the package is already installed
elif cver and version is None \
and not reinstall \
and not pkg_verify:
# The package is installed
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already '
'installed'.format(name)}
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already '
'installed'.format(name)}
if virtual_pkgs:
ret['warnings'] = _warn_virtual(virtual_pkgs)
return ret
version_spec = False
if not sources:
@ -635,10 +659,13 @@ def _find_install_targets(name=None,
if comments:
if len(comments) > 1:
comments.append('')
return {'name': name,
'changes': {},
'result': False,
'comment': '. '.join(comments).rstrip()}
ret = {'name': name,
'changes': {},
'result': False,
'comment': '. '.join(comments).rstrip()}
if virtual_pkgs:
ret['warnings'] = _warn_virtual(virtual_pkgs)
return ret
# Resolve the latest package version for any packages with "latest" in the
# package version
@ -770,10 +797,13 @@ def _find_install_targets(name=None,
problems.append(failed_verify)
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
ret = {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if virtual_pkgs:
ret['warnings'] = _warn_virtual(virtual_pkgs)
return ret
if not any((targets, to_unpurge, to_reinstall)):
# All specified packages are installed
@ -788,6 +818,8 @@ def _find_install_targets(name=None,
'comment': msg}
if warnings:
ret.setdefault('warnings', []).extend(warnings)
if virtual_pkgs:
ret.setdefault('warnings', []).extend(_warn_virtual(virtual_pkgs))
return ret
return (desired, targets, to_unpurge, to_reinstall, altered_files,
@ -1720,6 +1752,25 @@ def installed(
and x not in to_reinstall]
failed = [x for x in failed if x in targets]
# Check for virtual packages in list of desired packages
if not sources:
try:
virtual_pkgs = []
for pkgname in [next(iter(x)) for x in pkgs] if pkgs else [name]:
cver = new_pkgs.get(pkgname, [])
if '1' in cver:
virtual_pkgs.append(pkgname)
if virtual_pkgs:
warnings.extend(_warn_virtual(virtual_pkgs))
except Exception:
# This is just some temporary code to warn the user about using
# virtual packages. Don't let an exception break the entire
# state.
log.debug(
'Failed to detect virtual packages after running '
'pkg.install', exc_info=True
)
# If there was nothing unpurged, just set the changes dict to the contents
# of changes['installed'].
if not changes.get('purge_desired'):

View File

@ -319,7 +319,6 @@ def managed(name, ppa=None, **kwargs):
enabled = True
repo = name
os_family = __grains__['os_family'].lower()
if __grains__['os'] in ('Ubuntu', 'Mint'):
if ppa is not None:
# overload the name/repo value for PPAs cleanly
@ -333,7 +332,7 @@ def managed(name, ppa=None, **kwargs):
if enabled is not None \
else salt.utils.is_true(disabled)
elif os_family in ('redhat', 'suse'):
elif __grains__['os_family'] in ('RedHat', 'Suse'):
if 'humanname' in kwargs:
kwargs['name'] = kwargs.pop('humanname')
if 'name' not in kwargs:
@ -344,7 +343,7 @@ def managed(name, ppa=None, **kwargs):
if disabled is not None \
else salt.utils.is_true(enabled)
elif os_family == 'nilinuxrt':
elif __grains__['os_family'] in ('NILinuxRT', 'Poky'):
# opkg is the pkg virtual
kwargs['enabled'] = not salt.utils.is_true(disabled) \
if disabled is not None \
@ -373,7 +372,7 @@ def managed(name, ppa=None, **kwargs):
else:
sanitizedkwargs = kwargs
if os_family == 'debian':
if __grains__['os_family'] == 'Debian':
repo = salt.utils.pkg.deb.strip_uri(repo)
if pre:
@ -387,7 +386,7 @@ def managed(name, ppa=None, **kwargs):
# not explicitly set, so we don't need to update the repo
# if it's desired to be enabled and the 'enabled' key is
# missing from the repo definition
if os_family == 'redhat':
if __grains__['os_family'] == 'RedHat':
if not salt.utils.is_true(sanitizedkwargs[kwarg]):
break
else:
@ -397,7 +396,7 @@ def managed(name, ppa=None, **kwargs):
elif kwarg == 'comps':
if sorted(sanitizedkwargs[kwarg]) != sorted(pre[kwarg]):
break
elif kwarg == 'line' and os_family == 'debian':
elif kwarg == 'line' and __grains__['os_family'] == 'Debian':
# split the line and sort everything after the URL
sanitizedsplit = sanitizedkwargs[kwarg].split()
sanitizedsplit[3:] = sorted(sanitizedsplit[3:])
@ -412,14 +411,14 @@ def managed(name, ppa=None, **kwargs):
salt.utils.pkg.deb.combine_comments(kwargs['comments'])
if pre_comments != post_comments:
break
elif kwarg == 'comments' and os_family == 'redhat':
elif kwarg == 'comments' and __grains__['os_family'] == 'RedHat':
precomments = salt.utils.pkg.rpm.combine_comments(pre[kwarg])
kwargcomments = salt.utils.pkg.rpm.combine_comments(
sanitizedkwargs[kwarg])
if precomments != kwargcomments:
break
else:
if os_family in ('redhat', 'suse') \
if __grains__['os_family'] in ('RedHat', 'Suse') \
and any(isinstance(x, bool) for x in
(sanitizedkwargs[kwarg], pre[kwarg])):
# This check disambiguates 1/0 from True/False
@ -450,7 +449,7 @@ def managed(name, ppa=None, **kwargs):
pass
try:
if os_family == 'debian':
if __grains__['os_family'] == 'Debian':
__salt__['pkg.mod_repo'](repo, saltenv=__env__, **kwargs)
else:
__salt__['pkg.mod_repo'](repo, **kwargs)

View File

@ -39,7 +39,6 @@ def managed(name,
never_download=None,
prompt=None,
user=None,
no_chown=False,
cwd=None,
index_url=None,
extra_index_url=None,
@ -57,7 +56,9 @@ def managed(name,
pip_pkgs=None,
pip_no_cache_dir=False,
pip_cache_dir=None,
process_dependency_links=False):
process_dependency_links=False,
no_binary=None,
**kwargs):
'''
Create a virtualenv and optionally manage it with pip
@ -77,11 +78,6 @@ def managed(name,
user: None
The user under which to run virtualenv and pip.
no_chown: False
When user is given, do not attempt to copy and chown a requirements file
(needed if the requirements file refers to other files via relative
paths, as the copy-and-chown procedure does not account for such files)
cwd: None
Path to the working directory where `pip install` is executed.
@ -103,6 +99,11 @@ def managed(name,
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
@ -127,6 +128,13 @@ def managed(name,
- env_vars:
PATH_VAR: '/usr/local/bin/'
'''
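Following the shape of the ``env_vars`` example just above, ``no_binary`` is passed the same way; a minimal sketch, where the virtualenv path, requirements file, and package name are assumptions:

.. code-block:: yaml

    /opt/venvs/app:
      virtualenv.managed:
        - requirements: salt://app/requirements.txt
        - no_binary:
          - psycopg2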
if 'no_chown' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The no_chown argument has been deprecated and is no longer used. '
'Its functionality was removed in Boron.')
kwargs.pop('no_chown')
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'virtualenv.create' not in __salt__:
@ -199,6 +207,7 @@ def managed(name,
prompt=prompt,
user=user,
use_vt=use_vt,
**kwargs
)
except CommandNotFoundError as err:
ret['result'] = False
@ -222,27 +231,44 @@ def managed(name,
elif venv_exists:
ret['comment'] = 'virtualenv exists'
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
if not salt.utils.compare_versions(ver1=cur_version, oper='>=',
ver2=min_version):
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.compare_versions(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported '
'in pip {0} and newer. The version of pip '
'detected was {1}.').format(min_version,
cur_version)
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.compare_versions(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Populate the venv via a requirements file
@ -275,13 +301,13 @@ def managed(name,
bin_env=name,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
user=user,
cwd=cwd,
index_url=index_url,
extra_index_url=extra_index_url,
download=pip_download,
download_cache=pip_download_cache,
no_chown=no_chown,
pre_releases=pre_releases,
exists_action=pip_exists_action,
ignore_installed=pip_ignore_installed,
@ -291,7 +317,8 @@ def managed(name,
use_vt=use_vt,
env_vars=env_vars,
no_cache_dir=pip_no_cache_dir,
cache_dir=pip_cache_dir
cache_dir=pip_cache_dir,
**kwargs
)
ret['result'] &= pip_ret['retcode'] == 0
if pip_ret['retcode'] > 0:

View File

@ -376,7 +376,7 @@ def filesystem_present(name, create_parent=False, properties=None, cloned_from=N
if name in __salt__['zfs.list'](name, **{'type': 'filesystem'}): # update properties if needed
result = {}
if len(properties) > 0:
result = __salt__['zfs.get'](name, **{'properties': ','.join(properties.keys()), 'fields': 'value', 'depth': 1})
result = __salt__['zfs.get'](name, **{'properties': ','.join(properties.keys()), 'type': 'filesystem', 'fields': 'value', 'depth': 0})
for prop in properties:
if properties[prop] != result[name][prop]['value']:

View File

@ -130,11 +130,11 @@ class IPCServer(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -197,10 +197,10 @@ class IPCServer(object):
log.trace('IPCServer: Handling connection '
'to address: {0}'.format(address))
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
)
self.io_loop.spawn_callback(self.handle_stream, stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
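The repeated change in this file (and in ``tcp.py`` below) follows one idea: Tornado 5 removed the ``io_loop`` keyword argument, so the target loop is made current while the object is constructed. A minimal sketch of the pattern, with illustrative names:

.. code-block:: python

    import salt.utils.async
    from tornado.iostream import IOStream

    # before: IOStream(connection, io_loop=self.io_loop)
    with salt.utils.async.current_ioloop(self.io_loop):
        stream = IOStream(connection)  # picks up self.io_loop via IOLoop.current()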
@ -296,7 +296,7 @@ class IPCClient(object):
else:
if hasattr(self, '_connecting_future'):
# read previous future result to prevent the "unhandled future exception" error
self._connecting_future.exc_info() # pylint: disable=E0203
self._connecting_future.exception() # pylint: disable=E0203
future = tornado.concurrent.Future()
self._connecting_future = future
self._connect(timeout=timeout)
@ -330,10 +330,10 @@ class IPCClient(object):
break
if self.stream is None:
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
)
try:
log.trace('IPCClient: Connecting to socket: {0}'.format(self.socket_path))
@ -511,11 +511,11 @@ class IPCMessagePublisher(object):
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
self._started = True
@tornado.gen.coroutine
@ -546,17 +546,14 @@ class IPCMessagePublisher(object):
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
kwargs = {}
if self.opts['ipc_write_buffer'] > 0:
kwargs['max_write_buffer_size'] = self.opts['ipc_write_buffer']
log.trace('Setting IPC connection write buffer: {0}'.format((self.opts['ipc_write_buffer'])))
with salt.utils.async.current_ioloop(self.io_loop):
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
**kwargs
)
self.streams.add(stream)
@ -756,9 +753,9 @@ class IPCMessageSubscriber(IPCClient):
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
if self._read_sync_future is not None:
self._read_sync_future.exc_info()
self._read_sync_future.exception()
if self._read_stream_future is not None:
self._read_stream_future.exc_info()
self._read_stream_future.exception()
def __del__(self):
if IPCMessageSubscriber in globals():

View File

@ -32,6 +32,7 @@ import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
import salt.ext.six as six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
@ -556,6 +557,11 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra
raise exc
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
def __del__(self):
self.close()
@ -742,15 +748,23 @@ if USE_LOAD_BALANCER:
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
t = threading.Thread(target=self.socket_queue_thread)
t.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
client_socket, address = self.socket_queue.get(True, None)
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
@ -764,10 +778,9 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None, io_loop=None):
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(
resolver=resolver, io_loop=io_loop)
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
@ -783,7 +796,6 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
io_loop=self.io_loop,
max_buffer_size=max_buffer_size)
return stream.connect(addr)
@ -842,8 +854,8 @@ class SaltMessageClient(object):
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._tcp_client = TCPClientKeepAlive(
opts, io_loop=self.io_loop, resolver=resolver)
with salt.utils.async.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
@ -874,7 +886,7 @@ class SaltMessageClient(object):
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
self._read_until_future.exception()
if (not self._stream_return_future.done() and
self.io_loop != tornado.ioloop.IOLoop.current(
instance=False)):
@ -932,9 +944,10 @@ class SaltMessageClient(object):
if self._closing:
break
try:
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
with salt.utils.async.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
self._connecting_future.set_result(True)
break
except Exception as e:
@ -1126,7 +1139,7 @@ class Subscriber(object):
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
self._read_until_future.exception()
def __del__(self):
self.close()
@ -1137,7 +1150,8 @@ class PubServer(tornado.tcpserver.TCPServer, object):
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()

View File

@ -30,13 +30,11 @@ import salt.transport.server
import salt.transport.mixins.auth
from salt.exceptions import SaltReqTimeoutError
import zmq
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO
import zmq.error
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import zmq.eventloop.zmqstream
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
@ -50,6 +48,7 @@ PYZMQ_VERSION = tuple(map(int, zmq.pyzmq_version().split('.')))
import tornado
import tornado.gen
import tornado.concurrent
TORNADO_50 = tornado.version_info >= (5,)
# Import third party libs
try:
@ -60,6 +59,42 @@ except ImportError:
log = logging.getLogger(__name__)
def _get_master_uri(master_ip,
master_port,
source_ip=None,
source_port=None):
'''
Return the ZeroMQ URI to connect the Minion to the Master.
It supports different source IP / port, given the ZeroMQ syntax:
// Connecting using a IP address and bind to an IP address
rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);
Source: http://api.zeromq.org/4-1:zmq-tcp
'''
if LIBZMQ_VERSION_INFO >= (4, 1, 6) and ZMQ_VERSION_INFO >= (16, 0, 1):
# The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
# which is included in the pyzmq wheels starting with 16.0.1.
if source_ip or source_port:
if source_ip and source_port:
return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
source_ip=source_ip, source_port=source_port,
master_ip=master_ip, master_port=master_port)
elif source_ip and not source_port:
return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
source_ip=source_ip,
master_ip=master_ip, master_port=master_port)
elif not source_ip and source_port:
return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
source_port=source_port,
master_ip=master_ip, master_port=master_port)
if source_ip or source_port:
log.warning('Unable to connect to the Master using a specific source IP / port')
log.warning('Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
return 'tcp://{master_ip}:{master_port}'.format(
master_ip=master_ip, master_port=master_port)
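Working through the branches above, the helper yields URIs along these lines (addresses are illustrative; the combined source;destination form requires the libzmq/pyzmq versions checked above):

.. code-block:: python

    _get_master_uri('203.0.113.1', 4506)
    # 'tcp://203.0.113.1:4506'

    _get_master_uri('203.0.113.1', 4506, source_ip='192.168.1.17', source_port=5555)
    # 'tcp://192.168.1.17:5555;203.0.113.1:4506'

    _get_master_uri('203.0.113.1', 4506, source_ip='192.168.1.17')
    # 'tcp://192.168.1.17:0;203.0.113.1:4506'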
class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to ZeroMQ.
@ -78,8 +113,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop')
if io_loop is None:
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
install_zmq()
io_loop = ZMQDefaultLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
@ -94,7 +129,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid()))
log.trace('Inserted key into loop_instance_map id %s for key %s and process %s',
id(loop_instance_map), key, os.getpid())
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
return obj
@ -146,8 +182,8 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel):
self._io_loop = kwargs.get('io_loop')
if self._io_loop is None:
zmq.eventloop.ioloop.install()
self._io_loop = tornado.ioloop.IOLoop.current()
install_zmq()
self._io_loop = ZMQDefaultLoop.current()
if self.crypt != 'clear':
# we don't need to worry about auth as a kwarg, since its a singleton
@ -287,18 +323,14 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t
**kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.io_loop = kwargs.get('io_loop')
if self.io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
self.hexid = hashlib.sha1(six.b(self.opts['id'])).hexdigest()
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.hexid = hashlib.sha1(salt.utils.to_bytes(self.opts['id'])).hexdigest()
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._socket = self.context.socket(zmq.SUB)
@ -330,8 +362,7 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
self.opts['recon_default'] + self.opts['recon_max'])
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
@ -445,7 +476,8 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t
return self.stream.on_recv(wrap_callback)
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin,
salt.transport.server.ReqServerChannel):
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
@ -465,13 +497,7 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
# Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self._start_zmq_monitor()
self.workers = self.context.socket(zmq.DEALER)
if self.opts.get('ipc_mode', '') == 'tcp':
@ -485,7 +511,6 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
@ -508,10 +533,11 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
if hasattr(self, '_monitor') and self._monitor is not None:
# pylint: disable=E0203
if getattr(self, '_monitor', None) is not None:
self._monitor.stop()
self._monitor = None
if hasattr(self, '_w_monitor') and self._w_monitor is not None:
if getattr(self, '_w_monitor', None) is not None:
self._w_monitor.stop()
self._w_monitor = None
if hasattr(self, 'clients') and self.clients.closed is False:
@ -524,6 +550,7 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
self._socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# pylint: enable=E0203
def pre_fork(self, process_manager):
'''
@ -534,6 +561,21 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
def _start_zmq_monitor(self):
'''
Starts ZMQ monitor for debugging purposes.
:return:
'''
# The socket monitor shall be used only for debug
# purposes, so using threading doesn't look too bad here
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
log.debug('Starting ZMQ monitor')
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
threading.Thread(target=self._w_monitor.start_poll).start()
log.debug('ZMQ monitor has been started')
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
@ -548,12 +590,7 @@ class ZeroMQReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if HAS_ZMQ_MONITOR and self.opts['zmq_monitor']:
# Socket monitor shall be used the only for debug purposes so using threading doesn't look too bad here
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
t = threading.Thread(target=self._w_monitor.start_poll)
t.start()
self._start_zmq_monitor()
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
@ -759,27 +796,35 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
log.trace('Getting data from puller %s', pull_uri)
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
log.trace('Accepted unpacked package from puller')
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
log.trace('Sending filtered data over publisher %s', pub_uri)
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Filtered data has been sent')
# otherwise its a broadcast
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
log.trace('Sending ZMQ-unfiltered data over publisher %s', pub_uri)
pub_sock.send(payload)
log.trace('Unfiltered data has been sent')
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
@ -897,13 +942,12 @@ class AsyncReqMessageClient(object):
self.addr = addr
self.linger = linger
if io_loop is None:
zmq.eventloop.ioloop.install()
tornado.ioloop.IOLoop.current()
install_zmq()
ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
# wire up sockets
@ -982,7 +1026,8 @@ class AsyncReqMessageClient(object):
try:
ret = yield future
except: # pylint: disable=W0702
except Exception as err: # pylint: disable=W0702
log.debug('Re-init ZMQ socket: %s', err)
self._init_socket() # re-init the zmq socket (no other way in zmq)
del self.send_queue[0]
continue

View File

@ -7,19 +7,9 @@ from __future__ import absolute_import
import tornado.ioloop
import tornado.concurrent
# attempt to use zmq-- if we have it otherwise fallback to tornado loop
try:
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
import contextlib
from salt.utils import zeromq
@contextlib.contextmanager
@ -53,7 +43,7 @@ class SyncWrapper(object):
if kwargs is None:
kwargs = {}
self.io_loop = LOOP_CLASS()
self.io_loop = zeromq.ZMQDefaultLoop()
kwargs['io_loop'] = self.io_loop
with current_ioloop(self.io_loop):

View File

@ -18,11 +18,7 @@ import salt.utils.dictupdate
# Import third party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
from salt.utils.zeromq import zmq
log = logging.getLogger(__name__)

View File

@ -29,12 +29,8 @@ from salt.utils.cache import CacheCli as cache_cli
from salt.utils.process import MultiprocessingProcess
# Import third party libs
import salt.ext.six as six
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
from salt.ext import six
from salt.utils.zeromq import zmq
log = logging.getLogger(__name__)

View File

@ -908,7 +908,7 @@ class SaltNova(object):
try:
ret[item.name] = {
'id': item.id,
'status': 'Running'
'state': 'Running'
}
except TypeError:
pass

View File

@ -889,6 +889,7 @@ class Schedule(object):
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
self.functions.pack['__context__']['retcode'] = 0
ret['return'] = self.functions[func](*args, **kwargs)
# runners do not provide retcode
@ -940,11 +941,13 @@ class Schedule(object):
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
mret['jid'] = 'req'
if data.get('return_job') == 'nocache':
# overwrite 'req' to signal to master that
# this job shouldn't be stored
mret['jid'] = 'nocache'
# No returners defined, so we're only sending back to the master
if not data_returner and not self.schedule_returner:
mret['jid'] = 'req'
if data.get('return_job') == 'nocache':
# overwrite 'req' to signal to master that
# this job shouldn't be stored
mret['jid'] = 'nocache'
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
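For reference, the ``return_job`` value checked above comes from the job's schedule definition; a minimal sketch, where the job name and function are assumptions:

.. code-block:: yaml

    schedule:
      ping_master:
        function: test.ping
        seconds: 60
        return_job: nocache  # report to the master but skip the job cache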

View File

@ -132,6 +132,12 @@ def vb_get_manager():
'''
global _virtualboxManager
if _virtualboxManager is None and HAS_LIBS:
try:
from importlib import reload
except ImportError:
# If we get here, we are in py2 and reload is a built-in.
pass
# Reloading the API extends sys.path for subprocesses of multiprocessing, since they seem to share contexts
reload(vboxapi)
_virtualboxManager = vboxapi.VirtualBoxManager(None, None)
@ -146,7 +152,13 @@ def vb_get_box():
@rtype: IVirtualBox
'''
vb_get_manager()
vbox = _virtualboxManager.vbox
try:
# Older versions of the SDK expose the vbox attribute directly, but newer ones no longer seem to.
vbox = _virtualboxManager.vbox
except AttributeError:
vbox = _virtualboxManager.getVirtualBox()
return vbox
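
Both fixes in this file follow the same defensive pattern -- try the old spelling first, fall back to the new one. A standalone illustration (the stand-in classes below are hypothetical; only the pattern mirrors the hunk):

# Python 3 moved reload() into importlib; on Python 2 it is a builtin.
try:
    from importlib import reload
except ImportError:
    pass  # Python 2: reload is already available

import json
reload(json)  # re-execute the module body, refreshing anything set at import time


class _OldSDK(object):
    vbox = 'old-style attribute'


class _NewSDK(object):
    def getVirtualBox(self):
        return 'new-style getter'


def get_box(manager):
    # Older SDKs expose .vbox directly; newer ones only offer the getter.
    try:
        return manager.vbox
    except AttributeError:
        return manager.getVirtualBox()


print(get_box(_OldSDK()), get_box(_NewSDK()))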

View File

@ -3,21 +3,60 @@
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
import logging
import tornado.ioloop
from salt.exceptions import SaltSystemExit
# Import 3rd-party libs
log = logging.getLogger(__name__)
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
zmq = None
log.debug('ZMQ module is not found')
ZMQDefaultLoop = None
ZMQ_VERSION_INFO = (-1, -1, -1)
LIBZMQ_VERSION_INFO = (-1, -1, -1)
try:
if zmq:
ZMQ_VERSION_INFO = tuple([int(v_el) for v_el in zmq.__version__.split('.')])
LIBZMQ_VERSION_INFO = tuple([int(v_el) for v_el in zmq.zmq_version().split('.')])
if ZMQ_VERSION_INFO[0] > 16: # 17.0.x+ deprecates zmq's ioloops
ZMQDefaultLoop = tornado.ioloop.IOLoop
except Exception:
log.exception('Error while getting LibZMQ/PyZMQ library version')
if ZMQDefaultLoop is None:
try:
import zmq.eventloop.ioloop
# Support for pyzmq 13.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
if tornado.version_info < (5,):
ZMQDefaultLoop = zmq.eventloop.ioloop.ZMQIOLoop
except ImportError:
ZMQDefaultLoop = None
if ZMQDefaultLoop is None:
ZMQDefaultLoop = tornado.ioloop.IOLoop
def install_zmq():
'''
While pyzmq 17 no longer needs any special integration for tornado,
older versions still need one.
:return:
'''
if zmq and ZMQ_VERSION_INFO[0] < 17:
if tornado.version_info < (5,):
zmq.eventloop.ioloop.install()
def check_ipc_path_max_len(uri):
# The socket path is limited to 107 characters on Solaris and
# Linux, and 103 characters on BSD-based systems.
if not HAS_ZMQ:
if zmq is None:
return
ipc_path_max_len = getattr(zmq, 'IPC_PATH_MAX_LEN', 103)
if ipc_path_max_len and len(uri) > ipc_path_max_len:
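
The rest of the diff (AsyncReqMessageClient, SyncWrapper, the minion/master transports) consumes this module through two entry points, roughly as follows -- a usage sketch based on the helpers defined above:

from salt.utils.zeromq import install_zmq, ZMQDefaultLoop, zmq

# Register pyzmq's tornado integration only when the installed pyzmq (< 17)
# and tornado (< 5) still require it; otherwise this is a no-op.
install_zmq()

# ZMQDefaultLoop resolves to tornado's IOLoop on new stacks and to
# zmq's ZMQIOLoop on old ones, so callers never branch on versions.
io_loop = ZMQDefaultLoop.current()

if zmq is None:
    # Import-time fallback: the module still loads without pyzmq installed.
    print('pyzmq not available, falling back to tornado-only transports')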

View File

@ -54,13 +54,17 @@ import salt.log.setup
from salt.utils.odict import OrderedDict
# Define the pytest plugins we rely on
pytest_plugins = ['pytest_catchlog', 'tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
pytest_plugins = ['tempdir', 'helpers_namespace'] # pylint: disable=invalid-name
# Define where not to collect tests from
collect_ignore = ['setup.py']
log = logging.getLogger('salt.testsuite')
# Reset logging root handlers
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
def pytest_tempdir_basename():
'''
@ -196,25 +200,6 @@ def pytest_configure(config):
called after command line options have been parsed
and all plugins and initial conftest files been loaded.
'''
# Configure the console logger based on the catch_log settings.
# Most importantly, shutdown Salt's null, store and temporary logging queue handlers
catch_log = config.pluginmanager.getplugin('_catch_log')
cli_logging_handler = catch_log.log_cli_handler
# Add the pytest_catchlog CLI log handler to the logging root
logging.root.addHandler(cli_logging_handler)
cli_level = cli_logging_handler.level
cli_level = config._catchlog_log_cli_level
cli_format = cli_logging_handler.formatter._fmt
cli_date_format = cli_logging_handler.formatter.datefmt
# Setup the console logger which shuts down the null and the temporary queue handlers
salt.log.setup_console_logger(
log_level=salt.log.setup.LOG_VALUES_TO_LEVELS.get(cli_level, 'error'),
log_format=cli_format,
date_format=cli_date_format
)
# Disable the store logging queue handler
salt.log.setup.setup_extended_logging({'extension_modules': ''})
config.addinivalue_line('norecursedirs', os.path.join(CODE_DIR, 'templates'))
config.addinivalue_line(
'markers',

View File

@ -21,6 +21,7 @@ import logging
# Import salt libs
import salt.utils.event
import salt.utils.async
# Import 3rd-party libs
from tornado import gen
@ -69,11 +70,11 @@ class PyTestEngine(object):
self.sock.bind(('localhost', port))
# become a server socket
self.sock.listen(5)
netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
with salt.utils.async.current_ioloop(self.io_loop):
netutil.add_accept_handler(
self.sock,
self.handle_connection,
)
def handle_connection(self, connection, address):
log.warning('Accepted connection from %s. Role: %s', address, self.opts['__role'])

View File

@ -4,6 +4,7 @@
- user: issue-1959
{%- if grains.get('pythonversion')[0] != 2 %}
{#- wheels are disabled because the pip cache dir will not be owned by the above issue-1959 user. Need to check this ASAP #}
- use_wheel: False
- no_use_wheel: True
- no_binary: ':all:'
{%- endif %}
- env:
XDG_CACHE_HOME: /tmp
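
For context on the replacement above: recent pip releases dropped the legacy wheel switch that use_wheel: False relied on, and no_binary: ':all:' is the documented way to disable wheels, with no_use_wheel kept for pip versions that still accept the old spelling. Roughly, the state arguments drive these pip options (mapping stated as an assumption based on pip's documented flags, not Salt source):

# State argument -> pip install option it maps to (sketch)
PIP_STATE_TO_FLAG = {
    'no_use_wheel': '--no-use-wheel',  # legacy switch, removed in recent pip
    'no_binary': '--no-binary',        # modern replacement; ':all:' disables all wheels
}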

View File

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))

View File

@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'Tests for only Windows')
class FirewallTest(ModuleCase):
'''
Validate windows firewall module
'''
def _pre_firewall_status(self, pre_run):
post_run = self.run_function('firewall.get_config')
network = ['Domain', 'Public', 'Private']
# compare the status of the firewall before and after test
# and re-enable or disable depending on status before test run
for net in network:
if post_run[net] != pre_run[net]:
if pre_run[net]:
self.assertTrue(self.run_function('firewall.enable', profile=net))
else:
self.assertTrue(self.run_function('firewall.disable', profile=net))
@destructiveTest
def test_firewall_get_config(self):
'''
test firewall.get_config
'''
pre_run = self.run_function('firewall.get_config')
# ensure all networks are enabled then test status
self.assertTrue(self.run_function('firewall.enable', profile='allprofiles'))
ret = self.run_function('firewall.get_config')
network = ['Domain', 'Public', 'Private']
for net in network:
self.assertTrue(ret[net])
self._pre_firewall_status(pre_run)
@destructiveTest
def test_firewall_disable(self):
'''
test firewall.disable
'''
pre_run = self.run_function('firewall.get_config')
network = 'Private'
ret = self.run_function('firewall.get_config')[network]
if not ret:
self.assertTrue(self.run_function('firewall.enable', profile=network))
self.assertTrue(self.run_function('firewall.disable', profile=network))
ret = self.run_function('firewall.get_config')[network]
self.assertFalse(ret)
self._pre_firewall_status(pre_run)
@destructiveTest
def test_firewall_enable(self):
'''
test firewall.enable
'''
pre_run = self.run_function('firewall.get_config')
network = 'Private'
ret = self.run_function('firewall.get_config')[network]
if ret:
self.assertTrue(self.run_function('firewall.disable', profile=network))
self.assertTrue(self.run_function('firewall.enable', profile=network))
ret = self.run_function('firewall.get_config')[network]
self.assertTrue(ret)
self._pre_firewall_status(pre_run)
def test_firewall_get_rule(self):
'''
test firewall.get_rule
'''
rule = 'Remote Event Log Management (NP-In)'
ret = self.run_function('firewall.get_rule', [rule])
checks = ['Private', 'LocalPort', 'RemotePort']
for check in checks:
self.assertIn(check, ret[rule])
@destructiveTest
def test_firewall_add_delete_rule(self):
'''
test firewall.add_rule and delete_rule
'''
rule = 'test rule'
port = '8080'
# test adding firewall rule
add_rule = self.run_function('firewall.add_rule', [rule, port])
ret = self.run_function('firewall.get_rule', [rule])
self.assertIn(rule, ret[rule])
self.assertIn(port, ret[rule])
# test deleting firewall rule
self.assertTrue(self.run_function('firewall.delete_rule', [rule, port]))
ret = self.run_function('firewall.get_rule', [rule])
self.assertNotIn(rule, ret)
self.assertNotIn(port, ret)
self.assertIn('No rules match the specified criteria.', ret)

View File

@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt Libs
import salt.utils
URL = 'repo.saltstack.com'
class NetworkTest(ModuleCase):
'''
Validate network module
'''
def test_network_ping(self):
'''
network.ping
'''
ret = self.run_function('network.ping', [URL])
exp_out = ['ping', URL, 'ttl', 'time']
for out in exp_out:
self.assertIn(out, ret.lower())
@skipIf(salt.utils.is_darwin(), 'not supported on macosx')
def test_network_netstat(self):
'''
network.netstat
'''
ret = self.run_function('network.netstat')
exp_out = ['proto', 'local-address']
for val in ret:
for out in exp_out:
self.assertIn(out, val)
def test_network_traceroute(self):
'''
network.traceroute
'''
if not salt.utils.which('traceroute') and not salt.utils.is_windows():
self.skipTest('traceroute not installed')
ret = self.run_function('network.traceroute', [URL])
exp_out = ['hostname', 'ip']
for out in exp_out:
self.assertIn(out, str(ret))
@skipIf(not salt.utils.is_windows(), 'windows only test')
def test_network_nslookup(self):
'''
network.nslookup
'''
ret = self.run_function('network.nslookup', [URL])
exp_out = ['Server', 'Address']
for out in exp_out:
self.assertIn(out, str(ret))

View File

@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'Tests for only Windows')
class NTPTest(ModuleCase):
'''
Validate windows ntp module
'''
@destructiveTest
def test_ntp_set_servers(self):
'''
test ntp get and set servers
'''
ntp_srv = 'pool.ntp.org'
set_srv = self.run_function('ntp.set_servers', [ntp_srv])
self.assertTrue(set_srv)
get_srv = self.run_function('ntp.get_servers')
self.assertEqual(ntp_srv, get_srv[0])

View File

@ -9,9 +9,9 @@
# Import python libs
from __future__ import absolute_import
import os
import pwd
import shutil
import re
import shutil
import sys
import tempfile
# Import Salt Testing libs
@ -73,23 +73,34 @@ class PipModuleTest(ModuleCase):
# Let's remove the pip binary
pip_bin = os.path.join(self.venv_dir, 'bin', 'pip')
py_dir = 'python{0}.{1}'.format(*sys.version_info[:2])
site_dir = os.path.join(self.venv_dir, 'lib', py_dir, 'site-packages')
if salt.utils.is_windows():
pip_bin = os.path.join(self.venv_dir, 'Scripts', 'pip.exe')
site_dir = os.path.join(self.venv_dir, 'lib', 'site-packages')
if not os.path.isfile(pip_bin):
self.skipTest(
'Failed to find the pip binary to the test virtualenv'
)
os.remove(pip_bin)
# Also remove the pip dir from site-packages
# This is needed now that we're using python -m pip instead of the
# pip binary directly. python -m pip will still work even if the
# pip binary is missing
shutil.rmtree(os.path.join(site_dir, 'pip'))
# Let's run a pip depending functions
for func in ('pip.freeze', 'pip.list'):
ret = self.run_function(func, bin_env=self.venv_dir)
self.assertIn(
'Command required for \'{0}\' not found: '
'Could not find a `pip` binary in virtualenv'.format(func),
'Could not find a `pip` binary'.format(func),
ret
)
@skip_if_not_root
def test_requirements_as_list_of_chains__sans_no_chown__cwd_set__absolute_file_path(self):
def test_requirements_as_list_of_chains__cwd_set__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -108,11 +119,10 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2b_filename, 'w') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
'pip.install', requirements=requirements_list,
bin_env=self.venv_dir, cwd=self.venv_dir
)
try:
@ -127,7 +137,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_requirements_as_list_of_chains__sans_no_chown__cwd_not_set__absolute_file_path(self):
def test_requirements_as_list_of_chains__cwd_not_set__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -146,12 +156,10 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2b_filename, 'w') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir
'pip.install', requirements=requirements_list, bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
@ -166,7 +174,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_requirements_as_list__sans_no_chown__absolute_file_path(self):
def test_requirements_as_list__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
req1_filename = os.path.join(self.venv_dir, 'requirements.txt')
@ -177,12 +185,10 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2_filename, 'w') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
bin_env=self.venv_dir
'pip.install', requirements=requirements_list, bin_env=self.venv_dir
)
found = self.pip_successful_install(ret['stdout'])
@ -197,7 +203,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_requirements_as_list__sans_no_chown__non_absolute_file_path(self):
def test_requirements_as_list__non_absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -214,11 +220,10 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2_filepath, 'w') as f:
f.write('pep8\n')
this_user = pwd.getpwuid(os.getuid())[0]
requirements_list = [req1_filename, req2_filename]
ret = self.run_function(
'pip.install', requirements=requirements_list, user=this_user,
'pip.install', requirements=requirements_list,
bin_env=self.venv_dir, cwd=req_cwd
)
try:
@ -233,7 +238,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_chained_requirements__sans_no_chown__absolute_file_path(self):
def test_chained_requirements__absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -246,10 +251,8 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2_filename, 'w') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
bin_env=self.venv_dir
'pip.install', requirements=req1_filename, bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
@ -260,7 +263,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_chained_requirements__sans_no_chown__non_absolute_file_path(self):
def test_chained_requirements__non_absolute_file_path(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -277,10 +280,9 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2_file, 'w') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
no_chown=False, cwd=req_basepath, bin_env=self.venv_dir
'pip.install', requirements=req1_filename, cwd=req_basepath,
bin_env=self.venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
@ -291,7 +293,7 @@ class PipModuleTest(ModuleCase):
raise
@skip_if_not_root
def test_issue_4805_nested_requirements_user_no_chown(self):
def test_issue_4805_nested_requirements(self):
self.run_function('virtualenv.create', [self.venv_dir])
# Create a requirements file that depends on another one.
@ -302,11 +304,8 @@ class PipModuleTest(ModuleCase):
with salt.utils.fopen(req2_filename, 'w') as f:
f.write('pep8')
this_user = pwd.getpwuid(os.getuid())[0]
ret = self.run_function(
'pip.install', requirements=req1_filename, user=this_user,
no_chown=True, bin_env=self.venv_dir
)
'pip.install', requirements=req1_filename, bin_env=self.venv_dir)
if self._check_download_error(ret['stdout']):
self.skipTest('Test skipped due to pip download error')
try:
@ -435,9 +434,9 @@ class PipModuleTest(ModuleCase):
def tearDown(self):
super(PipModuleTest, self).tearDown()
if os.path.isdir(self.venv_test_dir):
shutil.rmtree(self.venv_test_dir)
shutil.rmtree(self.venv_test_dir, ignore_errors=True)
if os.path.isdir(self.pip_temp):
shutil.rmtree(self.pip_temp)
shutil.rmtree(self.pip_temp, ignore_errors=True)
del self.venv_dir
del self.venv_test_dir
del self.pip_temp
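
The site-packages cleanup near the top of this file exists because the execution module now invokes pip as python -m pip instead of calling the pip script, so removing only the script no longer makes pip unavailable. A sketch of that invocation style (paths illustrative, not the Salt implementation):

import os
import subprocess


def run_pip(venv_dir, *args):
    # Resolve the virtualenv's interpreter and call pip as a module; this
    # still works after bin/pip (or Scripts\pip.exe) has been deleted.
    if os.name == 'nt':
        python = os.path.join(venv_dir, 'Scripts', 'python.exe')
    else:
        python = os.path.join(venv_dir, 'bin', 'python')
    return subprocess.call([python, '-m', 'pip'] + list(args))


# Example: run_pip('/tmp/test-venv', 'freeze')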

View File

@ -6,6 +6,7 @@ from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@ -30,10 +31,32 @@ class ServiceModuleTest(ModuleCase):
self.service_name = 'org.ntp.ntpd'
if int(os_release.split('.')[1]) >= 13:
self.service_name = 'com.apple.AirPlayXPCHelper'
elif salt.utils.is_windows():
self.service_name = 'Spooler'
if salt.utils.which(cmd_name) is None:
self.pre_srv_status = self.run_function('service.status', [self.service_name])
self.pre_srv_enabled = True if self.service_name in self.run_function('service.get_enabled') else False
if salt.utils.which(cmd_name) is None and not salt.utils.is_windows():
self.skipTest('{0} is not installed'.format(cmd_name))
def tearDown(self):
post_srv_status = self.run_function('service.status', [self.service_name])
post_srv_enabled = True if self.service_name in self.run_function('service.get_enabled') else False
if post_srv_status != self.pre_srv_status:
if self.pre_srv_status:
self.run_function('service.enable', [self.service_name])
else:
self.run_function('service.disable', [self.service_name])
if post_srv_enabled != self.pre_srv_enabled:
if self.pre_srv_enabled:
self.run_function('service.enable', [self.service_name])
else:
self.run_function('service.disable', [self.service_name])
del self.service_name
def test_service_status_running(self):
'''
test service.status execution module
@ -53,3 +76,37 @@ class ServiceModuleTest(ModuleCase):
check_service = self.run_function('service.status', [self.service_name])
self.assertFalse(check_service)
def test_service_restart(self):
'''
test service.restart
'''
self.assertTrue(self.run_function('service.restart', [self.service_name]))
def test_service_enable(self):
'''
test service.get_enabled and service.enable module
'''
# disable service before test
self.assertTrue(self.run_function('service.disable', [self.service_name]))
self.assertTrue(self.run_function('service.enable', [self.service_name]))
self.assertIn(self.service_name, self.run_function('service.get_enabled'))
def test_service_disable(self):
'''
test service.get_disabled and service.disable module
'''
# enable service before test
self.assertTrue(self.run_function('service.enable', [self.service_name]))
self.assertTrue(self.run_function('service.disable', [self.service_name]))
self.assertIn(self.service_name, self.run_function('service.get_disabled'))
@skipIf(not salt.utils.is_windows(), 'Windows Only Test')
def test_service_get_service_name(self):
'''
test service.get_service_name
'''
ret = self.run_function('service.get_service_name')
self.assertIn(self.service_name, ret.values())

Some files were not shown because too many files have changed in this diff.