Merge branch 'develop' into pkiminions_fix

This commit is contained in:
Aaron 2018-05-25 08:23:32 -05:00 committed by GitHub
commit 6301b31a85
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
203 changed files with 16154 additions and 4745 deletions

View File

@ -1,5 +1,5 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% vagrant = system('gem list -i kitchen-vagrant 2>/dev/null >/dev/null') %>
<% version = '2017.7.4' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
@ -94,12 +94,9 @@ platforms:
- yum install -y upstart
provisioner:
salt_bootstrap_options: -P -p rsync -y -x python2.7 -X git v<%= version %> >/dev/null
- name: ubuntu-rolling
- name: ubuntu-18.04
driver_config:
image: ubuntu:rolling
run_command: /lib/systemd/systemd
provisioner:
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.sh
- name: ubuntu-16.04
driver_config:
run_command: /lib/systemd/systemd

View File

@ -546,6 +546,10 @@
# targeted with the normal -N argument to salt-ssh.
#ssh_list_nodegroups: {}
# salt-ssh has the ability to update the flat roster file if a minion is not
# found in the roster. Set this to True to enable it.
#ssh_update_roster: False
##### Master Module Management #####
##########################################
# Manage how master side modules are loaded.

View File

@ -38,7 +38,10 @@ from __future__ import division
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
try:
from sphinx.util.compat import Directive
except ImportError:
from docutils.parsers.rst import Directive
CONTROL_HEIGHT = 30

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-API" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-API" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-api \- salt-api Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CALL" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CALL" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CLOUD" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CLOUD" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-cloud \- Salt Cloud Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CP" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-CP" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-KEY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-KEY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MASTER" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-MASTER" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MINION" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-MINION" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-PROXY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-PROXY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-proxy \- salt-proxy Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-RUN" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-RUN" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SSH" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-SSH" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-ssh \- salt-ssh Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SYNDIC" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-SYNDIC" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-UNITY" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT-UNITY" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt-unity \- salt-unity Command
.

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SALT" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
salt \- salt
.

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SPM" "1" "Feb 23, 2018" "2018.3.0" "Salt"
.TH "SPM" "1" "May 09, 2018" "2018.3.1" "Salt"
.SH NAME
spm \- Salt Package Manager Command
.

View File

@ -3275,3 +3275,31 @@ URL of the repository:
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
ID is useful in that it allows one to revert back to a previous version in the
event that an error is introduced in the latest revision of the repo.
``ssh_merge_pillar``
--------------------
.. versionadded:: 2018.3.2
Default: ``True``
Merges the compiled pillar data with the pillar data already available globally.
This is useful when using ``salt-ssh`` or ``salt-call --local`` and overriding the pillar
data in a state file:
.. code-block:: yaml
apply_showpillar:
module.run:
- name: state.apply
- mods:
- showpillar
- kwargs:
pillar:
test: "foo bar"
If set to ``True`` the ``showpillar`` state will have access to the
global pillar data.
If set to ``False`` only the overriding pillar data will be available
to the ``showpillar`` state.

View File

@ -17,3 +17,4 @@ roster modules
flat
range
scan
sshconfig

View File

@ -0,0 +1,6 @@
=====================
salt.roster.sshconfig
=====================
.. automodule:: salt.roster.sshconfig
:members:

View File

@ -125,7 +125,6 @@ state modules
influxdb_database
influxdb_retention_policy
influxdb_user
infoblox
infoblox_a
infoblox_cname
infoblox_host_record
@ -314,11 +313,14 @@ state modules
winrepo
x509
xmpp
zabbix_action
zabbix_host
zabbix_hostgroup
zabbix_mediatype
zabbix_template
zabbix_user
zabbix_usergroup
zabbix_valuemap
zcbuildout
zenoss
zk_concurrency

View File

@ -1,5 +0,0 @@
salt.states.infoblox module
===========================
.. automodule:: salt.states.infoblox
:members:

View File

@ -0,0 +1,5 @@
salt.states.zabbix_action
=========================
.. automodule:: salt.states.zabbix_action
:members:

View File

@ -0,0 +1,5 @@
salt.states.zabbix_template
===========================
.. automodule:: salt.states.zabbix_template
:members:

View File

@ -0,0 +1,5 @@
salt.states.zabbix_valuemap
===========================
.. automodule:: salt.states.zabbix_valuemap
:members:

View File

@ -410,10 +410,11 @@ exactly like the ``require`` requisite (the watching state will execute if
service.running:
- watch_any:
- file: /etc/apache2/sites-available/site1.conf
- file: /etc/apache2/sites-available/site2.conf
- file: apache2-site2
file.managed:
- name: /etc/apache2/sites-available/site1.conf
- source: salt://apache2/files/site1.conf
apache2-site2:
file.managed:
- name: /etc/apache2/sites-available/site2.conf
- source: salt://apache2/files/site2.conf
@ -860,6 +861,17 @@ Reload
after a state finishes. ``reload_pillar`` and ``reload_grains`` can also be set.
See :ref:`Reloading Modules <reloading-modules>`.
.. code-block:: yaml
grains_refresh:
module.run:
- name: saltutil.refresh_grains
- reload_grains: true
grains_read:
module.run:
- name: grains.items
.. _unless-requisite:
Unless

View File

@ -40,8 +40,9 @@ Beacons are typically enabled by placing a ``beacons:`` top level block in
beacons:
inotify:
/etc/important_file: {}
/opt: {}
- files:
/etc/important_file: {}
/opt: {}
The beacon system, like many others in Salt, can also be configured via the
minion pillar, grains, or local config file.
@ -50,6 +51,8 @@ minion pillar, grains, or local config file.
The `inotify` beacon only works on OSes that have `inotify` kernel support.
Currently this excludes FreeBSD, macOS, and Windows.
All beacon configuration is done using list based configuration.
Beacon Monitoring Interval
--------------------------
@ -61,21 +64,23 @@ and 10-second intervals:
beacons:
inotify:
/etc/important_file: {}
/opt: {}
interval: 5
disable_during_state_run: True
- files:
/etc/important_file: {}
/opt: {}
- interval: 5
- disable_during_state_run: True
load:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
interval: 10
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- interval: 10
.. _avoid-beacon-event-loops:
@ -96,8 +101,9 @@ which point the normal beacon interval will resume.
beacons:
inotify:
/etc/important_file: {}
disable_during_state_run: True
- files:
/etc/important_file: {}
- disable_during_state_run: True
.. _beacon-example:
@ -137,10 +143,11 @@ On the Salt minion, add the following configuration to
beacons:
inotify:
/etc/important_file:
mask:
- modify
disable_during_state_run: True
- files:
/etc/important_file:
mask:
- modify
- disable_during_state_run: True
Save the configuration file and restart the minion service. The beacon is now
set up to notify salt upon modifications made to the file.

View File

@ -351,6 +351,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
@ -359,6 +360,7 @@ This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
Or by just configuring the same auth block directly in the cloud provider config.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne

View File

@ -135,17 +135,6 @@ provider, profile, or map blocks use ssh_port option.
ssh_port: 2222
SSH Port
========
By default ssh port is set to port 22. If you want to use a custom port in
provider, profile, or map blocks use ssh_port option.
.. code-block:: yaml
ssh_port: 2222
Delete SSH Keys
===============
When Salt Cloud deploys an instance, the SSH pub key for the instance is added

View File

@ -6,7 +6,7 @@ Debian GNU/Linux / Raspbian
Debian GNU/Linux distribution and some derivatives such as Raspbian already
have included Salt packages to their repositories. However, current stable
release codenamed "Jessie" contains old outdated Salt release. It is
Debian release contains old outdated Salt releases. It is
recommended to use SaltStack repository for Debian as described
:ref:`below <installation-debian-repo>`.
@ -33,11 +33,13 @@ Instructions are at https://repo.saltstack.com/#debian.
Installation from the Debian / Raspbian Official Repository
===========================================================
Stretch (Testing) and Sid (Unstable) distributions are already contain mostly
up-to-date Salt packages built by Debian Salt Team. You can install Salt
components directly from Debian.
The Debian distributions contain mostly old Salt packages
built by the Debian Salt Team. You can install Salt
components directly from Debian but it is recommended to
use the instructions above for the packages from the official
Salt repository.
On Jessie (Stable) there is an option to install Salt minion from Stretch with
On Jessie there is an option to install Salt minion from Stretch with
`python-tornado` dependency from `jessie-backports` repositories.
To install fresh release of Salt minion on Jessie:
@ -79,7 +81,7 @@ To install fresh release of Salt minion on Jessie:
apt-get update
apt-get install python-zmq python-tornado/stretch salt-common/stretch
#. Install Salt minion package from Stretch:
#. Install Salt minion package from Latest Debian Release:
.. code-block:: bash

View File

@ -1,8 +1,9 @@
===========================
Salt 2017.7.6 Release Notes
In Progress: Salt 2017.7.6 Release Notes
===========================
Version 2017.7.6 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Version 2017.7.6 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.
Option to Return to Previous Pillar Include Behavior
----------------------------------------------------

View File

@ -13,3 +13,13 @@ used as part of a salt-minion process running on the master. This will allow
the minion to have pillars assigned to it, and will still allow the engine to
create a LocalClient connection to the master ipc sockets to control
environments.
Changes to Automatically Updating the Roster File
-------------------------------------------------
In ``2018.3.0`` salt-ssh was configured to automatically update the flat roster
file if a minion was not found for salt-ssh. This was decided to be
undesirable as a default. The ``--skip-roster`` flag has been removed and
replaced with ``--update-roster``, which will enable salt-ssh to add minions
to the flat roster file. This behavior can also be enabled by setting
``ssh_update_roster: True`` in the master config file.

View File

@ -507,3 +507,15 @@ passed correctly to the minion to run an orchestration in test mode. At present
it is not possible to pass ``test=False`` on the command-line to override a
minion in permanent test mode and so the ``test:False`` option must still be set
in the orchestration file.
============================
LDAP External Authentication
============================
freeipa 'groupattribute' support
--------------------------------
Previously, if Salt was using external authentication against a freeipa LDAP system
it could only search for users via the 'accountattributename' field. This release
adds an additional search using the 'groupattribute' field as well. The original
'accountattributename' search is done first, then the 'groupattribute', allowing for
backward compatibility with previous Salt releases.

View File

@ -59,11 +59,13 @@ To match a nodegroup on the CLI, use the ``-N`` command-line option:
salt -N group1 test.ping
.. versionadded:: Fluorine
.. note::
The ``N@`` classifier cannot be used in compound matches within the CLI or
:term:`top file`, it is only recognized in the :conf_master:`nodegroups`
master config file parameter.
The ``N@`` classifier historically could not be used in compound matches
within the CLI or :term:`top file`, it was only recognized in the
:conf_master:`nodegroups` master config file parameter. As of Fluorine
release, this limitation no longer exists.
To match a nodegroup in your :term:`top file`, make sure to put ``- match:
nodegroup`` on the line directly following the nodegroup name.

View File

@ -699,15 +699,24 @@ repository to be served up from the Salt fileserver path
Mountpoints can also be configured on a :ref:`per-remote basis
<gitfs-per-remote-config>`.
Using gitfs in Masterless Mode
==============================
Since 2014.7.0, gitfs can be used in masterless mode. To do so, simply add the
gitfs configuration parameters (and set :conf_master:`fileserver_backend`) in
the _minion_ config file instead of the master config file.
Using gitfs Alongside Other Backends
====================================
Sometimes it may make sense to use multiple backends; for instance, if ``sls``
files are stored in git but larger files are stored directly on the master.
The cascading lookup logic used for multiple remotes is also used with
multiple backends. If the ``fileserver_backend`` option contains
multiple backends:
The cascading lookup logic used for multiple remotes is also used with multiple
backends. If the :conf_master:`fileserver_backend` option contains multiple
backends:
.. code-block:: yaml
@ -719,7 +728,6 @@ Then the ``roots`` backend (the default backend of files in ``/srv/salt``) will
be searched first for the requested file; then, if it is not found on the
master, each configured git remote will be searched.
Branches, Environments, and Top Files
=====================================

View File

@ -75,7 +75,7 @@ set -l salt_programs_select salt salt-cp
for program in $salt_programs_select
complete -c $program -f -s G -l grain -d "Instead of using shell globs to evaluate the target use a grain value to identify targets, the syntax for the target is the grain key followed by a globexpression: \"os:Arch*\""
complete -c $program -f -l grain-pcre -d "Instead of using shell globs to evaluate the target use a grain value to identify targets, the syntax for the target is the grain key followed by a pcre regular expression: \"os:Arch.*\""
complete -c $program -f -s L -l list -d "Instead of using shell globs to evaluate the target servers, take a comma or space delimited list of servers."
complete -c $program -f -s L -l list -d "Instead of using shell globs to evaluate the target servers, take a comma or whitespace delimited list of servers."
complete -c $program -f -s N -l nodegroup -d "Instead of using shell globs to evaluate the target use one of the predefined nodegroups to identify a list of targets."
complete -c $program -f -s E -l pcre -d "Instead of using shell globs to evaluate the target servers, use pcre regular expressions"
complete -c $program -f -s R -l range -d "Instead of using shell globs to evaluate the target use a range expression to identify targets. Range expressions look like %cluster"

View File

@ -13,17 +13,17 @@
_salt_get_grains(){
if [ "$1" = 'local' ] ; then
salt-call --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
salt-call --log-level=error --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
else
salt '*' --timeout 2 --hide-timeout --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
salt '*' --timeout 2 --hide-timeout --log-level=error --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
fi
}
_salt_get_grain_values(){
if [ "$1" = 'local' ] ; then
salt-call --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
salt-call --log-level=error --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
else
salt '*' --timeout 2 --hide-timeout --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
salt '*' --timeout 2 --hide-timeout --log-level=error --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
fi
}
@ -34,8 +34,24 @@ _salt_get_keys(){
done
}
_salt(){
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
_salt_list_functions(){
# salt-call: get all functions on this minion
# salt: get all functions on all minions
# sed: remove all array overhead and convert to newline separated list
# sort: chop out doubled entries, so overhead is minimal later during actual completion
if [ "$1" = 'local' ] ; then
salt-call --log-level=quiet --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u
else
salt '*' --timeout 2 --hide-timeout --log-level=quiet --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u
fi
}
_salt_get_coms() {
CACHE_DIR="$HOME/.cache/salt-${1}-comp-cache_functions"
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
@ -43,6 +59,17 @@ _salt(){
mkdir -p "$(dirname ${_salt_cache_functions})"
fi
# Regenerate cache if timed out
if [[ "$(stat --format=%Z ${_salt_cache_functions} 2>/dev/null)" -lt "$(date --date="${_salt_cache_timeout}" +%s)" ]]; then
_salt_list_functions $1 > "${_salt_cache_functions}"
fi
# filter results, to only print the part to next dot (or end of function)
sed 's/^\('${cur}'\(\.\|[^.]*\)\)\?.*/\1/' "${_salt_cache_functions}" | sort -u
}
_salt(){
local cur prev opts _salt_grains _salt_coms pprev ppprev
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
@ -129,22 +156,10 @@ _salt(){
;;
esac
# Regenerate cache if timed out
if [[ "$(stat --format=%Z ${_salt_cache_functions} 2>/dev/null)" -lt "$(date --date="${_salt_cache_timeout}" +%s)" ]]; then
# salt: get all functions on all minions
# sed: remove all array overhead and convert to newline separated list
# sort: chop out doubled entries, so overhead is minimal later during actual completion
salt '*' --timeout 2 --hide-timeout --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u \
> "${_salt_cache_functions}"
fi
# filter results, to only print the part to next dot (or end of function)
_salt_coms="$(sed 's/^\('${cur}'\(\.\|[^.]*\)\)\?.*/\1/' "${_salt_cache_functions}" | sort -u)"
_salt_coms=$(_salt_get_coms remote)
# If there are still dots in the suggestion, do not append space
grep "^${cur}.*\." "${_salt_cache_functions}" &>/dev/null && compopt -o nospace
grep "^${cur}.*\." "${_salt_coms}" &>/dev/null && compopt -o nospace
all="${opts} ${_salt_coms}"
COMPREPLY=( $(compgen -W "${all}" -- ${cur}) )
@ -276,7 +291,11 @@ _saltcall(){
;;
esac
_salt_coms="$(salt-call --out=txt -- sys.list_functions|sed 's/^.*\[//' | tr -d ",']" )"
_salt_coms=$(_salt_get_coms local)
# If there are still dots in the suggestion, do not append space
grep "^${cur}.*\." "${_salt_coms}" &>/dev/null && compopt -o nospace
COMPREPLY=( $(compgen -W "${opts} ${_salt_coms}" -- ${cur} ))
return 0
}

View File

@ -22,6 +22,7 @@ BASE_THORIUM_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'thorium')
BASE_MASTER_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'salt-master')
LOGS_DIR = os.path.join(ROOT_DIR, 'var', 'log', 'salt')
PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run')
SPM_FORMULA_PATH = os.path.join(ROOT_DIR, 'spm', 'salt')
SPM_PILLAR_PATH = os.path.join(ROOT_DIR, 'spm', 'pillar')
SPM_REACTOR_PATH = os.path.join(ROOT_DIR, 'spm', 'reactor')
SPM_PARENT_PATH = os.path.join(ROOT_DIR, 'spm')
SPM_FORMULA_PATH = os.path.join(SPM_PARENT_PATH, 'salt')
SPM_PILLAR_PATH = os.path.join(SPM_PARENT_PATH, 'pillar')
SPM_REACTOR_PATH = os.path.join(SPM_PARENT_PATH, 'reactor')

View File

@ -740,7 +740,7 @@ Function ${un}uninstallSalt
# Remove files
Delete "$INSTDIR\uninst.exe"
Delete "$INSTDIR\nssm.exe"
Delete "$INSTDIR\ssm.exe"
Delete "$INSTDIR\salt*"
Delete "$INSTDIR\vcredist.exe"
RMDir /r "$INSTDIR\bin"

View File

@ -35,7 +35,7 @@ Function Get-Settings {
# Prerequisite software
$Prerequisites = @{
"NSIS" = "nsis-3.02.1-setup.exe"
"NSIS" = "nsis-3.03-setup.exe"
"VCforPython" = "VCForPython27.msi"
"VCppBuildTools" = "visualcppbuildtools_full.exe"
}
@ -59,21 +59,15 @@ Function Get-Settings {
# Filenames for 64 bit Windows
$64bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win_amd64.whl"
"Python2" = "python-2.7.14.amd64.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win_amd64.whl"
"Python3" = "python-3.5.3-amd64.exe"
"PyWin323" = "pywin32-221-cp35-cp35m-win_amd64.whl"
}
$ini.Add("64bitPrograms", $64bitPrograms)
# Filenames for 32 bit Windows
$32bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win32.whl"
"Python2" = "python-2.7.14.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win32.whl"
"Python3" = "python-3.5.3.exe"
"PyWin323" = "pywin32-221-cp35-cp35m-win32.whl"
}
$ini.Add("32bitPrograms", $32bitPrograms)

View File

@ -25,7 +25,7 @@ _modules(){
fi
if _cache_invalid salt/modules || ! _retrieve_cache salt/modules; then
_funcs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --out txt sys.list_functions)"}%%[],]##}#\[}:#local:} )
_funcs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --log-level error --out txt sys.list_functions)"}%%[],]##}#\[}:#local:} )
_store_cache salt/modules _funcs
fi
@ -40,7 +40,7 @@ _runners(){
fi
if _cache_invalid salt/runners || ! _retrieve_cache salt/runners; then
_runs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --out txt sys.list_runner_functions)"}%%[],]##}#\[}:#local:} )
_runs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --log-level error --out txt sys.list_runner_functions)"}%%[],]##}#\[}:#local:} )
_store_cache salt/runners _runs
fi
@ -119,7 +119,7 @@ _target_opt_pat=(
_target_options=(
"$_target_opt_pat[2]"{-E,--pcre}'[use pcre regular expressions]:pcre:'
"$_target_opt_pat[2]"{-L,--list}'[take a comma or space delimited list of servers.]:list:'
"$_target_opt_pat[2]"{-L,--list}'[take a comma or whitespace delimited list of servers.]:list:'
"$_target_opt_pat[2]"{-G,--grain}'[use a grain value to identify targets]:Grains:'
"$_target_opt_pat[2]--grain-pcre[use a grain value to identify targets.]:pcre:"
"$_target_opt_pat[2]"{-N,--nodegroup}'[use one of the predefined nodegroups to identify a list of targets.]:Nodegroup:'

View File

@ -0,0 +1,4 @@
-r base.txt
# Required by Tornado to handle threads stuff.
futures>=2.0

View File

@ -0,0 +1 @@
-r base.txt

View File

@ -1,8 +1,10 @@
Jinja2
msgpack-python>0.3,!=0.5.5
# This should be changed to msgpack-python for Packages
# msgpack-python>0.3,!=0.5.5
msgpack>=0.5,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0
tornado>=4.2.1,<5.0
tornado>=4.2.1,<6.0
# Required by Tornado to handle threads stuff.
futures>=2.0

View File

@ -8,6 +8,7 @@ Provide authentication using simple LDAP binds
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import itertools
from salt.ext import six
# Import salt libs
@ -358,10 +359,11 @@ def groups(username, **kwargs):
search_results = bind.search_s(search_base,
ldap.SCOPE_SUBTREE,
search_string,
[salt.utils.stringutils.to_str(_config('accountattributename')), str('cn')]) # future lint: disable=blacklisted-function
[salt.utils.stringutils.to_str(_config('accountattributename')), salt.utils.stringutils.to_str(_config('groupattribute')), str('cn')]) # future lint: disable=blacklisted-function
for entry, result in search_results:
for user in result[_config('accountattributename')]:
for user in itertools.chain(result.get(_config('accountattributename'), []),
result.get(_config('groupattribute'), [])):
if username == salt.utils.stringutils.to_unicode(user).split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])

View File

@ -8,7 +8,6 @@ from __future__ import absolute_import, unicode_literals
# Import salt libs
import salt.utils.data
import salt.utils.locales
import salt.ext.six
from salt.ext.six.moves import map
@ -93,7 +92,7 @@ def beacon(config):
n_flag = 0
for key in _config['services'][name]:
if isinstance(key, salt.ext.six.string_types):
key = salt.utils.locales.sdecode(key)
key = salt.utils.data.decode(key)
if key in cur:
if _config['services'][name][key] == cur[key]:
n_flag += 1

View File

@ -407,7 +407,7 @@ class SSH(object):
'host': hostname,
'user': user,
}
if not self.opts.get('ssh_skip_roster'):
if self.opts.get('ssh_update_roster'):
self._update_roster()
def get_pubkey(self):

View File

@ -135,9 +135,9 @@ def lowstate_file_refs(chunks, extras=''):
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if saltenv not in refs:
refs[saltenv] = []
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')

View File

@ -1916,7 +1916,8 @@ class Map(Cloud):
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
for profile_name, nodes in six.iteritems(self.rendered_map):
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts['profiles']:
msg = (
'The required profile, \'{0}\', defined in the map '
@ -1934,21 +1935,23 @@ class Map(Cloud):
profile_data = self.opts['profiles'].get(profile_name)
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
alias, driver = profile_data.get('provider').split(':')
provider_details = self.opts['providers'][alias][driver].copy()
del provider_details['profiles']
# Update the provider details information with profile data
# Profile data should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
provider_details.update(profile_data)
profile_data = provider_details
for nodename, overrides in six.iteritems(nodes):
# Get the VM name
nodedata = copy.deepcopy(profile_data)
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
alias, driver = overrides.get('provider').split(':')
else:
alias, driver = profile_data.get('provider').split(':')
provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
del provider_details['profiles']
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ('grains', 'master', 'minion', 'volumes',
'requires'):

View File

@ -1158,11 +1158,11 @@ def request_instance(vm_):
volume['vhd'] = VirtualHardDisk(volume['vhd'])
if 'image' in volume:
volume['create_option'] = DiskCreateOptionTypes.from_image
volume['create_option'] = 'from_image'
elif 'attach' in volume:
volume['create_option'] = DiskCreateOptionTypes.attach
volume['create_option'] = 'attach'
else:
volume['create_option'] = DiskCreateOptionTypes.empty
volume['create_option'] = 'empty'
data_disks.append(DataDisk(**volume))
if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':

View File

@ -75,7 +75,6 @@ import time
import uuid
import pprint
import logging
import random
# Import libs for talking to the EC2 API
import hmac
@ -302,8 +301,8 @@ def query(params=None, setname=None, requesturl=None, location=None,
# Retrieve access credentials from meta-data, or use provided
access_key_id, secret_access_key, token = aws.creds(provider)
attempts = 5
while attempts > 0:
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
params_with_headers = params.copy()
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
@ -364,15 +363,15 @@ def query(params=None, setname=None, requesturl=None, location=None,
querystring = querystring.replace('+', '%20')
canonical_request = method + '\n' + canonical_uri + '\n' + \
querystring + '\n' + canonical_headers + '\n' + \
signed_headers + '\n' + payload_hash
querystring + '\n' + canonical_headers + '\n' + \
signed_headers + '\n' + payload_hash
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amz_date + '\n' + \
credential_scope + '\n' + \
salt.utils.hashutils.sha256_digest(canonical_request)
credential_scope + '\n' + \
salt.utils.hashutils.sha256_digest(canonical_request)
kDate = sign(('AWS4' + provider['key']).encode('utf-8'), datestamp)
kRegion = sign(kDate, region)
@ -381,12 +380,11 @@ def query(params=None, setname=None, requesturl=None, location=None,
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
hashlib.sha256).hexdigest()
#sig = binascii.b2a_base64(hashed)
authorization_header = algorithm + ' ' + 'Credential=' + \
provider['id'] + '/' + credential_scope + \
', ' + 'SignedHeaders=' + signed_headers + \
', ' + 'Signature=' + signature
provider['id'] + '/' + credential_scope + \
', ' + 'SignedHeaders=' + signed_headers + \
', ' + 'Signature=' + signature
headers = {'x-amz-date': amz_date, 'Authorization': authorization_header}
log.debug('EC2 Request: %s', requesturl)
@ -407,15 +405,14 @@ def query(params=None, setname=None, requesturl=None, location=None,
# check to see if we should retry the query
err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
if attempts > 0 and err_code and err_code in EC2_RETRY_CODES:
attempts -= 1
if err_code and err_code in EC2_RETRY_CODES:
attempts += 1
log.error(
'EC2 Response Status Code and Error: [%s %s] %s; '
'Attempts remaining: %s',
exc.response.status_code, exc, data, attempts
)
# Wait a bit before continuing to prevent throttling
time.sleep(2)
aws.sleep_exponential_backoff(attempts)
continue
log.error(
@ -1562,29 +1559,21 @@ def _modify_eni_properties(eni_id, properties=None, vm_=None):
for k, v in six.iteritems(properties):
params[k] = v
retries = 5
while retries > 0:
retries = retries - 1
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if isinstance(result, dict) and result.get('error'):
time.sleep(1)
continue
return result
raise SaltCloudException(
'Could not change interface <{0}> attributes '
'<\'{1}\'> after 5 retries'.format(
eni_id, properties
if isinstance(result, dict) and result.get('error'):
raise SaltCloudException(
'Could not change interface <{0}> attributes <\'{1}\'>'.format(
eni_id, properties
)
)
)
else:
return result
def _associate_eip_with_interface(eni_id, eip_id, private_ip=None, vm_=None):
@ -1597,44 +1586,35 @@ def _associate_eip_with_interface(eni_id, eip_id, private_ip=None, vm_=None):
be NATted to - useful if you have multiple IP addresses assigned to an
interface.
'''
retries = 5
while retries > 0:
params = {'Action': 'AssociateAddress',
'NetworkInterfaceId': eni_id,
'AllocationId': eip_id}
params = {'Action': 'AssociateAddress',
'NetworkInterfaceId': eni_id,
'AllocationId': eip_id}
if private_ip:
params['PrivateIpAddress'] = private_ip
if private_ip:
params['PrivateIpAddress'] = private_ip
retries = retries - 1
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
result = aws.query(params,
return_root=True,
location=get_location(vm_),
provider=get_provider(),
opts=__opts__,
sigver='4')
if isinstance(result, dict) and result.get('error'):
time.sleep(1)
continue
if not result[2].get('associationId'):
break
log.debug(
'Associated ElasticIP address %s with interface %s',
eip_id, eni_id
if not result[2].get('associationId'):
raise SaltCloudException(
'Could not associate elastic ip address '
'<{0}> with network interface <{1}>'.format(
eip_id, eni_id
)
)
return result[2].get('associationId')
raise SaltCloudException(
'Could not associate elastic ip address '
'<{0}> with network interface <{1}>'.format(
eip_id, eni_id
)
log.debug(
'Associated ElasticIP address %s with interface %s',
eip_id, eni_id
)
return result[2].get('associationId')
def _update_enis(interfaces, instance, vm_=None):
config_enis = {}
@ -2011,7 +1991,8 @@ def request_instance(vm_=None, call=None):
params[termination_key] = six.text_type(set_del_root_vol_on_destroy).lower()
# Use default volume type if not specified
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and 'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and \
'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
type_key = '{0}BlockDeviceMapping.{1}.Ebs.VolumeType'.format(spot_prefix, dev_index)
params[type_key] = rd_type
@ -2182,8 +2163,7 @@ def query_instance(vm_=None, call=None):
provider = get_provider(vm_)
attempts = 0
# perform exponential backoff and wait up to one minute (2**6 seconds)
while attempts < 7:
while attempts < aws.AWS_MAX_RETRIES:
data, requesturl = aws.query(params, # pylint: disable=unbalanced-tuple-unpacking
location=location,
provider=provider,
@ -2205,7 +2185,7 @@ def query_instance(vm_=None, call=None):
else:
break
time.sleep(random.uniform(1, 2**attempts))
aws.sleep_exponential_backoff(attempts)
attempts += 1
continue
else:
@ -2215,7 +2195,6 @@ def query_instance(vm_=None, call=None):
def __query_ip_address(params, url): # pylint: disable=W0613
data = aws.query(params,
#requesturl=url,
location=location,
provider=provider,
opts=__opts__,
@ -3028,9 +3007,9 @@ def set_tags(name=None,
params['Tag.{0}.Key'.format(idx)] = tag_k
params['Tag.{0}.Value'.format(idx)] = tag_v
attempts = 5
while attempts >= 0:
result = aws.query(params,
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
aws.query(params,
setname='tagSet',
location=location,
provider=get_provider(),
@ -3064,9 +3043,8 @@ def set_tags(name=None,
if failed_to_set_tags:
log.warning('Failed to set tags. Remaining attempts %s', attempts)
attempts -= 1
# Just a little delay between attempts...
time.sleep(1)
attempts += 1
aws.sleep_exponential_backoff(attempts)
continue
return settags
@ -3405,8 +3383,8 @@ def _get_node(name=None, instance_id=None, location=None):
provider = get_provider()
attempts = 10
while attempts >= 0:
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
try:
instances = aws.query(params,
location=location,
@ -3416,13 +3394,12 @@ def _get_node(name=None, instance_id=None, location=None):
instance_info = _extract_instance_info(instances).values()
return next(iter(instance_info))
except IndexError:
attempts -= 1
attempts += 1
log.debug(
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', instance_id or name, attempts
)
# Just a little delay between attempts...
time.sleep(0.5)
aws.sleep_exponential_backoff(attempts)
return {}
@ -3699,6 +3676,25 @@ def enable_term_protect(name, call=None):
return _toggle_term_protect(name, 'true')
def disable_term_protect(name, call=None):
'''
Disable termination protection on a node
CLI Example:
.. code-block:: bash
salt-cloud -a disable_term_protect mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The enable_term_protect action must be called with '
'-a or --action.'
)
return _toggle_term_protect(name, 'false')
def disable_detailed_monitoring(name, call=None):
'''
Enable/disable detailed monitoring on a node
@ -3927,7 +3923,8 @@ def register_image(kwargs=None, call=None):
.. code-block:: bash
salt-cloud -f register_image my-ec2-config ami_name=my_ami description="my description" root_device_name=/dev/xvda snapshot_id=snap-xxxxxxxx
salt-cloud -f register_image my-ec2-config ami_name=my_ami description="my description"
root_device_name=/dev/xvda snapshot_id=snap-xxxxxxxx
'''
if call != 'function':

View File

@ -597,6 +597,7 @@ def _clean_create_kwargs(**kwargs):
'volume_size': int,
'nat_destination': six.string_types,
'group': six.string_types,
'userdata': six.string_types,
}
extra = kwargs.pop('extra', {})
for key, value in six.iteritems(kwargs.copy()):

View File

@ -35,7 +35,6 @@ import salt.utils.yaml
import salt.utils.zeromq
import salt.syspaths
import salt.exceptions
from salt.utils.locales import sdecode
import salt.defaults.exitcodes
try:
@ -992,6 +991,7 @@ VALID_OPTS = {
'ssh_identities_only': bool,
'ssh_log_file': six.string_types,
'ssh_config_file': six.string_types,
'ssh_merge_pillar': bool,
# Enable ioflo verbose logging. Warning! Very verbose!
'ioflo_verbose': int,
@ -1502,6 +1502,7 @@ DEFAULT_MINION_OPTS = {
},
'discovery': False,
'schedule': {},
'ssh_merge_pillar': True
}
DEFAULT_MASTER_OPTS = {
@ -1915,15 +1916,15 @@ DEFAULT_API_OPTS = {
DEFAULT_SPM_OPTS = {
# ----- Salt master settings overridden by SPM --------------------->
'spm_conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'spm'),
'formula_path': '/srv/spm/salt',
'pillar_path': '/srv/spm/pillar',
'reactor_path': '/srv/spm/reactor',
'formula_path': salt.syspaths.SPM_FORMULA_PATH,
'pillar_path': salt.syspaths.SPM_PILLAR_PATH,
'reactor_path': salt.syspaths.SPM_REACTOR_PATH,
'spm_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'spm'),
'spm_default_include': 'spm.d/*.conf',
# spm_repos_config also includes a .d/ directory
'spm_repos_config': '/etc/salt/spm.repos',
'spm_cache_dir': os.path.join(salt.syspaths.CACHE_DIR, 'spm'),
'spm_build_dir': '/srv/spm_build',
'spm_build_dir': os.path.join(salt.syspaths.SRV_ROOT_DIR, 'spm_build'),
'spm_build_exclude': ['CVS', '.hg', '.git', '.svn'],
'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
'cache': 'localfs',
@ -2109,7 +2110,7 @@ def _validate_ssh_minion_opts(opts):
for opt_name in list(ssh_minion_opts):
if re.match('^[a-z0-9]+fs_', opt_name, flags=re.IGNORECASE) \
or 'pillar' in opt_name \
or ('pillar' in opt_name and not 'ssh_merge_pillar' == opt_name) \
or opt_name in ('fileserver_backend',):
log.warning(
'\'%s\' is not a valid ssh_minion_opts parameter, ignoring',
@ -2157,7 +2158,7 @@ def _read_conf_file(path):
if not isinstance(conf_opts['id'], six.string_types):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = sdecode(conf_opts['id'])
conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])
return conf_opts
@ -3336,7 +3337,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
if isinstance(vm_[name], types.GeneratorType):
value = next(vm_[name], '')
else:
if isinstance(value, dict):
if isinstance(value, dict) and isinstance(vm_[name], dict):
value.update(vm_[name].copy())
else:
value = deepcopy(vm_[name])
@ -3421,7 +3422,7 @@ def is_profile_configured(opts, provider, profile_name, vm_=None):
# Most drivers need a size, but some do not.
non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',
'softlayer', 'softlayer_hw', 'vmware', 'vsphere',
'virtualbox', 'libvirt', 'oneandone']
'virtualbox', 'libvirt', 'oneandone', 'profitbricks']
provider_key = opts['providers'][alias][driver]
profile_key = opts['providers'][alias][driver]['profiles'][profile_name]

View File

@ -13,6 +13,7 @@ import string
import shutil
import ftplib
from tornado.httputil import parse_response_start_line, HTTPHeaders, HTTPInputError
import salt.utils.atomicfile
# Import salt libs
from salt.exceptions import (
@ -35,7 +36,6 @@ import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.versions
from salt.utils.locales import sdecode
from salt.utils.openstack.swift import SaltSwift
# pylint: disable=no-name-in-module,import-error
@ -46,6 +46,7 @@ from salt.ext.six.moves.urllib.parse import urlparse, urlunparse
# pylint: enable=no-name-in-module,import-error
log = logging.getLogger(__name__)
MAX_FILENAME_LENGTH = 255
def get_file_client(opts, pillar=False):
@ -223,7 +224,7 @@ class Client(object):
'''
ret = []
path = self._check_proto(sdecode(path))
path = self._check_proto(salt.utils.data.decode(path))
# We want to make sure files start with this *directory*, use
# '/' explicitly because the master (that's generating the
# list of files) only runs on POSIX
@ -236,7 +237,7 @@ class Client(object):
# go through the list of all files finding ones that are in
# the target directory and caching them
for fn_ in self.file_list(saltenv):
fn_ = sdecode(fn_)
fn_ = salt.utils.data.decode(fn_)
if fn_.strip() and fn_.startswith(path):
if salt.utils.stringutils.check_include_exclude(
fn_, include_pat, exclude_pat):
@ -259,7 +260,7 @@ class Client(object):
dest = salt.utils.path.join(cachedir, 'files', saltenv)
for fn_ in self.file_list_emptydirs(saltenv):
fn_ = sdecode(fn_)
fn_ = salt.utils.data.decode(fn_)
if fn_.startswith(path):
minion_dir = '{0}/{1}'.format(dest, fn_)
if not os.path.isdir(minion_dir):
@ -831,6 +832,9 @@ class Client(object):
else:
file_name = url_data.path
if len(file_name) > MAX_FILENAME_LENGTH:
file_name = salt.utils.hashutils.sha256_digest(file_name)
return salt.utils.path.join(
cachedir,
'extrn_files',
@ -899,7 +903,7 @@ class LocalClient(Client):
dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
for fname in files:
relpath = os.path.relpath(os.path.join(root, fname), path)
ret.append(sdecode(relpath))
ret.append(salt.utils.data.decode(relpath))
return ret
def file_list_emptydirs(self, saltenv='base', prefix=''):
@ -916,7 +920,7 @@ class LocalClient(Client):
# Don't walk any directories that match file_ignore_regex or glob
dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)]
if len(dirs) == 0 and len(files) == 0:
ret.append(sdecode(os.path.relpath(root, path)))
ret.append(salt.utils.data.decode(os.path.relpath(root, path)))
return ret
def dir_list(self, saltenv='base', prefix=''):
@ -930,7 +934,7 @@ class LocalClient(Client):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
ret.append(sdecode(os.path.relpath(root, path)))
ret.append(salt.utils.data.decode(os.path.relpath(root, path)))
return ret
def __get_file_path(self, path, saltenv='base'):
@ -1156,7 +1160,11 @@ class RemoteClient(Client):
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
if makedirs:
os.makedirs(destdir)
try:
os.makedirs(destdir)
except OSError as exc:
if exc.errno != errno.EEXIST: # ignore if it was there already
raise
else:
return False
# We need an open filehandle here, that's why we're not using a
@ -1211,7 +1219,7 @@ class RemoteClient(Client):
# remove it to avoid a traceback trying to write the file
if os.path.isdir(dest):
salt.utils.files.rm_rf(dest)
fn_ = salt.utils.files.fopen(dest, 'wb+')
fn_ = salt.utils.atomicfile.atomic_open(dest, 'wb+')
if data.get('gzip', None):
data = salt.utils.gzip_util.uncompress(data['data'])
else:
@ -1381,14 +1389,12 @@ class RemoteClient(Client):
'''
Return the metadata derived from the master_tops system
'''
salt.utils.versions.warn_until(
'Magnesium',
'The _ext_nodes master function has '
'been renamed to _master_tops. To ensure '
'compatibility when using older Salt masters '
'we continue to pass the function as _ext_nodes.'
log.debug(
'The _ext_nodes master function has been renamed to _master_tops. '
'To ensure compatibility when using older Salt masters we will '
'continue to invoke the function as _ext_nodes until the '
'Magnesium release.'
)
# TODO: Change back to _master_tops
# for Magnesium release
load = {'cmd': '_ext_nodes',

View File

@ -850,7 +850,8 @@ class FSChan(object):
self.opts['__fs_update'] = True
else:
self.fs.update()
self.cmd_stub = {'master_tops': {}}
self.cmd_stub = {'master_tops': {},
'ext_nodes': {}}
def send(self, load, tries=None, timeout=None, raw=False): # pylint: disable=unused-argument
'''

View File

@ -253,7 +253,7 @@ def file_hash(load, fnd):
except OSError:
pass
return file_hash(load, fnd)
if os.path.getmtime(path) == mtime:
if str(os.path.getmtime(path)) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret

View File

@ -459,7 +459,11 @@ def _bsd_memdata(osdata):
if osdata['kernel'] in ['OpenBSD', 'NetBSD']:
swapctl = salt.utils.path.which('swapctl')
swap_total = __salt__['cmd.run']('{0} -sk'.format(swapctl)).split(' ')[1]
swap_data = __salt__['cmd.run']('{0} -sk'.format(swapctl))
if swap_data == 'no swap devices configured':
swap_total = 0
else:
swap_total = swap_data.split(' ')[1]
else:
swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl))
grains['swap_total'] = int(swap_total) // 1024 // 1024
@ -1227,6 +1231,7 @@ _OS_NAME_MAP = {
'synology': 'Synology',
'nilrt': 'NILinuxRT',
'nilrt-xfce': 'NILinuxRT-XFCE',
'poky': 'Poky',
'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'antergos': 'Antergos',
@ -1786,7 +1791,7 @@ def os_data():
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') == 'RedHat':
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
elif grains.get('os_family') == 'NILinuxRT':
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
if line.startswith('arch'):

View File

@ -1629,7 +1629,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
raise KeyError
log.error(
'Failed to load function %s because its module (%s) is '
'not in the whitelist: %s', key, mod_name, self.whitelist
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):

View File

@ -103,6 +103,20 @@ class SysLogHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.SysLogHandler
'''
Syslog handler which properly handles exc_info on a per handler basis
'''
def handleError(self, record):
'''
Override the default error handling mechanism for py3
Deal with syslog os errors when the log file does not exist
'''
handled = False
if sys.stderr and sys.version_info >= (3, 5, 4):
t, v, tb = sys.exc_info()
if t.__name__ in 'FileNotFoundError':
sys.stderr.write('[WARNING ] The log_file does not exist. Logging not setup correctly or syslog service not started.\n')
handled = True
if not handled:
super(SysLogHandler, self).handleError(record)
class RotatingFileHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.RotatingFileHandler, NewStyleClassMixIn):

View File

@ -5,7 +5,12 @@
.. versionadded:: 0.17.0
This module provides a `Sentry`_ logging handler.
This module provides a `Sentry`_ logging handler. Sentry is an open source
error tracking platform that provides deep context about exceptions that
happen in production. Details about stack traces along with the context
variables available at the time of the exeption are easily browsable and
filterable from the online interface. For more details please see
`Sentry`_.
.. admonition:: Note
@ -41,6 +46,11 @@
- cpuarch
- ec2.tags.environment
.. admonition:: Note
The ``public_key`` and ``secret_key`` variables are not supported with
Sentry > 3.0. The `DSN`_ key should be used instead.
All the client configuration keys are supported, please see the
`Raven client documentation`_.

View File

@ -3370,6 +3370,8 @@ class Matcher(object):
'''
Runs the compound target check
'''
nodegroups = self.opts.get('nodegroups', {})
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
@ -3391,9 +3393,11 @@ class Matcher(object):
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
# we make a shallow copy in order to not affect the passed in arg
words = tgt[:]
for word in words:
while words:
word = words.pop(0)
target_info = salt.utils.minions.parse_target(word)
# Easy check first
@ -3415,10 +3419,12 @@ class Matcher(object):
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error(
'Detected nodegroup expansion failure of "%s"', word)
return False
# if we encounter a node group, just evaluate it in-place
decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
if decomposed:
words = decomposed + words
continue
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out

View File

@ -410,9 +410,9 @@ def list_(name,
item.sort()
if verbose:
ret = {'dirs': sorted(dirs),
'files': sorted(files),
'links': sorted(links)}
ret = {'dirs': sorted(salt.utils.data.decode_list(dirs)),
'files': sorted(salt.utils.data.decode_list(files)),
'links': sorted(salt.utils.data.decode_list(links))}
ret['top_level_dirs'] = [x for x in ret['dirs']
if x.count('/') == 1]
ret['top_level_files'] = [x for x in ret['files']

View File

@ -551,6 +551,10 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
r = conn.associate_vpc_with_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
log.debug('VPC Association already exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)

View File

@ -845,7 +845,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,
availability_zone=None, subnet_name=None, tags=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None, auto_assign_public_ipv4=False):
'''
Given a valid VPC ID or Name and a CIDR block, create a subnet for the VPC.
@ -873,10 +873,15 @@ def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,
subnet_object_dict = _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,
availability_zone=availability_zone,
cidr_block=cidr_block, region=region, key=key,
keyid=keyid, profile=profile)
# if auto_assign_public_ipv4 is requested set that to true using boto3
if auto_assign_public_ipv4:
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
conn3.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_object_dict['id'])
return subnet_object_dict
def delete_subnet(subnet_id=None, subnet_name=None, region=None, key=None,

View File

@ -36,13 +36,14 @@ import salt.utils.timed_subprocess
import salt.utils.user
import salt.utils.versions
import salt.utils.vt
import salt.utils.win_dacl
import salt.utils.win_reg
import salt.grains.extra
from salt.ext import six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
SaltInvocationError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range, zip
from salt.ext.six.moves import range, zip, map
# Only available on POSIX systems, nonfatal on windows
try:
@ -409,6 +410,19 @@ def _run(cmd,
return win_runas(cmd, runas, password, cwd)
if runas and salt.utils.platform.is_darwin():
# we need to insert the user simulation into the command itself and not
# just run it from the environment on macOS as that
# method doesn't work properly when run as root for certain commands.
if isinstance(cmd, (list, tuple)):
cmd = ' '.join(map(_cmd_quote, cmd))
cmd = 'su -l {0} -c "{1}"'.format(runas, cmd)
# set runas to None, because if you try to run `su -l` as well as
# simulate the environment macOS will prompt for the password of the
# user and will cause salt to hang.
runas = None
if runas:
# Save the original command before munging it
try:
@ -512,10 +526,18 @@ def _run(cmd,
for k, v in six.iteritems(env_runas)
)
env_runas.update(env)
# Fix platforms like Solaris that don't set a USER env var in the
# user's default environment as obtained above.
if env_runas.get('USER') != runas:
env_runas['USER'] = runas
# Fix some corner cases where shelling out to get the user's
# environment returns the wrong home directory.
runas_home = os.path.expanduser('~{0}'.format(runas))
if env_runas.get('HOME') != runas_home:
env_runas['HOME'] = runas_home
env = env_runas
except ValueError as exc:
log.exception('Error raised retrieving environment for user %s', runas)
@ -542,6 +564,7 @@ def _run(cmd,
env.setdefault('LC_TELEPHONE', 'C')
env.setdefault('LC_MEASUREMENT', 'C')
env.setdefault('LC_IDENTIFICATION', 'C')
env.setdefault('LANGUAGE', 'C')
else:
# On Windows set the codepage to US English.
if python_shell:
@ -2415,11 +2438,14 @@ def script(source,
# "env" is not supported; Use "saltenv".
kwargs.pop('__env__')
win_cwd = False
if salt.utils.platform.is_windows() and runas and cwd is None:
# Create a temp working directory
cwd = tempfile.mkdtemp(dir=__opts__['cachedir'])
__salt__['win_dacl.add_ace'](
cwd, 'File', runas, 'READ&EXECUTE', 'ALLOW',
'FOLDER&SUBFOLDERS&FILES')
win_cwd = True
salt.utils.win_dacl.set_permissions(obj_name=cwd,
principal=runas,
permissions='full_control')
path = salt.utils.files.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
@ -2433,10 +2459,10 @@ def script(source,
saltenv,
**kwargs)
if not fn_:
if salt.utils.platform.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
@ -2445,10 +2471,10 @@ def script(source,
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
if salt.utils.platform.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
return {'pid': 0,
'retcode': 1,
'stdout': '',
@ -2481,10 +2507,10 @@ def script(source,
password=password,
success_retcodes=success_retcodes,
**kwargs)
if salt.utils.platform.is_windows() and runas:
_cleanup_tempfile(path)
# If a temp working directory was created (Windows), let's remove that
if win_cwd:
_cleanup_tempfile(cwd)
else:
_cleanup_tempfile(path)
if hide_output:
ret['stdout'] = ret['stderr'] = ''

View File

@ -14,9 +14,9 @@ import fnmatch
# Import salt libs
import salt.minion
import salt.fileclient
import salt.utils.data
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.locales
import salt.utils.path
import salt.utils.templates
import salt.utils.url
@ -470,8 +470,8 @@ def cache_file(path, saltenv='base', source_hash=None):
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command.
'''
path = salt.utils.locales.sdecode(path)
saltenv = salt.utils.locales.sdecode(saltenv)
path = salt.utils.data.decode(path)
saltenv = salt.utils.data.decode(saltenv)
contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv)

View File

@ -14,6 +14,7 @@ import os
import random
# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.functools
import salt.utils.path
@ -21,7 +22,6 @@ import salt.utils.stringutils
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.locales import sdecode
TAG = '# Lines below here are managed by Salt, do not edit\n'
SALT_CRON_IDENTIFIER = 'SALT_CRON_IDENTIFIER'
@ -287,18 +287,22 @@ def raw_cron(user):
if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'):
cmd = 'crontab -l'
# Preserve line endings
lines = sdecode(__salt__['cmd.run_stdout'](cmd,
runas=user,
ignore_retcode=True,
rstrip=False,
python_shell=False)).splitlines(True)
lines = salt.utils.data.decode(
__salt__['cmd.run_stdout'](cmd,
runas=user,
ignore_retcode=True,
rstrip=False,
python_shell=False)
).splitlines(True)
else:
cmd = 'crontab -u {0} -l'.format(user)
# Preserve line endings
lines = sdecode(__salt__['cmd.run_stdout'](cmd,
ignore_retcode=True,
rstrip=False,
python_shell=False)).splitlines(True)
lines = salt.utils.data.decode(
__salt__['cmd.run_stdout'](cmd,
ignore_retcode=True,
rstrip=False,
python_shell=False)
).splitlines(True)
if len(lines) != 0 and lines[0].startswith('# DO NOT EDIT THIS FILE - edit the master and reinstall.'):
del lines[0:3]

View File

@ -2082,6 +2082,16 @@ def port(name, private_port=None):
name
Container name or ID
.. versionchanged:: Fluorine
This value can now be a pattern expression (using the
pattern-matching characters defined in fnmatch_). If a pattern
expression is used, this function will return a dictionary mapping
container names which match the pattern to the mappings for those
containers. When no pattern expression is used, a dictionary of the
mappings for the specified container name will be returned.
.. _fnmatch: https://docs.python.org/2/library/fnmatch.html
private_port : None
If specified, get information for that specific port. Can be specified
either as a port number (i.e. ``5000``), or as a port number plus the
@ -2104,12 +2114,10 @@ def port(name, private_port=None):
salt myminion docker.port mycontainer 5000
salt myminion docker.port mycontainer 5000/udp
'''
# docker.client.Client.port() doesn't do what we need, so just inspect the
# container and get the information from there. It's what they're already
# doing (poorly) anyway.
mappings = inspect_container(name).get('NetworkSettings', {}).get('Ports', {})
if not mappings:
return {}
pattern_used = bool(re.search(r'[*?\[]', name))
names = fnmatch.filter(list_containers(all=True), name) \
if pattern_used \
else [name]
if private_port is None:
pattern = '*'
@ -2132,7 +2140,17 @@ def port(name, private_port=None):
except AttributeError:
raise SaltInvocationError(err)
return dict((x, mappings[x]) for x in fnmatch.filter(mappings, pattern))
ret = {}
for c_name in names:
# docker.client.Client.port() doesn't do what we need, so just inspect
# the container and get the information from there. It's what they're
# already doing (poorly) anyway.
mappings = inspect_container(c_name).get(
'NetworkSettings', {}).get('Ports', {})
ret[c_name] = dict((x, mappings[x])
for x in fnmatch.filter(mappings, pattern))
return ret.get(name, {}) if not pattern_used else ret
def ps_(filters=None, **kwargs):
@ -3210,6 +3228,7 @@ def run_container(image,
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True

View File

@ -48,13 +48,13 @@ except ImportError:
# Import salt libs
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
@ -5133,7 +5133,7 @@ def manage_file(name,
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
@ -5331,12 +5331,12 @@ def manage_file(name,
if ret['changes']:
ret['comment'] = 'File {0} updated'.format(
salt.utils.locales.sdecode(name)
salt.utils.data.decode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File {0} is in the correct state'.format(
salt.utils.locales.sdecode(name)
salt.utils.data.decode(name)
)
if sfn:
__clean_tmp(sfn)

View File

@ -9,10 +9,12 @@ Glance module for interacting with OpenStack Glance
Example configuration
.. code-block:: yaml
glance:
cloud: default
.. code-block:: yaml
glance:
auth:
username: admin

View File

@ -9,10 +9,12 @@ Keystone module for interacting with OpenStack Keystone
Example configuration
.. code-block:: yaml
keystone:
cloud: default
.. code-block:: yaml
keystone:
auth:
username: admin

View File

@ -867,7 +867,7 @@ def _network_conf(conf_tuples=None, **kwargs):
# on old versions of lxc, still support the gateway auto mode
# if we didn't explicitly say no to
# (lxc.network.ipv4.gateway: auto)
if _LooseVersion(version()) <= '1.0.7' and \
if _LooseVersion(version()) <= _LooseVersion('1.0.7') and \
True not in ['lxc.network.ipv4.gateway' in a for a in ret] and \
True in ['lxc.network.ipv4' in a for a in ret]:
ret.append({'lxc.network.ipv4.gateway': 'auto'})
@ -2077,7 +2077,7 @@ def clone(name,
if backing in ('dir', 'overlayfs', 'btrfs'):
size = None
# LXC commands and options changed in 2.0 - CF issue #34086 for details
if version() >= _LooseVersion('2.0'):
if _LooseVersion(version()) >= _LooseVersion('2.0'):
# https://linuxcontainers.org/lxc/manpages//man1/lxc-copy.1.html
cmd = 'lxc-copy'
cmd += ' {0} -n {1} -N {2}'.format(snapshot, orig, name)

View File

@ -2,6 +2,24 @@
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "Fluorine" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of version "Fluorine", if a service is located in a ``LaunchAgent`` path
and a ``runas`` user is NOT specified the current console user will be used
to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
@ -14,7 +32,6 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
@ -62,7 +79,7 @@ def _get_service(name):
:return: The service information for the service, otherwise an Error
:rtype: dict
'''
services = salt.utils.mac_utils.available_services()
services = __utils__['mac_utils.available_services']()
name = name.lower()
if name in services:
@ -113,6 +130,68 @@ def _always_running_service(name):
return False
def _get_domain_target(name, service_target=False):
    '''
    Return the launchctl domain/service target and plist path for a service.

    This is used to determine whether a service should be loaded in a user
    space or system space.

    :param str name: Service label, file name, or full path

    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False

    :return: Tuple of the domain/service target and the path to the service.
    :rtype: tuple

    .. versionadded:: Fluorine
    '''
    svc_info = _get_service(name)
    plist_path = svc_info['file_path']

    # Services under a LaunchAgents directory run in the console user's
    # GUI session instead of the system domain.
    if 'LaunchAgents' in plist_path:
        uid = __utils__['mac_utils.console_user']()
        target = 'gui/{}'.format(uid)
    else:
        target = 'system'

    # Expand to a full <service-target> when the caller needs one.
    if service_target is True:
        target = '{}/{}'.format(target, svc_info['plist']['Label'])

    return (target, plist_path)
def _launch_agent(name):
    '''
    Check whether the provided service is a LaunchAgent.

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.
    :rtype: bool

    .. versionadded:: Fluorine
    '''
    # A service whose plist lives under a LaunchAgents directory is a
    # LaunchAgent; everything else is treated as a LaunchDaemon.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
'''
Show properties of a launchctl service
@ -158,7 +237,7 @@ def launchctl(sub_cmd, *args, **kwargs):
salt '*' service.launchctl debug org.cups.cupsd
'''
return salt.utils.mac_utils.launchctl(sub_cmd, *args, **kwargs)
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
@ -185,17 +264,20 @@ def list_(name=None, runas=None):
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
output_loglevel='trace',
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
output_loglevel='trace',
runas=runas)
@ -216,12 +298,11 @@ def enable(name, runas=None):
salt '*' service.enable org.cups.cupsd
'''
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', 'system/{0}'.format(label), runas=runas)
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
@ -242,12 +323,11 @@ def disable(name, runas=None):
salt '*' service.disable org.cups.cupsd
'''
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', 'system/{0}'.format(label), runas=runas)
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
@ -271,12 +351,11 @@ def start(name, runas=None):
salt '*' service.start org.cups.cupsd
'''
# Get service information and file path
service = _get_service(name)
path = service['file_path']
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load the service: will raise an error if it fails
return launchctl('load', path, runas=runas)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
@ -301,12 +380,11 @@ def stop(name, runas=None):
salt '*' service.stop org.cups.cupsd
'''
# Get service information and file path
service = _get_service(name)
path = service['file_path']
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Disable the Launch Daemon: will raise an error if it fails
return launchctl('unload', path, runas=runas)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
@ -368,6 +446,9 @@ def status(name, sig=None, runas=None):
if not _always_running_service(name) and enabled(name):
return 'loaded'
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
@ -493,7 +574,7 @@ def get_all(runas=None):
enabled = get_enabled(runas=runas)
# Get list of all services
available = list(salt.utils.mac_utils.available_services().keys())
available = list(__utils__['mac_utils.available_services']().keys())
# Return composite list
return sorted(set(enabled + available))
@ -514,7 +595,6 @@ def get_enabled(runas=None):
.. code-block:: bash
salt '*' service.get_enabled
salt '*' service.get_enabled running=True
'''
# Collect list of enabled services
stdout = list_(runas=runas)

View File

@ -29,7 +29,6 @@ import salt.utils.decorators.path
import salt.utils.files
import salt.utils.stringutils
import salt.utils.user
from salt.utils.locales import sdecode as _sdecode
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext import six
@ -309,13 +308,11 @@ def chfullname(name, fullname):
salt '*' user.chfullname foo 'Foo Bar'
'''
if isinstance(fullname, string_types):
fullname = _sdecode(fullname)
fullname = salt.utils.data.decode(fullname)
pre_info = info(name)
if not pre_info:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
if isinstance(pre_info['fullname'], string_types):
pre_info['fullname'] = _sdecode(pre_info['fullname'])
pre_info['fullname'] = salt.utils.data.decode(pre_info['fullname'])
if fullname == pre_info['fullname']:
return True
_dscl(
@ -329,9 +326,7 @@ def chfullname(name, fullname):
# matches desired value
time.sleep(1)
current = info(name).get('fullname')
if isinstance(current, string_types):
current = _sdecode(current)
current = salt.utils.data.decode(info(name).get('fullname'))
return current == fullname

View File

@ -9,10 +9,12 @@ Neutron module for interacting with OpenStack Neutron
Example configuration
.. code-block:: yaml
neutron:
cloud: default
.. code-block:: yaml
neutron:
auth:
username: admin

View File

@ -22,6 +22,7 @@ import copy
import os
import re
import logging
import errno
# Import salt libs
import salt.utils.args
@ -55,14 +56,73 @@ log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pkg'
NILRT_MODULE_STATE_PATH = '/var/lib/salt/kernel_module_state'
def _update_nilrt_module_dep_info():
    '''
    Update modules.dep timestamp & checksum.

    NILRT systems determine whether to reboot after kernel module install/
    removals/etc by checking modules.dep which gets modified/touched by
    each kernel module on-target dkms-like compilation. This function
    updates the module.dep data after each opkg kernel module operation
    which needs a reboot as detected by the salt checkrestart module.
    '''
    shell = __salt__['cmd.shell']
    # Snapshot the current mtime and md5 of modules.dep so later
    # restartcheck runs can detect kernel-module changes.
    shell('stat -c %Y /lib/modules/$(uname -r)/modules.dep >{0}/modules.dep.timestamp'
          .format(NILRT_MODULE_STATE_PATH))
    shell('md5sum /lib/modules/$(uname -r)/modules.dep >{0}/modules.dep.md5sum'
          .format(NILRT_MODULE_STATE_PATH))
def _get_restartcheck_result(errors):
    '''
    Return restartcheck result and append errors (if any) to ``errors``.

    :param list errors: Accumulator list that collects any comment string
        reported by the restartcheck module.
    :return: The raw result of ``restartcheck.restartcheck``.
    '''
    result = __salt__['restartcheck.restartcheck'](verbose=False)
    # A dict carrying a 'comment' key signals that restartcheck reported
    # a problem; surface it to the caller via the errors accumulator.
    if isinstance(result, dict) and 'comment' in result:
        errors.append(result['comment'])
    return result
def _process_restartcheck_result(rs_result):
    '''
    Check restartcheck output to see if system/service restarts were requested
    and take appropriate action.
    '''
    # Nothing to do when restartcheck reports a clean system.
    if 'No packages seem to need to be restarted' in rs_result:
        return
    for entry in rs_result:
        if 'System restart required' not in entry:
            # Treat the entry as an init script name and restart that
            # service if the script exists.
            init_script = os.path.join('/etc/init.d', entry)
            if os.path.exists(init_script):
                __salt__['cmd.run']([init_script, 'restart'])
        else:
            # A full reboot was requested: refresh the cached kernel-module
            # state and flag the pending reboot for the system module.
            _update_nilrt_module_dep_info()
            __salt__['system.set_reboot_required_witnessed']()
def __virtual__():
'''
Confirm this module is on a nilrt based system
'''
if __grains__.get('os_family', False) == 'NILinuxRT':
if __grains__.get('os_family') == 'NILinuxRT':
try:
os.makedirs(NILRT_MODULE_STATE_PATH)
except OSError as exc:
if exc.errno != errno.EEXIST:
return False, 'Error creating {0} (-{1}): {2}'.format(
NILRT_MODULE_STATE_PATH,
exc.errno,
exc.strerror)
if not (os.path.exists(os.path.join(NILRT_MODULE_STATE_PATH, 'modules.dep.timestamp')) and
os.path.exists(os.path.join(NILRT_MODULE_STATE_PATH, 'modules.dep.md5sum'))):
_update_nilrt_module_dep_info()
return __virtualname__
return (False, "Module opkg only works on nilrt based systems")
if os.path.isdir(OPKG_CONFDIR):
return __virtualname__
return False, "Module opkg only works on OpenEmbedded based systems"
def latest_version(*names, **kwargs):
@ -412,12 +472,16 @@ def install(name=None,
ret.update({pkgname: {'old': old.get(pkgname, ''),
'new': new.get(pkgname, '')}})
rs_result = _get_restartcheck_result(errors)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
_process_restartcheck_result(rs_result)
return ret
@ -475,12 +539,16 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
rs_result = _get_restartcheck_result(errors)
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
_process_restartcheck_result(rs_result)
return ret
@ -536,6 +604,8 @@ def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument
'comment': '',
}
errors = []
if salt.utils.data.is_true(refresh):
refresh_db()
@ -550,11 +620,18 @@ def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
errors.append(result)
rs_result = _get_restartcheck_result(errors)
if errors:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
info={'errors': errors, 'changes': ret}
)
_process_restartcheck_result(rs_result)
return ret

View File

@ -26,7 +26,7 @@ import logging
import shlex
# Import salt libs
import salt.utils.locales
import salt.utils.data
import salt.utils.path
import salt.utils.yaml
from salt.exceptions import SaltInvocationError
@ -237,7 +237,7 @@ def clone(name, new_name, linked=False, template=False, runas=None):
salt '*' parallels.clone macvm macvm_new runas=macdev
salt '*' parallels.clone macvm macvm_templ template=True runas=macdev
'''
args = [salt.utils.locales.sdecode(name), '--name', salt.utils.locales.sdecode(new_name)]
args = [salt.utils.data.decode(name), '--name', salt.utils.data.decode(new_name)]
if linked:
args.append('--linked')
if template:
@ -263,7 +263,7 @@ def delete(name, runas=None):
salt '*' parallels.exec macvm 'find /etc/paths.d' runas=macdev
'''
return prlctl('delete', salt.utils.locales.sdecode(name), runas=runas)
return prlctl('delete', salt.utils.data.decode(name), runas=runas)
def exists(name, runas=None):
@ -307,7 +307,7 @@ def start(name, runas=None):
salt '*' parallels.start macvm runas=macdev
'''
return prlctl('start', salt.utils.locales.sdecode(name), runas=runas)
return prlctl('start', salt.utils.data.decode(name), runas=runas)
def stop(name, kill=False, runas=None):
@ -331,7 +331,7 @@ def stop(name, kill=False, runas=None):
salt '*' parallels.stop macvm kill=True runas=macdev
'''
# Construct argument list
args = [salt.utils.locales.sdecode(name)]
args = [salt.utils.data.decode(name)]
if kill:
args.append('--kill')
@ -356,7 +356,7 @@ def restart(name, runas=None):
salt '*' parallels.restart macvm runas=macdev
'''
return prlctl('restart', salt.utils.locales.sdecode(name), runas=runas)
return prlctl('restart', salt.utils.data.decode(name), runas=runas)
def reset(name, runas=None):
@ -375,7 +375,7 @@ def reset(name, runas=None):
salt '*' parallels.reset macvm runas=macdev
'''
return prlctl('reset', salt.utils.locales.sdecode(name), runas=runas)
return prlctl('reset', salt.utils.data.decode(name), runas=runas)
def status(name, runas=None):
@ -394,7 +394,7 @@ def status(name, runas=None):
salt '*' parallels.status macvm runas=macdev
'''
return prlctl('status', salt.utils.locales.sdecode(name), runas=runas)
return prlctl('status', salt.utils.data.decode(name), runas=runas)
def exec_(name, command, runas=None):
@ -417,7 +417,7 @@ def exec_(name, command, runas=None):
salt '*' parallels.exec macvm 'find /etc/paths.d' runas=macdev
'''
# Construct argument list
args = [salt.utils.locales.sdecode(name)]
args = [salt.utils.data.decode(name)]
args.extend(_normalize_args(command))
# Execute command and return output
@ -459,10 +459,10 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev
'''
# Validate VM name and snapshot ID
name = salt.utils.locales.sdecode(name)
name = salt.utils.data.decode(name)
if not re.match(GUID_REGEX, snap_id):
raise SaltInvocationError(
'Snapshot ID "{0}" is not a GUID'.format(salt.utils.locales.sdecode(snap_id))
'Snapshot ID "{0}" is not a GUID'.format(salt.utils.data.decode(snap_id))
)
# Get the snapshot information of the snapshot having the requested ID
@ -502,7 +502,7 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
'Could not find a snapshot name for snapshot ID "{0}" of VM '
'"{1}"'.format(snap_id, name)
)
return salt.utils.locales.sdecode(snap_name)
return salt.utils.data.decode(snap_name)
def snapshot_name_to_id(name, snap_name, strict=False, runas=None):
@ -530,8 +530,8 @@ def snapshot_name_to_id(name, snap_name, strict=False, runas=None):
salt '*' parallels.snapshot_id_to_name macvm original runas=macdev
'''
# Validate VM and snapshot names
name = salt.utils.locales.sdecode(name)
snap_name = salt.utils.locales.sdecode(snap_name)
name = salt.utils.data.decode(name)
snap_name = salt.utils.data.decode(snap_name)
# Get a multiline string containing all the snapshot GUIDs
info = prlctl('snapshot-list', name, runas=runas)
@ -579,7 +579,7 @@ def _validate_snap_name(name, snap_name, strict=True, runas=None):
:param str runas:
The user that the prlctl command will be run as
'''
snap_name = salt.utils.locales.sdecode(snap_name)
snap_name = salt.utils.data.decode(snap_name)
# Try to convert snapshot name to an ID without {}
if re.match(GUID_REGEX, snap_name):
@ -619,7 +619,7 @@ def list_snapshots(name, snap_name=None, tree=False, names=False, runas=None):
salt '*' parallels.list_snapshots macvm names=True runas=macdev
'''
# Validate VM and snapshot names
name = salt.utils.locales.sdecode(name)
name = salt.utils.data.decode(name)
if snap_name:
snap_name = _validate_snap_name(name, snap_name, runas=runas)
@ -642,7 +642,7 @@ def list_snapshots(name, snap_name=None, tree=False, names=False, runas=None):
ret = '{0:<38} {1}\n'.format('Snapshot ID', 'Snapshot Name')
for snap_id in snap_ids:
snap_name = snapshot_id_to_name(name, snap_id, runas=runas)
ret += ('{{{0}}} {1}\n'.format(snap_id, salt.utils.locales.sdecode(snap_name)))
ret += ('{{{0}}} {1}\n'.format(snap_id, salt.utils.data.decode(snap_name)))
return ret
# Return information directly from parallels desktop
@ -674,9 +674,9 @@ def snapshot(name, snap_name=None, desc=None, runas=None):
salt '*' parallels.create_snapshot macvm snap_name=macvm-updates desc='clean install with updates' runas=macdev
'''
# Validate VM and snapshot names
name = salt.utils.locales.sdecode(name)
name = salt.utils.data.decode(name)
if snap_name:
snap_name = salt.utils.locales.sdecode(snap_name)
snap_name = salt.utils.data.decode(snap_name)
# Construct argument list
args = [name]
@ -723,7 +723,7 @@ def delete_snapshot(name, snap_name, runas=None, all=False):
strict = not all
# Validate VM and snapshot names
name = salt.utils.locales.sdecode(name)
name = salt.utils.data.decode(name)
snap_ids = _validate_snap_name(name, snap_name, strict=strict, runas=runas)
if isinstance(snap_ids, six.string_types):
snap_ids = [snap_ids]
@ -766,7 +766,7 @@ def revert_snapshot(name, snap_name, runas=None):
salt '*' parallels.revert_snapshot macvm base-with-updates runas=macdev
'''
# Validate VM and snapshot names
name = salt.utils.locales.sdecode(name)
name = salt.utils.data.decode(name)
snap_name = _validate_snap_name(name, snap_name, runas=runas)
# Construct argument list

View File

@ -119,40 +119,82 @@ def __virtual__():
return 'pip'
def _clear_context(bin_env=None):
    '''
    Remove the cached pip version.

    :param bin_env: Optional pip binary / virtualenv path; when given, the
        cache entry specific to that environment is cleared instead of the
        default one.
    '''
    # Per-environment versions are cached under 'pip.version.<bin_env>'.
    key = 'pip.version'
    if bin_env is not None:
        key = '{0}.{1}'.format(key, bin_env)
    __context__.pop(key, None)
def _get_pip_bin(bin_env):
'''
Locate the pip binary, either from `bin_env` as a virtualenv, as the
executable itself, or from searching conventional filesystem locations
'''
if not bin_env:
which_result = __salt__['cmd.which_bin'](
['pip{0}.{1}'.format(*sys.version_info[:2]),
'pip{0}'.format(sys.version_info[0]),
'pip', 'pip-python']
)
if salt.utils.platform.is_windows() and six.PY2 \
and isinstance(which_result, str):
which_result.encode('string-escape')
if which_result is None:
raise CommandNotFoundError('Could not find a `pip` binary')
return which_result
logger.debug('pip: Using pip from currently-running Python')
return [os.path.normpath(sys.executable), '-m', 'pip']
# try to get pip bin from virtualenv, bin_env
python_bin = 'python.exe' if salt.utils.platform.is_windows() else 'python'
def _search_paths(*basedirs):
ret = []
for path in basedirs:
ret.extend([
os.path.join(path, python_bin),
os.path.join(path, 'bin', python_bin),
os.path.join(path, 'Scripts', python_bin)
])
return ret
# try to get python bin from virtualenv (i.e. bin_env)
if os.path.isdir(bin_env):
if salt.utils.platform.is_windows():
pip_bin = os.path.join(bin_env, 'Scripts', 'pip.exe')
else:
pip_bin = os.path.join(bin_env, 'bin', 'pip')
if os.path.isfile(pip_bin):
return pip_bin
msg = 'Could not find a `pip` binary in virtualenv {0}'.format(bin_env)
raise CommandNotFoundError(msg)
# bin_env is the pip binary
for bin_path in _search_paths(bin_env):
if os.path.isfile(bin_path):
if os.access(bin_path, os.X_OK):
logger.debug('pip: Found python binary: %s', bin_path)
return [os.path.normpath(bin_path), '-m', 'pip']
else:
logger.debug(
'pip: Found python binary by name but it is not '
'executable: %s', bin_path
)
raise CommandNotFoundError(
'Could not find a pip binary in virtualenv {0}'.format(bin_env)
)
# bin_env is the python or pip binary
elif os.access(bin_env, os.X_OK):
if os.path.isfile(bin_env) or os.path.islink(bin_env):
return bin_env
if os.path.isfile(bin_env):
# If the python binary was passed, return it
if 'python' in os.path.basename(bin_env):
return [os.path.normpath(bin_env), '-m', 'pip']
# Try to find the python binary based on the location of pip in a
# virtual environment, should be relative
if 'pip' in os.path.basename(bin_env):
# Look in the same directory as the pip binary, and also its
# parent directories.
pip_dirname = os.path.dirname(bin_env)
pip_parent_dir = os.path.dirname(pip_dirname)
for bin_path in _search_paths(pip_dirname, pip_parent_dir):
if os.path.isfile(bin_path):
logger.debug('pip: Found python binary: %s', bin_path)
return [os.path.normpath(bin_path), '-m', 'pip']
# Couldn't find python, use the passed pip binary
# This has the limitation of being unable to update pip itself
return [os.path.normpath(bin_env)]
raise CommandExecutionError(
'Could not find a pip binary within {0}'.format(bin_env)
)
else:
raise CommandNotFoundError('Could not find a `pip` binary')
raise CommandNotFoundError(
'Access denied to {0}, could not find a pip binary'.format(bin_env)
)
def _get_cached_requirements(requirements, saltenv):
@ -271,15 +313,21 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
treq = tempfile.mkdtemp()
__salt__['file.chown'](treq, user, None)
# In Windows, just being owner of a file isn't enough. You also
# need permissions
if salt.utils.platform.is_windows():
__utils__['win_dacl.set_permissions'](
obj_name=treq,
principal=user,
permissions='read_execute')
current_directory = None
if not current_directory:
current_directory = os.path.abspath(os.curdir)
logger.info('_process_requirements from directory,' +
'%s -- requirement: %s', cwd, requirement
)
logger.info('_process_requirements from directory, '
'%s -- requirement: %s', cwd, requirement)
if cwd is None:
r = requirement
@ -384,7 +432,6 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
global_options=None,
install_options=None,
user=None,
no_chown=False,
cwd=None,
pre_releases=False,
cert=None,
@ -398,7 +445,8 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None):
no_binary=None,
**kwargs):
'''
Install packages with pip
@ -412,13 +460,10 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
Path to requirements
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
.. note::
If installing into a virtualenv, just use the path to the
virtualenv (e.g. ``/home/code/path/to/virtualenv/``)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
use_wheel
Prefer wheel archives (requires pip>=1.4)
@ -520,12 +565,8 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown a requirements
file
cwd
Current working directory to run pip from
Directory from which to run pip
pre_releases
Include pre-releases in the available versions
@ -584,9 +625,14 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'install']
if 'no_chown' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The no_chown argument has been deprecated and is no longer used. '
'Its functionality was removed in Boron.')
kwargs.pop('no_chown')
cmd = _get_pip_bin(bin_env)
cmd.append('install')
cleanup_requirements, error = _process_requirements(
requirements=requirements,
@ -599,10 +645,11 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if error:
return error
cur_version = version(bin_env)
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
@ -617,7 +664,6 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
@ -631,7 +677,6 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if no_binary:
min_version = '7.0.0'
cur_version = __salt__['pip.version'](bin_env)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
logger.error(
@ -706,8 +751,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if mirrors:
# https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216
pip_version = version(pip_bin)
if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'):
if salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2='7.0.0'):
raise CommandExecutionError(
'pip >= 7.0.0 does not support mirror argument:'
' use index_url and/or extra_index_url instead'
@ -735,7 +779,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if download_cache or cache_dir:
cmd.extend(['--cache-dir' if salt.utils.versions.compare(
ver1=version(bin_env), oper='>=', ver2='6.0'
ver1=cur_version, oper='>=', ver2='6.0'
) else '--download-cache', download_cache or cache_dir])
if source:
@ -772,7 +816,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
if pre_releases:
# Check the locally installed pip version
pip_version = version(pip_bin)
pip_version = cur_version
# From pip v1.4 the --pre flag is available
if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='1.4'):
@ -857,6 +901,9 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if kwargs:
cmd_kwargs.update(kwargs)
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
@ -874,6 +921,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
return __salt__['cmd.run_all'](cmd, python_shell=False, **cmd_kwargs)
finally:
_clear_context(bin_env)
for tempdir in [cr for cr in cleanup_requirements if cr is not None]:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
@ -886,46 +934,42 @@ def uninstall(pkgs=None,
proxy=None,
timeout=None,
user=None,
no_chown=False,
cwd=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages with pip
Uninstall packages individually or from a pip requirements file. Uninstall
packages globally or from a virtualenv.
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
path to requirements.
Path to requirements file
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:passwd@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
no_chown
When user is given, do not attempt to copy and chown
a requirements file (needed if the requirements file refers to other
files via relative paths, as the copy-and-chown procedure does not
account for such files)
cwd
Current working directory to run pip from
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
@ -937,11 +981,9 @@ def uninstall(pkgs=None,
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'uninstall', '-y']
cmd = _get_pip_bin(bin_env)
cmd.extend(['uninstall', '-y'])
cleanup_requirements, error = _process_requirements(
requirements=requirements, cmd=cmd, saltenv=saltenv, user=user,
@ -1001,6 +1043,7 @@ def uninstall(pkgs=None,
try:
return __salt__['cmd.run_all'](cmd, **cmd_kwargs)
finally:
_clear_context(bin_env)
for requirement in cleanup_requirements:
if requirement:
try:
@ -1013,48 +1056,42 @@ def freeze(bin_env=None,
user=None,
cwd=None,
use_vt=False,
env_vars=None):
env_vars=None,
**kwargs):
'''
Return a list of installed packages either globally or in the specified
virtualenv
bin_env
path to pip bin or path to virtualenv. If doing an uninstall from
the system python and want to use a specific pip bin (pip-2.7,
pip-2.6, etc..) just specify the pip bin you want.
If uninstalling from a virtualenv, just use the path to the virtualenv
(/home/code/path/to/virtualenv/)
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
user
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
.. note::
If the version of pip available is older than 8.0.3, the list will not
include the packages pip, wheel, setuptools, or distribute even if they
are installed.
include the packages ``pip``, ``wheel``, ``setuptools``, or
``distribute`` even if they are installed.
CLI Example:
.. code-block:: bash
salt '*' pip.freeze /home/code/path/to/virtualenv/
.. versionchanged:: 2016.11.2
The packages pip, wheel, setuptools, and distribute are included if the
installed pip is new enough.
salt '*' pip.freeze bin_env=/home/code/path/to/virtualenv
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'freeze']
cmd = _get_pip_bin(bin_env)
cmd.append('freeze')
# Include pip, setuptools, distribute, wheel
min_version = '8.0.3'
cur_version = version(bin_env)
if not salt.utils.versions.compare(ver1=cur_version, oper='>=',
ver2=min_version):
if salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version):
logger.warning(
'The version of pip installed is %s, which is older than %s. '
'The packages pip, wheel, setuptools, and distribute will not be '
@ -1064,14 +1101,16 @@ def freeze(bin_env=None,
cmd.append('--all')
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if kwargs:
cmd_kwargs.update(**kwargs)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
raise CommandExecutionError(result['stderr'])
if result['retcode']:
raise CommandExecutionError(result['stderr'], info=result)
return result['stdout'].splitlines()
@ -1080,7 +1119,8 @@ def list_(prefix=None,
bin_env=None,
user=None,
cwd=None,
env_vars=None):
env_vars=None,
**kwargs):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@ -1088,28 +1128,27 @@ def list_(prefix=None,
.. note::
If the version of pip available is older than 8.0.3, the packages
wheel, setuptools, and distribute will not be reported by this function
even if they are installed. Unlike
:py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
reports the version of pip which is installed.
``wheel``, ``setuptools``, and ``distribute`` will not be reported by
this function even if they are installed. Unlike :py:func:`pip.freeze
<salt.modules.pip.freeze>`, this function always reports the version of
pip which is installed.
CLI Example:
.. code-block:: bash
salt '*' pip.list salt
.. versionchanged:: 2016.11.2
The packages wheel, setuptools, and distribute are included if the
installed pip is new enough.
'''
packages = {}
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
for line in freeze(bin_env=bin_env,
user=user,
cwd=cwd,
env_vars=env_vars,
**kwargs):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines
@ -1119,7 +1158,15 @@ def list_(prefix=None,
continue
elif line.startswith('-e'):
line = line.split('-e ')[1]
version_, name = line.split('#egg=')
if '#egg=' in line:
version_, name = line.split('#egg=')
else:
if len(line.split('===')) >= 2:
name = line.split('===')[0]
version_ = line.split('===')[1]
elif len(line.split('==')) >= 2:
name = line.split('==')[0]
version_ = line.split('==')[1]
elif len(line.split('===')) >= 2:
name = line.split('===')[0]
version_ = line.split('===')[1]
@ -1154,14 +1201,27 @@ def version(bin_env=None):
salt '*' pip.version
'''
pip_bin = _get_pip_bin(bin_env)
contextkey = 'pip.version'
if bin_env is not None:
contextkey = '{0}.{1}'.format(contextkey, bin_env)
if contextkey in __context__:
return __context__[contextkey]
cmd = _get_pip_bin(bin_env)[:]
cmd.append('--version')
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode']:
raise CommandNotFoundError('Could not find a `pip` binary')
output = __salt__['cmd.run_stdout'](
'{0} --version'.format(pip_bin), python_shell=False)
try:
return re.match(r'^pip (\S+)', output).group(1)
pip_version = re.match(r'^pip (\S+)', ret['stdout']).group(1)
except AttributeError:
return None
pip_version = None
__context__[contextkey] = pip_version
return pip_version
def list_upgrades(bin_env=None,
@ -1176,15 +1236,15 @@ def list_upgrades(bin_env=None,
salt '*' pip.list_upgrades
'''
pip_bin = _get_pip_bin(bin_env)
cmd = _get_pip_bin(bin_env)
cmd.extend(['list', '--outdated'])
cmd = [pip_bin, 'list', '--outdated']
# If pip >= 9.0 use --format=json
pip_version = version(bin_env)
# Pip started supporting the ability to output json starting with 9.0.0
min_version = '9.0'
cur_version = version(pip_bin)
if salt.utils.versions.compare(ver1=cur_version, oper='>=',
ver2=min_version):
if salt.utils.versions.compare(ver1=pip_version,
oper='>=',
ver2=min_version):
cmd.append('--format=json')
cmd_kwargs = dict(cwd=cwd, runas=user)
@ -1192,54 +1252,78 @@ def list_upgrades(bin_env=None,
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
logger.error(result['stderr'])
raise CommandExecutionError(result['stderr'])
if result['retcode']:
raise CommandExecutionError(result['stderr'], info=result)
packages = {}
try:
json_results = salt.utils.json.loads(result['stdout'])
for json_result in json_results:
packages[json_result['name']] = json_result['latest_version']
except ValueError:
# Pip started supporting the ability to output json starting with 9.0.0
# Older versions will have to parse stdout
if salt.utils.versions.compare(ver1=pip_version, oper='<', ver2='9.0.0'):
# Pip versions < 8.0.0 had a different output format
# Sample data:
# pip (Current: 7.1.2 Latest: 10.0.1 [wheel])
# psutil (Current: 5.2.2 Latest: 5.4.5 [wheel])
# pyasn1 (Current: 0.2.3 Latest: 0.4.2 [wheel])
# pycparser (Current: 2.17 Latest: 2.18 [sdist])
if salt.utils.versions.compare(ver1=pip_version, oper='<', ver2='8.0.0'):
logger.debug('pip module: Old output format')
pat = re.compile(r'(\S*)\s+\(.*Latest:\s+(.*)\)')
# New output format for version 8.0.0+
# Sample data:
# pip (8.0.0) - Latest: 10.0.1 [wheel]
# psutil (5.2.2) - Latest: 5.4.5 [wheel]
# pyasn1 (0.2.3) - Latest: 0.4.2 [wheel]
# pycparser (2.17) - Latest: 2.18 [sdist]
else:
logger.debug('pip module: New output format')
pat = re.compile(r'(\S*)\s+\(.*\)\s+-\s+Latest:\s+(.*)')
for line in result['stdout'].splitlines():
match = re.search(r'(\S*)\s+.*Latest:\s+(.*)', line)
match = pat.search(line)
if match:
name, version_ = match.groups()
else:
logger.error('Can\'t parse line \'%s\'', line)
logger.error('Can\'t parse line \'{0}\''.format(line))
continue
packages[name] = version_
else:
logger.debug('pip module: JSON output format')
try:
pkgs = salt.utils.json.loads(result['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Invalid JSON', info=result)
for pkg in pkgs:
packages[pkg['name']] = '{0} [{1}]'.format(pkg['latest_version'],
pkg['latest_filetype'])
return packages
def is_installed(pkgname=None,
bin_env=None,
user=None,
cwd=None):
bin_env=None,
user=None,
cwd=None):
'''
.. versionadded:: 2018.3.0
Filter list of installed apps from ``freeze`` and return True or False if
``pkgname`` exists in the list of packages installed.
.. note::
If the version of pip available is older than 8.0.3, the packages
wheel, setuptools, and distribute will not be reported by this function
even if they are installed. Unlike
:py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
reports the version of pip which is installed.
even if they are installed. Unlike :py:func:`pip.freeze
<salt.modules.pip.freeze>`, this function always reports the version of
pip which is installed.
CLI Example:
.. code-block:: bash
salt '*' pip.is_installed salt
.. versionadded:: 2018.3.0
The packages wheel, setuptools, and distribute are included if the
installed pip is new enough.
'''
for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
if line.startswith('-f') or line.startswith('#'):
@ -1294,7 +1378,11 @@ def upgrade(bin_env=None,
'''
.. versionadded:: 2015.5.0
Upgrades outdated pip packages
Upgrades outdated pip packages.
.. note::
On Windows you can't update salt from pip using salt, so salt will be
skipped
Returns a dict containing the changes.
@ -1312,16 +1400,19 @@ def upgrade(bin_env=None,
'result': True,
'comment': '',
}
pip_bin = _get_pip_bin(bin_env)
cmd = _get_pip_bin(bin_env)
cmd.extend(['install', '-U'])
old = list_(bin_env=bin_env, user=user, cwd=cwd)
cmd = [pip_bin, 'install', '-U']
cmd_kwargs = dict(cwd=cwd, use_vt=use_vt)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
errors = False
for pkg in list_upgrades(bin_env=bin_env, user=user, cwd=cwd):
if pkg == 'salt':
if salt.utils.platform.is_windows():
continue
result = __salt__['cmd.run_all'](cmd + [pkg], **cmd_kwargs)
if result['retcode'] != 0:
errors = True
@ -1330,6 +1421,7 @@ def upgrade(bin_env=None,
if errors:
ret['result'] = False
_clear_context(bin_env)
new = list_(bin_env=bin_env, user=user, cwd=cwd)
ret['changes'] = salt.utils.data.compare_dicts(old, new)
@ -1354,9 +1446,10 @@ def list_all_versions(pkg,
The package to check
bin_env
Path to pip bin or path to virtualenv. If doing a system install,
and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
specify the pip bin you want.
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
include_alpha
Include alpha versions in the list
@ -1371,7 +1464,7 @@ def list_all_versions(pkg,
The user under which to run pip
cwd
Current working directory to run pip from
Directory from which to run pip
index_url
Base URL of Python Package Index
@ -1383,9 +1476,8 @@ def list_all_versions(pkg,
salt '*' pip.list_all_versions <package name>
'''
pip_bin = _get_pip_bin(bin_env)
cmd = [pip_bin, 'install', '{0}==versions'.format(pkg)]
cmd = _get_pip_bin(bin_env)
cmd.extend(['install', '{0}==versions'.format(pkg)])
if index_url:
if not salt.utils.url.validate(index_url, VALID_PROTOS):

View File

@ -995,7 +995,6 @@ def _role_cmd_args(name,
connlimit=None,
inherit=None,
createdb=None,
createuser=None,
createroles=None,
superuser=None,
groups=None,
@ -1003,8 +1002,6 @@ def _role_cmd_args(name,
rolepassword=None,
valid_until=None,
db_role=None):
if createuser is not None and superuser is None:
superuser = createuser
if inherit is None:
if typ_ in ['user', 'group']:
inherit = True
@ -1088,7 +1085,6 @@ def _role_create(name,
password=None,
createdb=None,
createroles=None,
createuser=None,
encrypted=None,
superuser=None,
login=None,
@ -1121,7 +1117,6 @@ def _role_create(name,
inherit=inherit,
createdb=createdb,
createroles=createroles,
createuser=createuser,
superuser=superuser,
groups=groups,
replication=replication,
@ -1143,7 +1138,6 @@ def user_create(username,
maintenance_db=None,
password=None,
createdb=None,
createuser=None,
createroles=None,
inherit=None,
login=None,
@ -1174,7 +1168,6 @@ def user_create(username,
maintenance_db=maintenance_db,
password=password,
createdb=createdb,
createuser=createuser,
createroles=createroles,
inherit=inherit,
login=login,
@ -1195,7 +1188,6 @@ def _role_update(name,
maintenance_db=None,
password=None,
createdb=None,
createuser=None,
typ_='role',
createroles=None,
inherit=None,
@ -1235,7 +1227,6 @@ def _role_update(name,
connlimit=connlimit,
inherit=inherit,
createdb=createdb,
createuser=createuser,
createroles=createroles,
superuser=superuser,
groups=groups,
@ -1259,7 +1250,6 @@ def user_update(username,
maintenance_db=None,
password=None,
createdb=None,
createuser=None,
createroles=None,
encrypted=None,
superuser=None,
@ -1293,7 +1283,6 @@ def user_update(username,
login=login,
connlimit=connlimit,
createdb=createdb,
createuser=createuser,
createroles=createroles,
encrypted=encrypted,
superuser=superuser,
@ -1740,7 +1729,6 @@ def group_create(groupname,
maintenance_db=None,
password=None,
createdb=None,
createuser=None,
createroles=None,
encrypted=None,
login=None,
@ -1771,7 +1759,6 @@ def group_create(groupname,
password=password,
createdb=createdb,
createroles=createroles,
createuser=createuser,
encrypted=encrypted,
login=login,
inherit=inherit,
@ -1790,7 +1777,6 @@ def group_update(groupname,
password=None,
createdb=None,
createroles=None,
createuser=None,
encrypted=None,
inherit=None,
login=None,
@ -1819,7 +1805,6 @@ def group_update(groupname,
createdb=createdb,
typ_='group',
createroles=createroles,
createuser=createuser,
encrypted=encrypted,
login=login,
inherit=inherit,

View File

@ -49,7 +49,6 @@ from salt.ext import six
# Import salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.locales
import salt.utils.user
from salt.exceptions import CommandExecutionError
@ -84,10 +83,10 @@ def _get_gecos(name):
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
return {'fullname': salt.utils.locales.sdecode(gecos_field[0]),
'roomnumber': salt.utils.locales.sdecode(gecos_field[1]),
'workphone': salt.utils.locales.sdecode(gecos_field[2]),
'homephone': salt.utils.locales.sdecode(gecos_field[3])}
return {'fullname': salt.utils.data.decode(gecos_field[0]),
'roomnumber': salt.utils.data.decode(gecos_field[1]),
'workphone': salt.utils.data.decode(gecos_field[2]),
'homephone': salt.utils.data.decode(gecos_field[3])}
def _build_gecos(gecos_dict):

View File

@ -28,6 +28,8 @@ import salt.utils.path
# Import 3rd party libs
from salt.ext import six
NILRT_FAMILY_NAME = 'NILinuxRT'
HAS_PSUTIL = False
try:
import psutil
@ -297,6 +299,13 @@ def _kernel_versions_redhat():
return kernel_versions
def _is_older_nilrt():
'''
If this is an older version of NILinuxRT, return True. Otherwise, return False.
'''
return os.path.exists('/usr/local/natinst/bin/nisafemodeversion')
def _kernel_versions_nilrt():
'''
Last installed kernel name, for Debian based systems.
@ -306,10 +315,20 @@ def _kernel_versions_nilrt():
as they are probably interpreted in output of `uname -a` command.
'''
kernel_versions = []
kernel = os.readlink('/boot/bzImage')
kernel = os.path.basename(kernel)
kernel = kernel.strip('bzImage-')
kernel_versions.append(kernel)
if __grains__.get('os_family') == NILRT_FAMILY_NAME and _is_older_nilrt():
# bzImage is copied in the rootfs without any package management or
# version info. We also can't depend on kernel headers like
# include/generated/uapi/linux/version.h being installed. Even if
# we fix this in newer versions of "old NILRT" we still need to be
# backwards compatible so it'll just get more complicated.
kpath = '/boot/runmode/bzImage'
kernel_strings = __salt__['cmd.run']('strings {0}'.format(kpath))
re_result = re.search(r'[0-9]+\.[0-9]+\.[0-9]+-rt.*(?=\s\()', kernel_strings)
if re_result is not None:
kernel_versions.append(re_result.group(0))
else:
kernel_versions.append(os.path.basename(os.readlink('/boot/bzImage')).strip('bzImage-'))
return kernel_versions
@ -326,6 +345,36 @@ def _check_timeout(start_time, timeout):
raise salt.exceptions.TimeoutError('Timeout expired.')
def _kernel_modules_changed_nilrt(kernelversion):
'''
Once a NILRT kernel module is inserted, it can't be rmmod so systems need
rebooting (some modules explicitly ask for reboots even on first install),
hence this functionality of determining if the module state got modified by
testing if depmod was run.
Returns:
- True/False depending if modules.dep got modified/touched
'''
depmodpath_base = '/lib/modules/{0}/modules.dep'.format(kernelversion)
depmodpath_timestamp = "/var/lib/salt/kernel_module_state/modules.dep.timestamp"
depmodpath_md5sum = "/var/lib/salt/kernel_module_state/modules.dep.md5sum"
# nothing can be detected without these dependencies
if (kernelversion is None or
not os.path.exists(depmodpath_timestamp) or
not os.path.exists(depmodpath_md5sum)):
return False
prev_timestamp = __salt__['file.read'](depmodpath_timestamp).rstrip()
# need timestamp in seconds so floor it using int()
cur_timestamp = str(int(os.path.getmtime(depmodpath_base)))
if prev_timestamp != cur_timestamp:
return True
return bool(__salt__['cmd.retcode']('md5sum -cs {0}'.format(depmodpath_md5sum), output_loglevel="quiet"))
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
'''
@ -339,9 +388,9 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
timeout: int, timeout in minute
Returns:
True if no packages for restart found.
False on failure.
String with checkrestart output if some package seems to need to be restarted.
Dict on error: { 'result': False, 'comment': '<reason>' }
String with checkrestart output if some package seems to need to be restarted or
if no packages need restarting.
.. versionadded:: 2015.8.3
@ -365,7 +414,7 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
systemd_folder = '/usr/lib/systemd/system/'
systemd = '/usr/bin/systemctl'
kernel_versions = _kernel_versions_redhat()
elif __grains__.get('os_family') == 'NILinuxRT':
elif __grains__.get('os_family') == NILRT_FAMILY_NAME:
cmd_pkg_query = 'opkg files '
systemd = ''
kernel_versions = _kernel_versions_nilrt()
@ -377,8 +426,14 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
for kernel in kernel_versions:
_check_timeout(start_time, timeout)
if kernel in kernel_current:
kernel_restart = False
break
if __grains__.get('os_family') == 'NILinuxRT':
# Check kernel modules for version changes
if not _kernel_modules_changed_nilrt(kernel):
kernel_restart = False
break
else:
kernel_restart = False
break
packages = {}
running_services = {}
@ -492,10 +547,10 @@ def restartcheck(ignorelist=None, blacklist=None, excludepid=None, **kwargs):
service = __salt__['service.available'](packages[package]['process_name'])
if service:
packages[package]['systemdservice'].append(packages[package]['process_name'])
else:
if os.path.exists('/etc/init.d/' + packages[package]['process_name']):
packages[package]['initscripts'].append(packages[package]['process_name'])
else:
packages[package]['systemdservice'].append(packages[package]['process_name'])
restartable = []
nonrestartable = []

View File

@ -453,7 +453,7 @@ def diff(package, path):
return res
def info(*packages, **attr):
def info(*packages, **kwargs):
'''
Return a detailed package(s) summary information.
If no packages specified, all packages will be returned.
@ -467,6 +467,9 @@ def info(*packages, **attr):
version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
:param all_versions:
Return information for all installed versions of the packages
:return:
CLI example:
@ -476,7 +479,9 @@ def info(*packages, **attr):
salt '*' lowpkg.info apache2 bash
salt '*' lowpkg.info apache2 bash attr=version
salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size
salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True
'''
all_versions = kwargs.get('all_versions', False)
# LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't
# available, then we can just use SIZE for older versions. See Issue #31366.
rpm_tags = __salt__['cmd.run_stdout'](
@ -516,7 +521,7 @@ def info(*packages, **attr):
"edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n",
}
attr = attr.get('attr', None) and attr['attr'].split(",") or None
attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None
query = list()
if attr:
for attr_k in attr:
@ -610,8 +615,13 @@ def info(*packages, **attr):
if pkg_name.startswith('gpg-pubkey'):
continue
if pkg_name not in ret:
ret[pkg_name] = pkg_data.copy()
del ret[pkg_name]['edition']
if all_versions:
ret[pkg_name] = [pkg_data.copy()]
else:
ret[pkg_name] = pkg_data.copy()
del ret[pkg_name]['edition']
elif all_versions:
ret[pkg_name].append(pkg_data.copy())
return ret

View File

@ -65,21 +65,22 @@ def __virtual__():
# The module will be exposed as `rpmbuild` on non-RPM based systems
return 'rpmbuild'
else:
return False, 'The rpmbuild module could not be loaded: requires python-gnupg, gpg, rpm, rpmbuild, mock and createrepo utilities to be installed'
return False, 'The rpmbuild module could not be loaded: requires python-gnupg, ' \
'gpg, rpm, rpmbuild, mock and createrepo utilities to be installed'
def _create_rpmmacros():
def _create_rpmmacros(runas='root'):
'''
Create the .rpmmacros file in user's home directory
'''
home = os.path.expanduser('~')
rpmbuilddir = os.path.join(home, 'rpmbuild')
if not os.path.isdir(rpmbuilddir):
os.makedirs(rpmbuilddir)
__salt__['file.makedirs_perms'](name=rpmbuilddir, user=runas, group='mock')
mockdir = os.path.join(home, 'mock')
if not os.path.isdir(mockdir):
os.makedirs(mockdir)
__salt__['file.makedirs_perms'](name=mockdir, user=runas, group='mock')
rpmmacros = os.path.join(home, '.rpmmacros')
with salt.utils.files.fopen(rpmmacros, 'w') as afile:
@ -92,7 +93,7 @@ def _create_rpmmacros():
afile.write('%_gpg_name packaging@saltstack.com\n')
def _mk_tree():
def _mk_tree(runas='root'):
'''
Create the rpm build tree
'''
@ -100,7 +101,7 @@ def _mk_tree():
paths = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
for path in paths:
full = os.path.join(basedir, path)
os.makedirs(full)
__salt__['file.makedirs_perms'](name=full, user=runas, group='mock')
return basedir
@ -116,7 +117,7 @@ def _get_spec(tree_base, spec, template, saltenv='base'):
saltenv=saltenv)
def _get_src(tree_base, source, saltenv='base'):
def _get_src(tree_base, source, saltenv='base', runas='root'):
'''
Get the named sources and place them into the tree_base
'''
@ -127,6 +128,7 @@ def _get_src(tree_base, source, saltenv='base'):
lsrc = __salt__['cp.get_url'](source, dest, saltenv=saltenv)
else:
shutil.copy(source, dest)
__salt__['file.chown'](path=dest, user=runas, group='mock')
def _get_distset(tgt):
@ -171,7 +173,7 @@ def _get_deps(deps, tree_base, saltenv='base'):
return deps_list
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base', runas='root'):
'''
Create a source rpm from the given spec file and sources
@ -179,33 +181,74 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
Optional argument; allows the templating engine to be skipped
if none is desired.
saltenv
The saltenv to use for files downloaded from the salt fileserver
runas
The user to run the build process as
.. versionadded:: 2018.3.2
.. note::
using SHA256 as digest and minimum level dist el6
'''
_create_rpmmacros()
tree_base = _mk_tree()
_create_rpmmacros(runas)
tree_base = _mk_tree(runas)
spec_path = _get_spec(tree_base, spec, template, saltenv)
__salt__['file.chown'](path=spec_path, user=runas, group='mock')
__salt__['file.chown'](path=tree_base, user=runas, group='mock')
if isinstance(sources, six.string_types):
sources = sources.split(',')
for src in sources:
_get_src(tree_base, src, saltenv)
_get_src(tree_base, src, saltenv, runas)
# make source rpms for dist el6 with SHA256, usable with mock on other dists
cmd = 'rpmbuild --verbose --define "_topdir {0}" -bs --define "dist .el6" {1}'.format(tree_base, spec_path)
__salt__['cmd.run'](cmd)
retrc = __salt__['cmd.retcode'](cmd, runas=runas)
if retrc != 0:
raise SaltInvocationError(
'Make source package for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
srpms = os.path.join(tree_base, 'SRPMS')
ret = []
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
__salt__['file.makedirs_perms'](name=dest_dir, user=runas, group='mock')
for fn_ in os.listdir(srpms):
full = os.path.join(srpms, fn_)
tgt = os.path.join(dest_dir, fn_)
@ -232,14 +275,16 @@ def build(runas,
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion
'''
ret = {}
try:
os.makedirs(dest_dir)
__salt__['file.chown'](path=dest_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -247,7 +292,7 @@ def build(runas,
srpm_build_dir = tempfile.mkdtemp()
try:
srpms = make_src_pkg(srpm_build_dir, spec, sources,
env, template, saltenv)
env, template, saltenv, runas)
except Exception as exc:
shutil.rmtree(srpm_build_dir)
log.error('Failed to make src package')
@ -259,17 +304,18 @@ def build(runas,
deps_dir = tempfile.mkdtemp()
deps_list = _get_deps(deps, deps_dir, saltenv)
retrc = 0
for srpm in srpms:
dbase = os.path.dirname(srpm)
results_dir = tempfile.mkdtemp()
try:
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, dbase))
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, results_dir))
__salt__['file.chown'](path=dbase, user=runas, group='mock')
__salt__['file.chown'](path=results_dir, user=runas, group='mock')
cmd = 'mock --root={0} --resultdir={1} --init'.format(tgt, results_dir)
__salt__['cmd.run'](cmd, runas=runas)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
if deps_list and not deps_list.isspace():
cmd = 'mock --root={0} --resultdir={1} --install {2} {3}'.format(tgt, results_dir, deps_list, noclean)
__salt__['cmd.run'](cmd, runas=runas)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
noclean += ' --no-clean'
cmd = 'mock --root={0} --resultdir={1} {2} {3} {4}'.format(
@ -278,17 +324,20 @@ def build(runas,
distset,
noclean,
srpm)
__salt__['cmd.run'](cmd, runas=runas)
cmd = ['rpm', '-qp', '--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmd, python_shell=False)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
cmdlist = [
'rpm',
'-qp',
'--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmdlist, python_shell=False)
for filename in os.listdir(results_dir):
full = os.path.join(results_dir, filename)
if filename.endswith('src.rpm'):
sdest = os.path.join(srpm_dir, filename)
try:
os.makedirs(srpm_dir)
__salt__['file.makedirs_perms'](name=srpm_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -301,7 +350,7 @@ def build(runas,
else:
log_file = os.path.join(log_dest, filename)
try:
os.makedirs(log_dest)
__salt__['file.makedirs_perms'](name=log_dest, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -311,6 +360,15 @@ def build(runas,
log.error('Error building from %s: %s', srpm, exc)
finally:
shutil.rmtree(results_dir)
if retrc != 0:
raise SaltInvocationError(
'Building packages for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
shutil.rmtree(deps_dir)
shutil.rmtree(srpm_build_dir)
return ret
@ -433,7 +491,7 @@ def make_repo(repodir,
phrase = ''
if keyid is not None:
## import_keys
# import_keys
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
@ -477,14 +535,21 @@ def make_repo(repodir,
# need to update rpm with public key
cmd = 'rpm --import {0}'.format(pkg_pub_key_file)
__salt__['cmd.run'](cmd, runas=runas, use_vt=True)
retrc = __salt__['cmd.retcode'](cmd, runas=runas, use_vt=True)
if retrc != 0:
raise SaltInvocationError(
'Failed to import public key from file {0} with return '
'error {1}, check logs for further details'.format(
pkg_pub_key_file,
retrc)
)
## sign_it_here
# sign_it_here
# interval of 0.125 is really too fast on some systems
interval = 0.5
for file in os.listdir(repodir):
if file.endswith('.rpm'):
abs_file = os.path.join(repodir, file)
for fileused in os.listdir(repodir):
if fileused.endswith('.rpm'):
abs_file = os.path.join(repodir, fileused)
number_retries = timeout / interval
times_looped = 0
error_msg = 'Failed to sign file {0}'.format(abs_file)

View File

@ -54,6 +54,42 @@ def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):
merge_lists)
def merge_all(lst, strategy='smart', renderer='yaml', merge_lists=False):
'''
.. versionadded:: Fluorine
Merge a list of objects into each other in order
:type lst: Iterable
:param lst: List of objects to be merged.
:type strategy: String
:param strategy: Merge strategy. See utils.dictupdate.
:type renderer: String
:param renderer:
Renderer type. Used to determine strategy when strategy is 'smart'.
:type merge_lists: Bool
:param merge_lists: Defines whether to merge embedded object lists.
CLI Example:
.. code-block:: shell
$ salt-call --output=txt slsutil.merge_all '[{foo: Foo}, {foo: Bar}]'
local: {u'foo': u'Bar'}
'''
ret = {}
for obj in lst:
ret = salt.utils.dictupdate.merge(
ret, obj, strategy, renderer, merge_lists
)
return ret
def renderer(path=None, string=None, default_renderer='jinja|yaml', **kwargs):
'''
Parse a string or file through Salt's renderer system

View File

@ -989,7 +989,7 @@ def diskusage(*args):
elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):
ifile = __salt__['cmd.run']('mount -p').splitlines()
else:
ifile = []
raise CommandExecutionError('status.diskusage not yet supported on this platform')
for line in ifile:
comps = line.split()

View File

@ -24,6 +24,7 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.utils.decorators import depends
__virtualname__ = 'system'
@ -608,3 +609,60 @@ def get_computer_name():
salt '*' network.get_hostname
'''
return __salt__['network.get_hostname']()
def _is_nilrt_family():
'''
Determine whether the minion is running on NI Linux RT
'''
return __grains__.get('os_family') == 'NILinuxRT'
NILRT_REBOOT_WITNESS_PATH = '/var/volatile/tmp/salt/reboot_witnessed'
@depends('_is_nilrt_family')
def set_reboot_required_witnessed():
'''
This function is used to remember that an event indicating that a reboot is
required was witnessed. This function writes to a temporary filesystem so
the event gets cleared upon reboot.
Returns:
bool: ``True`` if successful, otherwise ``False``
.. code-block:: bash
salt '*' system.set_reboot_required_witnessed
'''
errcode = -1
dir_path = os.path.dirname(NILRT_REBOOT_WITNESS_PATH)
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError as ex:
raise SaltInvocationError('Error creating {0} (-{1}): {2}'
.format(dir_path, ex.errno, ex.strerror))
rdict = __salt__['cmd.run_all']('touch {0}'.format(NILRT_REBOOT_WITNESS_PATH))
errcode = rdict['retcode']
return errcode == 0
@depends('_is_nilrt_family')
def get_reboot_required_witnessed():
'''
Determine if at any time during the current boot session the salt minion
witnessed an event indicating that a reboot is required.
Returns:
bool: ``True`` if a reboot request was witnessed, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' system.get_reboot_required_witnessed
'''
return os.path.exists(NILRT_REBOOT_WITNESS_PATH)

View File

@ -19,9 +19,9 @@ import logging
import copy
# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.decorators.path
import salt.utils.locales
import salt.utils.stringutils
import salt.utils.user
from salt.exceptions import CommandExecutionError
@ -60,17 +60,18 @@ def _get_gecos(name):
Retrieve GECOS field info and return it in dictionary form
'''
gecos_field = salt.utils.stringutils.to_unicode(
pwd.getpwnam(_quote_username(name)).pw_gecos).split(',', 3)
pwd.getpwnam(_quote_username(name)).pw_gecos).split(',', 4)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
while len(gecos_field) < 5:
gecos_field.append('')
return {'fullname': salt.utils.locales.sdecode(gecos_field[0]),
'roomnumber': salt.utils.locales.sdecode(gecos_field[1]),
'workphone': salt.utils.locales.sdecode(gecos_field[2]),
'homephone': salt.utils.locales.sdecode(gecos_field[3])}
return {'fullname': salt.utils.data.decode(gecos_field[0]),
'roomnumber': salt.utils.data.decode(gecos_field[1]),
'workphone': salt.utils.data.decode(gecos_field[2]),
'homephone': salt.utils.data.decode(gecos_field[3]),
'other': salt.utils.data.decode(gecos_field[4])}
def _build_gecos(gecos_dict):
@ -78,10 +79,11 @@ def _build_gecos(gecos_dict):
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''))
return '{0},{1},{2},{3},{4}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''),
gecos_dict.get('other', ''),).rstrip(',')
def _update_gecos(name, key, value, root=None):
@ -124,6 +126,7 @@ def add(name,
roomnumber='',
workphone='',
homephone='',
other='',
createhome=True,
loginclass=None,
root=None,
@ -237,6 +240,8 @@ def add(name,
chworkphone(name, workphone)
if homephone:
chhomephone(name, homephone)
if other:
chother(name, other)
return True
@ -507,6 +512,19 @@ def chhomephone(name, homephone):
return _update_gecos(name, 'homephone', homephone)
def chother(name, other):
    '''
    Change the user's other GECOS attribute

    CLI Example:

    .. code-block:: bash

        salt '*' user.chother foobar
    '''
    # 'other' is the fifth GECOS field; delegate to the shared updater.
    gecos_key = 'other'
    return _update_gecos(name, gecos_key, other)
def chloginclass(name, loginclass, root=None):
'''
Change the default login class of the user
@ -588,9 +606,9 @@ def _format_info(data):
Return user information in a pretty way
'''
# Put GECOS info into a list
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 3)
# Make sure our list has at least four elements
while len(gecos_field) < 4:
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 4)
# Make sure our list has at least five elements
while len(gecos_field) < 5:
gecos_field.append('')
return {'gid': data.pw_gid,
@ -603,7 +621,8 @@ def _format_info(data):
'fullname': gecos_field[0],
'roomnumber': gecos_field[1],
'workphone': gecos_field[2],
'homephone': gecos_field[3]}
'homephone': gecos_field[3],
'other': gecos_field[4]}
@salt.utils.decorators.path.which('id')

View File

@ -179,6 +179,30 @@ def write_secret(path, **kwargs):
return False
def write_raw(path, raw):
    '''
    Set raw data at the path in vault. The vault policy used must allow this.

    CLI Example:

    .. code-block:: bash

        salt '*' vault.write_raw "secret/my/secret" '{"user":"foo","password": "bar"}'
    '''
    log.debug('Writing vault secrets for %s at %s', __grains__['id'], path)
    try:
        endpoint = 'v1/{0}'.format(path)
        response = __utils__['vault.make_request']('POST', endpoint, json=raw)
        status = response.status_code
        if status == 200:
            # Vault echoed the written secret back; return its payload.
            return response.json()['data']
        if status != 204:
            # Anything other than 200/204 is an HTTP-level failure.
            response.raise_for_status()
        return True
    except Exception as err:
        # Best-effort API: log and signal failure rather than raising.
        log.error('Failed to write secret! %s: %s', type(err).__name__, err)
        return False
def delete_secret(path):
'''
Delete secret at the path in vault. The vault policy used must allow this.

View File

@ -57,7 +57,8 @@ def create(path,
upgrade=None,
user=None,
use_vt=False,
saltenv='base'):
saltenv='base',
**kwargs):
'''
Create a virtualenv
@ -103,6 +104,11 @@ def create(path,
user : None
Set ownership for the virtualenv
.. note::
On Windows you must also pass a ``password`` parameter. Additionally,
the user must have permissions to the location where the virtual
environment is being created
runas : None
Set ownership for the virtualenv
@ -162,7 +168,7 @@ def create(path,
# Unable to import?? Let's parse the version from the console
version_cmd = [venv_bin, '--version']
ret = __salt__['cmd.run_all'](
version_cmd, runas=user, python_shell=False
version_cmd, runas=user, python_shell=False, **kwargs
)
if ret['retcode'] > 0 or not ret['stdout'].strip():
raise CommandExecutionError(
@ -252,7 +258,7 @@ def create(path,
cmd.append(path)
# Let's create the virtualenv
ret = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
ret = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False, **kwargs)
if ret['retcode'] != 0:
# Something went wrong. Let's bail out now!
return ret

View File

@ -719,23 +719,24 @@ def set_lcm_config(config_mode=None,
'ApplyAndAutoCorrect'):
error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \
'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
SaltInvocationError(error)
return error
raise SaltInvocationError(error)
cmd += ' ConfigurationMode = "{0}";'.format(config_mode)
if config_mode_freq:
if not isinstance(config_mode_freq, int):
SaltInvocationError('config_mode_freq must be an integer')
return 'config_mode_freq must be an integer. Passed {0}'.\
format(config_mode_freq)
error = 'config_mode_freq must be an integer. Passed {0}'.format(
config_mode_freq
)
raise SaltInvocationError(error)
cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq)
if refresh_mode:
if refresh_mode not in ('Disabled', 'Push', 'Pull'):
SaltInvocationError('refresh_mode must be one of Disabled, Push, '
'or Pull')
raise SaltInvocationError(
'refresh_mode must be one of Disabled, Push, or Pull'
)
cmd += ' RefreshMode = "{0}";'.format(refresh_mode)
if refresh_freq:
if not isinstance(refresh_freq, int):
SaltInvocationError('refresh_freq must be an integer')
raise SaltInvocationError('refresh_freq must be an integer')
cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq)
if reboot_if_needed is not None:
if not isinstance(reboot_if_needed, bool):
@ -748,8 +749,10 @@ def set_lcm_config(config_mode=None,
if action_after_reboot:
if action_after_reboot not in ('ContinueConfiguration',
'StopConfiguration'):
SaltInvocationError('action_after_reboot must be one of '
'ContinueConfiguration or StopConfiguration')
raise SaltInvocationError(
'action_after_reboot must be one of '
'ContinueConfiguration or StopConfiguration'
)
cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot)
if certificate_id is not None:
if certificate_id == '':
@ -761,7 +764,7 @@ def set_lcm_config(config_mode=None,
cmd += ' ConfigurationID = "{0}";'.format(configuration_id)
if allow_module_overwrite is not None:
if not isinstance(allow_module_overwrite, bool):
SaltInvocationError('allow_module_overwrite must be a boolean value')
raise SaltInvocationError('allow_module_overwrite must be a boolean value')
if allow_module_overwrite:
allow_module_overwrite = '$true'
else:
@ -771,13 +774,14 @@ def set_lcm_config(config_mode=None,
if debug_mode is None:
debug_mode = 'None'
if debug_mode not in ('None', 'ForceModuleImport', 'All'):
SaltInvocationError('debug_mode must be one of None, '
'ForceModuleImport, ResourceScriptBreakAll, or '
'All')
raise SaltInvocationError(
'debug_mode must be one of None, ForceModuleImport, '
'ResourceScriptBreakAll, or All'
)
cmd += ' DebugMode = "{0}";'.format(debug_mode)
if status_retention_days:
if not isinstance(status_retention_days, int):
SaltInvocationError('status_retention_days must be an integer')
raise SaltInvocationError('status_retention_days must be an integer')
cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days)
cmd += ' }}};'
cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir)

View File

@ -4364,7 +4364,7 @@ def _checkAllAdmxPolicies(policy_class,
if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if DISABLED_LIST_XPATH(admx_policy):
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
@ -4374,14 +4374,14 @@ def _checkAllAdmxPolicies(policy_class,
ENABLED_VALUE_XPATH,
policy_filedata):
this_policy_setting = 'Enabled'
log.debug('%s is enabled', this_policyname)
log.debug('%s is enabled by detected ENABLED_VALUE_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
# some policies have a disabled list but not an enabled list
# added this to address those issues
if ENABLED_LIST_XPATH(admx_policy):
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
@ -4391,25 +4391,27 @@ def _checkAllAdmxPolicies(policy_class,
DISABLED_VALUE_XPATH,
policy_filedata):
this_policy_setting = 'Disabled'
log.debug('%s is disabled', this_policyname)
log.debug('%s is disabled by detected DISABLED_VALUE_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if DISABLED_LIST_XPATH(admx_policy) or DISABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Enabled'
log.debug('%s is enabled', this_policyname)
log.debug('%s is enabled by detected ENABLED_LIST_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if ENABLED_LIST_XPATH(admx_policy) or ENABLED_VALUE_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata):
this_policy_setting = 'Disabled'
log.debug('%s is disabled', this_policyname)
log.debug('%s is disabled by detected DISABLED_LIST_XPATH', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
@ -4424,7 +4426,7 @@ def _checkAllAdmxPolicies(policy_class,
'1')),
policy_filedata):
this_policy_setting = 'Enabled'
log.debug('%s is enabled', this_policyname)
log.debug('%s is enabled by no explicit enable/disable list or value', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
@ -4435,7 +4437,7 @@ def _checkAllAdmxPolicies(policy_class,
check_deleted=True)),
policy_filedata):
this_policy_setting = 'Disabled'
log.debug('%s is disabled', this_policyname)
log.debug('%s is disabled by no explicit enable/disable list or value', this_policyname)
if this_policynamespace not in policy_vals:
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting

View File

@ -331,6 +331,7 @@ def version(*names, **kwargs):
dict: The package name(s) with the installed versions.
.. code-block:: cfg
{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]}

View File

@ -96,6 +96,7 @@ TASK_TRIGGER_SESSION_STATE_CHANGE = 11
duration = {'Immediately': 'PT0M',
'Indefinitely': 'PT0M',
'Do not wait': 'PT0M',
'15 seconds': 'PT15S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
@ -1381,10 +1382,16 @@ def info(name, location='\\'):
trigger['end_date'] = end_date
trigger['end_time'] = end_time
trigger['enabled'] = triggerObj.Enabled
if triggerObj.RandomDelay == '':
trigger['random_delay'] = False
else:
trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay)
if hasattr(triggerObj, 'RandomDelay'):
if triggerObj.RandomDelay:
trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay)
else:
trigger['random_delay'] = False
if hasattr(triggerObj, 'Delay'):
if triggerObj.Delay:
trigger['delay'] = _reverse_lookup(duration, triggerObj.Delay)
else:
trigger['delay'] = False
triggers.append(trigger)
properties['settings'] = settings
@ -1623,6 +1630,7 @@ def add_trigger(name=None,
repeat_duration=None,
repeat_stop_at_duration_end=False,
execution_time_limit=None,
delay=None,
**kwargs):
r'''
@ -1687,9 +1695,9 @@ def add_trigger(name=None,
:param str random_delay: The delay time that is randomly added to the start
time of the trigger. Valid values are:
- 30 seconds
= 1 minute
- 1 minute
- 30 minutes
= 1 hour
- 1 hour
- 8 hours
- 1 day
@ -1725,6 +1733,16 @@ def add_trigger(name=None,
- 1 day
- 3 days (default)
:param str delay: The time the trigger waits after its activation to start the task.
Valid values are:
- 15 seconds
- 30 seconds
- 1 minute
- 30 minutes
- 1 hour
- 8 hours
- 1 day
**kwargs**
There are optional keyword arguments determined by the type of trigger
@ -1976,6 +1994,8 @@ def add_trigger(name=None,
# Settings
trigger.StartBoundary = start_boundary
# Advanced Settings
if delay:
trigger.Delay = _lookup_first(duration, delay)
if random_delay:
trigger.RandomDelay = _lookup_first(duration, random_delay)
if repeat_interval:

View File

@ -1009,31 +1009,39 @@ def list_downloaded():
return ret
def info_installed(*names):
def info_installed(*names, **kwargs):
'''
.. versionadded:: 2015.8.1
Return the information of the named package(s), installed on the system.
:param all_versions:
Include information for all versions of the packages installed on the minion.
CLI example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
'''
all_versions = kwargs.get('all_versions', False)
ret = dict()
for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names).items():
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
if key == 'source_rpm':
t_nfo['source'] = value
for pkg_name, pkgs_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
pkg_nfo = pkgs_nfo if all_versions else [pkgs_nfo]
for _nfo in pkg_nfo:
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in _nfo.items():
if key == 'source_rpm':
t_nfo['source'] = value
else:
t_nfo[key] = value
if not all_versions:
ret[pkg_name] = t_nfo
else:
t_nfo[key] = value
ret[pkg_name] = t_nfo
ret.setdefault(pkg_name, []).append(t_nfo)
return ret
@ -1957,7 +1965,24 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
targets = []
for target in pkg_params:
# Check if package version set to be removed is actually installed:
# old[target] contains a comma-separated list of installed versions
if target in old and not pkg_params[target]:
targets.append(target)
elif target in old and pkg_params[target] in old[target].split(','):
arch = ''
pkgname = target
try:
namepart, archpart = target.rsplit('.', 1)
except ValueError:
pass
else:
if archpart in salt.utils.pkg.rpm.ARCHES:
arch = '.' + archpart
pkgname = namepart
targets.append('{0}-{1}{2}'.format(pkgname, pkg_params[target], arch))
if not targets:
return {}

View File

@ -15,7 +15,7 @@ Support for Zabbix
Connection arguments from the minion config file can be overridden on the CLI by using arguments with
_connection_ prefix.
``_connection_`` prefix.
.. code-block:: bash
@ -24,33 +24,85 @@ Support for Zabbix
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
# Import Python libs
import logging
import socket
import os
# Import salt libs
# Import Salt libs
from salt.ext import six
from salt.exceptions import SaltException
import salt.utils.data
import salt.utils.files
import salt.utils.http
import salt.utils.json
import salt.utils.path
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=import-error,no-name-in-module
# pylint: disable=import-error,no-name-in-module,unused-import
from salt.ext.six.moves.urllib.error import HTTPError, URLError
# pylint: enable=import-error,no-name-in-module,unused-import
log = logging.getLogger(__name__)
# Fallback ports used when creating a host interface without an explicit
# port, indexed by the interface type value (see hostinterface_create).
# Per the Zabbix interface types (1 - agent; 2 - SNMP; 3 - IPMI; 4 - JMX)
# these are the agent, SNMP, IPMI and JMX defaults.
# NOTE(review): indexing with if_type 1-4 into a 4-element list skips
# index 0 and would raise IndexError for type 4 — TODO confirm alignment.
INTERFACE_DEFAULT_PORTS = [10050, 161, 623, 12345]

# Object types at the top level of the Zabbix object hierarchy;
# substitute_params does not apply the extra query params to these.
ZABBIX_TOP_LEVEL_OBJECTS = ('hostgroup', 'template', 'host', 'maintenance', 'action', 'drule', 'service', 'proxy',
                            'screen', 'usergroup', 'mediatype', 'script', 'valuemap')

# Zabbix object and its ID name mapping
ZABBIX_ID_MAPPER = {
    'action': 'actionid',
    'alert': 'alertid',
    'application': 'applicationid',
    'dhost': 'dhostid',
    'dservice': 'dserviceid',
    'dcheck': 'dcheckid',
    'drule': 'druleid',
    'event': 'eventid',
    'graph': 'graphid',
    'graphitem': 'gitemid',
    'graphprototype': 'graphid',
    'history': 'itemid',
    'host': 'hostid',
    'hostgroup': 'groupid',
    'hostinterface': 'interfaceid',
    'hostprototype': 'hostid',
    'iconmap': 'iconmapid',
    'image': 'imageid',
    'item': 'itemid',
    'itemprototype': 'itemid',
    'service': 'serviceid',
    'discoveryrule': 'itemid',
    'maintenance': 'maintenanceid',
    'map': 'sysmapid',
    'usermedia': 'mediaid',
    'mediatype': 'mediatypeid',
    'proxy': 'proxyid',
    'screen': 'screenid',
    'screenitem': 'screenitemid',
    'script': 'scriptid',
    'template': 'templateid',
    'templatescreen': 'screenid',
    'templatescreenitem': 'screenitemid',
    'trend': 'itemid',
    'trigger': 'triggerid',
    'triggerprototype': 'triggerid',
    'user': 'userid',
    'usergroup': 'usrgrpid',
    'usermacro': 'globalmacroid',
    'valuemap': 'valuemapid',
    'httptest': 'httptestid'
}

# Define the module's virtual name
__virtualname__ = 'zabbix'
def __virtual__():
'''
Only load the module if Zabbix server is installed
Only load the module if all modules are imported correctly.
'''
if salt.utils.path.which('zabbix_server'):
return __virtualname__
return (False, 'The zabbix execution module cannot be loaded: zabbix not installed.')
return __virtualname__
def _frontend_url():
@ -86,7 +138,9 @@ def _query(method, params, url, auth=None):
:param url: url of zabbix api
:param auth: auth token for zabbix api (only for methods with required authentication)
:return: Response from API with desired data in JSON format.
:return: Response from API with desired data in JSON format. In case of error returns more specific description.
.. versionchanged:: 2017.7
'''
unauthenticated_methods = ['user.login', 'apiinfo.version', ]
@ -99,17 +153,28 @@ def _query(method, params, url, auth=None):
data = salt.utils.json.dumps(data)
log.info('_QUERY input:\nurl: %s\ndata: %s', six.text_type(url), six.text_type(data))
try:
result = salt.utils.http.query(url,
method='POST',
data=data,
header_dict=header_dict,
decode_type='json',
decode=True,)
decode=True,
status=True,
headers=True)
log.info('_QUERY result: %s', six.text_type(result))
if 'error' in result:
raise SaltException('Zabbix API: Status: {0} ({1})'.format(result['status'], result['error']))
ret = result.get('dict', {})
if 'error' in ret:
raise SaltException('Zabbix API: {} ({})'.format(ret['error']['message'], ret['error']['data']))
return ret
except (URLError, socket.gaierror):
return {}
except ValueError as err:
raise SaltException('URL or HTTP headers are probably not correct! ({})'.format(err))
except socket.error as err:
raise SaltException('Check hostname in URL! ({})'.format(err))
def _login(**kwargs):
@ -171,8 +236,8 @@ def _login(**kwargs):
return connargs
else:
raise KeyError
except KeyError:
return False
except KeyError as err:
raise SaltException('URL is probably not correct! ({})'.format(err))
def _params_extend(params, _ignore_name=False, **kwargs):
@ -208,6 +273,160 @@ def _params_extend(params, _ignore_name=False, **kwargs):
return params
def get_zabbix_id_mapper():
    '''
    .. versionadded:: 2017.7

    Make ZABBIX_ID_MAPPER constant available to state modules.

    :return: ZABBIX_ID_MAPPER
    '''
    # Expose the module-level constant so state modules need not import it.
    mapper = ZABBIX_ID_MAPPER
    return mapper
def substitute_params(input_object, extend_params=None, filter_key='name', **kwargs):
    '''
    .. versionadded:: 2017.7

    Go through Zabbix object params specification and if needed get given object ID from Zabbix API and put it back
    as a value. Definition of the object is done via dict with keys "query_object" and "query_name".

    :param input_object: Zabbix object type specified in state file
    :param extend_params: Specify query with params
    :param filter_key: Custom filtering key (default: name)
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: Params structure with values converted to string for further comparison purposes

    :raises SaltException: if a "query_object" entry lacks the "query_name" key
    '''
    if extend_params is None:
        # Avoid a mutable default argument; build a fresh dict per call.
        extend_params = {}
    if isinstance(input_object, list):
        # Recurse into every list element.
        return [substitute_params(oitem, extend_params, filter_key, **kwargs) for oitem in input_object]
    elif isinstance(input_object, dict):
        if 'query_object' in input_object:
            query_params = {}
            # Top-level objects are queried without the extra query params.
            if input_object['query_object'] not in ZABBIX_TOP_LEVEL_OBJECTS:
                query_params.update(extend_params)
            try:
                query_params.update({'filter': {filter_key: input_object['query_name']}})
                return get_object_id_by_params(input_object['query_object'], query_params, **kwargs)
            except KeyError:
                # 'query_name' is missing, so the object ID cannot be resolved.
                # (Fixed typo in the original message: 'Qyerying' -> 'Querying'.)
                raise SaltException('Querying object ID requested '
                                    'but object name not provided: {0}'.format(input_object))
        else:
            # Plain dict: substitute recursively on every value.
            return {key: substitute_params(val, extend_params, filter_key, **kwargs)
                    for key, val in input_object.items()}
    else:
        # Zabbix response is always str, return everything in str as well
        return six.text_type(input_object)
# pylint: disable=too-many-return-statements,too-many-nested-blocks
def compare_params(defined, existing, return_old_value=False):
    '''
    .. versionadded:: 2017.7

    Compares Zabbix object definition against existing Zabbix object.

    :param defined: Zabbix object definition taken from sls file.
    :param existing: Existing Zabbix object taken from result of an API call.
    :param return_old_value: Default False. If True, returns dict("old"=old_val, "new"=new_val) for rollback purpose.

    :return: Params that are different from existing object. Result extended by object ID can be passed directly to
        Zabbix API update method.

    :raises SaltException: if ``defined`` and ``existing`` have mismatched types.
    '''
    # Comparison of data types
    if not isinstance(defined, type(existing)):
        raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. '
                            'Existing value: "{2}", defined value: "{3}").'.format(type(existing),
                                                                                   type(defined),
                                                                                   existing,
                                                                                   defined))

    # Comparison of values
    # Scalars are compared as text; when equal, no branch returns here and the
    # function falls through the list/dict checks below, implicitly returning
    # None (meaning "no difference").
    if not salt.utils.data.is_iter(defined):
        if six.text_type(defined) != six.text_type(existing) and return_old_value:
            return {'new': six.text_type(defined), 'old': six.text_type(existing)}
        elif six.text_type(defined) != six.text_type(existing) and not return_old_value:
            return six.text_type(defined)

    # Comparison of lists of values or lists of dicts
    if isinstance(defined, list):
        if len(defined) != len(existing):
            log.info('Different list length!')
            return {'new': defined, 'old': existing} if return_old_value else defined
        else:
            difflist = []
            # For each defined item, record whether it differs from EVERY
            # existing item; only then is it considered a difference.
            for ditem in defined:
                d_in_e = []
                for eitem in existing:
                    comp = compare_params(ditem, eitem, return_old_value)
                    if return_old_value:
                        # NOTE(review): when ditem equals eitem (scalar case),
                        # comp is None here and comp['new'] raises TypeError —
                        # verify lists of equal scalars with
                        # return_old_value=True are handled by callers.
                        d_in_e.append(comp['new'])
                    else:
                        d_in_e.append(comp)
                if all(d_in_e):
                    difflist.append(ditem)
            # If there is any difference in a list then whole defined list must be returned and provided for update
            if any(difflist) and return_old_value:
                return {'new': defined, 'old': existing}
            elif any(difflist) and not return_old_value:
                return defined

    # Comparison of dicts
    if isinstance(defined, dict):
        try:
            # defined must be a subset of existing to be compared
            if set(defined) <= set(existing):
                intersection = set(defined) & set(existing)
                diffdict = {'new': {}, 'old': {}} if return_old_value else {}
                for i in intersection:
                    comp = compare_params(defined[i], existing[i], return_old_value)
                    if return_old_value:
                        # Empty lists (comp == [] but isinstance list) still
                        # count as a difference and are kept in the result.
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict['new'].update({i: defined[i]})
                            diffdict['old'].update({i: existing[i]})
                    else:
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict.update({i: defined[i]})
                return diffdict
            # defined has keys existing lacks: treat the whole dict as changed.
            return {'new': defined, 'old': existing} if return_old_value else defined
        except TypeError:
            raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. '
                                'Existing value: "{2}", defined value: "{3}").'.format(type(existing),
                                                                                       type(defined),
                                                                                       existing,
                                                                                       defined))
def get_object_id_by_params(obj, params=None, **connection_args):
    '''
    .. versionadded:: 2017.7

    Get ID of single Zabbix object specified by its name.

    :param obj: Zabbix object type
    :param params: Parameters by which object is uniquely identified
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: object ID

    :raises SaltException: when the query does not match exactly one object
    '''
    if params is None:
        params = {}
    query_result = run_query(obj + '.get', params, **connection_args)
    # Exactly one match is required to identify the object unambiguously.
    if query_result and len(query_result) == 1:
        id_field = ZABBIX_ID_MAPPER[obj]
        return six.text_type(query_result[0][id_field])
    raise SaltException('Zabbix API: Object does not exist or bad Zabbix user permissions or other unexpected '
                        'result. Called method {0} with params {1}. '
                        'Result: {2}'.format(obj + '.get', params, query_result))
def apiinfo_version(**connection_args):
'''
Retrieve the version of the Zabbix API.
@ -1466,7 +1685,7 @@ def hostinterface_get(hostids, **connection_args):
return ret
def hostinterface_create(hostid, ip, dns='', main=1, type=1, useip=1, port=None, **connection_args):
def hostinterface_create(hostid, ip_, dns='', main=1, if_type=1, useip=1, port=None, **connection_args):
'''
Create new host interface
NOTE: This function accepts all standard host group interface: keyword argument names differ depending
@ -1475,11 +1694,11 @@ def hostinterface_create(hostid, ip, dns='', main=1, type=1, useip=1, port=None,
.. versionadded:: 2016.3.0
:param hostid: ID of the host the interface belongs to
:param ip: IP address used by the interface
:param ip_: IP address used by the interface
:param dns: DNS name used by the interface
:param main: whether the interface is used as default on the host (0 - not default, 1 - default)
:param port: port number used by the interface
:param type: Interface type (1 - agent; 2 - SNMP; 3 - IPMI; 4 - JMX)
:param if_type: Interface type (1 - agent; 2 - SNMP; 3 - IPMI; 4 - JMX)
:param useip: Whether the connection should be made via IP (0 - connect using host DNS name; 1 - connect using
host IP address for this host interface)
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
@ -1497,12 +1716,18 @@ def hostinterface_create(hostid, ip, dns='', main=1, type=1, useip=1, port=None,
ret = False
if not port:
port = INTERFACE_DEFAULT_PORTS[type]
port = INTERFACE_DEFAULT_PORTS[if_type]
try:
if conn_args:
method = 'hostinterface.create'
params = {"hostid": hostid, "ip": ip, "dns": dns, "main": main, "port": port, "type": type, "useip": useip}
params = {"hostid": hostid,
"ip": ip_,
"dns": dns,
"main": main,
"port": port,
"type": if_type,
"useip": useip}
params = _params_extend(params, **connection_args)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['interfaceids']
@ -1565,7 +1790,7 @@ def hostinterface_update(interfaceid, **connection_args):
CLI Example:
.. code-block:: bash
salt '*' zabbix.hostinterface_update 6 ip=0.0.0.2
salt '*' zabbix.hostinterface_update 6 ip_=0.0.0.2
'''
conn_args = _login(**connection_args)
ret = False
@ -1879,8 +2104,9 @@ def mediatype_get(name=None, mediatypeids=None, **connection_args):
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
all optional mediatype.get parameters: keyword argument names differ depending on your zabbix
version,nsee: https://www.zabbix.com/documentation/2.2/manual/api/reference/mediatype/get
all optional mediatype.get parameters: keyword argument names depends on your zabbix version, see:
https://www.zabbix.com/documentation/2.2/manual/api/reference/mediatype/get
Returns:
Array with mediatype details, False if no mediatype found or on failure.
@ -2039,8 +2265,9 @@ def template_get(name=None, host=None, templateids=None, **connection_args):
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
all optional template.get parameters: keyword argument names differ depending on your zabbix
version, see: https://www.zabbix.com/documentation/2.4/manual/api/reference/template/get
all optional template.get parameters: keyword argument names depends on your zabbix version, see:
https://www.zabbix.com/documentation/2.4/manual/api/reference/template/get
Returns:
Array with convenient template details, False if no template found or on failure.
@ -2085,8 +2312,9 @@ def run_query(method, params, **connection_args):
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
all optional template.get parameters: keyword argument names differ depending on your zabbix
version, see: https://www.zabbix.com/documentation/2.4/manual/api/reference/
all optional template.get parameters: keyword argument names depends on your zabbix version, see:
https://www.zabbix.com/documentation/2.4/manual/api/reference/
Returns:
Response from Zabbix API
@ -2100,10 +2328,86 @@ def run_query(method, params, **connection_args):
ret = False
try:
if conn_args:
method = method
params = params
params = _params_extend(params, **connection_args)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
if isinstance(ret['result'], bool):
return ret['result']
return ret['result'] if len(ret['result']) > 0 else False
else:
raise KeyError
except KeyError:
return ret
def configuration_import(config_file, rules=None, file_format='xml', **connection_args):
    '''
    .. versionadded:: 2017.7

    Import a Zabbix configuration (templates, hosts, screens, ...) from a file
    into the Zabbix server via the ``configuration.import`` API method.

    :param config_file: File with Zabbix config (a local path, or remote via ``salt://``)
    :param rules: Optional - import rules that differ from the defaults
        (the defaults match the Zabbix web UI); merged key-by-key over them
    :param file_format: Config file format (default: xml)
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: dict with ``name``, ``result`` (bool) and ``message`` keys.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.configuration_import salt://zabbix/config/zabbix_templates.xml \
        "{'screens': {'createMissing': True, 'updateExisting': True}}"
    '''
    # Defaults mirror the checkboxes in the Zabbix web UI import dialog.
    default_rules = {'applications': {'createMissing': True, 'updateExisting': False, 'deleteMissing': False},
                     'discoveryRules': {'createMissing': True, 'updateExisting': True, 'deleteMissing': False},
                     'graphs': {'createMissing': True, 'updateExisting': True, 'deleteMissing': False},
                     'groups': {'createMissing': True},
                     'hosts': {'createMissing': False, 'updateExisting': False},
                     'images': {'createMissing': False, 'updateExisting': False},
                     'items': {'createMissing': True, 'updateExisting': True, 'deleteMissing': False},
                     'maps': {'createMissing': False, 'updateExisting': False},
                     'screens': {'createMissing': False, 'updateExisting': False},
                     'templateLinkage': {'createMissing': True},
                     'templates': {'createMissing': True, 'updateExisting': True},
                     'templateScreens': {'createMissing': True, 'updateExisting': True, 'deleteMissing': False},
                     'triggers': {'createMissing': True, 'updateExisting': True, 'deleteMissing': False},
                     'valueMaps': {'createMissing': True, 'updateExisting': False}}

    # Overlay caller-supplied rules on the defaults; known rule names are
    # merged per-flag, unknown rule names are passed through verbatim.
    new_rules = dict(default_rules)
    for rule_name, overrides in (rules or {}).items():
        if rule_name in new_rules:
            new_rules[rule_name].update(overrides)
        else:
            new_rules[rule_name] = overrides

    # Stage remote (salt://) files locally; local paths are used in place.
    is_remote = 'salt://' in config_file
    if is_remote:
        staging_path = salt.utils.files.mkstemp()
        cfile = __salt__['cp.get_file'](config_file, staging_path)
        if not cfile or os.path.getsize(cfile) == 0:
            return {'name': config_file, 'result': False, 'message': 'Failed to fetch config file.'}
    else:
        cfile = config_file
        if not os.path.isfile(cfile):
            return {'name': config_file, 'result': False, 'message': 'Invalid file path.'}

    with salt.utils.files.fopen(cfile, mode='r') as config_fh:
        xml = config_fh.read()

    # The staged copy is only a temp file; clean it up after reading.
    if is_remote:
        salt.utils.files.safe_rm(cfile)

    params = {'format': file_format,
              'rules': new_rules,
              'source': xml}
    log.info('CONFIGURATION IMPORT: rules: %s', six.text_type(params['rules']))

    try:
        run_query('configuration.import', params, **connection_args)
        return {'name': config_file, 'result': True,
                'message': 'Zabbix API "configuration.import" method called successfully.'}
    except SaltException as exc:
        return {'name': config_file, 'result': False, 'message': six.text_type(exc)}

View File

@ -470,14 +470,13 @@ def get(zpool, prop=None, show_source=False, parsable=True):
'''
ret = OrderedDict()
value_properties = ['property', 'value', 'source']
value_properties = ['name', 'property', 'value', 'source']
## collect get output
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='get',
flags=['-H'],
opts={'-o': ','.join(value_properties)},
property_name=prop if prop else 'all',
target=zpool,
),
@ -503,6 +502,9 @@ def get(zpool, prop=None, show_source=False, parsable=True):
[x for x in line.strip().split('\t') if x not in ['']],
)))
# NOTE: older zfs does not have -o, fall back to manually stripping the name field
del prop_data['name']
# NOTE: normalize values
if parsable:
# NOTE: raw numbers and pythonic types
@ -634,6 +636,8 @@ def scrub(zpool, stop=False, pause=False):
.. note::
Pause is only available on recent versions of ZFS.
If both ``pause`` and ``stop`` are ``True``, then ``stop`` will
win.
@ -706,6 +710,9 @@ def create(zpool, *vdevs, **kwargs):
.. versionadded:: 2018.3.0
.. warning:
This is only available on illumos and Solaris
CLI Examples:
.. code-block:: bash

View File

@ -470,28 +470,37 @@ def info_installed(*names, **kwargs):
Valid attributes are:
ignore, report
:param all_versions:
Include information for all versions of the packages installed on the minion.
CLI example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
salt '*' pkg.info_installed <package1> attr=version,vendor all_versions=True
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=ignore
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=report
'''
all_versions = kwargs.get('all_versions', False)
ret = dict()
for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in six.iteritems(pkg_nfo):
if key == 'source_rpm':
t_nfo['source'] = value
for pkg_name, pkgs_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
pkg_nfo = pkgs_nfo if all_versions else [pkgs_nfo]
for _nfo in pkg_nfo:
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in six.iteritems(_nfo):
if key == 'source_rpm':
t_nfo['source'] = value
else:
t_nfo[key] = value
if not all_versions:
ret[pkg_name] = t_nfo
else:
t_nfo[key] = value
ret[pkg_name] = t_nfo
ret.setdefault(pkg_name, []).append(t_nfo)
return ret
@ -1494,7 +1503,14 @@ def _uninstall(name=None, pkgs=None):
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [target for target in pkg_params if target in old]
targets = []
for target in pkg_params:
# Check if package version set to be removed is actually installed:
# old[target] contains a comma-separated list of installed versions
if target in old and pkg_params[target] in old[target].split(','):
targets.append(target + "-" + pkg_params[target])
elif target in old and not pkg_params[target]:
targets.append(target)
if not targets:
return {}
@ -1517,6 +1533,32 @@ def _uninstall(name=None, pkgs=None):
return ret
def normalize_name(name):
    '''
    Strips the architecture from the specified package name, if necessary.

    Circumstances where this would be done include:

    * If the arch is 32 bit and the package name ends in a 32-bit arch.
    * If the arch matches the OS arch, or is ``noarch``.

    :param name: package name, possibly with a trailing ``.<arch>`` suffix.
    :return: the package name with the architecture suffix stripped when the
        suffix is the OS arch, ``noarch``, or a matching 32-bit arch;
        otherwise the name is returned unchanged.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.normalize_name zsh.x86_64
    '''
    # str.rsplit never raises ValueError here, so no try/except is needed
    # (the original wrapped this in a dead `except ValueError`).  When the
    # name has no '.', `arch` is the whole name and will not match a known
    # architecture below, so the name is returned untouched.
    arch = name.rsplit('.', 1)[-1]
    if arch not in salt.utils.pkg.rpm.ARCHES + ('noarch',):
        return name

    # Strip the suffix only when it is redundant: it equals the minion's
    # osarch, is 'noarch', or is the 32-bit counterpart of a 64-bit osarch.
    if arch in (__grains__['osarch'], 'noarch') \
            or salt.utils.pkg.rpm.check_32(arch, osarch=__grains__['osarch']):
        return name[:-(len(arch) + 1)]
    return name
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0

View File

@ -6,8 +6,10 @@ for managing outputters.
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import io
import os
import re
import sys
@ -168,7 +170,7 @@ def get_printout(out, opts=None, **kwargs):
'''
try:
fileno = sys.stdout.fileno()
except AttributeError:
except (AttributeError, io.UnsupportedOperation):
fileno = -1 # sys.stdout is StringIO or fake
return not os.isatty(fileno)

View File

@ -9,8 +9,8 @@ from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.output
from salt.utils.locales import sdecode
import salt.utils.color
import salt.utils.data
def output(data, **kwargs): # pylint: disable=unused-argument
@ -89,7 +89,7 @@ def output(data, **kwargs): # pylint: disable=unused-argument
for status in sorted(data):
ret += u'{0}\n'.format(trans[status])
for key in sorted(data[status]):
key = sdecode(key)
key = salt.utils.data.decode(key)
skey = salt.output.strip_esc_sequence(key) if strip_colors else key
if isinstance(data[status], list):
ret += u'{0}{1}{2}{3}\n'.format(

Some files were not shown because too many files have changed in this diff Show More