Merge branch 'develop' into small-fixes

Conflicts:
	salt/minion.py
	salt/modules/freebsdpkg.py
	salt/payload.py
Bruno Clermont 2012-12-05 14:25:28 +08:00
commit 51327d3f74
129 changed files with 5147 additions and 3523 deletions


@ -16,10 +16,6 @@
# The tcp port used by the publisher
#publish_port: 4505
# Refresh the publisher connections when sending out commands, this is a fix
# for zeromq losing some minion connections. Default: False
#pub_refresh: False
# The user to run the salt-master as. Salt will update all permissions to
# allow the specified user to run the master. If the modified files cause
# conflicts set verify_env to False.
@ -318,6 +314,21 @@
##### Logging settings #####
##########################################
# The location of the master log file
# This can be a path for the log file, or, this can be, since 0.10.6, a system
# logger address, for example:
# tcp://localhost:514/LOG_USER
# tcp://localhost/LOG_DAEMON
# udp://localhost:5145/LOG_KERN
# udp://localhost
# file:///dev/log
# file:///dev/log/LOG_SYSLOG
# file:///dev/log/LOG_DAEMON
#
# The above examples are self explanatory, but:
# <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
#
# Make sure you have a properly configured syslog or you won't get any warnings
#
#log_file: /var/log/salt/master
#key_logfile: /var/log/salt/key
#


@ -53,12 +53,6 @@
# cabinet: 13
# cab_u: 14-15
# If the connection to the server is interrupted, the minion will
# attempt to reconnect. sub_timeout allows you to control the rate
# of reconnection attempts (in seconds). To disable reconnects, set
# this value to 0.
#sub_timeout: 60
# Where cache data goes
#cachedir: /var/cache/salt/minion
@ -286,9 +280,25 @@
###### Logging settings #####
###########################################
# The location of the minion log file
# The location of the minion log file.
# This can be a path for the log file, or, this can be, since 0.10.6, a system
# logger address, for example:
# tcp://localhost:514/LOG_USER
# tcp://localhost/LOG_DAEMON
# udp://localhost:5145/LOG_KERN
# udp://localhost
# file:///dev/log
# file:///dev/log/LOG_SYSLOG
# file:///dev/log/LOG_DAEMON
#
# The above examples are self explanatory, but:
# <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
#
# Make sure you have a properly configured syslog or you won't get any warnings
#
#log_file: /var/log/salt/minion
#
#
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
# Default: 'warning'

debian/changelog vendored

@ -1,3 +1,20 @@
salt (0.10.5-1) unstable; urgency=low
* add bash_completion and ufw support files
* use shared package support files in pkg/
-- Sean Channel <pentabular@gmail.com> Mon, 3 Dec 2012 11:14:35 -0700
salt (0.10.5-1) unstable; urgency=low
* [f735ab9] Filter pyc files
* [62462dd] New upstream version 0.10.5 (Closes: #690481)
* [9726d8b] Install empty include dirs for salt-master and salt-minion.
* [8705c6e] Build-depend on msgpack-python
* [6832d36] Update config file templates
-- Ulrich Dangel <uli@debian.org> Mon, 19 Nov 2012 08:34:09 +0000
salt (0.10.5) unstable; urgency=low
* new release
@ -20,6 +37,17 @@ salt (0.10.4-2) unstable; urgency=low
-- Sean Channel <pentabular@gmail.com> Wed, 14 Nov 2012 10:56:16 -0700
salt (0.10.4-1) unstable; urgency=low
* [5431ef2] Imported Upstream version 0.10.4
* [bcd48a0] Remove patch 'add_hacking_rst', applied upstream.
* [3135d52] Fix salt-master restart (Patch by martin f. krafft)
(Closes: #692064)
* [15abbbb] Recommend lsb-release. With new upstream code this
provides lsb* grains. (Closes: #690700)
-- Christian Hofstaedtler <christian@hofstaedtler.name> Sat, 10 Nov 2012 17:57:52 +0100
salt (0.10.4-1precise1) precise; urgency=low
* New upstream version
@ -32,12 +60,76 @@ salt (0.10.3) precise; urgency=low
-- Tom Vaughan <thomas.david.vaughan@gmail.com> Sun, 30 Aug 2012 13:34:10 -0700
salt (0.10.2-1~experimental+1) experimental; urgency=low
* [cf57587] Import upstream version 0.10.2
* [1ff7a9f] Add patch to create HACKING.rst
-- Ulrich Dangel <uli@debian.org> Sat, 04 Aug 2012 02:57:52 +0200
salt (0.10.2) precise; urgency=low
* Non-maintainer upload.
* New upstream version
-- Dave Rawks <drawks@mint> Wed, 01 Aug 2012 13:34:10 -0700
-- Dave Rawks <drawks@pandora.org> Wed, 01 Aug 2012 13:34:10 -0700
salt (0.10.1-3) unstable; urgency=low
* [efbd4a8] Change uploaders email address for Ulrich Dangel
* [442ead1] Recommends dmidecode instead of Depend to support non-x86 systems.
-- Ulrich Dangel <uli@debian.org> Mon, 30 Jul 2012 12:40:53 +0200
salt (0.10.1-2) unstable; urgency=low
* [bda6011] Add dmidecode to depends for salt-minion. (Closes: #680410)
* [ad4786e] Depend on the same salt version
* [671c2c3] Depend on debhelper version fixing #577040
-- Ulrich Dangel <uli@debian.org> Mon, 09 Jul 2012 23:15:27 +0200
salt (0.10.1-1) unstable; urgency=low
[ Ulrich Dangel ]
* [f1d627c] Always recreate orig.tar.gz with git-buildpackage
[ Michael Prokop ]
* Merge new upstream release
* [ee1806a] Add python-augeas to suggests of salt-minion
-- Michael Prokop <mika@debian.org> Fri, 22 Jun 2012 18:56:02 +0200
salt (0.10.0-1) unstable; urgency=low
[ Ulrich Dangel ]
* Merge new upstream release
* [bd10385] Change debian/source/format to quilt
* [ba60137] Add ignore options
* [54e70de] Copy service files from rpm package
* [1d21548] Update install files
* [c2737c9] Update pyversions file to use 2.6
* [573b27a] Update salt-common.install to install complete python
package
* [9b739f5] Update debian/rules to use python support and update
build dependencies
* [bf51e1c] Provide pydists-overrides for msgpack-python
* [4bbd2bf] Add dependency to python-pkg-resources for
salt-{minion,master,syndic}
* [ad8f712] Update config files to latest version
[ Jeroen Dekkers ]
* [9ae1aa5] Unapply patches from source
* [933c1ee] Add debian/gbp.conf
* [be9529b] Add >= 1.0 to python-sphinx build-depend
-- Michael Prokop <mika@debian.org> Wed, 20 Jun 2012 22:39:40 +0200
salt (0.9.9-1) unstable; urgency=low
* Initial release. [Closes: #643789]
-- Michael Prokop <mika@debian.org> Thu, 14 Jun 2012 18:39:17 +0200
salt (0.9.9) precise; urgency=low

debian/control vendored

@ -2,7 +2,7 @@ Source: salt
Section: admin
Priority: optional
Maintainer: Sean Channel <pentabular@gmail.com>
Build-Depends: debhelper,
Build-Depends: debhelper (>= 7.0.50),
cython,
python | python-all | python-dev | python-all-dev,
python-crypto,
@ -30,7 +30,6 @@ Depends: ${python:Depends},
python-yaml,
python-zmq,
msgpack-python
Recommends: dmidecode, pciutils
Description: Shared libraries that salt requires for all packages
This package is a powerful remote execution manager that can be used
to administer servers in a fast and efficient way.
@ -79,6 +78,7 @@ Depends: ${misc:Depends},
python,
python-pkg-resources,
salt-common (= ${source:Version})
Recommends: dmidecode, pciutils
Description: This package represents the client package for salt
This package is a powerful remote execution manager that can be used
to administer servers in a fast and efficient way.
@ -125,6 +125,7 @@ Package: salt-doc
Architecture: all
Section: doc
Depends: ${misc:Depends}
Recommends: libjs-sphinxdoc
Description: additional documentation for salt, the distributed remote execution system
salt is a powerful remote execution manager that can be used to
administer servers in a fast and efficient way.

debian/copyright vendored

@ -1,7 +1,7 @@
Format: http://dep.debian.net/deps/dep5
Upstream-Name: salt
Upstream-Contact: salt-users@googlegroups.com
Source: https://github.com/downloads/saltstack/salt/salt-0.10.4.tar.gz
Source: https://github.com/saltstack/salt
Files: *
Copyright: 2012 Thomas S Hatch <thatch45@gmail.com>
@ -23,20 +23,24 @@ License: Apache-2.0
`/usr/share/common-licenses/Apache-2.0'.
Files: debian/*
Copyright: 2011 Aaron Toponce <aaron.toponce@gmail.com>
License: GPL-3+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Copyright: 2012 Michael Prokop <mika@debian.org>
2012 Christian Hofstaedtler <christian@hofstaedtler.name>
2012 Ulrich Dangel <mru@spamt.net>
2012 Corey Quinn <corey@sequestered.net>
2011 Aaron Toponce <aaron.toponce@gmail.com>
License: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
http://www.apache.org/licenses/LICENSE-2.0
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
.
On Debian systems, the complete text of the GNU General
Public License version 3 can be found in "/usr/share/common-licenses/GPL-3".
On Debian systems, the full text of the Apache License, Version 2.0 can be
found in the file
`/usr/share/common-licenses/Apache-2.0'.

debian/rules vendored

@ -1,8 +1,14 @@
#!/usr/bin/make -f
PKGFILES = pkg/*.service pkg/*.upstart pkg/*.logrotate
SALT_BIN = common minion master syndic
%:
dh $@
make -C doc html
# overrides require debhelper >= 7.0.50
# http://pkg-perl.alioth.debian.org/debhelper.html#forcing_special_tests
cp $(PKGFILES) debian
for d in $(SALT_BIN); do \
cp pkg/salt.postrm debian/salt-$${d}.postrm; done
dh $@
make -C doc html
override_dh_auto_build:
python setup.py build


@ -1,5 +1,4 @@
/etc/salt
/etc/salt/pki
/var/cache/salt
/var/log/salt
/var/run/salt
/etc/salt/
/etc/salt/pki/
/var/cache/salt/
/var/log/salt/


@ -1 +1,3 @@
usr/lib/python2*/dist-packages/salt/
pkg/salt.ufw /etc/ufw/applications.d/
pkg/salt.bash /etc/bash_completion.d/


@ -1,9 +0,0 @@
/var/log/salt/key
/var/log/salt/*.log
{
weekly
missingok
rotate 7
compress
notifempty
}


@ -1,53 +0,0 @@
#!/bin/sh
# Purge config files, logs, and directories created after package install.
# Note that user-specified alternate locations for these are not affected.
clean_common() {
# remove shared job cache and other runtime directories
rm -rf \
/etc/salt \
/var/cache/salt \
/var/log/salt \
/var/run/salt \
2> /dev/null
}
clean_conf() {
# remove config and log file for master, minion, or syndic
rm -rf \
/etc/salt/"$1" \
/etc/salt/"$1".d \
/etc/salt/pki/$1 \
/var/cache/salt/$1 \
/var/log/salt/$1 \
/var/run/salt/$1 \
2> /dev/null
}
purgefiles() {
case "$pkg" in
master|minion|syndic)
clean_conf $pkg ;;
common)
clean_common ;;
*)
echo "$0 unknown package \`$1'" 1>&2
exit 1 ;;
esac
}
pkg=`echo $0 | cut -f1 -d. | cut -f2 -d-`
case "$1" in
remove)
;;
purge)
purgefiles ;;
upgrade|failed-upgrade|disappear|abort-install|abort-upgrade)
;;
*)
echo "$0 unknown action \`$1'" 1>&2
exit 1 ;;
esac
exit 0


@ -1,4 +1,3 @@
/etc/salt/master.d
/etc/salt/pki/master
/var/cache/salt/master
/var/run/salt/master
/etc/salt/master.d/
/etc/salt/pki/master/
/var/cache/salt/master/


@ -1,4 +1,4 @@
conf/master.template /etc/salt
conf/master /etc/salt
scripts/salt-master /usr/bin
scripts/salt-cp /usr/bin
scripts/salt-run /usr/bin


@ -1,8 +0,0 @@
/var/log/salt/master
{
weekly
missingok
rotate 7
compress
notifempty
}


@ -1,10 +0,0 @@
[Unit]
Description=The Salt Master Server
After=syslog.target network.target
[Service]
Type=simple
ExecStart=/usr/bin/salt-master -d
[Install]
WantedBy=multi-user.target


@ -1,4 +1,3 @@
/etc/salt/minion.d
/etc/salt/pki/minion
/var/cache/salt/minion
/var/run/salt/minion
/etc/salt/minion.d/
/etc/salt/pki/minion/
/var/cache/salt/minion/


@ -1,4 +1,4 @@
conf/minion.template /etc/salt
conf/minion /etc/salt
scripts/salt-minion /usr/bin
scripts/salt-call /usr/bin
debian/salt-minion.service /lib/systemd/system


@ -1,8 +0,0 @@
/var/log/salt/minion
{
weekly
missingok
rotate 7
compress
notifempty
}


@ -1,53 +0,0 @@
#!/bin/sh
# Purge config files, logs, and directories created after package install.
# Note that user-specified alternate locations for these are not affected.
clean_common() {
# remove shared job cache and other runtime directories
rm -rf \
/etc/salt \
/var/cache/salt \
/var/log/salt \
/var/run/salt \
2> /dev/null
}
clean_conf() {
# remove config and log file for master, minion, or syndic
rm -rf \
/etc/salt/"$1" \
/etc/salt/"$1".d \
/etc/salt/pki/$1 \
/var/cache/salt/$1 \
/var/log/salt/$1 \
/var/run/salt/$1 \
2> /dev/null
}
purgefiles() {
case "$pkg" in
master|minion|syndic)
clean_conf $pkg ;;
common)
clean_common ;;
*)
echo "$0 unknown package \`$1'" 1>&2
exit 1 ;;
esac
}
pkg=`echo $0 | cut -f1 -d. | cut -f2 -d-`
case "$1" in
remove)
;;
purge)
purgefiles ;;
upgrade|failed-upgrade|disappear|abort-install|abort-upgrade)
;;
*)
echo "$0 unknown action \`$1'" 1>&2
exit 1 ;;
esac
exit 0


@ -1,10 +0,0 @@
[Unit]
Description=The Salt Minion
After=syslog.target network.target
[Service]
Type=simple
ExecStart=/usr/bin/salt-minion -d
[Install]
WantedBy=multi-user.target


@ -1,53 +0,0 @@
#!/bin/sh
# Purge config files, logs, and directories created after package install.
# Note that user-specified alternate locations for these are not affected.
clean_common() {
# remove shared job cache and other runtime directories
rm -rf \
/etc/salt \
/var/cache/salt \
/var/log/salt \
/var/run/salt \
2> /dev/null
}
clean_conf() {
# remove config and log file for master, minion, or syndic
rm -rf \
/etc/salt/"$1" \
/etc/salt/"$1".d \
/etc/salt/pki/$1 \
/var/cache/salt/$1 \
/var/log/salt/$1 \
/var/run/salt/$1 \
2> /dev/null
}
purgefiles() {
case "$pkg" in
master|minion|syndic)
clean_conf $pkg ;;
common)
clean_common ;;
*)
echo "$0 unknown package \`$1'" 1>&2
exit 1 ;;
esac
}
pkg=`echo $0 | cut -f1 -d. | cut -f2 -d-`
case "$1" in
remove)
;;
purge)
purgefiles ;;
upgrade|failed-upgrade|disappear|abort-install|abort-upgrade)
;;
*)
echo "$0 unknown action \`$1'" 1>&2
exit 1 ;;
esac
exit 0


@ -1,10 +0,0 @@
[Unit]
Description=The Salt Master Server
After=syslog.target network.target
[Service]
Type=simple
ExecStart=/usr/bin/salt-syndic -d
[Install]
WantedBy=multi-user.target


@ -8,8 +8,8 @@ Full Table of Contents
topics/index
topics/installation/index
topics/hacking
topics/configuration
topics/hacking
topics/targeting/index
topics/tutorials/modules
topics/tutorials/starting_states
@ -48,6 +48,7 @@ Full Table of Contents
ref/runners/all/index
ref/wheel/all/index
ref/output/all/index
ref/clients/index
ref/peer
ref/clientacl
ref/syndic

doc/ref/clients/index.rst Normal file

@ -0,0 +1,14 @@
.. _client-apis:
==========================
Salt client API interfaces
==========================
.. autoclass:: salt.client.LocalClient
:members: cmd
.. autoclass:: salt.runner.RunnerClient
:members: low
.. autoclass:: salt.wheel.Wheel
:members: master_call
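As a quick illustration of the local client API documented above, here is a minimal, hedged sketch (it assumes a running master, accepted minion keys, and sufficient privileges to read the master configuration):

.. code-block:: python

    import salt.client

    # LocalClient talks to the local salt-master (reads /etc/salt/master by default)
    local = salt.client.LocalClient()

    # Run test.ping on all minions; returns a dict of minion id -> result
    ret = local.cmd('*', 'test.ping')
    print(ret)  # e.g. {'alpha': True, 'bravo': True}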


@ -40,6 +40,7 @@ Full list of builtin execution modules
djangomod
ebuild
event
extfs
file
freebsdjail
freebsdkmod
@ -52,15 +53,18 @@ Full list of builtin execution modules
glance
grains
groupadd
grub
grub_legacy
hg
hosts
iptables
keyboard
keystone
kmod
kvm_hyper
launchctl
ldapmod
linux_sysctl
locale
mdadm
mongodb
monit
@ -75,10 +79,13 @@ Full list of builtin execution modules
openbsdservice
osxdesktop
pacman
pam
parted
pecl
pillar
pip
pkgng
pkg_resource
pkgutil
postgres
poudriere
@ -111,6 +118,7 @@ Full list of builtin execution modules
svn
systemd
test
timezone
tls
tomcat
upstart


@ -0,0 +1,6 @@
==================
salt.modules.extfs
==================
.. automodule:: salt.modules.extfs
:members:


@ -0,0 +1,6 @@
========================
salt.modules.grub_legacy
========================
.. automodule:: salt.modules.grub_legacy
:members:


@ -0,0 +1,6 @@
=====================
salt.modules.iptables
=====================
.. automodule:: salt.modules.iptables
:members:


@ -0,0 +1,6 @@
=====================
salt.modules.keyboard
=====================
.. automodule:: salt.modules.keyboard
:members:


@ -0,0 +1,6 @@
===================
salt.modules.locale
===================
.. automodule:: salt.modules.locale
:members:


@ -0,0 +1,6 @@
================
salt.modules.pam
================
.. automodule:: salt.modules.pam
:members:


@ -0,0 +1,6 @@
===================
salt.modules.parted
===================
.. automodule:: salt.modules.parted
:members:


@ -0,0 +1,6 @@
=========================
salt.modules.pkg_resource
=========================
.. automodule:: salt.modules.pkg_resource
:members:


@ -0,0 +1,6 @@
=====================
salt.modules.timezone
=====================
.. automodule:: salt.modules.timezone
:members:


@ -196,12 +196,36 @@ to the calling terminal.
.. _`Python docstring`: #term-docstring
Add Module meta data
--------------------
Add information about the module using the following field lists:
.. code-block:: text
:maintainer: Thomas Hatch <thatch@saltstack.com>, Seth House <shouse@saltstack.com>
:maturity: new
:depends: python-mysqldb
:platform: all
The maintainer field is a comma-delimited list of developers who help maintain
this module.
The maturity field indicates the level of quality and testing for this module.
Standard labels will be determined.
The depends field is a comma-delimited list of modules that this module depends
on.
The platform field is a comma-delimited list of platforms that this module is
known to run on.
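For illustration only, a hedged sketch of how these field lists might sit in a module's top-level docstring (the module purpose is hypothetical; the field values are simply taken from the example above):

.. code-block:: python

    # hypothetical module used only to illustrate the field lists above
    '''
    Work with a MySQL database.

    :maintainer:    Thomas Hatch <thatch@saltstack.com>, Seth House <shouse@saltstack.com>
    :maturity:      new
    :depends:       python-mysqldb
    :platform:      all
    '''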
How Functions are Read
======================
In Salt, Python callable objects contained within a module are made available to
the Salt minion for use. The only exception to this rule is a callable object
with a name starting with an underscore ``_``.
In Salt, Python callable objects contained within a module are made available
to the Salt minion for use. The only exception to this rule is a callable
object with a name starting with an underscore ``_``.
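A tiny hedged sketch of that rule, using made-up function names: ``get_uptime`` would be callable from the minion, while ``_parse_proc`` would stay private to the module:

.. code-block:: python

    # exposed: callable as <module>.get_uptime from the salt minion
    def get_uptime():
        return _parse_proc()

    # hidden: the leading underscore keeps this as an internal helper only
    def _parse_proc():
        with open('/proc/uptime') as fp:
            return float(fp.read().split()[0])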
Objects Loaded Into the Salt Minion
-----------------------------------


@ -2,5 +2,244 @@
salt.renderers.stateconf
========================
.. automodule:: salt.renderers.stateconf
:members:
This module provides a custom renderer that processes a salt file with a
specified templating engine (e.g. jinja) and a chosen data renderer (e.g. yaml),
extracts arguments for any ``stateconf.set`` state, and provides the extracted
arguments (including salt-specific args, such as 'require', etc.) as template
context. The goal is to make writing reusable/configurable/parameterized
salt files easier and cleaner.
To use this renderer, either set it as the default renderer via the
``renderer`` option in master/minion's config, or use the shebang line in each
individual sls file, like so: ``#!stateconf``. Note, due to the way this
renderer works, it must be specified as the first renderer in a render
pipeline. That is, you cannot specify ``#!mako|yaml|stateconf``, for example.
Instead, you specify them as renderer arguments: ``#!stateconf mako . yaml``.
Here's a list of features enabled by this renderer:
- Recognizes the special state function, ``stateconf.set``, that configures a
default list of named arguments usable within the template context of
the salt file. Example::
sls_params:
stateconf.set:
- name1: value1
- name2: value2
- name3:
- value1
- value2
- value3
- require_in:
- cmd: output
# --- end of state config ---
output:
cmd.run:
- name: |
echo 'name1={{sls_params.name1}}
name2={{sls_params.name2}}
name3[1]={{sls_params.name3[1]}}
'
This even works with ``include`` + ``extend`` so that you can override
the default configured arguments by including the salt file and then
``extend`` the ``stateconf.set`` states that come from the included salt
file.
Notice that the end of configuration marker (``# --- end of state config ---``)
is needed to separate the use of 'stateconf.set' from the rest of your salt
file. The regex that matches such a marker can be configured via the
``stateconf_end_marker`` option in your master or minion config file.
Sometimes, you'd like to set a default argument value that's based on
earlier arguments in the same ``stateconf.set``. For example, you may be
tempted to do something like this::
apache:
stateconf.set:
- host: localhost
- port: 1234
- url: 'http://{{host}}:{{port}}/'
# --- end of state config ---
test:
cmd.run:
- name: echo '{{apache.url}}'
- cwd: /
However, this won't work, but can be worked around like so::
apache:
stateconf.set:
- host: localhost
- port: 1234
{# - url: 'http://{{host}}:{{port}}/' #}
# --- end of state config ---
# {{ apache.setdefault('url', "http://%(host)s:%(port)s/" % apache) }}
test:
cmd.run:
- name: echo '{{apache.url}}'
- cwd: /
- Adds support for relative include and exclude of .sls files. Example::
include:
- .apache
- .db.mysql
exclude:
- sls: .users
If the above is written in a salt file at `salt://some/where.sls` then
it will include `salt://some/apache.sls` and `salt://some/db/mysql.sls`,
and exclude `salt://some/users.sls`. Actually, it does that by rewriting
the above ``include`` and ``exclude`` into::
include:
- some.apache
- some.db.mysql
exclude:
- sls: some.users
- Adds a ``sls_dir`` context variable that expands to the directory containing
the salt file being rendered. So, you can write ``salt://${sls_dir}/...`` to
reference template files used by your salt file.
- Prefixes any state id (declaration or reference) that starts with a dot (``.``)
to avoid duplicated state ids when the salt file is included by other salt
files.
For example, in `salt://some/file.sls`, a state id such as ``.sls_params``
will be turned into ``some.file::sls_params``. Example::
.vim:
package.installed
Above will be translated into::
some.file::vim:
package.installed:
- name: vim
Notice that if a state under a dot-prefixed state id has no ``name``
argument, one will be added automatically using the state id with
the leading dot stripped off.
The leading dot trick can be used with extending state ids as well,
so you can include relatively and extend relatively. For example, when
extending a state in `salt://some/other_file.sls`, eg,::
include:
- .file
extend:
.file::sls_params:
stateconf.set:
- name1: something
Above will be pre-processed into::
include:
- some.file
extend:
some.file::sls_params:
stateconf.set:
- name1: something
- Optionally (enabled by default, *disable* via the `-G` renderer option,
e.g. in the shebang line: ``#!stateconf -G``), generates a
``stateconf.set`` goal state (state id named ``.goal`` by default,
configurable via the master/minion config option, ``stateconf_goal_state``)
that requires all other states in the salt file. Note, the ``.goal``
state id is subject to the dot-prefix rename rule mentioned earlier.
Such a goal state is intended to be required by some state in an including
salt file. For example, in your webapp salt file, if you include an
sls file that is supposed to set up Tomcat, you might want to make sure that
all states in the Tomcat sls file will be executed before some state in
the webapp sls file.
- Optionally (enable via the `-o` renderer option, e.g. in the shebang line:
``#!stateconf -o``), orders the states in an sls file by adding a
``require`` requisite to each state such that every state requires the
state defined just before it. The order of the states here is the order
in which they are defined in the sls file. (Note: this feature is only available
if your minions are using Python >= 2.7. For Python 2.6, it should also
work if you install the `ordereddict` module from PyPI.)
By enabling this feature, you are basically agreeing to author your sls
files in a way that gives up the explicit (or implicit) ordering imposed
by the use of ``require``, ``watch``, ``require_in`` or ``watch_in``
requisites, and instead you rely on the order of states you define in
the sls files. This may or may not be a better way for you. However, if
there are many states defined in an sls file, then it tends to be easier
to see the order in which they will be executed with this feature.
You are still allowed to use all the requisites, with a few restrictions.
You cannot ``require`` or ``watch`` a state defined *after* the current
state. Similarly, in a state, you cannot ``require_in`` or ``watch_in``
a state defined *before* it. Breaking either of the two restrictions above
a state defined *before* it. Breaking any of the two restrictions above
will result in a state loop. The renderer will check for such incorrect
uses if this feature is enabled.
Additionally, ``names`` declarations cannot be used with this feature
because the way they are compiled into low states makes it impossible to
guarantee the order in which they will be executed. This is also checked
by the renderer. As a workaround for not being able to use ``names``,
you can achieve the same effect by generating your states with the
template engine available within your sls file.
Finally, with the use of this feature, it becomes possible to easily make
an included sls file execute all its states *after* some state (say, with
id ``X``) in the including sls file. All you have to do is make state
``X`` ``require_in`` the first state defined in the included sls file.
When writing sls files with this renderer, you should avoid using what can be
defined in a ``name`` argument of a state as the state's id. That is, avoid
writing your states like this::
/path/to/some/file:
file.managed:
- source: salt://some/file
cp /path/to/some/file file2:
cmd.run:
- cwd: /
- require:
- file: /path/to/some/file
Instead, you should define the state id and the ``name`` argument separately
for each state, and the id should be something meaningful and easy to reference
within a requisite (which is a good habit anyway, and such extra
indirection also makes your sls file easier to modify later). Thus, the
above states should be written like this::
add-some-file:
file.managed:
- name: /path/to/some/file
- source: salt://some/file
copy-files:
cmd.run:
- name: cp /path/to/some/file file2
- cwd: /
- require:
- file: add-some-file
Moreover, when referencing a state from a requisite, you should reference the
state's id plus the state name rather than the state name plus its ``name``
argument. (Yes, in the above example, you can actually ``require`` the
``file: /path/to/some/file`` instead of the ``file: add-some-file``.) The
reason is that this renderer will rewrite or rename state ids and their
references for state ids prefixed with ``.``. So, if you reference a ``name``
then there is no way to reliably rewrite such a reference.


@ -65,6 +65,22 @@ Other renderer combinations are possible, here's a few examples:
This one allows you to use both jinja and mako templating syntax in the
input and then parse the final rendered output as YAML.
And here's a contrived example sls file using the ``jinja | mako | yaml`` renderer:
.. code-block:: python
#!jinja|mako|yaml
An_Example:
cmd.run:
- name: |
echo "Using Salt ${grains['saltversion']}" \
"from path {{grains['saltpath']}}."
- cwd: /
<%doc> ${...} is Mako's notation, and so is this comment. </%doc>
{# Similarly, {{...}} is Jinja's notation, and so is this comment. #}
For backward compatibility, ``jinja | yaml`` can also be written as
``yaml_jinja``, and similarly, the ``yaml_mako``, ``yaml_wempy``,
``json_jinja``, ``json_mako``, and ``json_wempy`` renderers are all supported


@ -14,3 +14,4 @@ Full list of builtin returner modules
cassandra_return
mongo_return
redis_return
mysql


@ -0,0 +1,6 @@
====================
salt.returners.mysql
====================
.. automodule:: salt.returners.mysql
:members:


@ -145,14 +145,23 @@ Watch and the mod_watch Function
The watch requisite is based on the ``mod_watch`` function. Python state
modules can include a function called ``mod_watch`` which is then called
if the watch call is invoked. In the case of the service module the underlying
service is restarted. In the case of the cmd state the command is executed.
if the watch call is invoked. Whether ``mod_watch`` is called depends on the
execution of the watched state:
- If the watched state reports no changes, the watching state itself just runs
as usual; ``mod_watch`` is not called. This behavior is the same as using a
``require``.
- If the watched state reports changes, the watching state is run, *and* if
that run changes nothing, the minion reacts by calling ``mod_watch``.
When reacting, in the case of the service module the underlying service is
restarted. In the case of the cmd state the command is executed.
The ``mod_watch`` function for the service state looks like this:
.. code-block:: python
def mod_watch(name, sig=None):
def mod_watch(name, sig=None, reload=False, full_restart=False):
'''
The service watcher, called to invoke the watch command.
@ -163,22 +172,48 @@ The ``mod_watch`` function for the service state looks like this:
The string to search for when looking for the service process with ps
'''
if __salt__['service.status'](name, sig):
changes = {name: __salt__['service.restart'](name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Service restarted'}
if 'service.reload' in __salt__ and reload:
restart_func = __salt__['service.reload']
elif 'service.full_restart' in __salt__ and full_restart:
restart_func = __salt__['service.full_restart']
else:
restart_func = __salt__['service.restart']
else:
restart_func = __salt__['service.start']
result = restart_func(name)
return {'name': name,
'changes': {},
'result': True,
'comment': 'Service {0} started'.format(name)}
'changes': {name: result},
'result': result,
'comment': 'Service restarted' if result else \
'Failed to restart the service'
}
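Since the diff above interleaves removed and added lines and has lost its indentation, here is a hedged, re-indented reading of roughly what the new ``mod_watch`` looks like, reconstructed from the added lines only (a sketch, not the verbatim source):

.. code-block:: python

    def mod_watch(name, sig=None, reload=False, full_restart=False):
        '''
        The service watcher, called to invoke the watch command.
        '''
        if __salt__['service.status'](name, sig):
            # service is running: pick reload, full restart or plain restart
            if 'service.reload' in __salt__ and reload:
                restart_func = __salt__['service.reload']
            elif 'service.full_restart' in __salt__ and full_restart:
                restart_func = __salt__['service.full_restart']
            else:
                restart_func = __salt__['service.restart']
        else:
            # service is not running: start it instead
            restart_func = __salt__['service.start']

        result = restart_func(name)
        return {'name': name,
                'changes': {name: result},
                'result': result,
                'comment': 'Service restarted' if result else
                           'Failed to restart the service'}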
The watch requisite only works if the state that is watching has a
``mod_watch`` function written. If watch is set on a state that does not have
a ``mod_watch`` function (like pkg), then the listed states will behave only
as if they were under a ``require`` statement.
Also notice that a ``mod_watch`` may accept additional keyword arguments,
which, in the sls file, will be taken from the same set of arguments specified
for the state that includes the ``watch`` requisite. This means, for the
earlier ``service.running`` example above, you can tell the service to
``reload`` instead of restart like this:
.. code-block:: yaml
redis:
# ... other state declarations omitted ...
service.running:
- enable: True
- reload: True
- watch:
- file: /etc/redis.conf
- pkg: redis
The Order Option
================


@ -10,23 +10,53 @@ configuration file.
.. glossary::
master
The Salt master is the central server that all minions connect to. You
run commands on the minions through the master and minions send data
The Salt master is the central server that all minions connect to.
Commands are run on the minions through the master, and minions send data
back to the master (unless otherwise redirected with a :doc:`returner
</ref/returners/index>`). It is started with the
:command:`salt-master` program.
minion
Salt minions are the potentially hundreds or thousands of servers that
you query and control from the master.
may be queried and controlled from the master.
The configuration files will be installed to :file:`/etc/salt` and are named
after the respective components, :file:`/etc/salt/master` and
:file:`/etc/salt/minion`.
To make a minion check into the correct master simply edit the
:conf_minion:`master` variable in the minion configuration file to reference
the master DNS name or IPv4 address.
Master Configuration
====================
By default the Salt master listens on ports 4505 and 4506 on all
interfaces (0.0.0.0). To bind Salt to a specific IP, redefine the
"interface" directive in the master configuration file, typically
``/etc/salt/master``, as follows:
.. code-block:: diff
- #interface: 0.0.0.0
+ interface: 10.0.0.1
After updating the configuration file, restart the Salt master.
Minion Configuration
====================
Although there are many Salt Minion configuration options, configuring
a Salt Minion is very simple. By default a Salt Minion will
try to connect to the DNS name "salt"; if the Minion is able to
resolve that name correctly, no configuration is needed.
If the DNS name "salt" does not resolve to point to the correct
location of the Master, redefine the "master" directive in the minion
configuration file, typically ``/etc/salt/minion``, as follows:
.. code-block:: diff
- #master: salt
+ master: 10.0.0.1
After updating the configuration file, restart the Salt minion.
Running Salt
============
@ -58,24 +88,74 @@ Running Salt
There is also a full :doc:`troubleshooting guide</topics/troubleshooting/index>`
available.
Manage Salt Public Keys
=======================
Key Management
==============
Salt manages authentication with RSA public keys. The keys are managed on the
:term:`master` via the :command:`salt-key` command. Once a :term:`minion`
checks into the master the master will save a copy of the minion key. Before
the master can send commands to the minion the key needs to be "accepted".
Salt uses AES encryption for all communication between the Master and
the Minion. This ensures that the commands sent to the Minions cannot
be tampered with, and that communication between Master and Minion is
authenticated through trusted, accepted keys.
1. List the accepted and unaccepted Salt keys::
Before commands can be sent to a Minion, its key must be accepted on
the Master. Run the ``salt-key`` command to list the keys known to
the Salt Master:
salt-key -L
.. code-block:: bash
2. Accept a minion key::
[root@master ~]# salt-key -L
Unaccepted Keys:
alpha
bravo
charlie
delta
Accepted Keys:
salt-key -a <minion id>
This example shows that the Salt Master is aware of four Minions, but none of
the keys has been accepted. To accept the keys and allow the Minions to be
controlled by the Master, again use the ``salt-key`` command:
or accept all unaccepted minion keys::
.. code-block:: bash
salt-key -A
[root@master ~]# salt-key -A
[root@master ~]# salt-key -L
Unaccepted Keys:
Accepted Keys:
alpha
bravo
charlie
delta
The ``salt-key`` command allows for signing keys individually or in bulk. The
example above, using ``-A`` bulk-accepts all pending keys. To accept keys
individually use the lowercase of the same option, ``-a keyname``.
.. seealso:: :doc:`salt-key manpage </ref/cli/salt-key>`
Sending Commands
================
Communication between the Master and a Minion may be verified by running
the ``test.ping`` remote command. ::
[root@master ~]# salt 'alpha' test.ping
{'alpha': True}
Communication between the Master and all Minions may be tested in a
similar way. ::
[root@master ~]# salt '*' test.ping
{'alpha': True}
{'bravo': True}
{'charlie': True}
{'delta': True}
Each of the Minions should send a "True" response as shown above.
What's Next?
============
Depending on the primary way you want to manage your machines you may
either want to visit the section regarding Salt States, or the section
on Modules.


@ -4,11 +4,20 @@ Introduction to Salt
.. rubric:: We're not just talking about NaCl.
Distributed remote execution
============================
The 30 second summary
=====================
Salt is a distributed remote execution system used to execute commands and
query data. It was developed in order to bring the best solutions found in the
Salt is:
* a configuration management system, capable of maintaining remote nodes
in defined states (for example, ensuring that specific packages are installed and
specific services are running)
* a distributed remote execution system used to execute commands and
query data on remote nodes, either individually or by arbitrary
selection criteria
It was developed in order to bring the best solutions found in the
world of remote execution together and make them better, faster, and more
malleable. Salt accomplishes this through its ability to handle large loads of
information, and not just dozens but hundreds and even thousands of individual
@ -17,7 +26,7 @@ servers quickly through a simple and manageable interface.
Simplicity
==========
Versatility between massive scale deployments and smaller systems may seem
Providing versatility between massive scale deployments and smaller systems may seem
daunting, but Salt is very simple to set up and maintain, regardless of the
size of the project. The architecture of Salt is designed to work with any
number of servers, from a handful of local network systems to international
@ -29,23 +38,28 @@ modification, Salt can be fine tuned to meet specific needs.
Parallel execution
==================
The core function of Salt is to enable remote commands to be called in parallel
rather than in serial, to use a secure and encrypted protocol, the smallest and
fastest network payloads possible, and with a simple programming interface. Salt
also introduces more granular controls to the realm of remote execution,
allowing for commands to be executed in parallel and for systems to be targeted
based on more than just hostname, but by system properties.
The core functions of Salt:
* enable commands to remote systems to be called in parallel rather than serially
* use a secure and encrypted protocol
* use the smallest and fastest network payloads possible
* provide a simple programming interface
Salt also introduces more granular controls to the realm of remote
execution, allowing systems to be targeted not just by hostname, but
also by system properties.
Building on proven technology
=============================
Salt takes advantage of a number of technologies and techniques. The networking
layer is built with the excellent `ZeroMQ`_ networking library, so Salt itself
contains a viable, and transparent, AMQ broker inside the daemon. Salt uses
public keys for authentication with the master daemon, then uses faster `AES`_
encryption for payload communication, this means that authentication and
encryption are also built into Salt. Salt takes advantage of communication via
`msgpack`_, enabling fast and light network traffic.
Salt takes advantage of a number of technologies and techniques. The
networking layer is built with the excellent `ZeroMQ`_ networking
library, so the Salt daemon includes a viable and transparent AMQ
broker. Salt uses public keys for authentication with the master
daemon, then uses faster `AES`_ encryption for payload communication;
authentication and encryption are integral to Salt. Salt takes
advantage of communication via `msgpack`_, enabling fast and light
network traffic.
.. _`ZeroMQ`: http://www.zeromq.org/
.. _`msgpack`: http://msgpack.org/
@ -55,7 +69,7 @@ Python client interface
=======================
In order to allow for simple expansion, Salt execution routines can be written
as plain Python modules and the data collected from Salt executions can be sent
as plain Python modules. The data collected from Salt executions can be sent
back to the master server, or to any arbitrary program. Salt can be called from
a simple Python API, or from the command line, so that Salt can be used to
execute one-off commands as well as operate as an integral part of a larger
@ -64,20 +78,22 @@ application.
Fast, flexible, scalable
========================
The result is a system that can execute commands across groups of varying size,
from very few to very many servers at considerably high speed. A system that is
very fast, easy to set up and amazingly malleable, able to suit the needs of
any number of servers working within the same system. Salts unique
architecture brings together the best of the remote execution world, amplifies
its capabilities and expands its range, resulting in this system that is as
versatile as it is practical, able to suit any network.
The result is a system that can execute commands at high speed on
target server groups ranging from one to very many servers. Salt is
very fast, easy to set up, amazingly malleable and provides a single
remote execution architecture that can manage the diverse
requirements of any number of servers. The Salt infrastructure
brings together the best of the remote execution world, amplifies its
capabilities and expands its range, resulting in a system that is as
versatile as it is practical, suitable for any network.
Open
====
Salt is developed under the `Apache 2.0 licence`_, and can be used for open and
proprietary projects. Please submit your expansions back to the Salt project so
that we can all benefit together as Salt grows. So, please feel free to
sprinkle some of this around your systems and let the deliciousness come forth.
Salt is developed under the `Apache 2.0 licence`_, and can be used for
open and proprietary projects. Please submit your expansions back to
the Salt project so that we can all benefit together as Salt grows.
Please feel free to sprinkle Salt around your systems and let the
deliciousness come forth.
.. _`Apache 2.0 licence`: http://www.apache.org/licenses/LICENSE-2.0.html


@ -2,13 +2,6 @@
Arch Linux
==========
Salt has primarily been developed on Arch Linux, meaning it is known to
work very well on that distribution. The lead developer, Thomas S. Hatch
(thatch45) has been a TU (Trusted User) for the Arch Linux distribution,
and has written a number of Arch-specific tools in the past.
Salt, while not Arch-specific, is packaged for and works well on Arch Linux.
Installation
============
@ -18,7 +11,7 @@ currently stable and -git packages available.
Stable Release
--------------
To install Salt stable releases from the Arch Linux AUR, use the commands:
Install Salt stable releases from the Arch Linux AUR as follows:
.. code-block:: bash
@ -27,17 +20,17 @@ To install Salt stable releases from the Arch Linux AUR, use the commands:
cd salt/
makepkg -is
A few of Salt's dependencies are currently only found within the AUR, so you'll
need to download and run ``makepkg -is`` on these as well. As a reference, Salt
currently relies on the following packages only available via the AUR:
A few of Salt's dependencies are currently only found within the AUR, so it is
necessary to download and run ``makepkg -is`` on these as well. As a reference, Salt
currently relies on the following packages which are only available via the AUR:
* https://aur.archlinux.org/packages/py/python2-msgpack/python2-msgpack.tar.gz
* https://aur.archlinux.org/packages/py/python2-psutil/python2-psutil.tar.gz
.. note:: yaourt
If you chose to use a tool such as Yaourt_ the dependencies will be
gathered and built for you automatically.
If a tool such as Yaourt_ is used, the dependencies will be
gathered and built automatically.
The command to install salt using the yaourt tool is:
@ -51,8 +44,7 @@ Tracking develop
----------------
To install the bleeding edge version of Salt (**may include bugs!**),
you can use the -git package. Installing the -git package can be done
using the commands:
use the -git package. Install the -git package as follows:
.. code-block:: bash
@ -61,76 +53,33 @@ using the commands:
cd salt-git/
makepkg -is
A few of Salt's dependencies are currently only found within the AUR, so you'll
need to download and run ``makepkg -is`` on these as well. As a reference, Salt
currently relies on the following packages only available via the AUR:
See the note above about Salt's dependencies.
* https://aur.archlinux.org/packages/py/python2-msgpack/python2-msgpack.tar.gz
* https://aur.archlinux.org/packages/py/python2-psutil/python2-psutil.tar.gz
Post-installation tasks
=======================
.. note:: yaourt
If you chose to use a tool such as Yaourt_ the dependencies will be
gathered and built for you automatically.
The command to install salt using the yaourt tool is:
.. code-block:: bash
yaourt salt-git
.. _Yaourt: https://aur.archlinux.org/packages.php?ID=5863
Configuration
=============
In the sections below I'll outline configuration options for both the Salt
Master and Salt Minions.
**Configuration files**
The Salt package installs two template configuration files,
``/etc/salt/master.template`` and ``/etc/salt/minion.template``. You'll need
to copy these .template files into place and make a few edits. First, copy
them into place as seen here:
``/etc/salt/master.template`` and ``/etc/salt/minion.template``. These
files need to be copied as follows:
.. code-block:: bash
cp /etc/salt/master.template /etc/salt/master
cp /etc/salt/minion.template /etc/salt/minion
Note: You'll only need to copy the config for the service you're going to run.
Once you've copied the config into place you'll need to make changes specific
to your setup. Below I'll outline suggested configuration changes to the
Master, after which I'll outline configuring the Minion.
Master Configuration
====================
This section outlines configuration of a Salt Master, which is used to control
other machines known as "minions" (see "Minion Configuration" for instructions
on configuring a minion). This will outline IP configuration, and a few key
configuration paths.
**Interface**
By default the Salt master listens on TCP ports 4505 and 4506 on all interfaces
(0.0.0.0). If you have a need to bind Salt to a specific IP, redefine the
"interface" directive as seen here:
.. code-block:: diff
- #interface: 0.0.0.0
+ interface: 10.0.0.1
Note: only the configuration files for the services to be run need be
copied.
**rc.conf**
You'll need to activate the Salt Master in your *rc.conf* file. Using your
favorite editor, open ``/etc/rc.conf`` and add the salt-master.
Activate the Salt Master and/or Minion in ``/etc/rc.conf`` as follows:
.. code-block:: diff
-DAEMONS=(syslog-ng network crond)
+DAEMONS=(syslog-ng network crond @salt-master)
+DAEMONS=(syslog-ng network crond @salt-master @salt-minion)
**Start the Master**
@ -142,131 +91,5 @@ seen here:
rc.d start salt-master
If your Salt Master doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
Now go to the :doc:`Configuring Salt</topics/configuration>` page.
Minion Configuration
====================
Configuring a Salt Minion is surprisingly simple. Unless you have a real need
for customizing your minion configuration (which there are plenty of options if
you are so inclined!), there is one simple directive that needs to be updated.
That option is the location of the master.
By default a Salt Minion will try to connect to the dns name "salt". If you
have the ability to update DNS records for your domain you might create an A or
CNAME record for "salt" that points to your Salt Master. If you are able to do
this you likely can do without any minion configuration at all.
If you are not able to update DNS, you'll simply need to update one entry in
the configuration file. Using your favorite editor, open the minion
configuration file and update the "master" entry as seen here.
.. code-block:: diff
- #master: salt
+ master: 10.0.0.1
Simply update the master directive to the IP or hostname of your Salt Master.
Save your changes and you're ready to start your Salt Minion. Advanced
configuration options are covered in another chapter.
**rc.conf**
Before you're able to start the Salt Minion you'll need to update your rc.conf
file. Using your favorite editor open ``/etc/rc.conf`` and add this line:
.. code-block:: diff
-DAEMONS=(syslog-ng network crond)
+DAEMONS=(syslog-ng network crond @salt-minion)
**Start the Minion**
Once you've completed all of these steps you're ready to start your Salt
Minion. You should be able to start your Salt Minion now using the command
seen here:
.. code-block:: bash
rc.d start salt-minion
If your Salt Minion doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
Tying It All Together
=====================
If you've successfully completed each of the steps above you should have a
running Salt Master and a running Salt Minion. The Minion should be configured
to point to the Master. To verify that there is communication flowing between
the Minion and Master we'll run a few initial ``salt`` commands. These commands
will validate the Minions RSA encryption key, and then send a test command to
the Minion to ensure that commands and responses are flowing as expected.
**Key Management**
Salt uses AES encryption for all communication between the Master and the
Minion. This ensures that the commands you send to your Minions (your cloud)
can not be tampered with, and that communication between Master and Minion is
only done through trusted, accepted keys.
Before you'll be able to do any remote execution or configuration management you'll
need to accept any pending keys on the Master. Run the ``salt-key`` command to
list the keys known to the Salt Master.
.. code-block:: bash
[root@master ~]# salt-key -L
Unaccepted Keys:
alpha
bravo
charlie
delta
Accepted Keys:
This example shows that the Salt Master is aware of four Minions, but none of
the keys have been accepted. To accept the keys and allow the Minions to be
controlled by the Master, again use the ``salt-key`` command:
.. code-block:: bash
[root@master ~]# salt-key -A
[root@master ~]# salt-key -L
Unaccepted Keys:
Accepted Keys:
alpha
bravo
charlie
delta
The ``salt-key`` command allows for signing keys individually or in bulk. The
example above, using ``-A`` bulk-accepts all pending keys. To accept keys
individually use the lowercase of the same option, ``-a keyname``.
Sending Commands
================
Everything should be set for you to begin remote management of your Minions.
Whether you have a few or a few-dozen, Salt can help you manage them easily!
For final verification, send a test function from your Salt Master to your
minions. If all of your minions are properly communicating with your Master,
you should "True" responses from each of them. See the example below to send
the ``test.ping`` remote command:
.. code-block:: bash
[root@master ~]# salt '*' test.ping
{'alpha': True}
Where Do I Go From Here
=======================
Congratulations! You've successfully configured your first Salt Minions and are
able to send remote commands. I'm sure you're eager to learn more about what
Salt can do. Depending on the primary way you want to manage your machines you
may either want to visit the section regarding Salt States, or the section on
Modules.


@ -12,28 +12,29 @@ To install Salt on Wheezy or later use:
.. code-block:: bash
sudo apt-get install salt-master
sudo apt-get install salt-minion
apt-get install salt-master
apt-get install salt-minion
Squeeze
=======
Salt is available for squeeze in the Debian backports repository. For more
information how to use debian-backports see
http://backports-master.debian.org/Instructions/
Salt is available for squeeze in the Debian backports repository, and may be
installed as follows:
.. code-block:: bash
cat <<EOF | sudo tee /etc/apt/sources.list.d/backports.list
deb http://backports.debian.org/debian-backports squeeze-backports main
EOF
sudo apt-get update
sudo apt-get -t squeeze-backports install salt-master
sudo apt-get -t squeeze-backports install salt-minion
apt-get update
apt-get -t squeeze-backports install salt-master
apt-get -t squeeze-backports install salt-minion
For more information how to use debian-backports see
http://backports-master.debian.org/Instructions/
Configuration
=============
Post-installation tasks
=======================
Now go to the :doc:`Configuring Salt</topics/configuration>` page.
For more configuration have a look at the Ubuntu section :ref:`ubuntu-config`


@ -11,7 +11,7 @@ makes it a great place to help improve Salt!
Salt and all dependencies have been *finally* accepted into the yum
repositories for EPEL5 and EPEL6. Currently, the latest is in epel-testing
while awaiting promotion to epel proper. You can install it via:
while awaiting promotion to epel proper, and may be installed as follows:
.. code-block:: bash
@ -32,181 +32,45 @@ repositories.
Stable Release
--------------
Salt is packaged separately for the minion and the master. You'll only need to
install the appropriate package for the role you need the machine to play. This
means you're going to want one master and a whole bunch of minions!
Salt is packaged separately for the minion and the master. It is necessary only to
install the appropriate package for the role the machine will play. Typically, there
will be one master and multiple minions.
.. code-block:: bash
yum install salt-master
yum install salt-minion
Configuration
=============
Post-installation tasks
=======================
Below, we'll cover Salt Master and Minion configuration options.
**Master**
Master Configuration
====================
This section outlines configuration of a Salt Master, which is used to control
other machines known as "minions" (see "Minion Configuration" for instructions
on configuring a minion). This will outline IP configuration, and a few key
configuration paths.
**Interface**
By default the Salt master listens on TCP ports ``4505`` and ``4506`` on all interfaces
(0.0.0.0). If you have a need to bind Salt to a specific IP, redefine the
"interface" directive as seen here:
.. code-block:: diff
- #interface: 0.0.0.0
+ interface: 10.0.0.1
**Enable the Master**
You'll also likely want to activate the Salt Master in *systemd*, configuring the
Salt Master to start automatically at boot.
To have the Master start automatically at boot time:
.. code-block:: bash
systemctl enable salt-master.service
**Start the Master**
Once you've completed all of these steps you're ready to start your Salt
Master. You should be able to start your Salt Master now using the command
seen here:
To start the Master:
.. code-block:: bash
systemctl start salt-master.service
If your Salt Master doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
**Minion**
Minion Configuration
====================
Configuring a Salt Minion is surprisingly simple. Unless you have a real need
for customizing your minion configuration (which there are plenty of options if
you are so inclined!), there is one simple directive that needs to be updated.
That option is the location of the master.
By default a Salt Minion will try to connect to the dns name "salt". If you
have the ability to update DNS records for your domain you might create an A or
CNAME record for "salt" that points to your Salt Master. If you are able to do
this you likely can do without any minion configuration at all.
If you are not able to update DNS, you'll simply need to update one entry in
the configuration file. Using your favorite editor, open the minion
configuration file and update the "master" entry as seen here:
.. code-block:: diff
- #master: salt
+ master: 10.0.0.1
Simply update the master directive to the IP or hostname of your Salt Master.
Save your changes and you're ready to start your Salt Minion. Advanced
configuration options are covered in another chapter.
**Enable the Minion**
You'll need to configure the minion to auto-start at boot. You can toggle
that option through systemd.
To have the Minion start automatically at boot time:
.. code-block:: bash
systemctl enable salt-minion.service
**Start the Minion**
Once you've completed all of these steps, start the Minion. This command
should do the trick:
To start the Minion:
.. code-block:: bash
systemctl start salt-minion.service
If your Salt Minion doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
Now go to the :doc:`Configuring Salt</topics/configuration>` page.
Tying It All Together
=====================
If you've successfully completed each of the steps above you should have a
running Salt Master and a running Salt Minion. The Minion should be configured
to point to the Master. To verify that there is communication flowing between
the Minion and Master we'll run a few initial ``salt`` commands. These commands
will validate the Minions RSA encryption key, and then send a test command to
the Minion to ensure that commands and responses are flowing as expected.
**Key Management**
Salt uses AES encryption for all communication between the Master and the
Minion. This ensures that the commands you send to your Minions (your cloud)
cannot be tampered with, and that communication between Master and Minion is
only done through trusted, accepted keys.
Before you'll be able to do any remote execution or configuration management
you'll need to accept any pending keys on the Master. Run the ``salt-key``
command to list the keys known to the Salt Master:
.. code-block:: bash
[root@master ~]# salt-key -L
Unaccepted Keys:
alpha
bravo
charlie
delta
Accepted Keys:
This example shows that the Salt Master is aware of four Minions, but none of
the keys have been accepted. To accept the keys and allow the Minions to be
controlled by the Master, again use the ``salt-key`` command:
.. code-block:: bash
[root@master ~]# salt-key -A
[root@master ~]# salt-key -L
Unaccepted Keys:
Accepted Keys:
alpha
bravo
charlie
delta
The ``salt-key`` command allows for signing keys individually or in bulk. The
example above, using ``-A``, bulk-accepts all pending keys. To accept keys
individually use the lowercase of the same option, ``-a keyname``.
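For example, to accept only the key from the ``alpha`` minion shown in the listing above:
.. code-block:: bash
[root@master ~]# salt-key -a alpha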
Sending Commands
================
Everything should be set for you to begin remote management of your Minions.
Whether you have a few or a few-dozen, Salt can help you manage them easily!
For final verification, send a test function from your Salt Master to your
minions. If all of your minions are properly communicating with your Master,
you should "True" responses from each of them. See the example below to send
the ``test.ping`` remote command:
.. code-block:: bash
[root@master ~]# salt '*' test.ping
{'alpha': True}
Where Do I Go From Here
=======================
Congratulations! You've successfully configured your first Salt Minions and are
able to send remote commands. I'm sure you're eager to learn more about what
Salt can do. Depending on the primary way you want to manage your machines you
may either want to visit the section regarding Salt States, or the section on
Modules.

View File

@ -25,57 +25,24 @@ To install Salt from the FreeBSD ports tree, use the command:
cd /usr/ports/sysutils/salt && make install clean
Once the port is installed you'll need to make a few configuration changes.
Once the port is installed, it is necessary to make a few configuration changes.
These include defining the IP to bind to (optional), and some configuration
path changes to make salt fit more natively into the FreeBSD filesystem tree.
Configuration
=============
Post-installation tasks
=======================
In the sections below I'll outline configuration options for both the Salt
Master and Salt Minions.
**Master**
The Salt port installs two sample configuration files, ``salt/master.sample``
and ``salt/minion.sample`` (these should be installed in ``/usr/local/etc/``,
unless you use a different ``%%PREFIX%%``). You'll need to copy these
.sample files into place and make a few edits. First, copy them into place
as seen here:
Copy the sample configuration file:
.. code-block:: bash
cp /usr/local/etc/salt/master.sample /usr/local/etc/salt/master
cp /usr/local/etc/salt/minion.sample /usr/local/etc/salt/minion
Note: You'll only need to copy the config for the service you're going to run.
Once you've copied the config into place you'll need to make changes specific
to your setup. Below I'll outline suggested configuration changes to the
Master, after which I'll outline configuring the Minion.
Master Configuration
====================
This section outlines configuration of a Salt Master, which is used to control
other machines known as "minions" (see "Minion Configuration" for instructions
on configuring a minion). This will outline IP configuration, and a few key
configuration paths.
**Interface**
By default the Salt master listens on ports 4505 and 4506 on all interfaces
(0.0.0.0). If you have a need to bind Salt to a specific IP, redefine the
"interface" directive as seen here.
.. code-block:: diff
- #interface: 0.0.0.0
+ interface: 10.0.0.1
**rc.conf**
Last but not least you'll need to activate the Salt Master in your rc.conf
file. Using your favorite editor, open ``/etc/rc.conf`` or
``/etc/rc.conf.local`` and add this line.
Activate the Salt Master in ``/etc/rc.conf`` or ``/etc/rc.conf.local`` and add:
.. code-block:: diff
@ -83,138 +50,36 @@ file. Using your favorite editor, open ``/etc/rc.conf`` or
**Start the Master**
Once you've completed all of these steps you're ready to start your Salt
Master. The Salt port installs an rc script which should be used to manage your
Salt Master. You should be able to start your Salt Master now using the command
seen here:
Start the Salt Master as follows:
.. code-block:: bash
service salt_master start
If your Salt Master doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
**Minion**
Minion Configuration
====================
Copy the sample configuration file:
Configuring a Salt Minion is surprisingly simple. Unless you have a real need
to customize your minion configuration (there are plenty of options if you are
so inclined!), only one simple directive needs to be updated.
That option is the location of the master.
.. code-block:: bash
By default a Salt Minion will try to connect to the DNS name "salt". If you
have the ability to update DNS records for your domain you might create an A or
CNAME record for "salt" that points to your Salt Master. If you are able to do
this you likely can do without any minion configuration at all.
If you are not able to update DNS, you'll simply need to update one entry in
the configuration file. Using your favorite editor, open the minion
configuration file and update the "master" entry as seen here.
.. code-block:: diff
- #master: salt
+ master: 10.0.0.1
Simply update the master directive to the IP or hostname of your Salt Master.
Save your changes and you're ready to start your Salt Minion. Advanced
configuration options are covered in another chapter.
cp /usr/local/etc/salt/minion.sample /usr/local/etc/salt/minion
**rc.conf**
Before you're able to start the Salt Minion you'll need to update your rc.conf
file. Using your favorite editor open ``/etc/rc.conf`` or
``/etc/rc.conf.local`` and add this line.
Activate the Salt Minion in ``/etc/rc.conf`` or ``/etc/rc.conf.local`` and add:
.. code-block:: diff
+ salt_minion_enable="YES"
Once you've completed all of these steps you're ready to start your Salt
Minion. The Salt port installs an *rc* script which should be used to manage your
Salt Minion. You should be able to start your Salt Minion now using the command
seen here.
**Start the Minion**
Start the Salt Minion as follows:
.. code-block:: bash
service salt_minion start
If your Salt Minion doesn't start successfully, go back through each step and
see if anything was missed. Salt doesn't take much configuration (part of its
beauty!), and errors are usually simple mistakes.
Now go to the :doc:`Configuring Salt</topics/configuration>` page.
Tying It All Together
=====================
If you've successfully completed each of the steps above you should have a
running Salt Master and a running Salt Minion. The Minion should be configured
to point to the Master. To verify that there is communication flowing between
the Minion and Master we'll run a few initial ``salt`` commands. These commands
will validate the Minion's RSA encryption key, and then send a test command to
the Minion to ensure that commands and responses are flowing as expected.
**Key Management**
Salt uses AES encryption for all communication between the Master and the
Minion. This ensures that the commands you send to your Minions (your cloud)
can not be tampered with, and that communication between Master and Minion is
only done through trusted, accepted keys.
Before you'll be able to do any remote execution or state management you'll
need to accept any pending keys on the Master. Run the ``salt-key`` command to
list the keys known to the Salt Master:
.. code-block:: bash
[root@master ~]# salt-key -L
Unaccepted Keys:
alpha
bravo
charlie
delta
Accepted Keys:
This example shows that the Salt Master is aware of four Minions, but none of
the keys have been accepted. To accept the keys and allow the Minions to be
controlled by the Master, again use the ``salt-key`` command:
.. code-block:: bash
[root@master ~]# salt-key -A
[root@master ~]# salt-key -L
Unaccepted Keys:
Accepted Keys:
alpha
bravo
charlie
delta
The ``salt-key`` command allows for signing keys individually or in bulk. The
example above, using ``-A``, bulk-accepts all pending keys. To accept keys
individually use the lowercase of the same option, ``-a keyname``.
Sending Commands
================
Everything should be set for you to begin remote management of your Minions.
Whether you have a few or a few-dozen, Salt can help you manage them easily!
For final verification, send a test function from your Salt Master to your
minions. If all of your minions are properly communicating with your Master,
you should "True" responses from each of them. See the example below to send
the ``test.ping`` remote command. ::
[root@master ~]# salt 'alpha' test.ping
{'alpha': True}
Where Do I Go From Here
=======================
Congratulations! You've successfully configured your first Salt Minions and are
able to send remote commands. I'm sure you're eager to learn more about what
Salt can do. Depending on the primary way you want to manage your machines you
may either want to visit the section regarding Salt States, or the section on
Modules.

View File

@ -21,4 +21,9 @@ Then download and install from source:
cd salt-<version>
python setup.py install
Post-installation tasks
=======================
Now go to the :doc:`Configuring Salt</topics/configuration>` page.
.. _GitHub downloads: https://github.com/saltstack/salt/downloads

View File

@ -5,8 +5,8 @@ Ubuntu Installation
Add repository
--------------
The latest packages for Ubuntu are published in the saltstack PPA. Add the repository
to your system, import the PPA key, and refresh the package data with the following
The latest packages for Ubuntu are published in the saltstack PPA. Add the
repository, import the PPA key, and refresh the package data with the following
commands:
.. code-block:: bash
@ -36,29 +36,8 @@ may be given at a time:
.. _ubuntu-config:
Configuration
-------------
Post-installation tasks
=======================
Debian-based systems will launch the daemons right after package install, but you
may need to make changes to the configuration files in /etc/salt (see the
configuration files), such as:
- set the minion id and salt master name in /etc/salt/minion (see the example below)
- enable the file_roots and pillar_roots options in /etc/salt/master
- configure syndic to relay commands from another master
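For example, pointing a freshly installed minion at its master and giving it an explicit id can be as simple as appending two lines to ``/etc/salt/minion`` (the address and id below are placeholders):
.. code-block:: bash
# placeholder values; adjust for your environment
echo 'master: 10.0.0.1' | sudo tee -a /etc/salt/minion
echo 'id: web01' | sudo tee -a /etc/salt/minion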
After making any configuration changes, re-start the affected daemons (or use 'stop' and 'start' as needed). E.g.:
.. code-block:: bash
sudo /etc/init.d/salt-minion restart
.. code-block:: bash
sudo /etc/init.d/salt-master restart
.. code-block:: bash
sudo /etc/init.d/salt-syndic stop
sudo /etc/init.d/salt-syndic start
Now go to the :doc:`Configuring Salt</topics/configuration>` page.

View File

@ -19,7 +19,7 @@ Letter Meaning Example
G Grains glob match ``G@os:Ubuntu``
E PCRE Minion id match ``E@web\d+\.(dev|qa|prod)\.loc``
P Grains PCRE match ``P@os:(RedHat|Fedora|CentOS)``
L List of minions ``L@minion1.example.com,minion3.domain.com and bl*.domain.com``
L List of minions ``L@minion1.example.com,minion3.domain.com or bl*.domain.com``
I Pillar glob match ``I@pdata:foobar``
S Subnet/IP addr match ``S@192.168.1.0/24`` or ``S@192.168.1.100``
====== ==================== ===============================================================
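These matchers can be combined with Boolean operators into a single compound target, passed to ``salt`` with the ``-C`` option. A hypothetical example mixing a grain match with a minion list:
.. code-block:: bash
salt -C 'G@os:Ubuntu and L@minion1.example.com,minion3.domain.com' test.ping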

View File

@ -16,7 +16,7 @@ target documentation can be found here:
For example, in the master config file :conf_master:`nodegroups` setting::
nodegroups:
group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com or bl*.domain.com'
group2: 'G@os:Debian and foo.domain.com'
Specify a nodegroup via the ``-N`` option at the command-line::

View File

@ -1,5 +0,0 @@
/var/log/salt/master {
missingok
sharedscripts
endscript
}

View File

@ -1,5 +0,0 @@
/var/log/salt/minion {
missingok
sharedscripts
endscript
}

23
pkg/salt-common.logrotate Normal file
View File

@ -0,0 +1,23 @@
/var/log/salt/master {
weekly
missingok
rotate 7
compress
notifempty
}
/var/log/salt/minion {
weekly
missingok
rotate 7
compress
notifempty
}
/var/log/salt/key {
weekly
missingok
rotate 7
compress
notifempty
}

8
pkg/salt-master.upstart Normal file
View File

@ -0,0 +1,8 @@
description "salt-master"
start on (net-device-up
and local-filesystems
and runlevel [2345])
stop on runlevel [!2345]
exec /usr/bin/salt-master >/dev/null 2>&1

8
pkg/salt-minion.upstart Normal file
View File

@ -0,0 +1,8 @@
description "salt-minion"
start on (net-device-up
and local-filesystems
and runlevel [2345])
stop on runlevel [!2345]
exec /usr/bin/salt-minion >/dev/null 2>&1

11
pkg/salt-syndic.upstart Normal file
View File

@ -0,0 +1,11 @@
description "salt-syndic"
start on (net-device-up
and local-filesystems
and runlevel [2345])
stop on runlevel [!2345]
respawn limit 10 5
respawn
exec /usr/bin/salt-syndic >/dev/null 2>&1

View File

@ -1,5 +1,3 @@
#!/bin/bash
# written by David Pravec
# - feel free to /msg alekibango on IRC if you want to talk about this file

View File

@ -1,6 +1,8 @@
#!/bin/sh
#!/bin/sh -e
# Purge config files, logs, and directories created after package install.
# Note that user-specified alternate locations for these are not affected.
#
# rename to salt-'common|master|minion|syndic'.postrm and call with "purge"
clean_common() {
# remove shared job cache and other runtime directories
@ -42,7 +44,8 @@ case "$1" in
remove)
;;
purge)
purgefiles ;;
purgefiles
;;
upgrade|failed-upgrade|disappear|abort-install|abort-upgrade)
;;
*)
@ -50,4 +53,6 @@ case "$1" in
exit 1 ;;
esac
# This tag is required:
#DEBHELPER#
exit 0

6
pkg/salt.ufw Normal file
View File

@ -0,0 +1,6 @@
# Install into /etc/ufw/applications.d/ and run 'ufw app update' to add salt
# firewall rules to systems with UFW. Activate with 'ufw allow salt'
[Salt]
title=salt
description=fast and powerful configuration management and remote execution
ports=4505,4506/tcp

View File

@ -1,4 +1,3 @@
# pip requirements file for Salt
Jinja2
M2Crypto
msgpack-python

View File

@ -10,6 +10,7 @@ import logging
# Import salt libs, the try block bypasses an issue at build time so that
# modules don't cause the build to fail
from salt.version import __version__
from salt.utils import migrations
try:
from salt.utils import parsers
@ -59,7 +60,7 @@ class Master(parsers.MasterOptionParser):
self.config['publish_port'],
self.config['ret_port']):
self.exit(4, 'The ports are not available to bind\n')
migrations.migrate_paths(self.config)
import salt.master
master = salt.master.Master(self.config)
self.daemonize_if_required()
@ -103,7 +104,7 @@ class Minion(parsers.MinionOptionParser):
log.warn(
                'Setting up the Salt Minion "{0}"'.format(self.config['id'])
)
migrations.migrate_paths(self.config)
# Late import so logging works correctly
import salt.minion
# If the minion key has not been accepted, then Salt enters a loop

View File

@ -226,6 +226,9 @@ class SaltCall(parsers.SaltCallOptionParser):
if self.options.local:
self.config['file_client'] = 'local'
# Setup file logging!
self.setup_logfile_logger()
caller = salt.cli.caller.Caller(self.config)
if self.options.doc:

View File

@ -15,8 +15,9 @@ class Batch(object):
'''
Manage the execution of batch runs
'''
def __init__(self, opts):
def __init__(self, opts, quiet=False):
self.opts = opts
self.quiet = quiet
self.local = salt.client.LocalClient(opts['conf_file'])
self.minions = self.__gather_minions()
@ -39,7 +40,8 @@ class Batch(object):
fret = []
for ret in self.local.cmd_iter(*args):
for minion in ret:
print('{0} Detected for this batch run'.format(minion))
if not self.quiet:
print('{0} Detected for this batch run'.format(minion))
fret.append(minion)
return sorted(fret)
@ -58,8 +60,9 @@ class Batch(object):
else:
return int(self.opts['batch'])
except ValueError:
print(('Invalid batch data sent: {0}\nData must be in the form'
'of %10, 10% or 3').format(self.opts['batch']))
            if not self.quiet:
                print(('Invalid batch data sent: {0}\nData must be in the form '
                       'of %10, 10% or 3').format(self.opts['batch']))
def run(self):
'''
@ -90,7 +93,8 @@ class Batch(object):
active += next_
args[0] = next_
if next_:
print('\nExecuting run on {0}\n'.format(next_))
                    if not self.quiet:
print('\nExecuting run on {0}\n'.format(next_))
iters.append(
self.local.cmd_iter_no_block(*args))
else:
@ -114,14 +118,15 @@ class Batch(object):
pass
for minion, data in parts.items():
active.remove(minion)
yield data['ret']
ret[minion] = data['ret']
data[minion] = data.pop('ret')
if 'out' in data:
out = data.pop('out')
else:
out = None
salt.output.display_output(
data,
out,
self.opts)
return ret
if not self.quiet:
salt.output.display_output(
data,
out,
self.opts)

View File

@ -69,7 +69,7 @@ class LocalClient(object):
'''
Connect to the salt master via the local server and via root
'''
def __init__(self, c_path='/etc/salt', mopts=None):
def __init__(self, c_path='/etc/salt/master', mopts=None):
if mopts:
            self.opts = mopts
else:
@ -93,7 +93,7 @@ class LocalClient(object):
self.opts['cachedir'], '.{0}_key'.format(key_user)
)
# Make sure all key parent directories are accessible
salt.utils.verify.check_parent_dirs(keyfile, key_user)
salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user)
try:
with salt.utils.fopen(keyfile, 'r') as KEY:
@ -129,6 +129,22 @@ class LocalClient(object):
print(("Range server exception: {0}".format(e)))
return []
def _get_timeout(self, timeout):
'''
Return the timeout to use
'''
if timeout is None:
return self.opts['timeout']
if isinstance(timeout, int):
return timeout
if isinstance(timeout, str):
try:
return int(timeout)
except ValueError:
return self.opts['timeout']
# Looks like the timeout is invalid, use config
return self.opts['timeout']
def gather_job_info(self, jid, tgt, tgt_type, **kwargs):
'''
Return the information about a given job
@ -172,7 +188,7 @@ class LocalClient(object):
jid = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
user = __opts__['user']
user=__opts__['user']
)
except Exception:
jid = ''
@ -184,11 +200,64 @@ class LocalClient(object):
expr_form,
ret,
jid=jid,
timeout=timeout or self.opts['timeout'],
timeout=self._get_timeout(timeout),
**kwargs)
return self._check_pub_data(pub_data)
def cmd_async(
self,
tgt,
fun,
arg=(),
expr_form='glob',
ret='',
kwarg=None,
**kwargs):
'''
Execute a command and get back the jid, don't wait for anything
'''
arg = condition_kwarg(arg, kwarg)
pub_data = self.run_job(
tgt,
fun,
arg,
expr_form,
ret,
**kwargs)
try:
return pub_data['jid']
except KeyError:
return 0
def cmd_batch(
self,
tgt,
fun,
arg=(),
expr_form='glob',
ret='',
kwarg=None,
batch='10%',
**kwargs):
'''
Execute a batch command
'''
import salt.cli.batch
arg = condition_kwarg(arg, kwarg)
opts = {'tgt': tgt,
'fun': fun,
'arg': arg,
'expr_form': expr_form,
'ret': ret,
'batch': batch}
for key, val in self.opts.items():
if key not in opts:
opts[key] = val
batch = salt.cli.batch.Batch(opts, True)
for ret in batch.run():
yield ret
def cmd(
self,
tgt,
@ -215,8 +284,10 @@ class LocalClient(object):
if not pub_data:
return pub_data
return self.get_returns(pub_data['jid'], pub_data['minions'],
timeout or self.opts['timeout'])
return self.get_returns(
pub_data['jid'],
pub_data['minions'],
self._get_timeout(timeout))
def cmd_cli(
self,
@ -248,7 +319,7 @@ class LocalClient(object):
else:
for fn_ret in self.get_cli_event_returns(pub_data['jid'],
pub_data['minions'],
timeout or self.opts['timeout'],
self._get_timeout(timeout),
tgt,
expr_form,
verbose,
@ -288,7 +359,7 @@ class LocalClient(object):
else:
for fn_ret in self.get_iter_returns(pub_data['jid'],
pub_data['minions'],
timeout or self.opts['timeout'],
self._get_timeout(timeout),
tgt,
expr_form,
**kwargs):
@ -407,7 +478,9 @@ class LocalClient(object):
while fn_ not in ret:
try:
check = True
ret_data = self.serial.load(salt.utils.fopen(retp, 'r'))
ret_data = self.serial.load(
salt.utils.fopen(retp, 'r')
)
if ret_data is None:
# Sometimes the ret data is read at the wrong
# time and returns None, do a quick re-read
@ -415,7 +488,9 @@ class LocalClient(object):
continue
ret[fn_] = {'ret': ret_data}
if os.path.isfile(outp):
ret[fn_]['out'] = self.serial.load(salt.utils.fopen(outp, 'r'))
ret[fn_]['out'] = self.serial.load(
salt.utils.fopen(outp, 'r')
)
except Exception:
pass
found.add(fn_)
@ -436,7 +511,10 @@ class LocalClient(object):
for id_ in jinfo:
if jinfo[id_]:
if verbose:
print('Execution is still running on {0}'.format(id_))
print(
'Execution is still running on {0}'.format(
id_)
)
more_time = True
if more_time:
timeout += inc_timeout
@ -553,7 +631,9 @@ class LocalClient(object):
continue
while fn_ not in ret:
try:
ret[fn_] = self.serial.load(salt.utils.fopen(retp, 'r'))
ret[fn_] = self.serial.load(
salt.utils.fopen(retp, 'r')
)
except Exception:
pass
if ret and start == 999999999999:
@ -605,10 +685,12 @@ class LocalClient(object):
continue
while fn_ not in ret:
try:
ret_data = self.serial.load(salt.utils.fopen(retp, 'r'))
ret_data = self.serial.load(
salt.utils.fopen(retp, 'r'))
ret[fn_] = {'ret': ret_data}
if os.path.isfile(outp):
ret[fn_]['out'] = self.serial.load(salt.utils.fopen(outp, 'r'))
ret[fn_]['out'] = self.serial.load(
salt.utils.fopen(outp, 'r'))
except Exception:
pass
if ret and start == 999999999999:
@ -757,7 +839,10 @@ class LocalClient(object):
for id_ in jinfo:
if jinfo[id_]:
if verbose:
print('Execution is still running on {0}'.format(id_))
print(
'Execution is still running on {0}'.format(
id_)
)
more_time = True
if more_time:
timeout += inc_timeout
@ -842,9 +927,14 @@ class LocalClient(object):
if expr_form == 'nodegroup':
if tgt not in self.opts['nodegroups']:
conf_file = self.opts.get('conf_file', 'the master config file')
err = 'Node group {0} unavailable in {1}'.format(tgt, conf_file)
raise SaltInvocationError(err)
conf_file = self.opts.get(
'conf_file', 'the master config file'
)
raise SaltInvocationError(
'Node group {0} unavailable in {1}'.format(
tgt, conf_file
)
)
tgt = salt.utils.minions.nodegroup_comp(
tgt,
self.opts['nodegroups']
@ -894,14 +984,25 @@ class LocalClient(object):
payload_kwargs['to'] = timeout
sreq = salt.payload.SREQ(
'tcp://{0[interface]}:{0[ret_port]}'.format(self.opts),
)
'tcp://{0[interface]}:{0[ret_port]}'.format(self.opts),
)
payload = sreq.send('clear', payload_kwargs)
# We have the payload, let's get rid of SREQ fast(GC'ed faster)
del(sreq)
if not payload:
return payload
return {'jid': payload['load']['jid'],
'minions': payload['load']['minions']}
def __del__(self):
# This IS really necessary!
# When running tests, if self.events is not destroyed, we leak 2
# threads per test case which uses self.client
if hasattr(self, 'event'):
self.event.destroy()
class FunctionWrapper(dict):
'''
@ -951,6 +1052,7 @@ class FunctionWrapper(dict):
args.append('{0}={1}'.format(_key, _val))
return self.local.cmd(self.minion, key, args)
class Caller(object):
'''
Create an object used to call salt functions directly on a minion

View File

@ -8,6 +8,7 @@ import os
import socket
import logging
import time
import urlparse
# import third party libs
import yaml
@ -21,7 +22,6 @@ except Exception:
import salt.crypt
import salt.loader
import salt.utils
import salt.utils.migrations
import salt.pillar
from salt.exceptions import SaltClientError
@ -81,6 +81,7 @@ def load_config(opts, path, env_var):
if not os.path.isfile(path):
template = '{0}.template'.format(path)
if os.path.isfile(template):
import salt.utils # Need to re-import, need to find out why
with salt.utils.fopen(path, 'w') as out:
with salt.utils.fopen(template, 'r') as f:
f.readline() # skip first line
@ -120,16 +121,21 @@ def include_config(include, opts, orig_path, verbose):
# Catch situation where user typos path in config; also warns for
# empty include dir (which might be by design)
if len(glob.glob(path)) == 0:
msg = ('Warning parsing configuration file: "include" path/glob '
'"{0}" matches no files').format(path)
if verbose: log.warn(msg.format(path))
if verbose:
log.warn(
'Warning parsing configuration file: "include" path/glob '
'"{0}" matches no files'.format(path)
)
for fn_ in glob.glob(path):
try:
opts.update(_read_conf_file(fn_))
except Exception as e:
msg = 'Error parsing configuration file: {0} - {1}'
log.warn(msg.format(fn_, e))
log.warn(
'Error parsing configuration file: {0} - {1}'.format(
fn_, e
)
)
return opts
@ -215,9 +221,9 @@ def minion_config(path, check_dns=True):
'update_url': False,
'update_restart_services': [],
'retry_dns': 30,
'recon_max': 5000,
}
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
@ -232,7 +238,10 @@ def minion_config(path, check_dns=True):
if 'append_domain' in opts:
opts['id'] = _append_domain(opts)
if check_dns:
if opts.get('file_client', 'remote') == 'local' and check_dns:
check_dns = False
if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
@ -274,10 +283,16 @@ def minion_config(path, check_dns=True):
)
# Prepend root_dir to other paths
prepend_root_dir(opts, ['pki_dir', 'cachedir', 'log_file', 'sock_dir',
'key_logfile', 'extension_modules'])
import salt.utils.migrations
salt.utils.migrations.migrate_paths(opts)
prepend_root_dirs = [
'pki_dir', 'cachedir', 'sock_dir', 'extension_modules'
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('log_file', 'key_logfile'):
if urlparse.urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
return opts
@ -306,8 +321,8 @@ def master_config(path):
'base': ['/srv/pillar'],
},
'ext_pillar': [],
# TODO - Set this to 2 by default in 0.10.5
'pillar_version': 1,
# NOTE: pillar version changed to 2 by default in 0.10.6
'pillar_version': 2,
'pillar_opts': True,
'syndic_master': '',
'runner_dirs': [],
@ -318,7 +333,6 @@ def master_config(path):
'max_open_files': 100000,
'hash_type': 'md5',
'conf_file': path,
'pub_refresh': False,
'open_mode': False,
'auto_accept': False,
'renderer': 'yaml_jinja',
@ -341,6 +355,7 @@ def master_config(path):
'cluster_masters': [],
'cluster_mode': 'paranoid',
'range_server': 'range:80',
'reactors': [],
'serial': 'msgpack',
'state_verbose': True,
'state_output': 'full',
@ -382,7 +397,6 @@ def master_config(path):
opts['open_mode'] = opts['open_mode'] is True
opts['auto_accept'] = opts['auto_accept'] is True
opts['file_roots'] = _validate_file_roots(opts['file_roots'])
salt.utils.migrations.migrate_paths(opts)
return opts

View File

@ -144,7 +144,7 @@ class Auth(object):
'''
# Make sure all key parent directories are accessible
user = self.opts.get('user', 'root')
salt.utils.verify.check_parent_dirs(self.rsa_path, user)
salt.utils.verify.check_path_traversal(self.opts['pki_dir'], user)
if os.path.exists(self.rsa_path):
try:

View File

@ -415,6 +415,7 @@ _OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'arch': 'Arch',
'debian': 'Debian',
}
# Map the 'os' grain to the 'os_family' grain

View File

@ -59,12 +59,16 @@ def _create_loader(
return Loader(module_dirs, opts, tag)
def minion_mods(opts):
def minion_mods(opts, context=None):
'''
Returns the minion modules
'''
load = _create_loader(opts, 'modules', 'module')
functions = load.apply_introspection(load.gen_functions())
if context is None:
context = {}
pack = {'name': '__context__',
'value': context}
functions = load.apply_introspection(load.gen_functions(pack))
if opts.get('providers', False):
if isinstance(opts['providers'], dict):
for mod, provider in opts['providers'].items():
@ -239,6 +243,27 @@ def _mod_type(module_path):
return 'int'
return 'ext'
def in_pack(pack, name):
'''
Returns if the passed name is in the pack
'''
if isinstance(pack, list):
for chunk in pack:
if not isinstance(chunk, dict):
continue
try:
if name == chunk['name']:
return True
except KeyError:
pass
elif isinstance(pack, dict):
try:
if name == pack['name']:
return True
except KeyError:
pass
return False
class Loader(object):
'''
@ -373,7 +398,10 @@ class Loader(object):
if pack:
if isinstance(pack, list):
for chunk in pack:
setattr(mod, chunk['name'], chunk['value'])
try:
setattr(mod, chunk['name'], chunk['value'])
except KeyError:
pass
else:
setattr(mod, pack['name'], pack['value'])
@ -409,6 +437,7 @@ class Loader(object):
'''
Return a dict of functions found in the defined module_dirs
'''
log.debug('loading {0} in {1}'.format(self.tag, self.module_dirs))
names = {}
modules = []
funcs = {}
@ -425,13 +454,21 @@ class Loader(object):
'in the system path. Skipping Cython modules.')
for mod_dir in self.module_dirs:
if not os.path.isabs(mod_dir):
                log.debug(('Skipping {0}, it is not an absolute '
'path').format(mod_dir))
continue
if not os.path.isdir(mod_dir):
log.debug(('Skipping {0}, it is not a '
'directory').format(mod_dir))
continue
for fn_ in os.listdir(mod_dir):
if fn_.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue
if fn_.split('.')[0] in disable:
log.debug(('Skipping {0}, it is disabled by '
'configuration').format(fn_))
continue
if (fn_.endswith(('.py', '.pyc', '.pyo', '.so'))
or (cython_enabled and fn_.endswith('.pyx'))
@ -443,6 +480,9 @@ class Loader(object):
else:
_name = fn_
names[_name] = os.path.join(mod_dir, fn_)
else:
log.debug(('Skipping {0}, it does not end with an '
'expected extension').format(fn_))
for name in names:
try:
if names[name].endswith('.pyx'):
@ -502,7 +542,12 @@ class Loader(object):
if pack:
if isinstance(pack, list):
for chunk in pack:
setattr(mod, chunk['name'], chunk['value'])
if not isinstance(chunk, dict):
continue
try:
setattr(mod, chunk['name'], chunk['value'])
except KeyError:
pass
else:
setattr(mod, pack['name'], pack['value'])
@ -514,45 +559,72 @@ class Loader(object):
except TypeError:
pass
# Trim the full pathname to just the module
# this will be the short name that other salt modules and state
# will refer to it as.
module_name = mod.__name__.rsplit('.', 1)[-1]
if virtual_enable:
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
# This function will return either a new name for the module
# or False. This allows us to have things like the pkg module
# working on all platforms under the name 'pkg'. It also allows
# for modules like augeas_cfg to be referred to as 'augeas',
# which would otherwise have namespace collisions. And finally
# it allows modules to return False if they are not intended
# to run on the given platform or are missing dependencies.
try:
if hasattr(mod, '__virtual__'):
if callable(mod.__virtual__):
virtual = mod.__virtual__()
if virtual:
log.debug(('Loaded {0} as virtual '
'{1}').format(module_name, virtual))
# update the module name with the new name
module_name = virtual
else:
# if __virtual__() returns false then the
# module wasn't meant for this platform.
continue
except Exception:
virtual = False
trb = traceback.format_exc()
log.critical(('Failed to read the virtual function for '
'module: {0}\nWith traceback: {1}').format(
mod.__name__[mod.__name__.rindex('.')+1:], trb))
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.exception(('Failed to read the virtual function for '
'module: {0}').format(module_name))
continue
for attr in dir(mod):
# functions are namespaced with their module name
attr_name = '{0}.{1}'.format(module_name, attr)
if attr.startswith('_'):
# skip private attributes
# log messages omitted for obviousness
continue
if callable(getattr(mod, attr)):
# check to make sure this is callable
func = getattr(mod, attr)
if isinstance(func, type):
# skip callables that might be exceptions
if any([
'Error' in func.__name__,
'Exception' in func.__name__]):
continue
if virtual:
funcs['{0}.{1}'.format(virtual, attr)] = func
self._apply_outputter(func, mod)
elif virtual is False:
pass
else:
funcs[
'{0}.{1}'.format(
mod.__name__[mod.__name__.rindex('.')+1:],
attr
)
] = func
self._apply_outputter(func, mod)
# now that callable passes all the checks, add it to the
# library of available functions of this type
funcs[attr_name] = func
log.trace('Added {0} to {1}'.format(attr_name, self.tag))
self._apply_outputter(func, mod)
# now that all the functions have been collected, iterate back over
# the available modules and inject the special __salt__ namespace that
# contains these functions.
for mod in modules:
if not hasattr(mod, '__salt__'):
mod.__salt__ = funcs
elif not pack:
elif not in_pack(pack, '__salt__'):
mod.__salt__.update(funcs)
return funcs

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
'''
salt.log
~~~~~~~~
@ -9,8 +10,11 @@
:license: Apache 2.0, see LICENSE for more details.
'''
import os
import re
import sys
import socket
import urlparse
import logging
import logging.handlers
@ -57,6 +61,26 @@ def is_logging_configured():
return __CONSOLE_CONFIGURED or __LOGFILE_CONFIGURED
if sys.version_info < (2, 7):
# Since the NullHandler is only available on python >= 2.7, here's a copy
class NullHandler(logging.Handler):
""" This is 1 to 1 copy of python's 2.7 NullHandler"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
logging.NullHandler = NullHandler
# Store a reference to the null logging handler
LoggingNullHandler = logging.NullHandler()
class Logging(LoggingLoggerClass):
def __new__(cls, logger_name, *args, **kwargs):
global MAX_LOGGER_MODNAME_LENGTH
@ -74,6 +98,9 @@ class Logging(LoggingLoggerClass):
logging.Logger.manager.loggerDict.keys(), key=len
))
for handler in logging.getLogger().handlers:
if handler is LoggingNullHandler:
continue
if not handler.lock:
handler.createLock()
handler.acquire()
@ -102,22 +129,26 @@ class Logging(LoggingLoggerClass):
return LoggingLoggerClass.log(self, TRACE, msg, *args, **kwargs)
def getLogger(name):
init()
return logging.getLogger(name)
def init():
# Override the python's logging logger class as soon as this module is imported
if logging.getLoggerClass() is not Logging:
'''
Replace the default system logger with a version that includes trace()
and garbage() methods.
'''
if logging.getLoggerClass() is not Logging:
logging.setLoggerClass(Logging)
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
# Set the root logger at the lowest level possible
logging.getLogger().setLevel(GARBAGE)
logging.setLoggerClass(Logging)
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
# Set the root logger at the lowest level possible
rootLogger = logging.getLogger()
# Add a Null logging handler until logging is configured(will be removed at
# a later stage) so we stop getting:
# No handlers could be found for logger "foo"
rootLogger.addHandler(LoggingNullHandler)
rootLogger.setLevel(GARBAGE)
def getLogger(name):
return logging.getLogger(name)
def setup_console_logger(log_level='error', log_format=None, date_format=None):
@ -128,7 +159,8 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None):
logging.getLogger(__name__).warn('Console logging already configured')
return
init()
# Remove the temporary null logging handler
__remove_null_logging_handler()
if log_level is None:
log_level = 'warning'
@ -159,29 +191,117 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None,
date_format=None):
'''
Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
    The above examples are self-explanatory, but:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
'''
if is_logfile_configured():
logging.getLogger(__name__).warn('Logfile logging already configured')
return
init()
# Remove the temporary null logging handler
__remove_null_logging_handler()
if log_level is None:
log_level = 'warning'
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
try:
rootLogger = logging.getLogger()
handler = getattr(
logging.handlers, 'WatchedFileHandler', logging.FileHandler
)(log_path, 'a', 'utf-8', delay=0)
except (IOError, OSError):
err = ('Failed to open log file, do you have permission to write to '
'{0}'.format(log_path))
sys.stderr.write('{0}\n'.format(err))
sys.exit(2)
parsed_log_path = urlparse.urlparse(log_path)
rootLogger = logging.getLogger()
if parsed_log_path.scheme in ('tcp', 'udp', 'file'):
syslog_opts = {
'facility': logging.handlers.SysLogHandler.LOG_USER,
'socktype': socket.SOCK_DGRAM
}
if parsed_log_path.scheme == 'file' and parsed_log_path.path:
facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
if not facility_name.startswith('LOG_'):
# The user is not specifying a syslog facility
facility_name = 'LOG_USER' # Syslog default
syslog_opts['address'] = parsed_log_path.path
else:
# The user has set a syslog facility, let's update the path to
# the logging socket
syslog_opts['address'] = os.sep.join(
parsed_log_path.path.split(os.sep)[:-1]
)
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
facility_name = parsed_log_path.path.lstrip(os.sep).upper()
if not facility_name.startswith('LOG_'):
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise RuntimeError(
                        'The syslog facility {0!r} is not known'.format(
facility_name
)
)
else:
# This is the case of udp or tcp without a facility specified
facility_name = 'LOG_USER' # Syslog default
facility = getattr(
logging.handlers.SysLogHandler, facility_name, None
)
if facility is None:
# This python syslog version does not know about the user provided
# facility name
raise RuntimeError(
                'The syslog facility {0!r} is not known'.format(
facility_name
)
)
syslog_opts['facility'] = facility
if parsed_log_path.scheme == 'tcp':
# tcp syslog support was only added on python versions >= 2.7
if sys.version_info < (2, 7):
raise RuntimeError(
'Python versions lower than 2.7 do not support logging '
'to syslog using tcp sockets'
)
syslog_opts['socktype'] = socket.SOCK_STREAM
if parsed_log_path.scheme in ('tcp', 'udp'):
syslog_opts['address'] = (
parsed_log_path.hostname,
parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT
)
if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file':
        # There's no socktype support on Python versions lower than 2.7
syslog_opts.pop('socktype', None)
        # Et voilà! Finally, our syslog handler instance
handler = logging.handlers.SysLogHandler(**syslog_opts)
else:
try:
handler = getattr(
logging.handlers, 'WatchedFileHandler', logging.FileHandler
)(log_path, 'a', 'utf-8', delay=0)
except (IOError, OSError):
sys.stderr.write(
'Failed to open log file, do you have permission to write to '
'{0}\n'.format(log_path)
)
sys.exit(2)
handler.setLevel(level)
@ -204,7 +324,22 @@ def set_logger_level(logger_name, log_level='error'):
'''
Tweak a specific logger's logging level
'''
init()
logging.getLogger(logger_name).setLevel(
LOG_LEVELS.get(log_level.lower(), logging.ERROR)
)
def __remove_null_logging_handler():
if is_logfile_configured():
# In this case, the NullHandler has been removed, return!
return
rootLogger = logging.getLogger()
global LoggingNullHandler
for handler in rootLogger.handlers:
if handler is LoggingNullHandler:
rootLogger.removeHandler(LoggingNullHandler)
# Redefine the null handler to None so it can be garbage collected
LoggingNullHandler = None
break

View File

@ -246,6 +246,7 @@ class Master(SMaster):
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
def sigterm_clean(signum, frame):
'''
@ -325,9 +326,11 @@ class Publisher(multiprocessing.Process):
continue
raise exc
except KeyboardInterrupt:
#except KeyboardInterrupt:
finally:
pub_sock.close()
pull_sock.close()
context.term()
class ReqServer(object):
@ -394,12 +397,25 @@ class ReqServer(object):
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def __del__(self):
self.clients.close()
self.workers.close()
self.context.term()
class MWorker(multiprocessing.Process):
'''
@ -442,8 +458,10 @@ class MWorker(multiprocessing.Process):
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
finally:
#except KeyboardInterrupt:
socket.close()
context.term()
def _handle_payload(self, payload):
'''
@ -1008,7 +1026,8 @@ class AESFuncs(object):
try:
timeout = int(clear_load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(clear_load['tmo'])
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['tmo'])
log.warn(msg)
return {}
if 'tgt_type' in clear_load:
@ -1034,14 +1053,18 @@ class AESFuncs(object):
else:
ret_form = 'clean'
if ret_form == 'clean':
return self.local.get_returns(
try:
return self.local.get_returns(
jid,
self.ckminions.check_minions(
clear_load['tgt'],
expr_form
),
),
timeout
)
)
finally:
pub_sock.close()
context.term()
elif ret_form == 'full':
ret = self.local.get_full_returns(
jid,
@ -1052,7 +1075,11 @@ class AESFuncs(object):
timeout
)
ret['__jid__'] = jid
return ret
try:
return ret
finally:
pub_sock.close()
context.term()
def run_func(self, func, load):
'''
@ -1241,7 +1268,8 @@ class ClearFuncs(object):
if re.match(line, keyid):
return True
except re.error:
message = "{0} is not a valid regular expression, ignoring line in {1}"
message = ('{0} is not a valid regular expression, '
'ignoring line in {1}')
log.warn(message.format(line, autosign_file))
continue
@ -1635,6 +1663,14 @@ class ClearFuncs(object):
load['tgt'],
load.get('tgt_type', 'glob')
)
return {'enc': 'clear',
'load': {'jid': clear_load['jid'],
'minions': minions}}
try:
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
finally:
pub_sock.close()
context.term()

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
@ -174,6 +175,7 @@ class Minion(object):
# module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self.authenticate()
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
@ -185,7 +187,7 @@ class Minion(object):
self.functions, self.returners = self.__load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.authenticate()
self.__processing = []
def __prep_mod_opts(self):
'''
@ -301,9 +303,15 @@ class Minion(object):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
multiprocessing.Process(target=target, args=(instance, self.opts, data)).start()
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
threading.Thread(target=target, args=(instance, self.opts, data)).start()
process = threading.Thread(
target=target, args=(instance, self.opts, data)
)
self.__processing.append(process)
process.start()
@classmethod
def _thread_return(class_, minion_instance, opts, data):
@ -353,7 +361,9 @@ class Minion(object):
except SaltInvocationError as exc:
msg = 'Problem executing "{0}": {1}'
log.error(msg.format(function_name, str(exc)))
ret['return'] = 'ERROR executing {0}: {1}'.format(function_name, str(exc))
ret['return'] = 'ERROR executing {0}: {1}'.format(
function_name, exc
)
except Exception:
trb = traceback.format_exc()
msg = 'The minion function caused an exception: {0}'
@ -523,9 +533,11 @@ class Minion(object):
in, signing in can occur as often as needed to keep up with the
revolving master aes key.
'''
log.debug('Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
))
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.Auth(self.opts)
while True:
creds = auth.sign_in()
@ -560,6 +572,16 @@ class Minion(object):
pass
self.functions, self.returners = self.__load_modules()
def cleanup_processes(self):
for process in self.__processing[:]:
if process.is_alive():
continue
process.join(0.025)
if isinstance(process, multiprocessing.Process):
process.terminate()
self.__processing.pop(self.__processing.index(process))
del(process)
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
@ -571,7 +593,7 @@ class Minion(object):
)
)
log.debug('Minion "{0}" trying to tune in'.format(self.opts['id']))
context = zmq.Context()
self.context = zmq.Context()
# Prepare the minion event system
#
@ -585,7 +607,7 @@ class Minion(object):
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
epub_sock = context.socket(zmq.PUB)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
@ -609,10 +631,10 @@ class Minion(object):
)
# Create the pull socket
epull_sock = context.socket(zmq.PULL)
self.epull_sock = self.context.socket(zmq.PULL)
# Bind the event sockets
epub_sock.bind(epub_uri)
epull_sock.bind(epull_uri)
self.epub_sock.bind(epub_uri)
self.epull_sock.bind(epull_uri)
# Restrict access to the sockets
if not self.opts.get('ipc_mode', '') == 'tcp':
os.chmod(
@ -624,14 +646,18 @@ class Minion(object):
448
)
poller = zmq.Poller()
epoller = zmq.Poller()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, '')
socket.setsockopt(zmq.IDENTITY, self.opts['id'])
socket.connect(self.master_pub)
poller.register(socket, zmq.POLLIN)
epoller.register(epull_sock, zmq.POLLIN)
self.poller = zmq.Poller()
self.epoller = zmq.Poller()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.epoller.register(self.epull_sock, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
@ -649,23 +675,53 @@ class Minion(object):
while True:
try:
socks = dict(poller.poll(60000))
if socket in socks and socks[socket] == zmq.POLLIN:
payload = self.serial.loads(socket.recv())
socks = dict(self.poller.poll(60000))
if self.socket in socks and socks[self.socket] == zmq.POLLIN:
payload = self.serial.loads(self.socket.recv())
self._handle_payload(payload)
time.sleep(0.05)
                    # This next call (multiprocessing.active_children()) is
                    # intentional; from the docs: "Calling this has the side effect of
                    # 'joining' any processes which have already finished."
multiprocessing.active_children()
self.passive_refresh()
self.cleanup_processes()
# Check the event system
if epoller.poll(1):
if self.epoller.poll(1):
try:
package = epull_sock.recv(zmq.NOBLOCK)
epub_sock.send(package)
package = self.epull_sock.recv(zmq.NOBLOCK)
self.epub_sock.send(package)
except Exception:
pass
except Exception:
log.critical(traceback.format_exc())
def destroy(self):
if hasattr(self, 'poller'):
for socket in self.poller.sockets.keys():
if not socket.closed:
socket.close()
self.poller.unregister(socket)
if hasattr(self, 'epoller'):
for socket in self.epoller.sockets.keys():
if not socket.closed:
socket.close()
self.epoller.unregister(socket)
if hasattr(self, 'epub_sock'):
if not self.epub_sock.closed:
self.epub_sock.close()
if hasattr(self, 'epull_sock'):
if not self.epull_sock.closed:
self.epull_sock.close()
if hasattr(self, 'socket'):
if not self.socket.closed:
self.socket.close()
if hasattr(self, 'context'):
self.context.term()
def __del__(self):
self.destroy()
class Syndic(salt.client.LocalClient, Minion):
'''
@ -770,9 +826,10 @@ class Matcher(object):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, '{0}_match'.format(matcher))(match, nodegroups)
return getattr(self, '{0}_match'.format(matcher))(match)
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
@ -860,10 +917,17 @@ class Matcher(object):
log.debug('tgt {0}'.format(tgt))
comps = tgt.split(':')
if len(comps) < 2:
log.error('Got insufficient arguments for pillar match statement from master')
log.error(
'Got insufficient arguments for pillar match statement '
'from master'
)
return False
if comps[0] not in self.opts['pillar']:
log.error('Got unknown pillar match statement from master: {0}'.format(comps[0]))
log.error(
'Got unknown pillar match statement from master: {0}'.format(
comps[0]
)
)
return False
if isinstance(self.opts['pillar'][comps[0]], list):
# We are matching a single component to a single list member

View File

@ -41,6 +41,14 @@ def recv(files, dest):
return ret
def _mk_client():
'''
Create a file client and add it to the context
'''
if not 'cp.fileclient' in __context__:
__context__['cp.fileclient'] = salt.fileclient.get_file_client(__opts__)
def _render_filenames(path, dest, env, template):
if not template:
return (path, dest)
@ -98,8 +106,13 @@ def get_file(path, dest, env='base', makedirs=False, template=None, gzip=None):
if not hash_file(path, env):
return ''
else:
client = salt.fileclient.get_file_client(__opts__)
return client.get_file(path, dest, makedirs, env, gzip)
_mk_client()
return __context__['cp.fileclient'].get_file(
path,
dest,
makedirs,
env,
gzip)
def get_template(path, dest, template='jinja', env='base', **kwargs):
@ -110,7 +123,7 @@ def get_template(path, dest, template='jinja', env='base', **kwargs):
salt '*' cp.get_template salt://path/to/template /minion/dest
'''
client = salt.fileclient.get_file_client(__opts__)
_mk_client()
if not 'salt' in kwargs:
kwargs['salt'] = __salt__
if not 'pillar' in kwargs:
@ -119,7 +132,13 @@ def get_template(path, dest, template='jinja', env='base', **kwargs):
kwargs['grains'] = __grains__
if not 'opts' in kwargs:
kwargs['opts'] = __opts__
return client.get_template(path, dest, template, False, env, **kwargs)
return __context__['cp.fileclient'].get_template(
path,
dest,
template,
False,
env,
**kwargs)
def get_dir(path, dest, env='base', template=None, gzip=None):
@ -132,8 +151,8 @@ def get_dir(path, dest, env='base', template=None, gzip=None):
'''
(path, dest) = _render_filenames(path, dest, env, template)
client = salt.fileclient.get_file_client(__opts__)
return client.get_dir(path, dest, env, gzip)
_mk_client()
return __context__['cp.fileclient'].get_dir(path, dest, env, gzip)
def get_url(path, dest, env='base'):
@ -145,8 +164,8 @@ def get_url(path, dest, env='base'):
salt '*' cp.get_url salt://my/file /tmp/mine
salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
'''
client = salt.fileclient.get_file_client(__opts__)
return client.get_url(path, dest, False, env)
_mk_client()
return __context__['cp.fileclient'].get_url(path, dest, False, env)
def get_file_str(path, env='base'):
@ -171,8 +190,12 @@ def cache_file(path, env='base'):
salt '*' cp.cache_file salt://path/to/file
'''
client = salt.fileclient.get_file_client(__opts__)
return client.cache_file(path, env)
_mk_client()
result = __context__['cp.fileclient'].cache_file(path, env)
if not result:
log.error('Unable to cache file "{0}" from env '
'"{1}".'.format(path,env))
return result
def cache_files(paths, env='base'):
@ -185,8 +208,8 @@ def cache_files(paths, env='base'):
salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1
'''
client = salt.fileclient.get_file_client(__opts__)
return client.cache_files(paths, env)
_mk_client()
return __context__['cp.fileclient'].cache_files(paths, env)
def cache_dir(path, env='base', include_empty=False):
@ -197,8 +220,8 @@ def cache_dir(path, env='base', include_empty=False):
salt '*' cp.cache_dir salt://path/to/dir
'''
client = salt.fileclient.get_file_client(__opts__)
return client.cache_dir(path, env, include_empty)
_mk_client()
return __context__['cp.fileclient'].cache_dir(path, env, include_empty)
def cache_master(env='base'):
@ -209,8 +232,8 @@ def cache_master(env='base'):
salt '*' cp.cache_master
'''
client = salt.fileclient.get_file_client(__opts__)
return client.cache_master(env)
_mk_client()
return __context__['cp.fileclient'].cache_master(env)
def cache_local_file(path):
@ -235,8 +258,8 @@ def cache_local_file(path):
return path_cached
# The file hasn't been cached or has changed; cache it
client = salt.fileclient.get_file_client(__opts__)
return client.cache_local_file(path)
_mk_client()
return __context__['cp.fileclient'].cache_local_file(path)
def list_states(env='base'):
@ -247,8 +270,8 @@ def list_states(env='base'):
salt '*' cp.list_states
'''
client = salt.fileclient.get_file_client(__opts__)
return client.list_states(env)
_mk_client()
return __context__['cp.fileclient'].list_states(env)
def list_master(env='base'):
@ -259,8 +282,8 @@ def list_master(env='base'):
salt '*' cp.list_master
'''
client = salt.fileclient.get_file_client(__opts__)
return client.file_list(env)
_mk_client()
return __context__['cp.fileclient'].file_list(env)
def list_master_dirs(env='base'):
@ -271,8 +294,8 @@ def list_master_dirs(env='base'):
salt '*' cp.list_master_dirs
'''
client = salt.fileclient.get_file_client(__opts__)
return client.dir_list(env)
_mk_client()
return __context__['cp.fileclient'].dir_list(env)
def list_minion(env='base'):
@ -283,8 +306,8 @@ def list_minion(env='base'):
salt '*' cp.list_minion
'''
client = salt.fileclient.get_file_client(__opts__)
return client.file_local_list(env)
_mk_client()
return __context__['cp.fileclient'].file_local_list(env)
def is_cached(path, env='base'):
@ -296,8 +319,8 @@ def is_cached(path, env='base'):
salt '*' cp.is_cached salt://path/to/file
'''
client = salt.fileclient.get_file_client(__opts__)
return client.is_cached(path, env)
_mk_client()
return __context__['cp.fileclient'].is_cached(path, env)
def hash_file(path, env='base'):
@ -310,5 +333,5 @@ def hash_file(path, env='base'):
salt '*' cp.hash_file salt://path/to/file
'''
client = salt.fileclient.get_file_client(__opts__)
return client.hash_file(path, env)
_mk_client()
return __context__['cp.fileclient'].hash_file(path, env)

View File

@ -11,6 +11,7 @@ import salt.utils
TAG = '# Lines below here are managed by Salt, do not edit\n'
def _render_tab(lst):
'''
Takes a tab list structure and renders it to a list for applying it to
@ -24,36 +25,20 @@ def _render_tab(lst):
ret.append(TAG)
for env in lst['env']:
if (env['value'] is None) or (env['value'] == ""):
ret.append(
'{0}=""\n'.format(
env['name']
)
)
ret.append('{0}=""\n'.format(env['name']))
else:
ret.append(
'{0}={1}\n'.format(
env['name'],
env['value']
)
)
ret.append('{0}={1}\n'.format(env['name'], env['value']))
for cron in lst['crons']:
ret.append(
'{0} {1} {2} {3} {4} {5}\n'.format(
cron['min'],
cron['hour'],
cron['daymonth'],
cron['month'],
cron['dayweek'],
cron['cmd']
)
)
ret.append('{0} {1} {2} {3} {4} {5}\n'.format(cron['min'],
cron['hour'],
cron['daymonth'],
cron['month'],
cron['dayweek'],
cron['cmd']
)
)
for spec in lst['special']:
ret.append(
'{0} {1}\n'.format(
spec['spec'],
spec['cmd']
)
)
ret.append('{0} {1}\n'.format(spec['spec'], spec['cmd']))
return ret

View File

@ -150,7 +150,7 @@ def collectstatic(settings_module,
salt '*' django.collectstatic settings.py
'''
args = []
args = ['noinput']
kwargs = {}
if no_post_process:
args.append('no-post-process')

View File

@ -89,7 +89,7 @@ def gid_to_group(gid):
# This is not an integer, maybe it's already the group name?
gid = group_to_gid(gid)
if not gid:
if gid == '':
# Don't even bother to feed it to grp
return ''
@ -612,7 +612,13 @@ def patch(originalfile, patchfile, options='', dry_run=False):
.. versionadded:: 0.10.4
'''
dry_run_opt = ' --dry-run' if dry_run else ''
if dry_run:
if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'):
dry_run_opt = ' -C'
else:
dry_run_opt = ' --dry-run'
else:
dry_run_opt = ''
cmd = 'patch {0}{1} {2} {3}'.format(
options, dry_run_opt, originalfile, patchfile)
return __salt__['cmd.run_all'](cmd)
@ -1146,9 +1152,11 @@ def check_managed(
**kwargs
)
if comment:
__clean_tmp(sfn)
return False, comment
changes = check_file_meta(name, sfn, source, source_sum, user,
group, mode, env)
__clean_tmp(sfn)
if changes:
comment = 'The following values are set to be changed:\n'
for key, val in changes.items():
@ -1201,6 +1209,37 @@ def check_file_meta(
changes['mode'] = mode
return changes
def get_diff(
minionfile,
masterfile,
env='base'):
'''
Return unified diff of file compared to file on master
Example:
salt \* file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
'''
ret = ''
if not os.path.exists(minionfile):
ret = 'File {0} does not exist on the minion'.format(minionfile)
return ret
sfn = __salt__['cp.cache_file'](masterfile, env)
if sfn:
with nested(salt.utils.fopen(sfn, 'r'),
salt.utils.fopen(minionfile, 'r')) as (src, name_):
slines = src.readlines()
nlines = name_.readlines()
diff = difflib.unified_diff(nlines, slines, minionfile, masterfile)
if diff:
for line in diff:
ret = ret + line
else:
ret = 'Failed to copy file from master'
return ret
def manage_file(name,
sfn,
@ -1378,7 +1417,7 @@ def makedirs(path, user=None, group=None, mode=None):
# turn on the executable bits for user, group and others.
# Note: the special bits are set to 0.
if mode:
mode = int(mode[-3:], 8) | 0111
mode = int(str(mode)[-3:], 8) | 0111
makedirs_perms(directory, user, group, mode)
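# Worked example (editor's note, not in the original change): with mode given
# as the string '0644', str(mode)[-3:] is '644', int('644', 8) is 0644, and
# 0644 | 0111 turns on the execute bit for user, group and other, giving 0755.
# Casting through str() also lets callers pass the mode as an integer.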
# If a caller such as managed() is invoked with

View File

@ -4,6 +4,7 @@ Package support for FreeBSD
# Import python libs
import os
import salt.utils
def _check_pkgng():
@ -15,6 +16,11 @@ def _check_pkgng():
return False
@salt.utils.memoize
def _cmd(cmd):
return salt.utils.which(cmd)
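# Sketch of the intent (editor's assumption): salt.utils.memoize caches the
# result per argument, so the PATH lookup only happens once per command name:
#
#   _cmd('pkg')   # runs salt.utils.which('pkg'), e.g. '/usr/sbin/pkg'
#   _cmd('pkg')   # returns the cached path on subsequent calls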
def search(pkg_name):
'''
Use `pkg search` if pkg is being used.
@ -24,7 +30,9 @@ def search(pkg_name):
salt '*' pkg.search 'mysql-server'
'''
if _check_pkgng():
res = __salt__['cmd.run']('pkg search {0}'.format(pkg_name))
res = __salt__['cmd.run']('{0} search {1}'.format(_cmd('pkg'),
pkg_name
))
res = [x for x in res.splitlines()]
return {"Results": res}
@ -56,7 +64,8 @@ def available_version(name):
salt '*' pkg.available_version <package name>
'''
if _check_pkgng():
for line in __salt__['cmd.run']('pkg search -f {0}'.format(name).splitlines()):
cmd = '{0} search -f {1}'.format(_cmd('pkg'), name)
for line in __salt__['cmd.run'](cmd).splitlines():
if line.startswith('Version'):
fn, ver = line.split(':', 1)
return ver.strip()
@ -89,13 +98,13 @@ def refresh_db():
salt '*' pkg.refresh_db
'''
if _check_pkgng():
__salt__['cmd.run']('pkg update')
__salt__['cmd.run']('{0} update'.format(_cmd('pkg')))
else:
__salt__['cmd.run']('portsnap fetch')
__salt__['cmd.run']('{0} fetch'.format(_cmd('portsnap')))
if not os.path.isdir('/usr/ports'):
__salt__['cmd.run']('portsnap extract')
__salt__['cmd.run']('{0} extract'.format(_cmd('portsnap')))
else:
__salt__['cmd.run']('portsnap update')
__salt__['cmd.run']('{0} update'.format(_cmd('portsnap')))
return {}
@ -110,9 +119,9 @@ def list_pkgs():
salt '*' pkg.list_pkgs
'''
if _check_pkgng():
pkg_command = "pkg info"
pkg_command = '{0} info'.format(_cmd('pkg'))
else:
pkg_command = "pkg_info"
pkg_command = '{0}'.format(_cmd('pkg_info'))
ret = {}
for line in __salt__['cmd.run'](pkg_command).splitlines():
if not line:
@ -137,13 +146,13 @@ def install(name, refresh=False, repo='', **kwargs):
'''
env = ()
if _check_pkgng():
pkg_command = 'pkg install -y'
pkg_command = '{0} install -y'.format(_cmd('pkg'))
if not refresh:
pkg_command += ' -L'
if repo:
env = (('PACKAGESITE', repo),)
else:
pkg_command = 'pkg_add -r'
pkg_command = '{0} -r'.format(_cmd('pkg_add'))
if repo:
env = (('PACKAGEROOT', repo),)
old = list_pkgs()
@ -186,7 +195,7 @@ def upgrade():
return {}
old = list_pkgs()
__salt__['cmd.retcode']('pkg upgrade -y')
__salt__['cmd.retcode']('{0} upgrade -y'.format(_cmd('pkg')))
new = list_pkgs()
pkgs = {}
for npkg in new:
@ -220,9 +229,9 @@ def remove(name):
if name in old:
name = '{0}-{1}'.format(name, old[name])
if _check_pkgng():
pkg_command = 'pkg delete -y'
pkg_command = '{0} delete -y'.format(_cmd('pkg'))
else:
pkg_command = 'pkg_delete'
pkg_command = '{0}'.format(_cmd('pkg_delete'))
__salt__['cmd.retcode']('{0} {1}'.format(pkg_command, name))
new = list_pkgs()
return _list_removed(old, new)
@ -252,5 +261,5 @@ def rehash():
salt '*' pkg.rehash
'''
shell = __salt__['cmd.run']('echo $SHELL').split('/')
if shell[len(shell)-1] in ["csh", "tcsh"]:
if shell[len(shell) - 1] in ['csh', 'tcsh']:
__salt__['cmd.run']('rehash')

View File

@ -1,9 +1,7 @@
'''
Support for GRUB
Support for GRUB Legacy
'''
# TODO: Support grub2
# Import python libs
import os

563
salt/modules/iptables.py Normal file
View File

@ -0,0 +1,563 @@
'''
Support for iptables
'''
# Import Python libs
import os
import argparse
# Import Salt libs
import salt.utils
from salt.exceptions import SaltException
def __virtual__():
'''
Only load the module if iptables is installed
'''
if salt.utils.which('iptables'):
return 'iptables'
return False
def _conf():
'''
Some distros have a specific location for config files
'''
if __grains__['os_family'] == 'RedHat':
return '/etc/sysconfig/iptables'
elif __grains__['os_family'] == 'Arch':
return '/etc/iptables/iptables.rules'
else:
return False
def version():
'''
Return version from iptables --version
CLI Example::
salt '*' iptables.version
'''
cmd = 'iptables --version'
out = __salt__['cmd.run'](cmd).split()
return out[1]
def get_saved_rules(conf_file=None):
'''
Return a data structure of the rules in the conf file
CLI Example::
salt '*' iptables.get_saved_rules
'''
return _parse_conf(conf_file)
def get_rules():
'''
Return a data structure of the current, in-memory rules
CLI Example::
salt '*' iptables.get_rules
'''
return _parse_conf(in_mem=True)
def get_saved_policy(table='filter', chain=None, conf_file=None):
'''
Return the current policy for the specified table/chain
CLI Examples::
salt '*' iptables.get_saved_policy filter INPUT
salt '*' iptables.get_saved_policy filter INPUT conf_file=/etc/iptables.saved
'''
if not chain:
return 'Error: Chain needs to be specified'
rules = _parse_conf(conf_file)
return rules[table][chain]['policy']
def get_policy(table='filter', chain=None):
'''
Return the current policy for the specified table/chain
CLI Example::
salt '*' iptables.get_policy filter INPUT
'''
if not chain:
return 'Error: Chain needs to be specified'
rules = _parse_conf(in_mem=True)
return rules[table][chain]['policy']
def set_policy(table='filter', chain=None, policy=None):
'''
Set the current policy for the specified table/chain
CLI Example::
salt '*' iptables.set_policy filter INPUT ACCEPT
'''
if not chain:
return 'Error: Chain needs to be specified'
if not policy:
return 'Error: Policy needs to be specified'
cmd = 'iptables -t {0} -P {1} {2}'.format(table, chain, policy)
out = __salt__['cmd.run'](cmd)
return out
def append(table='filter', rule=None):
'''
Append a rule to the specified table/chain.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Example::
salt '*' iptables.append filter 'INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT'
'''
if not rule:
return 'Error: Rule needs to be specified'
cmd = 'iptables -t {0} -A {1}'.format(table, rule)
out = __salt__['cmd.run'](cmd)
return out
def insert(table='filter', rule=None):
'''
Insert a rule into the specified table/chain, at the specified position.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Examples::
salt '*' iptables.insert filter 'INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT'
salt '*' iptables.insert filter 'INPUT 3 -m state --state RELATED,ESTABLISHED -j ACCEPT'
'''
if not rule:
return 'Error: Rule needs to be specified'
cmd = 'iptables -t {0} -I {1}'.format(table, rule)
out = __salt__['cmd.run'](cmd)
return out
def delete(table, position=None, rule=None):
'''
Delete a rule from the specified table/chain, specifying either the rule
in its entirety, or the rule's position in the chain.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Examples::
salt '*' iptables.delete filter 3
salt '*' iptables.delete filter rule='INPUT 3 -m state --state RELATED,ESTABLISHED -j ACCEPT'
'''
cmd = ''
if position:
cmd = 'iptables -t {0} -D {1}'.format(table, position)
elif rule:
cmd = 'iptables -t {0} -D {1}'.format(table, rule)
else:
return 'Error: Either rule or position needs to be specified'
out = __salt__['cmd.run'](cmd)
return out
def flush(table='filter'):
'''
Flush all chains in the specified table.
CLI Example::
salt '*' iptables.flush filter
'''
cmd = 'iptables -t {0} -F'.format(table)
out = __salt__['cmd.run'](cmd)
return out
def _parse_conf(conf_file=None, in_mem=False):
'''
If a file is not passed in, and the correct one for this OS is not
detected, return False
'''
if _conf() and not conf_file and not in_mem:
conf_file = _conf()
rules = ''
if conf_file:
f = open(conf_file, 'r')
rules = f.read()
f.close()
elif in_mem:
cmd = 'iptables-save'
rules = __salt__['cmd.run'](cmd)
else:
raise SaltException('A file was not found to parse')
ret = {}
table = ''
for line in rules.splitlines():
if line.startswith('*'):
table = line.replace('*', '')
ret[table] = {}
elif line.startswith(':'):
comps = line.split()
chain = comps[0].replace(':', '')
ret[table][chain] = {}
ret[table][chain]['policy'] = comps[1]
counters = comps[2].replace('[', '').replace(']', '')
(pcount, bcount) = counters.split(':')
ret[table][chain]['packet count'] = pcount
ret[table][chain]['byte count'] = bcount
ret[table][chain]['rules'] = []
elif line.startswith('-A'):
parser = _parser()
parsed_args = vars(parser.parse_args(line.split()))
ret_args = {}
chain = parsed_args['append']
for arg in parsed_args:
if parsed_args[arg] and arg != 'append':
ret_args[arg] = parsed_args[arg]
ret[table][chain[0]]['rules'].append(ret_args)
return ret
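# Example (editor's sketch, inferred from the parsing code above) of the data
# structure returned by _parse_conf() and get_rules(); counters depend on the
# running system:
#
#   {'filter': {'INPUT': {'policy': 'ACCEPT',
#                         'packet count': '0',
#                         'byte count': '0',
#                         'rules': [{'match': ['state'],
#                                    'state': ['RELATED,ESTABLISHED'],
#                                    'jump': ['ACCEPT']}]}}}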
def _parser():
'''
This function contains _all_ the options I could find in man 8 iptables,
listed in the first section that I found them in. They will not all be used
by all parts of the module; use them intelligently and appropriately.
'''
parser = argparse.ArgumentParser()
# COMMANDS
parser.add_argument('-A', '--append', dest='append', action='append')
parser.add_argument('-D', '--delete', dest='delete', action='append')
parser.add_argument('-I', '--insert', dest='insert', action='append')
parser.add_argument('-R', '--replace', dest='replace', action='append')
parser.add_argument('-L', '--list', dest='list', action='append')
parser.add_argument('-F', '--flush', dest='flush', action='append')
parser.add_argument('-Z', '--zero', dest='zero', action='append')
parser.add_argument('-N', '--new-chain', dest='new-chain', action='append')
parser.add_argument('-X', '--delete-chain', dest='delete-chain', action='append')
parser.add_argument('-P', '--policy', dest='policy', action='append')
parser.add_argument('-E', '--rename-chain', dest='rename-chain', action='append')
# PARAMETERS
parser.add_argument('-p', '--protocol', dest='protocol', action='append')
parser.add_argument('-s', '--source', dest='source', action='append')
parser.add_argument('-d', '--destination', dest='destination', action='append')
parser.add_argument('-j', '--jump', dest='jump', action='append')
parser.add_argument('-g', '--goto', dest='goto', action='append')
parser.add_argument('-i', '--in-interface', dest='in-interface', action='append')
parser.add_argument('-o', '--out-interface', dest='out-interface', action='append')
parser.add_argument('-f', '--fragment', dest='fragment', action='append')
parser.add_argument('-c', '--set-counters', dest='set-counters', action='append')
# MATCH EXTENSIONS
parser.add_argument('-m', '--match', dest='match', action='append')
## addrtype
parser.add_argument('--src-type', dest='src-type', action='append')
parser.add_argument('--dst-type', dest='dst-type', action='append')
parser.add_argument('--limit-iface-in', dest='limit-iface-in', action='append')
parser.add_argument('--limit-iface-out', dest='limit-iface-out', action='append')
## ah
parser.add_argument('--ahspi', dest='ahspi', action='append')
## cluster
parser.add_argument('--cluster-total-nodes', dest='cluster-total-nodes', action='append')
parser.add_argument('--cluster-local-node', dest='cluster-local-node', action='append')
parser.add_argument('--cluster-local-nodemask', dest='cluster-local-nodemask', action='append')
parser.add_argument('--cluster-hash-seed', dest='cluster-hash-seed', action='append')
parser.add_argument('--h-length', dest='h-length', action='append')
parser.add_argument('--mangle-mac-s', dest='mangle-mac-s', action='append')
parser.add_argument('--mangle-mac-d', dest='mangle-mac-d', action='append')
## comment
parser.add_argument('--comment', dest='comment', action='append')
## connbytes
parser.add_argument('--connbytes', dest='connbytes', action='append')
parser.add_argument('--connbytes-dir', dest='connbytes-dir', action='append')
parser.add_argument('--connbytes-mode', dest='connbytes-mode', action='append')
## connlimit
parser.add_argument('--connlimit-above', dest='connlimit-above', action='append')
parser.add_argument('--connlimit-mask', dest='connlimit-mask', action='append')
## connmark
parser.add_argument('--mark', dest='mark', action='append')
## conntrack
parser.add_argument('--ctstate', dest='ctstate', action='append')
parser.add_argument('--ctproto', dest='ctproto', action='append')
parser.add_argument('--ctorigsrc', dest='ctorigsrc', action='append')
parser.add_argument('--ctorigdst', dest='ctorigdst', action='append')
parser.add_argument('--ctreplsrc', dest='ctreplsrc', action='append')
parser.add_argument('--ctrepldst', dest='ctrepldst', action='append')
parser.add_argument('--ctorigsrcport', dest='ctorigsrcport', action='append')
parser.add_argument('--ctorigdstport', dest='ctorigdstport', action='append')
parser.add_argument('--ctreplsrcport', dest='ctreplsrcport', action='append')
parser.add_argument('--ctrepldstport', dest='ctrepldstport', action='append')
parser.add_argument('--ctstatus', dest='ctstatus', action='append')
parser.add_argument('--ctexpire', dest='ctexpire', action='append')
## dccp
parser.add_argument('--sport', '--source-port', dest='source_port', action='append')
parser.add_argument('--dport', '--destination-port', dest='destination_port', action='append')
parser.add_argument('--dccp-types', dest='dccp-types', action='append')
parser.add_argument('--dccp-option', dest='dccp-option', action='append')
## dscp
parser.add_argument('--dscp', dest='dscp', action='append')
parser.add_argument('--dscp-class', dest='dscp-class', action='append')
## ecn
parser.add_argument('--ecn-tcp-cwr', dest='ecn-tcp-cwr', action='append')
parser.add_argument('--ecn-tcp-ece', dest='ecn-tcp-ece', action='append')
parser.add_argument('--ecn-ip-ect', dest='ecn-ip-ect', action='append')
## esp
parser.add_argument('--espspi', dest='espspi', action='append')
## hashlimit
parser.add_argument('--hashlimit-upto', dest='hashlimit-upto', action='append')
parser.add_argument('--hashlimit-above', dest='hashlimit-above', action='append')
parser.add_argument('--hashlimit-burst', dest='hashlimit-burst', action='append')
parser.add_argument('--hashlimit-mode', dest='hashlimit-mode', action='append')
parser.add_argument('--hashlimit-srcmask', dest='hashlimit-srcmask', action='append')
parser.add_argument('--hashlimit-dstmask', dest='hashlimit-dstmask', action='append')
parser.add_argument('--hashlimit-name', dest='hashlimit-name', action='append')
parser.add_argument('--hashlimit-htable-size', dest='hashlimit-htable-size', action='append')
parser.add_argument('--hashlimit-htable-max', dest='hashlimit-htable-max', action='append')
parser.add_argument('--hashlimit-htable-expire', dest='hashlimit-htable-expire', action='append')
parser.add_argument('--hashlimit-htable-gcinterval', dest='hashlimit-htable-gcinterval', action='append')
## helper
parser.add_argument('--helper', dest='helper', action='append')
## icmp
parser.add_argument('--icmp-type', dest='icmp-type', action='append')
## iprange
parser.add_argument('--src-range', dest='src-range', action='append')
parser.add_argument('--dst-range', dest='dst-range', action='append')
## length
parser.add_argument('--length', dest='length', action='append')
## limit
parser.add_argument('--limit', dest='limit', action='append')
parser.add_argument('--limit-burst', dest='limit-burst', action='append')
## mac
parser.add_argument('--mac-source', dest='mac-source', action='append')
## multiport
parser.add_argument('--source-ports', dest='source-ports', action='append')
parser.add_argument('--destination-ports', dest='destination-ports', action='append')
parser.add_argument('--ports', dest='ports', action='append')
## owner
parser.add_argument('--uid-owner', dest='uid-owner', action='append')
parser.add_argument('--gid-owner', dest='gid-owner', action='append')
parser.add_argument('--socket-exists', dest='socket-exists', action='append')
## physdev
parser.add_argument('--physdev-in', dest='physdev-in', action='append')
parser.add_argument('--physdev-out', dest='physdev-out', action='append')
parser.add_argument('--physdev-is-in', dest='physdev-is-in', action='append')
parser.add_argument('--physdev-is-out', dest='physdev-is-out', action='append')
parser.add_argument('--physdev-is-bridged', dest='physdev-is-bridged', action='append')
## pkttype
parser.add_argument('--pkt-type', dest='pkt-type', action='append')
## policy
parser.add_argument('--dir', dest='dir', action='append')
parser.add_argument('--pol', dest='pol', action='append')
parser.add_argument('--strict', dest='strict', action='append')
parser.add_argument('--reqid', dest='reqid', action='append')
parser.add_argument('--spi', dest='spi', action='append')
parser.add_argument('--proto', dest='proto', action='append')
parser.add_argument('--mode', dest='mode', action='append')
parser.add_argument('--tunnel-src', dest='tunnel-src', action='append')
parser.add_argument('--tunnel-dst', dest='tunnel-dst', action='append')
parser.add_argument('--next', dest='next', action='append')
## quota
parser.add_argument('--quota', dest='quota', action='append')
## rateest
parser.add_argument('--rateest1', dest='rateest1', action='append')
parser.add_argument('--rateest2', dest='rateest2', action='append')
parser.add_argument('--rateest-delta', dest='rateest-delta', action='append')
parser.add_argument('--rateest1-bps', dest='rateest1-bps', action='append')
parser.add_argument('--rateest2-bps', dest='rateest2-bps', action='append')
parser.add_argument('--rateest1-pps', dest='rateest1-pps', action='append')
parser.add_argument('--rateest2-pps', dest='rateest2-pps', action='append')
parser.add_argument('--rateest1-lt', dest='rateest1-lt', action='append')
parser.add_argument('--rateest1-gt', dest='rateest1-gt', action='append')
parser.add_argument('--rateest1-eq', dest='rateest1-eq', action='append')
parser.add_argument('--rateest-name', dest='rateest-name', action='append')
parser.add_argument('--rateest-interval', dest='rateest-interval', action='append')
parser.add_argument('--rateest-ewma', dest='rateest-ewma', action='append')
## realm
parser.add_argument('--realm', dest='realm', action='append')
## recent
parser.add_argument('--set', dest='set', action='append')
parser.add_argument('--name', dest='name', action='append')
parser.add_argument('--rsource', dest='rsource', action='append')
parser.add_argument('--rdest', dest='rdest', action='append')
parser.add_argument('--rcheck', dest='rcheck', action='append')
parser.add_argument('--update', dest='update', action='append')
parser.add_argument('--remove', dest='remove', action='append')
parser.add_argument('--seconds', dest='seconds', action='append')
parser.add_argument('--hitcount', dest='hitcount', action='append')
parser.add_argument('--rttl', dest='rttl', action='append')
## sctp
parser.add_argument('--chunk-types', dest='chunk-types', action='append')
## set
parser.add_argument('--match-set', dest='match-set', action='append')
## socket
parser.add_argument('--transparent', dest='transparent', action='append')
## state
parser.add_argument('--state', dest='state', action='append')
## statistic
parser.add_argument('--probability', dest='probability', action='append')
parser.add_argument('--every', dest='every', action='append')
parser.add_argument('--packet', dest='packet', action='append')
## string
parser.add_argument('--algo', dest='algo', action='append')
parser.add_argument('--from', dest='from', action='append')
parser.add_argument('--to', dest='to', action='append')
parser.add_argument('--string', dest='string', action='append')
parser.add_argument('--hex-string', dest='hex-string', action='append')
## tcp
parser.add_argument('--tcp-flags', dest='tcp-flags', action='append')
parser.add_argument('--syn', dest='syn', action='append')
parser.add_argument('--tcp-option', dest='tcp-option', action='append')
## tcpmss
parser.add_argument('--mss', dest='mss', action='append')
## time
parser.add_argument('--datestart', dest='datestart', action='append')
parser.add_argument('--datestop', dest='datestop', action='append')
parser.add_argument('--monthdays', dest='monthdays', action='append')
parser.add_argument('--weekdays', dest='weekdays', action='append')
parser.add_argument('--utc', dest='utc', action='append')
parser.add_argument('--localtz', dest='localtz', action='append')
## tos
parser.add_argument('--tos', dest='tos', action='append')
## ttl
parser.add_argument('--ttl-eq', dest='ttl-eq', action='append')
parser.add_argument('--ttl-gt', dest='ttl-gt', action='append')
parser.add_argument('--ttl-lt', dest='ttl-lt', action='append')
## u32
parser.add_argument('--u32', dest='u32', action='append')
# CHECKSUM
parser.add_argument('--checksum-fill', dest='checksum-fill', action='append')
# CLASSIFY
parser.add_argument('--set-class', dest='set-class', action='append')
# CLUSTERIP
parser.add_argument('--new', dest='new', action='append')
parser.add_argument('--hashmode', dest='hashmode', action='append')
parser.add_argument('--clustermac', dest='clustermac', action='append')
parser.add_argument('--total-nodes', dest='total-nodes', action='append')
parser.add_argument('--local-node', dest='local-node', action='append')
parser.add_argument('--hash-init', dest='hash-init', action='append')
# CONNMARK
parser.add_argument('--set-xmark', dest='set-xmark', action='append')
parser.add_argument('--save-mark', dest='save-mark', action='append')
parser.add_argument('--restore-mark', dest='restore-mark', action='append')
parser.add_argument('--and-mark', dest='and-mark', action='append')
parser.add_argument('--or-mark', dest='or-mark', action='append')
parser.add_argument('--xor-mark', dest='xor-mark', action='append')
parser.add_argument('--set-mark', dest='set-mark', action='append')
# DNAT
parser.add_argument('--to-destination', dest='to-destination', action='append')
parser.add_argument('--random', dest='random', action='append')
parser.add_argument('--persistent', dest='persistent', action='append')
# DSCP
parser.add_argument('--set-dscp', dest='set-dscp', action='append')
parser.add_argument('--set-dscp-class', dest='set-dscp-class', action='append')
# ECN
parser.add_argument('--ecn-tcp-remove', dest='ecn-tcp-remove', action='append')
# LOG
parser.add_argument('--log-level', dest='log-level', action='append')
parser.add_argument('--log-prefix', dest='log-prefix', action='append')
parser.add_argument('--log-tcp-sequence', dest='log-tcp-sequence', action='append')
parser.add_argument('--log-tcp-options', dest='log-tcp-options', action='append')
parser.add_argument('--log-ip-options', dest='log-ip-options', action='append')
parser.add_argument('--log-uid', dest='log-uid', action='append')
# NFLOG
parser.add_argument('--nflog-group', dest='nflog-group', action='append')
parser.add_argument('--nflog-prefix', dest='nflog-prefix', action='append')
parser.add_argument('--nflog-range', dest='nflog-range', action='append')
parser.add_argument('--nflog-threshold', dest='nflog-threshold', action='append')
# NFQUEUE
parser.add_argument('--queue-num', dest='queue-num', action='append')
parser.add_argument('--queue-balance', dest='queue-balance', action='append')
# RATEEST
parser.add_argument('--rateest-ewmalog', dest='rateest-ewmalog', action='append')
# REDIRECT
parser.add_argument('--to-ports', dest='to-ports', action='append')
# REJECT
parser.add_argument('--reject-with', dest='reject-with', action='append')
# SAME
parser.add_argument('--nodst', dest='nodst', action='append')
# SECMARK
parser.add_argument('--selctx', dest='selctx', action='append')
# SET
parser.add_argument('--add-set', dest='add-set', action='append')
parser.add_argument('--del-set', dest='del-set', action='append')
# SNAT
parser.add_argument('--to-source', dest='to-source', action='append')
# TCPMSS
parser.add_argument('--set-mss', dest='set-mss', action='append')
parser.add_argument('--clamp-mss-to-pmtu', dest='clamp-mss-to-pmtu', action='append')
# TCPOPTSTRIP
parser.add_argument('--strip-options', dest='strip-options', action='append')
# TOS
parser.add_argument('--set-tos', dest='set-tos', action='append')
parser.add_argument('--and-tos', dest='and-tos', action='append')
parser.add_argument('--or-tos', dest='or-tos', action='append')
parser.add_argument('--xor-tos', dest='xor-tos', action='append')
# TPROXY
parser.add_argument('--on-port', dest='on-port', action='append')
parser.add_argument('--on-ip', dest='on-ip', action='append')
parser.add_argument('--tproxy-mark', dest='tproxy-mark', action='append')
# TTL
parser.add_argument('--ttl-set', dest='ttl-set', action='append')
parser.add_argument('--ttl-dec', dest='ttl-dec', action='append')
parser.add_argument('--ttl-inc', dest='ttl-inc', action='append')
# ULOG
parser.add_argument('--ulog-nlgroup', dest='ulog-nlgroup', action='append')
parser.add_argument('--ulog-prefix', dest='ulog-prefix', action='append')
parser.add_argument('--ulog-cprange', dest='ulog-cprange', action='append')
parser.add_argument('--ulog-qthreshold', dest='ulog-qthreshold', action='append')
return parser
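# Minimal usage sketch (editor's illustration): _parse_conf() feeds each '-A'
# line from iptables-save through this parser, so a saved rule such as
#
#   -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
#
# parses into {'append': ['INPUT'], 'match': ['state'],
#              'state': ['RELATED,ESTABLISHED'], 'jump': ['ACCEPT']}
# with every value wrapped in a list because each option uses action='append'.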

84
salt/modules/keyboard.py Normal file
View File

@ -0,0 +1,84 @@
'''
Module for managing keyboards on posix-like systems.
'''
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on these platforms, specific service modules exist:
disable = [
'Windows',
]
if __grains__['os'] in disable:
return False
return 'keyboard'
def get_sys():
'''
Get current system keyboard setting
CLI Example::
salt '*' keyboard.get_sys
'''
cmd = ''
if 'Arch' in __grains__['os_family']:
cmd = 'grep KEYMAP /etc/rc.conf | grep -vE "^#"'
elif 'RedHat' in __grains__['os_family']:
cmd = 'grep LAYOUT /etc/sysconfig/keyboard | grep -vE "^#"'
elif 'Debian' in __grains__['os_family']:
cmd = 'grep XKBLAYOUT /etc/default/keyboard | grep -vE "^#"'
out = __salt__['cmd.run'](cmd).split('=')
ret = out[1].replace('"', '')
return ret
def set_sys(layout):
'''
Set current system keyboard setting
CLI Example::
salt '*' keyboard.set_sys dvorak
'''
if 'Arch' in __grains__['os_family']:
__salt__['file.sed']('/etc/rc.conf', '^KEYMAP=.*', 'KEYMAP={0}'.format(layout))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/keyboard', '^LAYOUT=.*', 'LAYOUT={0}'.format(layout))
elif 'Debian' in __grains__['os_family']:
__salt__['file.sed']('/etc/default/keyboard', '^XKBLAYOUT=.*', 'XKBLAYOUT={0}'.format(layout))
return layout
def get_x():
'''
Get current X keyboard setting
CLI Example::
salt '*' keyboard.get_x
'''
cmd = 'setxkbmap -query | grep layout'
out = __salt__['cmd.run'](cmd).split(':')
return out[1].strip()
def set_x(layout):
'''
Set current X keyboard setting
CLI Example::
salt '*' keyboard.set_x dvorak
'''
cmd = 'setxkbmap {0}'.format(layout)
__salt__['cmd.run'](cmd)
return layout

View File

@ -79,7 +79,7 @@ def start(job_label, runas=None):
'''
cmd = 'launchctl start {0}'.format(job_label, runas=runas)
return __salt__['cmd.run'](cmd, runas='marca')
return __salt__['cmd.run'](cmd, runas=runas)
def restart(job_label, runas=None):

72
salt/modules/locale.py Normal file
View File

@ -0,0 +1,72 @@
'''
Module for managing locales on posix-like systems.
'''
import os
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on these platforms, specific service modules exist:
disable = [
'Windows',
]
if __grains__['os'] in disable:
return False
return 'locale'
def list_avail():
'''
Lists available (compiled) locales
CLI Example::
salt '*' locale.list_avail
'''
cmd = 'locale -a'
out = __salt__['cmd.run'](cmd).split('\n')
return out
def get_locale():
'''
Get the current system locale
CLI Example::
salt '*' locale.get_locale
'''
cmd = ''
if 'Arch' in __grains__['os_family']:
cmd = 'grep "^LOCALE" /etc/rc.conf | grep -vE "^#"'
elif 'RedHat' in __grains__['os_family']:
cmd = 'grep LANG /etc/sysconfig/i18n | grep -vE "^#"'
elif 'Debian' in __grains__['os_family']:
cmd = 'grep LANG /etc/default/locale | grep -vE "^#"'
out = __salt__['cmd.run'](cmd).split('=')
ret = out[1].replace('"', '')
return ret
def set_locale(locale):
'''
Sets the current system locale
CLI Example::
salt '*' locale.set_locale 'en_US.UTF-8'
'''
if 'Arch' in __grains__['os_family']:
__salt__['file.sed']('/etc/rc.conf', '^LOCALE=.*', 'LOCALE="{0}"'.format(locale))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/i18n', '^LANG=.*', 'LANG="{0}"'.format(locale))
elif 'Debian' in __grains__['os_family']:
__salt__['file.sed']('/etc/default/locale', '^LANG=.*', 'LANG="{0}"'.format(locale))
return True

View File

@ -21,6 +21,7 @@ Module to provide MySQL compatibility to salt.
import time
import logging
import re
import sys
# Import third party libs
try:
@ -143,8 +144,9 @@ def query(database, query):
{{ salt['mysql.query']("mydb","SELECT info from mytable limit 1")['results'][0][0] }}
'''
#Doesn't do anything about sql warnings, e.g. empty values on an insert.
#I don't think it handles multiple queries at once, so adding "commit" might not work.
# Doesn't do anything about sql warnings, e.g. empty values on an insert.
# I don't think it handles multiple queries at once, so adding "commit"
# might not work.
ret = {}
db = connect(**{'db': database})
cur = db.cursor()
@ -801,6 +803,8 @@ def processlist():
ret = []
hdr=("Id", "User", "Host", "db", "Command","Time", "State",
"Info", "Rows_sent", "Rows_examined", "Rows_read")
log.debug('processlist')
db = connect()
cur = db.cursor()
cur.execute("SHOW FULL PROCESSLIST")
@ -808,9 +812,148 @@ def processlist():
row = cur.fetchone()
r = {}
for j in range(len(hdr)):
r[hdr[j]] = row[j]
try:
r[hdr[j]] = row[j]
except IndexError:
pass
ret.append(r)
cur.close()
return ret
def __do_query_into_hash(conn, sqlStr):
'''
Perform the query that is passed to it (sqlStr).
Returns:
results as a list of row dicts.
'''
mod = sys._getframe().f_code.co_name
log.debug("%s<--(%s)" % (mod, sqlStr))
rtnResults = []
try:
cursor = conn.cursor()
except Exception:
log.error("%s: Can't get cursor for SQL->%s" % (mod, sqlStr))
cursor.close()
log.debug(('%s-->' % mod))
return rtnResults
try:
rs = cursor.execute(sqlStr)
except Exception:
log.error("%s: try to execute : SQL->%s" % (mod, sqlStr))
cursor.close()
log.debug(('%s-->' % mod))
return rtnResults
rs = cursor.fetchall()
for rowData in rs:
colCnt = 0
row = {}
for colData in cursor.description:
colName = colData[0]
row[colName] = rowData[colCnt]
colCnt += 1
rtnResults.append(row)
cursor.close()
log.debug(('%s-->' % mod))
return rtnResults
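# Illustration (editor's note, not upstream code): each row comes back keyed
# by column name, so "SHOW MASTER STATUS" yields something like
#   [{'File': 'mysql-bin.000021', 'Position': 107,
#     'Binlog_Do_DB': '', 'Binlog_Ignore_DB': ''}]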
def get_master_status():
'''
Retrieves the master status from the minion.
Returns:
{'host.domain.com': {'Binlog_Do_DB': '',
'Binlog_Ignore_DB': '',
'File': 'mysql-bin.000021',
'Position': 107}}
CLI Example:
salt '*' mysql.get_master_status
'''
mod = sys._getframe().f_code.co_name
log.debug("%s<--" % (mod))
conn = connect()
rtnv = __do_query_into_hash(conn, "SHOW MASTER STATUS")
conn.close()
# if this minion is not a master, the query returns no rows
if (len(rtnv) == 0):
rtnv.append([])
log.debug("%s-->%d" % (mod, len(rtnv[0])))
return rtnv[0]
def get_slave_status():
'''
Retrieves the slave status from the minion.
Returns:
{'host.domain.com': {'Connect_Retry': 60,
'Exec_Master_Log_Pos': 107,
'Last_Errno': 0,
'Last_Error': '',
'Last_IO_Errno': 0,
'Last_IO_Error': '',
'Last_SQL_Errno': 0,
'Last_SQL_Error': '',
'Master_Host': 'comet.scion-eng.com',
'Master_Log_File': 'mysql-bin.000021',
'Master_Port': 3306,
'Master_SSL_Allowed': 'No',
'Master_SSL_CA_File': '',
'Master_SSL_CA_Path': '',
'Master_SSL_Cert': '',
'Master_SSL_Cipher': '',
'Master_SSL_Key': '',
'Master_SSL_Verify_Server_Cert': 'No',
'Master_Server_Id': 1,
'Master_User': 'replu',
'Read_Master_Log_Pos': 107,
'Relay_Log_File': 'klo-relay-bin.000071',
'Relay_Log_Pos': 253,
'Relay_Log_Space': 553,
'Relay_Master_Log_File': 'mysql-bin.000021',
'Replicate_Do_DB': '',
'Replicate_Do_Table': '',
'Replicate_Ignore_DB': '',
'Replicate_Ignore_Server_Ids': '',
'Replicate_Ignore_Table': '',
'Replicate_Wild_Do_Table': '',
'Replicate_Wild_Ignore_Table': '',
'Seconds_Behind_Master': 0,
'Skip_Counter': 0,
'Slave_IO_Running': 'Yes',
'Slave_IO_State': 'Waiting for master to send event',
'Slave_SQL_Running': 'Yes',
'Until_Condition': 'None',
'Until_Log_File': '',
'Until_Log_Pos': 0}}
CLI Example:
salt '*' mysql.get_slave_status
'''
mod = sys._getframe().f_code.co_name
log.debug("%s<--" % (mod))
conn = connect()
rtnv = __do_query_into_hash(conn, "SHOW SLAVE STATUS")
conn.close()
# if this minion is not a slave, the query returns no rows
if (len(rtnv) == 0):
rtnv.append([])
log.debug("%s-->%d" % (mod, len(rtnv[0])))
return rtnv[0]

View File

@ -1,13 +1,17 @@
'''
Package support for OpenBSD
'''
import os
import re
import logging
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
# XXX need a way of setting PKG_PATH instead of inheriting from the environment
def __virtual__():
'''
Set the virtual pkg module if the os is OpenBSD
@ -32,29 +36,6 @@ def _list_removed(old, new):
for pkg in old:
if pkg not in new:
pkgs.append(pkg)
return pkgs
def _compare_versions(old, new):
'''
Returns a dict that that displays old and new versions for a package after
install/upgrade of package.
'''
pkgs = {}
for npkg in new:
if npkg in old:
if old[npkg] == new[npkg]:
# no change in the package
continue
else:
# the package was here before and the version has changed
pkgs[npkg] = {'old': old[npkg],
'new': new[npkg]}
else:
# the package is freshly installed
pkgs[npkg] = {'old': '',
'new': new[npkg]}
return pkgs
@ -122,7 +103,7 @@ def version(name):
return ''
def install(name, *args, **kwargs):
def install(name=None, pkgs=None, sources=None, **kwargs):
'''
Install the passed package
@ -131,21 +112,49 @@ def install(name, *args, **kwargs):
{'<package>': {'old': '<old-version>',
'new': '<new-version>']}
CLI Example::
CLI Example, Install one package::
salt '*' pkg.install <package name>
CLI Example, Install more than one package::
salt '*' pkg.install pkgs='["<package name>", "<package name>"]'
CLI Example, Install more than one package from an alternate source (e.g. salt file-server, http, ftp, local filesystem)::
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
'''
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name,
pkgs,
sources)
if pkg_params is None or len(pkg_params) == 0:
return {}
# Get a list of the currently installed packages
old = _get_pkgs()
stem, flavor = (name.split('--') + [''])[:2]
name = '--'.join((stem, flavor))
# XXX it would be nice to be able to replace one flavor with another here
if stem in old:
cmd = 'pkg_add -u {0}'.format(name)
else:
cmd = 'pkg_add {0}'.format(name)
__salt__['cmd.retcode'](cmd)
for pkg in pkg_params:
if pkg_type == 'repository':
stem, flavor = (pkg.split('--') + [''])[:2]
pkg = '--'.join((stem, flavor))
if stem in old:
cmd = 'pkg_add -xu {0}'.format(pkg)
else:
cmd = 'pkg_add -x {0}'.format(pkg)
else:
cmd = 'pkg_add -x {0}'.format(pkg)
stderr = __salt__['cmd.run_all'](cmd).get('stderr', '')
if stderr:
log.error(stderr)
# Get a list of all the packages that are now installed.
new = _format_pkgs(_get_pkgs())
return _compare_versions(_format_pkgs(old), new)
# New way
return __salt__['pkg_resource.find_changes'](_format_pkgs(old), new)
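# Note on the flavor handling above (editor's sketch): OpenBSD package names
# encode a flavor after a double dash, so
#   ('vim--gtk2'.split('--') + [''])[:2]  ->  ['vim', 'gtk2']
# while a flavorless name still unpacks cleanly:
#   ('rsync'.split('--') + [''])[:2]      ->  ['rsync', '']
# The stem ('vim', 'rsync') is what is looked up in the installed package list
# to decide between 'pkg_add -xu' (update) and 'pkg_add -x' (fresh install).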
def remove(name):
@ -161,7 +170,7 @@ def remove(name):
old = _get_pkgs()
stem, flavor = (name.split('--') + [''])[:2]
if stem in old:
cmd = 'pkg_delete -D dependencies {0}'.format(stem)
cmd = 'pkg_delete -xD dependencies {0}'.format(stem)
__salt__['cmd.retcode'](cmd)
new = _format_pkgs(_get_pkgs())
return _list_removed(_format_pkgs(old), new)

76
salt/modules/pam.py Normal file
View File

@ -0,0 +1,76 @@
'''
Support for pam
'''
# Import Python libs
import os
import argparse
# Import Salt libs
import salt.utils
from salt.exceptions import SaltException
def __virtual__():
'''
Only load the module if the libpam shared library is present
'''
if os.path.exists('/usr/lib/libpam.so'):
return 'pam'
return False
def _parse(contents=None, file_name=None):
'''
Parse a standard pam config file
'''
if contents:
pass
elif file_name and os.path.exists(file_name):
f = open(file_name, 'r')
contents = f.read()
f.close()
else:
return False
rules = []
for line in contents.splitlines():
if not line:
continue
if line.startswith('#'):
continue
control_flag = ''
module = ''
arguments = []
comps = line.split()
interface = comps[0]
position = 1
if comps[1].startswith('['):
control_flag = comps[1].replace('[', '')
for part in comps[2:]:
position += 1
if part.endswith(']'):
control_flag += ' {0}'.format(part.replace(']', ''))
position += 1
break
else:
control_flag += ' {0}'.format(part)
else:
control_flag = comps[1]
position += 1
module = comps[position]
if len(comps) > position:
position += 1
arguments = comps[position:]
rules.append({'interface': interface,
'control_flag': control_flag,
'module': module,
'arguments': arguments})
return rules
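# Example (editor's sketch) of what _parse() returns for a typical line such
# as "auth required pam_unix.so nullok":
#   [{'interface': 'auth',
#     'control_flag': 'required',
#     'module': 'pam_unix.so',
#     'arguments': ['nullok']}]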
def read_file(file_name):
'''
This is just a test function, to make sure parsing works
'''
return _parse(file_name=file_name)

304
salt/modules/parted.py Normal file
View File

@ -0,0 +1,304 @@
'''
Module for managing partitions on posix-like systems.
Some functions may not be available, depending on your version of parted.
Check man 8 parted for more information, or the online docs at:
http://www.gnu.org/software/parted/manual/html_chapter/parted_2.html
'''
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on these platforms, specific service modules exist:
disable = [
'Windows',
]
if __grains__['os'] in disable:
return False
return 'partition'
def probe(device=''):
'''
Ask the kernel to update its local partition data
CLI Examples::
salt '*' partition.probe
salt '*' partition.probe /dev/sda
'''
cmd = 'partprobe {0}'.format(device)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def part_list(device, unit=None):
'''
Ask the kernel to update its local partition data
CLI Examples::
salt '*' partition.partlist /dev/sda
salt '*' partition.partlist /dev/sda unit=s
salt '*' partition.partlist /dev/sda unit=kB
'''
if unit:
cmd = 'parted -m -s {0} unit {1} print'.format(device, unit)
else:
cmd = 'parted -m -s {0} print'.format(device)
out = __salt__['cmd.run'](cmd).splitlines()
ret = {'info': {}, 'partitions': {}}
mode = 'info'
for line in out:
if line.startswith('BYT'):
continue
comps = line.replace(';', '').split(':')
if mode == 'info':
if len(comps) == 8:
ret['info'] = {
'disk': comps[0],
'size': comps[1],
'interface': comps[2],
'logical sector': comps[3],
'physical sector': comps[4],
'partition table': comps[5],
'model': comps[6],
'disk flags': comps[7]}
mode = 'partitions'
else:
ret['partitions'][comps[0]] = {
'number': comps[0],
'start': comps[1],
'end': comps[2],
'size': comps[3],
'type': comps[4],
'file system': comps[5],
'flags': comps[6]}
return ret
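# Example (editor's illustration, hypothetical device) of the machine-readable
# 'parted -m -s <device> print' output this parses:
#
#   BYT;
#   /dev/sda:500GB:scsi:512:512:msdos:ATA Disk:;
#   1:1049kB:500GB:500GB:ext4::boot;
#
# which, following the field mapping in the code above, becomes roughly:
#   {'info': {'disk': '/dev/sda', 'size': '500GB', 'interface': 'scsi',
#             'logical sector': '512', 'physical sector': '512',
#             'partition table': 'msdos', 'model': 'ATA Disk', 'disk flags': ''},
#    'partitions': {'1': {'number': '1', 'start': '1049kB', 'end': '500GB',
#                         'size': '500GB', 'type': 'ext4',
#                         'file system': '', 'flags': 'boot'}}}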
def align_check(device, part_type, partition):
'''
partition.align_check device part_type partition
Check if partition satisfies the alignment constraint of part_type.
Type must be "minimal" or "optimal".
CLI Example::
salt '*' partition.align_check /dev/sda minimal 1
'''
cmd = 'parted -m -s {0} align-check {1} {2}'.format(device, part_type, partition)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def check(device, minor):
'''
partition.check device minor
Checks if the file system on partition <minor> has any errors.
CLI Example::
salt '*' partition.check /dev/sda 1
'''
cmd = 'parted -m -s {0} check {1}'.format(device, minor)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def cp(device, from_minor, to_minor):
'''
partition.cp device from_minor to_minor
Copies the file system on the partition <from-minor> to partition
<to-minor>, deleting the original contents of the destination
partition.
CLI Example::
salt '*' partition.cp /dev/sda 2 3
'''
cmd = 'parted -m -s {0} cp {1} {2}'.format(device, from_minor, to_minor)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def mkfs(device, minor, fs_type):
'''
partition.mkfs device minor fs_type
Makes a file system <fs_type> on partition <minor>, destroying all data
that resides on that partition. <fs_type> must be one of "ext2",
"fat32", "fat16", "linux-swap" or "reiserfs" (if libreiserfs is
installed)
CLI Example::
salt '*' partition.mkfs /dev/sda 2 fat32
'''
cmd = 'parted -m -s {0} mkfs {1} {2}'.format(device, minor, fs_type)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def mklabel(device, label_type):
'''
partition.mklabel device label_type
Create a new disklabel (partition table) of label_type.
Type should be one of "aix", "amiga", "bsd", "dvh", "gpt", "loop", "mac",
"msdos", "pc98", or "sun".
CLI Example::
salt '*' partition.mklabel /dev/sda msdos
'''
cmd = 'parted -m -s {0} mklabel {1}'.format(device, label_type)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def mkpart(device, part_type, fs_type, start, end):
'''
partition.mkpart device part_type fs_type start end
Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Example::
salt '*' partition.mkpart /dev/sda primary fat32 0 639
'''
cmd = 'parted -m -s {0} mkpart {1} {2} {3} {4}'.format(device, part_type, fs_type, start, end)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def mkpartfs(device, part_type, fs_type, start, end):
'''
partition.mkpartfs device part_type fs_type start end
Make a <part_type> partition with a new filesystem of <fs_type>, beginning
at <start> and ending at <end> (by default in megabytes). <part_type>
should be one of "primary", "logical", or "extended". <fs_type> must be
one of "ext2", "fat32", "fat16", "linux-swap" or "reiserfs" (if
libreiserfs is installed)
CLI Example::
salt '*' partition.mkpartfs /dev/sda logical ext2 440 670
'''
cmd = 'parted -m -s {0} mkpart {1} {2} {3} {4}'.format(device, part_type, fs_type, start, end)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def name(device, partition, name):
'''
partition.name device partition name
Set the name of partition to name. This option works only on Mac, PC98,
and GPT disklabels. The name can be placed in quotes, if necessary.
CLI Example::
salt '*' partition.name /dev/sda 1 'My Documents'
'''
cmd = 'parted -m -s {0} name {1} {2}'.format(device, partition, name)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def rescue(device, start, end):
'''
partition.rescue device start end
Rescue a lost partition that was located somewhere between start and end.
If a partition is found, parted will ask if you want to create an
entry for it in the partition table.
CLI Example::
salt '*' partition.rescue /dev/sda 0 8056
'''
cmd = 'parted -m -s {0} rescue {1} {2}'.format(device, start, end)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def resize(device, minor, start, end):
'''
partition.resize device minor start end
Resizes the partition with number <minor>. The partition will start <start>
from the beginning of the disk, and end <end> from the beginning of the
disk. resize never changes the minor number. Extended partitions can be
resized, so long as the new extended partition completely contains all
logical partitions.
CLI Example::
salt '*' partition.resize /dev/sda 3 200 850
'''
cmd = 'parted -m -s {0} resize {1} {2} {3}'.format(device, minor, start, end)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def rm(device, minor):
'''
partition.rm device minor
Removes the partition with number <minor>.
CLI Example::
salt '*' partition.rm /dev/sda 5
'''
cmd = 'parted -m -s {0} rm {1}'.format(device, minor)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def set(device, minor, flag, state):
'''
partition.set device minor flag state
Changes a flag on the partition with number <minor>. A flag can be either
"on" or "off". Some or all of these flags will be available, depending
on what disk label you are using.
CLI Example::
salt '*' partition.set /dev/sda 1 boot on
'''
cmd = 'parted -m -s {0} set {1} {2} {3}'.format(device, minor, flag, state)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def toggle(device, partition, flag):
'''
partition.toggle device partition flag
Toggle the state of <flag> on <partition>
CLI Example::
salt '*' partition.toggle /dev/sda 1 boot
'''
cmd = 'parted -m -s {0} toggle {1} {2}'.format(device, partition, flag)
out = __salt__['cmd.run'](cmd).splitlines()
return out

View File

@ -1,75 +0,0 @@
'''
Module for managing partitions on posix-like systems
'''
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on these platorms, specific service modules exist:
disable = [
'Windows',
]
if __grains__['os'] in disable:
return False
return 'partition'
def probe(device=''):
'''
Ask the kernel to update its local partition data
CLI Examples::
salt '*' partition.probe
salt '*' partition.probe /dev/sda
'''
cmd = 'partprobe {0}'.format(device)
out = __salt__['cmd.run'](cmd).splitlines()
return out
def partlist(device, unit=None):
'''
Ask the kernel to update its local partition data
CLI Examples::
salt '*' partition.partlist /dev/sda
salt '*' partition.partlist /dev/sda unit=s
salt '*' partition.partlist /dev/sda unit=kB
'''
if unit:
cmd = 'parted -s {0} unit {1} print'.format(device, unit)
else:
cmd = 'parted -s {0} print'.format(device)
out = __salt__['cmd.run'](cmd).splitlines()
ret = {'info': [], 'partitions': {}}
mode = 'info'
for line in out:
if not line:
continue
if mode == 'info':
if line.startswith('Number'):
mode = 'partitions'
else:
ret['info'].append(line)
else:
comps = line.strip().split()
ret['partitions'][comps[0]] = {
'number': comps[0],
'start': comps[1],
'end': comps[2],
'size': comps[3],
'type': comps[4]}
if len(comps) > 5:
ret['partitions'][comps[0]]['file system'] = comps[5]
if len(comps) > 6:
ret['partitions'][comps[0]]['flags'] = comps[6:]
return ret

View File

@ -16,6 +16,7 @@ from salt.exceptions import CommandExecutionError, CommandNotFoundError
logger = logging.getLogger(__name__)
def _get_pip_bin(bin_env):
'''
Return the pip command to call, either from a virtualenv, an argument
@ -458,7 +459,9 @@ def freeze(bin_env=None,
"Could not find the path to the virtualenv's 'activate' binary"
)
cmd = 'source {0}; {1} freeze'.format(activate, pip_bin)
# We use dot(.) instead of source because it's apparently the better and/or
# more supported way to source files on the various "major" linux shells.
cmd = '. {0}; {1} freeze'.format(activate, pip_bin)
result = __salt__['cmd.run_all'](cmd, runas=runas, cwd=cwd)

View File

@ -5,6 +5,7 @@ Resources needed by pkg providers
import logging
import os
import re
import yaml
from pprint import pformat
from types import StringTypes
@ -100,14 +101,14 @@ def _pack_pkgs(sources):
'''
if type(sources) in StringTypes:
try:
# Safely eval the string data into a list
sources = eval(sources,{'__builtins__': None},{})
except Exception as e:
sources = yaml.load(sources)
except yaml.parser.ParserError as e:
log.error(e)
return []
if not isinstance(sources,list) \
or [x for x in sources if type(x) not in StringTypes]:
log.error('Invalid input: {0}'.format(pformat(source)))
log.error('Input must be a list of strings')
return []
return sources
@ -122,15 +123,15 @@ def _pack_sources(sources):
'''
if type(sources) in StringTypes:
try:
# Safely eval the string data into a list of dicts
sources = eval(sources,{'__builtins__': None},{})
except Exception as e:
sources = yaml.load(sources)
except yaml.parser.ParserError as e:
log.error(e)
return {}
ret = {}
for source in sources:
if (not isinstance(source,dict)) or len(source) != 1:
log.error('Invalid input: {0}'.format(pformat(sources)))
log.error('Input must be a list of 1-element dicts')
return {}
else:
ret.update(source)
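# Editor's sketch of the change above: the string forms accepted from the CLI
# are now parsed with yaml.load() instead of a restricted eval(), e.g.
#
#   yaml.load('["vim", "tmux"]')
#       -> ['vim', 'tmux']                             # _pack_pkgs input
#   yaml.load('[{"SMClgcc346": "salt://pkgs/gcc.pkg"}]')
#       -> [{'SMClgcc346': 'salt://pkgs/gcc.pkg'}]     # _pack_sources input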
@ -220,7 +221,7 @@ def parse_targets(name=None, pkgs=None, sources=None):
# Check metadata to make sure the name passed matches the source
if __grains__['os_family'] not in ('Solaris',) \
or __grains__['os'] not in ('Gentoo',):
and __grains__['os'] not in ('Gentoo', 'OpenBSD',):
problems = _verify_binary_pkg(srcinfo)
# If any problems are found in the caching or metadata parsing done
# in the above for loop, log each problem and return None,None,

View File

@ -2,13 +2,15 @@
Execute puppet routines
'''
import re
from salt import utils
__outputter__ = {
'run': 'txt',
'noop': 'txt',
'fact': 'txt',
'facts':None,
'facts': None,
}
def _check_puppet():
@ -35,49 +37,115 @@ def _format_fact(output):
value = None
return (fact, value)
class _Puppet(object):
'''
Puppet helper class. Used to format command for execution.
'''
def __init__(self):
'''
Set up a puppet instance, based on the premise that default usage is to
run 'puppet agent --test'. Configuration and run states are stored in
the default locations.
'''
self.subcmd = 'agent'
self.subcmd_args = [] # eg. /a/b/manifest.pp
def run(tags=None):
self.kwargs = {'color': 'false'} # eg. --tags=apache::server
self.args = [] # eg. --noop
self.vardir = '/var/lib/puppet'
self.confdir = '/etc/puppet'
def __repr__(self):
'''
Format the command string to executed using cmd.run_all.
'''
cmd = 'puppet {subcmd} --vardir {vardir} --confdir {confdir}'.format(**self.__dict__)
args = ' '.join(self.subcmd_args)
args += ''.join([' --{0}'.format(k) for k in self.args]) # single spaces
args += ''.join([' --{0} {1}'.format(k, v) for k, v in self.kwargs.items()])
return '{0} {1}'.format(cmd, args)
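# Illustration (editor's note): for the default 'agent' subcommand, repr() of
# an instance expands to roughly
#   puppet agent --vardir /var/lib/puppet --confdir /etc/puppet
#       --onetime --verbose ... --color false
# and that string is what run() below hands to cmd.run_all.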
def arguments(self, args=[]):
'''
Read in arguments for the current subcommand. These are added to the command
line without '--' prepended. Any others are passed through as standard options
with the double hyphen prefixed.
'''
# permits deleting elements rather than using slices
args = list(args)
# match against all known/supported subcmds
if self.subcmd == 'apply':
# apply subcommand requires a manifest file to execute
self.subcmd_args = [args[0]]
del args[0]
if self.subcmd == 'agent':
# no arguments are required
args.extend(['onetime', 'verbose', 'ignorecache', 'no-daemonize', 'no-usecacheonfailure', 'no-splay', 'show_diff'])
# finally do this after subcmd has been matched for all remaining args
self.args = args
def run(*args, **kwargs):
'''
Execute a puppet run and return a dict with the stderr, stdout,
return code, etc. If an argument is specified, it is treated as
a comma separated list of tags passed to puppet --test --tags:
return code, etc. The first positional argument given is checked as a
subcommand. Following positional arguments should be ordered with arguments
required by the subcommand first, followed by non-keyvalue pair options.
Tags are specified by a tag keyword and comma separated list of values. --
http://projects.puppetlabs.com/projects/1/wiki/Using_Tags
CLI Examples::
salt '*' puppet.run
salt '*' puppet.run basefiles::edit,apache::server
salt '*' puppet.run tags=basefiles::edit,apache::server
salt '*' puppet.run debug
salt '*' puppet.run apply /a/b/manifest.pp modulepath=/a/b/modules tags=basefiles::edit,apache::server
'''
_check_puppet()
if not tags:
cmd = 'puppet agent --test'
puppet = _Puppet()
if args:
# Per the puppet documentation the action must come first; we make the same
# assertion here. The list of supported subcommands must match those
# defined in _Puppet.arguments()
if args[0] in ['agent', 'apply']:
puppet.subcmd = args[0]
puppet.arguments(args[1:])
else:
cmd = 'puppet agent --test --tags "{0}"'.format(tags)
# args will exist as an empty list even if none have been provided
puppet.arguments(args)
return __salt__['cmd.run_all'](cmd)
puppet.kwargs.update(utils.clean_kwargs(**kwargs))
def noop(tags=None):
return __salt__['cmd.run_all'](repr(puppet))
def noop(*args, **kwargs):
'''
Execute a puppet noop run and return a dict with the stderr, stdout,
return code, etc. If an argument is specified, it is treated as a
comma separated list of tags passed to puppet --test --noop --tags
return code, etc. Usage is the same as for puppet.run.
CLI Example::
salt '*' puppet.noop
salt '*' puppet.noop web::server,django::base
salt '*' puppet.noop tags=basefiles::edit,apache::server
salt '*' puppet.noop debug
salt '*' puppet.noop apply /a/b/manifest.pp modulepath=/a/b/modules tags=basefiles::edit,apache::server
'''
_check_puppet()
if not tags:
cmd = 'puppet agent --test --noop'
else:
cmd = 'puppet agent --test --tags "{0}" --noop'.format(tags)
return __salt__['cmd.run_all'](cmd)
args += ('noop',)
return run(*args, **kwargs)
def facts():
'''
@ -96,7 +164,8 @@ def facts():
# parse it into a nice dictionary for using
# elsewhere
for line in output.splitlines():
if not line: continue
if not line:
continue
fact, value = _format_fact(line)
if not fact:
continue

View File

@ -10,6 +10,9 @@ import StringIO
# import third party libs
import jinja2
# Import Salt libs
import salt.utils
# Set up logging
log = logging.getLogger(__name__)

View File

@ -29,43 +29,42 @@ def _list_removed(old, new):
return pkgs
def _compare_versions(old, new):
def _write_adminfile(kwargs):
'''
Returns a dict that that displays old and new versions for a package after
install/upgrade of package.
Create a temporary adminfile based on the keyword arguments passed to
pkg.install.
'''
pkgs = {}
for npkg in new:
if npkg in old:
if old[npkg] == new[npkg]:
# no change in the package
continue
else:
# the package was here before and the version has changed
pkgs[npkg] = {'old': old[npkg],
'new': new[npkg]}
else:
# the package is freshly installed
pkgs[npkg] = {'old': '',
'new': new[npkg]}
return pkgs
# Set the adminfile default variables
email = kwargs.get('email', '')
instance = kwargs.get('instance', 'quit')
partial = kwargs.get('partial', 'nocheck')
runlevel = kwargs.get('runlevel', 'nocheck')
idepend = kwargs.get('idepend', 'nocheck')
rdepend = kwargs.get('rdepend', 'nocheck')
space = kwargs.get('space', 'nocheck')
setuid = kwargs.get('setuid', 'nocheck')
conflict = kwargs.get('conflict', 'nocheck')
action = kwargs.get('action', 'nocheck')
basedir = kwargs.get('basedir', 'default')
# Make tempfile to hold the adminfile contents.
fd, adminfile = salt.utils.mkstemp(prefix="salt-", close_fd=False)
def _get_pkgs():
'''
Get a full list of the package installed on the machine
'''
pkg = {}
cmd = '/usr/bin/pkginfo -x'
# Write to file then close it.
os.write(fd, 'email={0}\n'.format(email))
os.write(fd, 'instance={0}\n'.format(instance))
os.write(fd, 'partial={0}\n'.format(partial))
os.write(fd, 'runlevel={0}\n'.format(runlevel))
os.write(fd, 'idepend={0}\n'.format(idepend))
os.write(fd, 'rdepend={0}\n'.format(rdepend))
os.write(fd, 'space={0}\n'.format(space))
os.write(fd, 'setuid={0}\n'.format(setuid))
os.write(fd, 'conflict={0}\n'.format(conflict))
os.write(fd, 'action={0}\n'.format(action))
os.write(fd, 'basedir={0}\n'.format(basedir))
os.close(fd)
line_count = 0
for line in __salt__['cmd.run'](cmd).splitlines():
if line_count % 2 == 0:
namever = line.split()[0].strip()
if line_count % 2 == 1:
pkg[namever] = line.split()[1].strip()
line_count = line_count + 1
return pkg
return adminfile
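# Example (editor's sketch) of the temporary adminfile written with the
# defaults above:
#
#   email=
#   instance=quit
#   partial=nocheck
#   runlevel=nocheck
#   idepend=nocheck
#   rdepend=nocheck
#   space=nocheck
#   setuid=nocheck
#   conflict=nocheck
#   action=nocheck
#   basedir=default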
def list_pkgs():
@ -78,7 +77,17 @@ def list_pkgs():
salt '*' pkg.list_pkgs
'''
return _get_pkgs()
pkg = {}
cmd = '/usr/bin/pkginfo -x'
line_count = 0
for line in __salt__['cmd.run'](cmd).splitlines():
if line_count % 2 == 0:
namever = line.split()[0].strip()
if line_count % 2 == 1:
pkg[namever] = line.split()[1].strip()
line_count = line_count + 1
return pkg
def version(name):
@ -109,7 +118,7 @@ def available_version(name):
return version(name)
def install(name, refresh=False, **kwargs):
def install(name=None, refresh=False, sources=None, **kwargs):
'''
Install the passed package. Can install packages from the following
sources::
@ -122,23 +131,23 @@ def install(name, refresh=False, **kwargs):
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>']}
'new': '<new-version>']}
CLI Example, installing a datastream pkg that already exists on the
minion::
salt '*' pkg.install <package name once installed> source=/dir/on/minion/<package filename>
salt '*' pkg.install SMClgcc346 source=/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg
salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example, installing a datastream pkg that exists on the salt master::
salt '*' pkg.install <package name once installed> source='salt://srv/salt/pkgs/<package filename>'
salt '*' pkg.install SMClgcc346 source='salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg'
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'
CLI Example, installing a datastream pkg that exists on a HTTP server::
salt '*' pkg.install <package name once installed> source='http://packages.server.com/<package filename>'
salt '*' pkg.install SMClgcc346 source='http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg'
salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'
If working with solaris zones and you want to install a package only in the
global zone you can pass 'current_zone_only=True' to salt to have the
@ -149,7 +158,7 @@ def install(name, refresh=False, **kwargs):
CLI Example, installing a datastream package only in the global zone::
salt 'global_zone' pkg.install SMClgcc346 source=/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg current_zone_only=True
salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True
By default salt automatically provides an adminfile, to automate package
installation, with these options set:
@ -179,97 +188,72 @@ def install(name, refresh=False, **kwargs):
CLI Example - Overriding the 'instance' adminfile option when calling the
module directly::
salt '*' pkg.install <package name once installed> source='salt://srv/salt/pkgs/<package filename>' instance="overwrite"
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"
CLI Example - Overriding the 'instance' adminfile option when used in a
state::
SMClgcc346:
pkg.installed:
- source: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- sources:
- SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
- instance: overwrite
Note: the ID declaration is ignored, as the package name is read from the
"sources" parameter.
CLI Example - Providing your own adminfile when calling the module
directly::
salt '*' pkg.install <package name once installed> source='salt://srv/salt/pkgs/<package filename>' admin_source='salt://srv/salt/pkgs/<adminfile filename>'
salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'
CLI Example - Providing your own adminfile when using states::
<package name once installed>:
<pkg name>:
pkg.installed:
- source: salt://srv/salt/pkgs/<package filename>
- admin_source: salt://srv/salt/pkgs/<adminfile filename>
- sources:
- <pkg name>: salt://pkgs/<pkg filename>
- admin_source: salt://pkgs/<adminfile filename>
Note: the ID declaration is ignored, as the package name is read from the
"sources" parameter.
'''
if not 'source' in kwargs:
return 'source option required with solaris pkg installs'
else:
if (kwargs['source']).startswith('salt://') \
or (kwargs['source']).startswith('http://') \
or (kwargs['source']).startswith('https://') \
or (kwargs['source']).startswith('ftp://'):
pkgname = __salt__['cp.cache_file'](kwargs['source'])
else:
pkgname = (kwargs['source'])
pkg_params,pkg_type = __salt__['pkg_resource.parse_targets'](
name,kwargs.get('pkgs'),sources)
if pkg_params is None or len(pkg_params) == 0:
return {}
if 'admin_source' in kwargs:
adminfile = __salt__['cp.cache_file'](kwargs['admin_source'])
else:
# Set the adminfile default variables
email = kwargs.get('email', '')
instance = kwargs.get('instance', 'quit')
partial = kwargs.get('partial', 'nocheck')
runlevel = kwargs.get('runlevel', 'nocheck')
idepend = kwargs.get('idepend', 'nocheck')
rdepend = kwargs.get('rdepend', 'nocheck')
space = kwargs.get('space', 'nocheck')
setuid = kwargs.get('setuid', 'nocheck')
conflict = kwargs.get('conflict', 'nocheck')
action = kwargs.get('action', 'nocheck')
basedir = kwargs.get('basedir', 'default')
# Make tempfile to hold the adminfile contents.
fd, adminfile = salt.utils.mkstemp(prefix="salt-", close_fd=False)
# Write to file then close it.
os.write(fd, 'email={0}\n'.format(email))
os.write(fd, 'instance={0}\n'.format(instance))
os.write(fd, 'partial={0}\n'.format(partial))
os.write(fd, 'runlevel={0}\n'.format(runlevel))
os.write(fd, 'idepend={0}\n'.format(idepend))
os.write(fd, 'rdepend={0}\n'.format(rdepend))
os.write(fd, 'space={0}\n'.format(space))
os.write(fd, 'setuid={0}\n'.format(setuid))
os.write(fd, 'conflict={0}\n'.format(conflict))
os.write(fd, 'action={0}\n'.format(action))
os.write(fd, 'basedir={0}\n'.format(basedir))
os.close(fd)
adminfile = _write_adminfile(kwargs)
# Get a list of the packages before install so we can diff after to see
# what got installed.
old = _get_pkgs()
old = list_pkgs()
cmd = '/usr/sbin/pkgadd -n -a {0} '.format(adminfile)
# Global only?
# Only makes sense in a global zone but works fine in non-globals.
if kwargs.get('current_zone_only') == 'True':
cmd += '-G '
cmd += '-d {0} \'all\''.format(pkgname)
# Install the package
__salt__['cmd.retcode'](cmd)
for pkg in pkg_params:
temp_cmd = cmd + '-d {0} "all"'.format(pkg)
# Install the package(s)
stderr = __salt__['cmd.run_all'](temp_cmd).get('stderr','')
if stderr:
log.error(stderr)
# Get a list of the packages again, including newly installed ones.
new = _get_pkgs()
new = list_pkgs()
# Remove the temp adminfile
if not 'admin_source' in kwargs:
os.unlink(adminfile)
# Return a list of the new package installed.
return _compare_versions(old, new)
return __salt__['pkg_resource.find_changes'](old,new)
def remove(name, **kwargs):
@ -345,7 +329,7 @@ def remove(name, **kwargs):
os.close(fd)
# Get a list of the currently installed pkgs.
old = _get_pkgs()
old = list_pkgs()
# Remove the package
cmd = '/usr/sbin/pkgrm -n -a {0} {1}'.format(adminfile, name)
@ -356,7 +340,7 @@ def remove(name, **kwargs):
os.unlink(adminfile)
# Get a list of the packages after the uninstall
new = _get_pkgs()
new = list_pkgs()
# Compare the pre and post remove package objects and report the
# uninstalled pkgs.
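For reference, a minimal sketch (not part of this diff) of the adminfile that the new _write_adminfile() helper produces when no keyword arguments are overridden; every line comes from the defaults assigned in that helper::

    email=
    instance=quit
    partial=nocheck
    runlevel=nocheck
    idepend=nocheck
    rdepend=nocheck
    space=nocheck
    setuid=nocheck
    conflict=nocheck
    action=nocheck
    basedir=default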

View File

@ -4,13 +4,14 @@ Subversion SCM
import re
import shlex
from subprocess import list2cmdline
from salt import utils, exceptions
_INI_RE = re.compile(r"^([^:]+):\s+(\S.*)$", re.M)
def _check_svn():
"""Check for svn on this node."""
'''Check for svn on this node.'''
utils.check_or_die('svn')
@ -39,10 +40,9 @@ def _run_svn(cmd, cwd, user, username, opts, **kwargs):
'''
cmd = 'svn --non-interactive {0} '.format(cmd)
if username:
opts += ("--username", username)
opts += ('--username', username)
if opts:
cmd += '"{0}"'.format('" "'.join(
[optstr.replace('"', r'\"') for optstr in opts]))
cmd += list2cmdline(opts)
result = __salt__['cmd.run_all'](cmd, cwd=cwd, runas=user, **kwargs)
@ -86,7 +86,7 @@ def info(cwd, targets=None, user=None, username=None, fmt='str'):
return infos
info_list = []
for infosplit in infos.split("\n\n"):
for infosplit in infos.split('\n\n'):
info_list.append(_INI_RE.findall(infosplit))
if fmt == 'list':
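A quick illustration (not part of this diff) of what switching to subprocess.list2cmdline buys: it performs the quoting that the removed join/escape code did by hand, wrapping only the arguments that need it. A sketch with sample options::

    from subprocess import list2cmdline

    # Arguments containing spaces are quoted automatically.
    print(list2cmdline(['--username', 'deploy', 'a commit message']))
    # --username deploy "a commit message"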

146
salt/modules/timezone.py Normal file
View File

@ -0,0 +1,146 @@
'''
Module for managing timezone on posix-like systems.
'''
import os
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on these platforms, specific service modules exist:
disable = [
'Windows',
]
if __grains__['os'] in disable:
return False
return 'timezone'
def get_zone():
'''
Get current timezone (i.e. America/Denver)
CLI Example::
salt '*' timezone.get_zone
'''
cmd = ''
if 'Arch' in __grains__['os_family']:
cmd = 'grep TIMEZONE /etc/rc.conf | grep -vE "^#"'
elif 'RedHat' in __grains__['os_family']:
cmd = 'grep ZONE /etc/sysconfig/clock | grep -vE "^#"'
elif 'Debian' in __grains__['os_family']:
return open('/etc/timezone','r').read()
out = __salt__['cmd.run'](cmd).split('=')
ret = out[1].replace('"', '')
return ret
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example::
salt '*' timezone.get_zonecode
'''
cmd = 'date +%Z'
out = __salt__['cmd.run'](cmd)
return out
def get_offset():
'''
Get current numeric timezone offset from UTC (i.e. -0700)
CLI Example::
salt '*' timezone.get_offset
'''
cmd = 'date +%z'
out = __salt__['cmd.run'](cmd)
return out
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone
CLI Example::
salt '*' timezone.set_zone 'America/Denver'
'''
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
return 'Zone does not exist: {0}'.format(zonepath)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
__salt__['file.sed']('/etc/rc.conf', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
open('/etc/timezone', 'w').write(timezone)
return True
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example::
salt '*' timezone.get_hwclock
'''
cmd = ''
if 'Arch' in __grains__['os_family']:
cmd = 'grep HARDWARECLOCK /etc/rc.conf | grep -vE "^#"'
out = __salt__['cmd.run'](cmd).split('=')
return out[1].replace('"', '')
elif 'RedHat' in __grains__['os_family']:
cmd = 'tail -n 1 /etc/adjtime'
return __salt__['cmd.run'](cmd)
elif 'Debian' in __grains__['os_family']:
cmd = 'grep "UTC=" /etc/default/rcS | grep -vE "^#"'
out = __salt__['cmd.run'](cmd).split('=')
if out[1] == 'yes':
return 'UTC'
else:
return 'localtime'
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example::
salt '*' timezone.set_hwclock UTC
'''
zonepath = '/usr/share/zoneinfo/{0}'.format(clock)
if not os.path.exists(zonepath):
return 'Zone does not exist: {0}'.format(zonepath)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
__salt__['file.sed']('/etc/rc.conf', '^HARDWARECLOCK=.*', 'HARDWARECLOCK="{0}"'.format(clock))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
return True
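A small worked example (hypothetical file contents, not part of this diff) of the parsing get_zone() does above on RedHat-family systems, where the grep output is split on '=' and stripped of quotes::

    line = 'ZONE="America/Denver"'    # sample line from /etc/sysconfig/clock
    out = line.split('=')             # ['ZONE', '"America/Denver"']
    print(out[1].replace('"', ''))    # America/Denver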

View File

@ -81,12 +81,12 @@ def add(name,
cmd = 'useradd '
if shell:
cmd += '-s {0} '.format(shell)
if uid is not None:
if uid not in (None, ''):
cmd += '-u {0} '.format(uid)
if gid is not None:
if gid not in (None, ''):
cmd += '-g {0} '.format(gid)
if groups:
cmd += '-G {0} '.format(','.join(groups))
cmd += '-G "{0}" '.format(','.join(groups))
if home:
if home is not True:
if system:
@ -255,7 +255,7 @@ def chgroups(name, groups, append=False):
cmd = 'usermod '
if append:
cmd += '-a '
cmd += '-G {0} {1}'.format(','.join(groups), name)
cmd += '-G "{0}" {1}'.format(','.join(groups), name)
return not __salt__['cmd.retcode'](cmd)
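To make the quoting change concrete, a hedged sketch (sample user and groups, not from the diff) of the command chgroups() now builds with append=True::

    groups = ['wheel', 'audio']
    cmd = 'usermod '
    cmd += '-a '
    cmd += '-G "{0}" {1}'.format(','.join(groups), 'deploy')
    print(cmd)    # usermod -a -G "wheel,audio" deploy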

View File

@ -14,6 +14,11 @@ try:
except ImportError:
pass
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Set the virtual pkg module if the os is Windows
@ -40,7 +45,7 @@ def available_version(name):
salt '*' pkg.available_version <package name>
'''
return 'Not implemented on Windows yet'
return 'pkg.available_version not implemented on Windows yet'
def upgrade_available(name):
@ -51,7 +56,8 @@ def upgrade_available(name):
salt '*' pkg.upgrade_available <package name>
'''
return 'Not implemented on Windows yet'
log.warning('pkg.upgrade_available not implemented on Windows yet')
return False
def list_upgrades():
@ -62,7 +68,8 @@ def list_upgrades():
salt '*' pkg.list_upgrades
'''
return 'Not implemented on Windows yet'
log.warning('pkg.list_upgrades not implemented on Windows yet')
return {}
def version(name):
@ -262,10 +269,11 @@ def refresh_db():
salt '*' pkg.refresh_db
'''
return 'Not implemented on Windows yet'
log.warning('pkg.refresh_db not implemented on Windows yet')
return {}
def install(name, refresh=False, **kwargs):
def install(name=None, refresh=False, **kwargs):
'''
Install the passed package
@ -278,7 +286,8 @@ def install(name, refresh=False, **kwargs):
salt '*' pkg.install <package name>
'''
return 'Not implemented on Windows yet'
log.warning('pkg.install not implemented on Windows yet')
return {}
def upgrade():
@ -294,7 +303,8 @@ def upgrade():
salt '*' pkg.upgrade
'''
return 'Not implemented on Windows yet'
log.warning('pkg.upgrade not implemented on Windows yet')
return {}
def remove(name):
@ -307,7 +317,8 @@ def remove(name):
salt '*' pkg.remove <package name>
'''
return 'Not implemented on Windows yet'
log.warning('pkg.remove not implemented on Windows yet')
return []
def purge(name):
@ -321,4 +332,5 @@ def purge(name):
salt '*' pkg.purge <package name>
'''
return 'Not implemented on Windows yet'
log.warning('pkg.purge not implemented on Windows yet')
return []

View File

@ -5,6 +5,8 @@ Support for YUM
- rpm Python module
- rpmUtils Python module
'''
import re
try:
import yum
import rpm
@ -175,7 +177,18 @@ def version(name):
salt '*' pkg.version <package name>
'''
pkgs = list_pkgs(name)
# since list_pkgs is used to support matching complex versions
# we can search for a digit in the name and if one doesn't exist
# then just use the dbMatch function, which is 1000 times quicker
m = re.search("[0-9]", name)
if m:
pkgs = list_pkgs(name)
else:
ts = rpm.TransactionSet()
mi = ts.dbMatch('name', name)
pkgs = {}
for h in mi:
pkgs[h['name']] = "-".join([h['version'], h['release']])
if name in pkgs:
return pkgs[name]
else:
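A brief illustration (hypothetical package names, not part of this diff) of how the digit heuristic above routes the lookup::

    import re

    re.search("[0-9]", 'bash')           # no match -> fast rpm dbMatch() path
    re.search("[0-9]", 'kernel-2.6.32')  # match    -> falls back to list_pkgs(name)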
@ -433,3 +446,96 @@ def purge(pkgs):
salt '*' pkg.purge <package name>
'''
return remove(pkgs)
def verify(*package):
'''
Runs an rpm -Va on a system, and returns the results in a dict
CLI Example::
salt '*' pkg.verify
'''
ftypes = {'c': 'config',
'd': 'doc',
'g': 'ghost',
'l': 'license',
'r': 'readme'}
ret = {}
if package:
packages = ' '.join(package)
cmd = 'rpm -V {0}'.format(packages)
else:
cmd = 'rpm -Va'
for line in __salt__['cmd.run'](cmd).split('\n'):
fdict = {'mismatch': []}
if 'missing' in line:
line = ' ' + line
fdict['missing'] = True
del(fdict['mismatch'])
fname = line[13:]
if line[11:12] in ftypes:
fdict['type'] = ftypes[line[11:12]]
if line[0:1] == 'S':
fdict['mismatch'].append('size')
if line[1:2] == 'M':
fdict['mismatch'].append('mode')
if line[2:3] == '5':
fdict['mismatch'].append('md5sum')
if line[3:4] == 'D':
fdict['mismatch'].append('device major/minor number')
if line[4:5] == 'L':
fdict['mismatch'].append('readlink path')
if line[5:6] == 'U':
fdict['mismatch'].append('user')
if line[6:7] == 'G':
fdict['mismatch'].append('group')
if line[7:8] == 'T':
fdict['mismatch'].append('mtime')
if line[8:9] == 'P':
fdict['mismatch'].append('capabilities')
ret[fname] = fdict
return ret
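A worked example (hypothetical 'rpm -V' output line, not part of this diff) of how verify() turns one line into its return value; columns 0-8 hold the attribute flags, column 11 the file type, and the path begins at column 13::

    line = 'S.5....T.  c /etc/httpd/conf/httpd.conf'
    # verify() would record:
    # ret['/etc/httpd/conf/httpd.conf'] == {'type': 'config',
    #                                       'mismatch': ['size', 'md5sum', 'mtime']}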
def grouplist():
'''
Lists all groups known by yum on this system
CLI Example::
salt '*' pkg.grouplist
'''
ret = {'installed': [], 'available': [], 'available languages': {}}
yb = yum.YumBase()
(installed, available) = yb.doGroupLists()
for group in installed:
ret['installed'].append(group.name)
for group in available:
if group.langonly:
ret['available languages'][group.name] = {
'name': group.name,
'language': group.langonly}
else:
ret['available'].append(group.name)
return ret
def groupinfo(groupname):
'''
Lists packages belonging to a certain group
CLI Example::
salt '*' pkg.groupinfo 'Perl Support'
'''
yb = yum.YumBase()
(installed, available) = yb.doGroupLists()
for group in installed + available:
if group.name == groupname:
return {'mandatory packages': group.mandatory_packages,
'optional packages': group.optional_packages,
'default packages': group.default_packages,
'conditional packages': group.conditional_packages,
'description': group.description}

View File

@ -23,7 +23,7 @@ try:
import msgpack
# There is a serialization issue on ARM and potentially other platforms
# for some msgpack bindings, check for it
if msgpack.loads(msgpack.dumps([1,2,3])) is None:
if msgpack.loads(msgpack.dumps([1, 2, 3])) is None:
raise ImportError
except ImportError:
# Fall back to msgpack_pure
@ -118,17 +118,18 @@ class Serial(object):
class SREQ(object):
'''
Create a generic interface to wrap salt zeromq req calls.
Create a generic interface to wrap salt zeromq req calls.
'''
def __init__(self, master, id_='', serial='msgpack', linger=0):
self.master = master
self.serial = Serial(serial)
context = zmq.Context()
self.socket = context.socket(zmq.REQ)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
self.socket.linger = linger
if id_:
self.socket.setsockopt(zmq.IDENTITY, id_)
self.socket.connect(master)
self.poller = zmq.Poller()
def send(self, enc, load, tries=1, timeout=60):
'''
@ -138,17 +139,16 @@ class SREQ(object):
payload['load'] = load
package = self.serial.dumps(payload)
self.socket.send(package)
poller = zmq.Poller()
poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.socket, zmq.POLLIN)
tried = 0
while True:
if not poller.poll(timeout*1000) and tried >= tries:
if not self.poller.poll(timeout * 1000) and tried >= tries:
raise SaltReqTimeoutError('Waited {0} seconds'.format(timeout))
else:
break
ret = self.serial.loads(self.socket.recv())
poller.unregister(self.socket)
return ret
tried += 1
try:
return self.serial.loads(self.socket.recv())
finally:
self.poller.unregister(self.socket)
def send_auto(self, payload):
'''
@ -157,3 +157,15 @@ class SREQ(object):
enc = payload.get('enc', 'clear')
load = payload.get('load', {})
return self.send(enc, load)
def destroy(self):
for socket in self.poller.sockets.keys():
if not socket.closed:
socket.close()
self.poller.unregister(socket)
if not self.socket.closed:
self.socket.close()
self.context.term()
def __del__(self):
self.destroy()
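A minimal usage sketch (hypothetical master URI and payload, not part of this diff) of the reworked SREQ: the poller now lives on the instance, the reply socket is always unregistered in the finally block, and destroy() tears down the socket and context::

    sreq = SREQ('tcp://127.0.0.1:4506')
    reply = sreq.send('clear', {'cmd': 'ping'}, tries=1, timeout=60)
    sreq.destroy()    # also invoked from __del__ when the object is collected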

View File

@ -128,19 +128,19 @@ def ext_pillar(pillar, collection='pillar', id_field='_id', re_pattern=None,
"in mongo".format(id_field, minion_id))
pillar = db[collection].find_one({id_field: minion_id}, fields=fields)
if pillar:
result = db[collection].find_one({id_field: minion_id}, fields=fields)
if result:
if fields:
log.debug("ext_pillar.mongo: found document, returning fields "
"'{0}'".format(fields))
else:
log.debug("ext_pillar.mongo: found document, returning whole doc")
if '_id' in pillar:
if '_id' in result:
# Converting _id to a string
# will avoid the most common serialization error cases, but DBRefs
# and whatnot will still cause problems.
pillar['_id'] = str(pillar['_id'])
return pillar
result['_id'] = str(result['_id'])
return result
else:
# If we can't find the minion the database it's not necessarily an
# error.

View File

@ -28,7 +28,8 @@ def render(template_file, env='', sls='', argline='',
env=env,
sls=sls,
context=context,
tmplpath=tmplpath)
tmplpath=tmplpath,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(tmp_data.get('data',
'Unknown render error in jinja renderer'))

View File

@ -19,7 +19,8 @@ def render(template_file, env='', sls='', context=None, tmplpath=None, **kws):
env=env,
sls=sls,
context=context,
tmplpath=tmplpath)
tmplpath=tmplpath,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(tmp_data.get('data',
'Unknown render error in mako renderer'))

View File

@ -31,7 +31,8 @@ def render(template, env='', sls='', tmplpath=None, **kws):
opts=__opts__,
pillar=__pillar__,
env=env,
sls=sls)
sls=sls,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(tmp_data.get('data',
'Unknown render error in py renderer'))

Some files were not shown because too many files have changed in this diff