Merge branch 'master' of github.com:KB1JWQ/salt

Corey Quinn 2012-02-08 10:33:23 -08:00
commit dab007c367
84 changed files with 2931 additions and 605 deletions


@ -31,11 +31,15 @@
# Set the number of hours to keep old job information
#keep_jobs: 24
# Set the default timeout for the salt command and api, the default is 5
# seconds
#timeout: 5
# Set the directory used to hold unix sockets
#sock_dir: /tmp/salt-unix
# Set the acceptance level for serialization of messages. This should only be
# set if the master is newer that 0.9.5 and the minion are older, this option
# set if the master is newer than 0.9.5 and the minion are older. This option
# allows a 0.9.5 and newer master to communicate with minions 0.9.4 and
# earlier. It is not recommended to keep this setting on if the minions are
# all 0.9.5 or higher, as leaving pickle as the serialization medium is slow
@ -59,9 +63,15 @@
##########################################
# The state system uses a "top" file to tell the minions what environment to
# use and what modules to use. The state_top file is defined relative to the
# root of the base environment
# root of the base environment.
#state_top: top.sls
#
# The external_nodes option allows Salt to gather data that would normally be
# placed in a top file. The external_nodes option is the executable that will
# return the ENC data. Remember that Salt will look for external nodes AND top
# files and combine the results if both are enabled!
#external_nodes: None
#
# The renderer to use on the minions to render the state data
#renderer: yaml_jinja
#
@ -78,7 +88,7 @@
# The file server works on environments passed to the master, each environment
# can have multiple root directories, the subdirectories in the multiple file
# roots cannot match, otherwise the downloaded files will not be able to be
# reliably ensured. A base environment is required to house the top file
# reliably ensured. A base environment is required to house the top file.
# Example:
# file_roots:
# base:
@ -110,15 +120,14 @@
# syndic server(s) below it set the "order_masters" setting to True, if this
# is a master that will be running a syndic daemon for passthrough the
# "syndic_master" setting needs to be set to the location of the master server
# to recieve commands from
# to receive commands from.
#
# Set the order_masters setting to True if this master will command lower
# masters' syndic interfaces
# masters' syndic interfaces.
#order_masters: False
#
# If this master will be running a salt syndic daemon, then the syndic needs
# to know where the master it is recieving commands from is, set it with the
# syndic_master value
# If this master will be running a salt syndic daemon, syndic_master tells
# this master where to receive commands from.
#syndic_master: masterofmaster
##### Peer Publish settings #####
@ -129,9 +138,9 @@
# compartmentalization of commands based on individual minions.
#
# The configuration uses regular expressions to match minions and then a list
# of regular expressions to match functions, the following will allow the
# of regular expressions to match functions. The following will allow the
# minion authenticated as foo.example.com to execute functions from the test
# and pkg modules
# and pkg modules.
# peer:
# foo.example.com:
# - test.*
@ -149,7 +158,7 @@
##########################################
# Salt supports automatic clustering, salt creates a single ip address which
# is shared among the individual salt components using ucarp. The private key
# and all of the minion keys are maintained across the defined cluster masters
# and all of the minion keys are maintained across the defined cluster masters.
# The failover service is automatically managed via these settings
# List the identifiers for the other cluster masters in this manner:
@ -168,14 +177,15 @@
##########################################
# The location of the master log file
#log_file: /var/log/salt/master
#
# The level of messages to send to the log file.
# One of 'info', 'quiet', 'critical', 'error', 'debug', 'warning'.
# Default: 'warning'
#log_level: warning
#
# Logger levels can be used to tweak specific loggers logging levels.
# Imagine you want to have the salt library at the 'warning' level, but, you
# still wish to have 'salt.modules' at the 'debug' level:
# For example, if you want to have the salt library at the 'warning' level,
# but you still wish to have 'salt.modules' at the 'debug' level:
# log_granular_levels:
# 'salt': 'warning',
# 'salt.modules': 'debug'


@ -2,10 +2,10 @@
##### Primary configuration settings #####
##########################################
# Set the location of the salt master server, if the master server cannot be
# resolved, then the minion will fail to start
# resolved, then the minion will fail to start.
#master: salt
# Set the post used by the master reply and authentication server
# Set the port used by the master reply and authentication server
#master_port: 4506
# The user to run salt
@ -24,18 +24,19 @@
# clusters.
#id:
# The minion connection to the master may be inturupted, the minion will
# verify the connection every so many seconds, to disable connection
# verification set this value to 0
# If the connection to the server is interrupted, the minion will
# attempt to reconnect. sub_timeout allows you to control the rate
# of reconnection attempts (in seconds). To disable reconnects, set
# this value to 0.
#sub_timeout: 60
# Where cache data goes
#cachedir: /var/cache/salt
# The minion can locally cache the return data from jobs sent to it, this
# can be a good way to keep track minion side of the jobs the minion has
# executed. By default this feature is disabled, to enable set cache_jobs
# to True
# can be a good way to keep track of jobs the minion has executed
# (on the minion side). By default this feature is disabled, to enable
# set cache_jobs to True
#cache_jobs: False
# When waiting for a master to accept the minion's public key, salt will
@ -47,18 +48,20 @@
##### Minion module management #####
##########################################
# Disable specific modules, this will allow the admin to limit the level os
# Disable specific modules. This allows the admin to limit the level of
# access the master has to the minion
#disable_modules: [cmd,test]
#disable_returners: []
# Modules can be loaded from arbitrary paths, this enables the easy deployment
# of third party modules, modules for returners and minions can be loaded.
#
# Modules can be loaded from arbitrary paths. This enables the easy deployment
# of third party modules. Modules for returners and minions can be loaded.
# Specify a list of extra directories to search for minion modules and
# returners. These paths must be fully qualified!
#module_dirs: []
#returner_dirs: []
#states_dirs: []
#render_dirs: []
#
# Enable Cython modules searching and loading. (Default: False)
#cython_enable: False
@ -92,6 +95,12 @@
# not on the master it will be deleted from the minion. By default this is
# enabled and can be disabled by changing this value to False
#clean_dynamic_modules: True
#
# Normally the minion is not isolated to any single environment on the master
# when running states, but the environment can be isolated on the minion side
# by statically setting it. Remember that the recommended way to manage
# environments is to isolate via the top file.
#environment: None
###### Security settings #####
###########################################
@ -112,14 +121,15 @@
###########################################
# The location of the minion log file
#log_file: /var/log/salt/minion
#
# The level of messages to send to the log file.
# One of 'info', 'quiet', 'critical', 'error', 'debug', 'warning'.
# Default: 'warning'
#log_level: warning
#
# Logger levels can be used to tweak specific loggers logging levels.
# Imagine you want to have the salt library at the 'warning' level, but, you
# still wish to have 'salt.modules' at the 'debug' level:
# For example, if you want to have the salt library at the 'warning' level,
# but you still wish to have 'salt.modules' at the 'debug' level:
# log_granular_levels: {
# 'salt': 'warning',
# 'salt.modules': 'debug'
@ -133,7 +143,7 @@
# passed here in valid yaml format will be passed on to the salt minion modules
# for use. It is STRONGLY recommended that a naming convention be used in which
# the module name is followed by a . and then the value. Also, all top level
# data must be allied via the yaml dict construct, some examples:
# data must be applied via the yaml dict construct, some examples:
#
# A simple value for the test module:
#test.foo: foo

debian/changelog

@ -1,3 +1,9 @@
salt (0.9.6-1) lucid; urgency=low
* Bump version; time to upgrade
-- Corey Quinn <corey@sequestered.net> Tue, 07 Feb 2012 18:15:20 -0800
salt (0.9.5-1) unstable; urgency=low
* First package release. (Closes: #643789)

debian/control

@ -3,7 +3,6 @@ Section: admin
Priority: optional
Maintainer: Corey Quinn <corey@sequestered.net>
Build-Depends: debhelper (>= 7.0.50~),
python-support,
cython,
python-yaml,
python-setuptools,
@ -24,7 +23,6 @@ Architecture: any
Depends: ${python:Depends},
${misc:Depends},
${shlibs:Depends},
python-support,
cython,
python-setuptools,
python-yaml,
@ -34,7 +32,8 @@ Depends: ${python:Depends},
libzmq-dev (>= 2.1.9),
python,
python-dev,
python-jinja2
python-jinja2,
msgpack-python
Description: Shared libraries that salt requires for all packages
This package is a powerful remote execution manager that can be used
to administer servers in a fast and efficient way.

debian/copyright

@ -1,7 +1,7 @@
Format: http://dep.debian.net/deps/dep5
Upstream-Name: salt
Upstream-Contact: salt-users@googlegroups.com
Source: https://github.com/downloads/saltstack/salt/salt-0.9.5.tar.gz
Source: https://github.com/downloads/saltstack/salt/salt-0.9.6.tar.gz
Files: *
Copyright: 2012 Thomas S Hatch <thatch45@gmail.com>

debian/rules

@ -1,14 +1,11 @@
#!/usr/bin/make -f
#export DH_VERBOSE=1
%:
dh $@ --buildsystem=python_distutils
#override_dh_installinit:
# dh_installinit --no-start --name="salt-master"
# dh_installinit --no-start --name="salt-minion"
# dh_installinit --no-start --name="salt-syndic"
dh $@ #--with python2
#dh_override_auto_build:
python setup.py build #--install-layout=deb build
get-orig-source:
git clone https://github.com/saltstack/salt.git
mv salt salt-0.9.5
tar -zcvf salt_0.9.5.orig.tar.gz --exclude "debian*" --exclude-vcs salt-0.9.5
rm -rf salt-0.9.5
mv salt salt-0.9.6
tar -zcvf salt_0.9.6.orig.tar.gz --exclude "debian*" --exclude-vcs salt-0.9.6
rm -rf salt-0.9.6


@ -1 +1,6 @@
conf/master.template /etc/salt/master
conf/master.template /etc/salt
scripts/salt-master /usr/bin
scripts/salt-cp /usr/bin
scripts/salt-run /usr/bin
scripts/salt-key /usr/bin
scripts/salt /usr/bin


@ -1,5 +0,0 @@
usr/lib/python2*/dist-packages/salt/salt /usr/bin/salt
usr/lib/python2*/dist-packages/salt/salt-master /usr/bin/salt-master
usr/lib/python2*/dist-packages/salt/salt-cp /usr/bin/salt-cp
usr/lib/python2*/dist-packages/salt/salt-key /usr/bin/salt-key
usr/lib/python2*/dist-packages/salt/salt-run /usr/bin/salt-run


@ -1 +1,3 @@
conf/minion.template /etc/salt/minion
conf/minion.template /etc/salt
scripts/salt-minion /usr/bin
scripts/salt-call /usr/bin


@ -1,2 +0,0 @@
usr/lib/python2*/dist-packages/salt/salt-minion /usr/bin/salt-minion
usr/lib/python2*/dist-packages/salt/salt-call /usr/bin/salt-call


@ -0,0 +1 @@
scripts/salt-syndic /usr/bin

Binary file not shown (size changed from 1.1 KiB to 1.5 KiB).


@ -82,14 +82,14 @@ Targeting with Executions
`````````````````````````
As of 0.8.8 targeting with executions is still under heavy development and this
documentation is written to refernce the behavior of execution matching in the
documentation is written to reference the behavior of execution matching in the
future.
Execution matching allows for a primary function to be executed, and then based
on the return of the primary function the main function is executed.
Execution matching allows for matching minions based on any arbitrairy running
data on tne minions.
Execution matching allows for matching minions based on any arbitrary running
data on the minions.
Compound Targeting
``````````````````
@ -104,13 +104,14 @@ is well defined with an example:
salt -C 'G@os:Debian and webser* or E@db.*' test.ping
in this example any minion who's id starts with webser and is running Debian,
or any minion who's id starts with db will be matched.
In this example any minion whose id starts with ``webser`` and is running
Debian, or any minion whose id starts with ``db``, will be matched.
The type of matcher defaults to glob, but can be specified with the
corresponding letter followed by the @ symbol. In the above example a grain is
used with G@ as well as a regular expression with E@. The webser* target does
not need to be prefaced with a target type specifier because it is a glob.
corresponding letter followed by the ``@`` symbol. In the above example a grain
is used with ``G@`` as well as a regular expression with ``E@``. The
``webser*`` target does not need to be prefaced with a target type specifier
because it is a glob.
Node Group Targeting
````````````````````


@ -36,12 +36,12 @@ Options
.. option:: -E, --pcre
The target expression will be interpereted as a pcre regular expression
The target expression will be interpreted as a pcre regular expression
rather than a shell glob.
.. option:: -L, --list
The target expression will be interpereted as a comma delimited list,
The target expression will be interpreted as a comma delimited list,
example: server1.foo.bar,server2.foo.bar,example7.quo.qux
.. option:: -G, --grain


@ -28,8 +28,8 @@ Options
.. option:: -L, --list-all
List all public keys on this salt master, both accepted and pending
acceptance.
List all public keys on this salt master: accepted, pending,
and rejected.
.. option:: -a ACCEPT, --accept=ACCEPT
@ -39,6 +39,14 @@ Options
Accepts all pending public keys.
.. option:: -r REJECT, --reject=REJECT
Reject the named minion public key.
.. option:: -R, --reject-all
Rejects all pending public keys.
.. option:: -c CONFIG, --config=CONFIG
The master configuration file needs to be read to determine where the salt


@ -14,7 +14,7 @@ Synopsis
Description
===========
Salt run is the frontend command for executing ``Salt Runners``.
salt-run is the frontend command for executing ``Salt Runners``.
Salt runners are simple modules used to execute convenience functions on the
master


@ -19,15 +19,15 @@ operating system.
Writing Grains
==============
Grains are easy to write, the grains interface is derived my executing all of
the "public" functions found in the modules located in the grains package.
The functions in the modules of the grains must return a python dict, the keys
in the dict are the names of the grains, the values are the values.
Grains are easy to write. The grains interface is derived by executing all of
the "public" functions found in the modules located in the grains package or
the custom grains directory. The functions in the modules of the grains must
return a python dict, where the keys in the dict are the names of the grains and
the values are the values.
This means that the actual grains interface is simply a python dict.
Before adding a grain to salt, consider what the grain is and remember that
grains need to be static data.
Custom grains should be placed in a ``_grains`` directory located under your
:conf_master:`file_roots`. Before adding a grain to salt, consider what the grain
is and remember that grains need to be static data.
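For illustration, a minimal custom grain module might look like the following
sketch (the module name, grain name, and host naming scheme are hypothetical):

.. code-block:: python

    # _grains/datacenter.py -- hypothetical custom grain module
    import socket


    def datacenter():
        '''
        Return a "datacenter" grain derived from the hostname. The keys of
        the returned dict become grain names and the values become the
        grain values.
        '''
        host = socket.gethostname()
        # Assume hosts are named like web01.dc1.example.com
        parts = host.split('.')
        dc = parts[1] if len(parts) > 2 else 'unknown'
        return {'datacenter': dc}
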
Examples of Grains
------------------
@ -36,3 +36,10 @@ The core module in the grains package is where the main grains are loaded by
the salt minion and the principal example of how to write grains:
:blob:`salt/grains/core.py`
Syncing Grains
--------------
Syncing grains can be done a number of ways. They are automatically synced when
state.highstate is called, or they can be synced and reloaded by calling the
saltutil.sync_grains or saltutil.sync_all functions.
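As a rough sketch, the same sync can also be triggered from the master through
the Python client API (assuming the default master configuration path):

.. code-block:: python

    # Hedged sketch: push custom grains out to all minions
    import salt.client

    client = salt.client.LocalClient()
    # saltutil.sync_grains distributes any _grains modules to the minions
    # and reloads the grains data on them.
    result = client.cmd('*', 'saltutil.sync_grains')
    print result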


@ -4,7 +4,7 @@ Returners
By default the return values of the commands sent to the salt minions are
returned to the salt-master. But since the commands executed on the salt
minions are detatched from the call on the salt master, there is no need for
minions are detached from the call on the salt master, there is no need for
the minion to return the data to the salt master.
This is where the returner interface comes in. Returners are modules called
@ -16,6 +16,30 @@ a MongoDB server, a MySQL server, or any system!
.. seealso:: :ref:`Full list of builtin returners <all-salt.returners>`
Using Returners
===============
All commands will return the command data back to the master. Adding more
returners will ensure that the data is also sent to the specified returner
interfaces.
Specifying what returners to use is done when the command is invoked:
.. code-block:: bash
salt '*' test.ping --return redis_return
This command will ensure that the redis_return returner is used.
It is also possible to specify multiple returners:
.. code-block:: bash
salt '*' test.ping --return mongo_return,redis_return,cassandra_return
In this scenario all three returners will be called and the data from the
test.ping command will be sent out to the three named returners.
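The same effect can be achieved from the Python client API via the ``ret``
argument; a rough sketch, assuming the redis returner is available on the
minions:

.. code-block:: python

    # Hedged sketch: name a returner when publishing from the Python API
    import salt.client

    client = salt.client.LocalClient()
    # ret mirrors the --return CLI flag; the minions will also hand their
    # return data to the named returner(s).
    result = client.cmd('*', 'test.ping', ret='redis_return')
    print result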
Writing a Returner
==================
@ -24,7 +48,7 @@ function must accept a single argument. this argument is the return data from
the called minion function. So if the minion function ``test.ping`` is called
the value of the argument will be ``True``.
A simple returner is implimented here:
A simple returner is implemented here:
.. code-block:: python
@ -51,5 +75,5 @@ serializes the data as json and sets it in redis.
Examples
--------
The collection of builtin salt returners can be found here:
The collection of built-in salt returners can be found here:
:blob:`salt/returners`


@ -59,6 +59,13 @@ ID declaration
Occurs on the top level or under the :term:`extend declaration`.
Must **not** contain a dot, otherwise highstate summary output will be
unpredictable.
Must be unique across the entire state tree. If the same ID declaration is
used twice, only the first one matched will be used. All subsequent
ID declarations with the same name will be ignored.
Extend declaration
------------------


@ -4,9 +4,9 @@ Ordering States
When creating salt sls files, it is often important to ensure that they run in
a specific order. While states will always execute in the same order, that
order is not nessisarily defined the way you want it.
order is not necessarily defined the way you want it.
A few tools exist in Salt to set up the corect state ordering, these tools
A few tools exist in Salt to set up the correct state ordering. These tools
consist of requisite declarations and order options.
The Order Option
@ -33,8 +33,8 @@ Any state declared without an order option will be executed after all states
with order options are executed.
But this construct can only handle ordering states from the beginning.
Sometimes you may want to send a state to the end of the line, to do this
set the order to last:
Sometimes you may want to send a state to the end of the line. To do this,
set the order to ``last``:
.. code-block:: yaml

doc/ref/states/top.rst (new file)

@ -0,0 +1,143 @@
============
The Top File
============
The top file is used to map what sls modules get loaded onto what minions via
the state system. The top file creates a few general abstractions. First it
maps which nodes should pull from which environments, then it defines which
sls modules the matched nodes should draw from those environments.
Environments
============
The environments in the top file correspond with the environments defined in
the file_roots variable. In a simple, single environment setup you only have
the base environment, and therefore only one state tree. Here is a simple
example of file_roots in the master configuration:
.. code-block:: yaml
file_roots:
base:
- /srv/salt
This means that the top file will only have one environment to pull from.
Here is a simple, single environment top file:
.. code-block:: yaml
base:
'*':
- core
- edit
This also means that /srv/salt has a state tree. But if you want to use
multiple environments, or partition the file server to serve more than
just the state tree, then the file_roots option can be expanded:
.. code-block:: yaml
file_roots:
base:
- /srv/salt/base
dev:
- /srv/salt/dev
qa:
- /srv/salt/qa
prod:
- /srv/salt/prod
Then our top file could reference the environments:
.. code-block:: yaml
dev:
'webserver*dev*':
- webserver
'db*dev*':
- db
qa:
'webserver*qa*':
- webserver
'db*qa*':
- db
prod:
'webserver*prod*':
- webserver
'db*prod*':
- db
In this setup we have state trees in 3 of the 4 environments, and no state
tree in the base environment. Notice that the targets for the minions
specify environment data. In Salt the master determines who is in what
environment, and many environments can be crossed together. For instance,
a separate global state tree could be added to the base environment if
it suits your deployment:
.. code-block:: yaml
base:
'*':
- global
dev:
'webserver*dev*':
- webserver
'db*dev*':
- db
qa:
'webserver*qa*':
- webserver
'db*qa*':
- db
prod:
'webserver*prod*':
- webserver
'db*prod*':
- db
In this setup all systems will pull the global sls from the base environment,
as well as pull from their respective environments.
Remember that since everything is a file in salt, the environments are
primarily file server environments. This means that environments that have
nothing to do with states can be defined and used to distribute other files.
A clean and recommended setup for multiple environments would look like this:
.. code-block:: yaml
# Master file_roots configuration:
file_roots:
base:
- /srv/salt/base
dev:
- /srv/salt/dev
qa:
- /srv/salt/qa
prod:
- /srv/salt/prod
Then only place state trees in the dev, qa and prod environments, leaving
the base environment open for generic file transfers. Then the top.sls file
would look something like this:
.. code-block:: yaml
dev:
'webserver*dev*':
- webserver
'db*dev*':
- db
qa:
'webserver*qa*':
- webserver
'db*qa*':
- db
prod:
'webserver*prod*':
- webserver
'db*prod*':
- db


@ -14,7 +14,6 @@ passed to the SLS data structures will map directly to the states modules.
Mapping the information from the SLS data is simple, this example should
illustrate:
SLS file
.. code-block:: yaml
/etc/salt/master: # maps to "name"
@ -32,14 +31,20 @@ This does issue the burden, that function names, state names and function
arguments should be very human readable inside state modules, since they
directly define the user interface.
Using Custom State Modules
==========================
Place your custom state modules inside a ``_states`` directory within the
``file_roots`` specified by the master config file.
Cross Calling Modules
=====================
As with Execution Modules State Modules can also make use of the ``__salt__``
As with Execution Modules, State Modules can also make use of the ``__salt__``
and ``__grains__`` data.
It is important to note, that the real work of state management should not be
done in the state module unless it is needed, a good example is the pkg state
It is important to note that the real work of state management should not be
done in the state module unless it is needed. A good example is the pkg state
module. This module does not do any package management work, it just calls the
pkg execution module. This makes the pkg state module completely generic, which
is why there is only one pkg state module and many backend pkg execution
@ -49,3 +54,18 @@ On the other hand some modules will require that the logic be placed in the
state module, a good example of this is the file module. But in the vast
majority of cases this is not the best approach, and writing specific
execution modules to do the backend work will be the optimal solution.
Return Data
===========
A State Module must return a dict containing the following keys/values:
- **name:** The same value passed to the state as "name".
- **changes:** A dict describing the changes made. Each thing changed should
be a key, with its value being another dict with keys called "old" and "new"
containing the old/new values. For example, the pkg state's **changes** dict
has one key for each package changed, with the "old" and "new" keys in its
sub-dict containing the old and new versions of the package.
- **result:** A boolean value. *True* if the action was successful, otherwise
*False*.
- **comment:** A string containing a summary of the result.
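As a hedged sketch, a minimal state function returning this structure might
look like the following (the module, function, and argument names are
hypothetical):

.. code-block:: python

    # _states/motd.py -- hypothetical minimal state module
    def present(name, text):
        '''
        Ensure that the file named by ``name`` contains exactly ``text``.
        '''
        ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
        try:
            old = open(name).read()
        except IOError:
            old = ''
        if old == text:
            ret['result'] = True
            ret['comment'] = '{0} is already in the correct state'.format(name)
            return ret
        open(name, 'w').write(text)
        ret['changes'] = {name: {'old': old, 'new': text}}
        ret['result'] = True
        ret['comment'] = 'Updated {0}'.format(name)
        return ret

The changes dict here mirrors the pkg example above: one key per thing that
changed, with the old and new values carried in its sub-dict.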


@ -16,6 +16,7 @@ running and the Salt :term:`minions <minion>` point to the master.
* `pyzmq`_ >= 2.1.9 — ZeroMQ Python bindings
* `M2Crypto`_ — Python OpenSSL wrapper
* `PyCrypto`_ — The Python cryptography toolkit
* `msgpack-python`_ — High-performance message interchange format
* `YAML`_ — Python YAML bindings
Optional Dependencies:
@ -82,8 +83,6 @@ We are working to get Salt into apt. In the meantime we have a PPA available
for Lucid::
aptitude -y install python-software-properties
add-apt-repository ppa:chris-lea/libpgm
add-apt-repository ppa:chris-lea/zeromq
add-apt-repository ppa:saltstack/salt
aptitude update
aptitude install salt

doc/topics/jobs/index.rst (new file)

@ -0,0 +1,72 @@
==============
Job Management
==============
.. versionadded:: 0.9.7
Since Salt executes jobs on many systems at once, it needs to be able to manage
those jobs across all of them. As of Salt 0.9.7 the capability was added for
more advanced job management.
The Minion proc System
======================
The Salt minions now maintain a proc directory in the salt cachedir. The proc
directory holds files named after the executed job id. These files contain
the information about the currently running jobs on the minion and allow for
jobs to be looked up. With the default configuration the proc directory is
located under /var/cache/salt/proc.
Functions in the saltutil Module
================================
Salt 0.9.7 introduced a few new functions to the saltutil module for managing
jobs. These functions are:
1. running
Returns the data of all running jobs that are found in the proc directory.
2. find_job
Returns specific data about a certain job based on job id.
3. signal_job
Allows for a given jid to be sent a signal.
4. term_job
Sends a termination signal (SIGTERM, 15) to the process controlling the
specified job.
5. kill_job
Sends a kill signal (SIGKILL, 9) to the process controlling the
specified job.
These functions make up the core of the back end used to manage jobs at the
minion level.
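These are ordinary execution module functions, so they can be called like any
other; a rough sketch using the Python client API:

.. code-block:: python

    # Hedged sketch: list the jobs currently running on every minion
    import salt.client

    client = salt.client.LocalClient()
    # saltutil.running returns a list of job data dicts per minion
    running = client.cmd('*', 'saltutil.running')
    for minion, jobs in running.items():
        for job in jobs:
            print minion, job.get('jid'), job.get('fun')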
The jobs Runner
===============
A convenience runner front end and reporting system has been added as well.
The jobs runner contains a number of functions that make viewing the data
easier and cleaner; the main ones are described below.
active
------
The active function runs saltutil.running on all minions and formats the
return data about all running jobs in a much more usable and compact format.
The active function will also compare jobs that have returned and jobs that
are still running, making it easier to see what systems have completed a job
and what systems are still being waited on.
lookup_jid
----------
When jobs are executed the return data is sent back to the master and cached.
By default it is cached for 24 hours, but this can be configured via the
``keep_jobs`` option in the master configuration.
Using the lookup_jid runner will display the same return data that the initial
job invocation with the salt command would display.
list_jobs
---------
Before looking up a historic job, it may be necessary to find the job id. list_jobs
will parse the cached execution data and display all of the job data for jobs
that have already returned, either fully or partially.


@ -0,0 +1,47 @@
========================
Salt 0.9.6 Release Notes
========================
Salt 0.9.6 is a release targeting a few bugs and changes, primarily an issue
found in the names declaration in the state system. A few other bugs were also
repaired, like missing support for grains in extmods.
Due to a conflict in distribution packaging, msgpack will no longer be bundled
with Salt and is now a required dependency.
New Features
============
http and ftp support in files.managed
-------------------------------------
Now under the source option in the file.managed state an http or ftp address
can be used instead of a file located on the salt master.
Allow Multiple Returners
------------------------
Now the returner interface can define multiple returners, and will also return
data back to the master, making the process less ambiguous.
Minion Memory Improvements
--------------------------
A number of modules have been taken out of the minion if the underlying
systems required by said modules are not present on the minion system.
A number of other modules need to be stripped out in this same way, which
should continue to make the minion more efficient.
Minions Can Locally Cache Return Data
-------------------------------------
A new option, cache_jobs, has been added to the minion to allow all of the
historically run jobs to be cached on the minion, making it possible to look
up historic returns. By default cache_jobs is set to False.
Pure Python Template Support For file.managed
---------------------------------------------
Templates in the file.managed state can now be defined in a python script.
This script needs to have a run function that returns the string that needs to
be in the named file.
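A minimal template script of this kind might look like the following sketch
(the file contents are just an example):

.. code-block:: python

    # Hypothetical pure python template for a file.managed state
    def run():
        '''
        Return the full contents of the managed file as a string.
        '''
        lines = ['# This file is managed by salt.',
                 '# Local changes will be overwritten.']
        return '\n'.join(lines) + '\n'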


@ -0,0 +1,50 @@
===============================================
Bootstrapping Salt on Linux EC2 with Cloud-Init
===============================================
`Salt <http://saltstack.org>`_ is a great tool for remote execution and configuration management; however, you will still need to bootstrap the daemon when spinning up a new node. One option is to create and save a custom AMI, but this creates another resource to maintain and document.
A better method for Linux machines uses Canonical's `CloudInit <https://help.ubuntu.com/community/CloudInit>`_ to run a bootstrap script during an EC2 Instance initialization. Cloud-init takes the ``user_data`` string passed into a new AWS instance and runs it in a manner similar to rc.local. The bootstrap script needs to:
#. Install `Salt`_ with dependencies
#. Point the minion to the master
Here is a sample script::
#!/bin/bash
# Install saltstack
add-apt-repository ppa:saltstack/salt -y
apt-get update -y
apt-get install salt -y
apt-get upgrade -y
# Set salt master location and start minion
cp /etc/salt/minion.template /etc/salt/minion
sed -i -e 's/#master: salt/master: [salt_master_fqdn]/' /etc/salt/minion
salt-minion -d
First the script adds the saltstack ppa and installs the package. Then we copy over the minion config template and tell it where to find the master. You will have to replace ``[salt_master_fqdn]`` with something that resolves to your salt master.
Used With Boto
--------------
`Boto <https://github.com/boto/boto>`_ will accept a string for user data which can be used to pass our bootstrap script. If the script is saved to a file, you can read it into a string::
import boto
user_data = open('salt_bootstrap.sh')
conn = boto.connect_ec2(<AWS_ACCESS_ID>, <AWS_SECRET_KEY>)
reservation = conn.run_instances(image_id=<ami_id>,
key_name=<key_name>,
user_data=user_data.read())
Additional Notes
-------------------
Sometime in the future the ppa will include and install an upstart file. In the meantime, you can use the bootstrap to `build one <https://gist.github.com/1617054>`_.
It may also be useful to set the node's role during this phase. One option would be saving the node's role to a file and then using a custom grain to select it.


@ -149,7 +149,8 @@ directly.
But with more than a single SLS file, more components can be added to the
toolkit, consider this ssh example:
/ssh/init.sls
``/ssh/init.sls``
.. code-block:: yaml
openssh-client:
@ -166,7 +167,8 @@ toolkit, consider this ssh example:
- require:
- pkg: openssh-client
/ssh/server.sls
``/ssh/server.sls``
.. code-block:: yaml
include:
@ -231,7 +233,8 @@ needs to be placed.
These examples will add more watchers to apache and change the ssh banner.
/ssh/custom-server.sls
``/ssh/custom-server.sls``
.. code-block:: yaml
include:
@ -242,7 +245,8 @@ These examples will add more watchers to apache and change the ssh banner.
file:
- source: salt://ssh/custom-banner
/python/mod_python.sls
``/python/mod_python.sls``
.. code-block:: yaml
include:
@ -301,7 +305,8 @@ available, ``salt`` and ``grains``. The salt object allows for any salt
function to be called from within the template, and grains allows for the
grains to be accessed from within the template. A few examples are in order:
/apache/init.sls
``/apache/init.sls``
.. code-block:: yaml
apache:
@ -347,7 +352,8 @@ Red Hat, then the name of the apache package and service needs to be httpd.
A more aggressive way to use Jinja can be found here, in a module to set up
a MooseFS distributed filesystem chunkserver:
/moosefs/chunk.sls
``/moosefs/chunk.sls``
.. code-block:: yaml
include:
@ -421,7 +427,7 @@ but a SLS file set to use another renderer can be easily added to the tree.
This example shows a very basic python SLS file:
/python/django.sls
``/python/django.sls``
.. code-block:: python


@ -81,6 +81,10 @@ In this case it defines the name of the package to be installed. **NOTE:** the
package name for the Apache httpd web server may differ on your OS or distro —
for example, on Fedora it is ``httpd`` but on Debian/Ubuntu it is ``apache2``.
Additionally, an ID declaration should not contain a dot, as this will produce
unpredictable output in the summary returned from a call to
:func:`state.highstate <salt.modules.state.highstate>`.
The second line, called the :term:`state declaration`, defines which of the
Salt States we are using. In this example, we are using the :mod:`pkg state
<salt.states.pkg>` to ensure that a given package is installed.


@ -149,7 +149,7 @@ vhosts file is changed:
- apache
extend:
apache
apache:
service:
- watch:
- file: /etc/httpd/extra/httpd-vhosts.conf

pkg/rpm/README.fedora (new file)

@ -0,0 +1,11 @@
These packages are *optional* dependencies for salt. By default, they are not included in the salt RPMs.
Install any of these packages to enable the functionality within salt.
MySQL-python
libvirt-python
python-mako
pymongo
python-redis / redis
A semi-canonical list of the optional salt modules can be found at
https://github.com/saltstack/salt/blob/develop/doc/conf.py#L30


@ -9,8 +9,8 @@
%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
Name: salt
Version: 0.9.4
Release: 6%{?dist}
Version: 0.9.6
Release: 2%{?dist}
Summary: A parallel remote execution system
Group: System Environment/Daemons
@ -23,6 +23,7 @@ Source3: %{name}-minion
Source4: %{name}-master.service
Source5: %{name}-syndic.service
Source6: %{name}-minion.service
Source7: README.fedora
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildArch: noarch
@ -33,6 +34,7 @@ BuildRequires: python26-crypto
BuildRequires: python26-devel
BuildRequires: python26-PyYAML
BuildRequires: python26-m2crypto
BuildRequires: python26-msgpack
Requires: python26-crypto
Requires: python26-zmq
@ -40,6 +42,7 @@ Requires: python26-jinja2
Requires: python26-PyYAML
Requires: python26-m2crypto
Requires: python26-PyXML
Requires: python26-msgpack
%else
@ -48,6 +51,7 @@ BuildRequires: python-crypto
BuildRequires: python-devel
BuildRequires: PyYAML
BuildRequires: m2crypto
BuildRequires: python-msgpack
Requires: python-crypto
Requires: python-zmq
@ -55,6 +59,7 @@ Requires: python-jinja2
Requires: PyYAML
Requires: m2crypto
Requires: PyXML
Requires: python-msgpack
%endif
@ -71,7 +76,7 @@ BuildRequires: systemd-units
%endif
Requires: MySQL-python libvirt-python yum
#Requires: MySQL-python libvirt-python yum
%description
Salt is a distributed remote execution system used to execute commands and
@ -119,6 +124,11 @@ install -p -m 0644 %{SOURCE4} $RPM_BUILD_ROOT%{_unitdir}/
install -p -m 0644 %{SOURCE5} $RPM_BUILD_ROOT%{_unitdir}/
install -p -m 0644 %{SOURCE6} $RPM_BUILD_ROOT%{_unitdir}/
%endif
install -p %{SOURCE7} .
install -p -m 0640 $RPM_BUILD_ROOT%{_sysconfdir}/salt/minion.template $RPM_BUILD_ROOT%{_sysconfdir}/salt/minion
install -p -m 0640 $RPM_BUILD_ROOT%{_sysconfdir}/salt/master.template $RPM_BUILD_ROOT%{_sysconfdir}/salt/master
%clean
rm -rf $RPM_BUILD_ROOT
@ -129,6 +139,7 @@ rm -rf $RPM_BUILD_ROOT
%{python_sitelib}/%{name}/*
%{python_sitelib}/%{name}-%{version}-py?.?.egg-info
%doc %{_mandir}/man7/salt.7.*
%doc README.fedora
%files -n salt-minion
%defattr(-,root,root)
@ -143,7 +154,8 @@ rm -rf $RPM_BUILD_ROOT
%{_unitdir}/salt-minion.service
%endif
%config(noreplace) /etc/salt/minion
%config(noreplace) %{_sysconfdir}/salt/minion
%config %{_sysconfdir}/salt/minion.template
%files -n salt-master
%defattr(-,root,root)
@ -166,7 +178,8 @@ rm -rf $RPM_BUILD_ROOT
%{_unitdir}/salt-master.service
%{_unitdir}/salt-syndic.service
%endif
%config(noreplace) /etc/salt/master
%config(noreplace) %{_sysconfdir}/salt/master
%config %{_sysconfdir}/salt/master.template
%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
@ -242,6 +255,12 @@ fi
%endif
%changelog
* Tue Jan 24 2012 Clint Savage <herlo1@gmail.com> - 0.9.6-2
- Added README.fedora and removed deps for optional modules
* Sat Jan 21 2012 Clint Savage <herlo1@gmail.com> - 0.9.6-1
- New upstream release
* Sun Jan 8 2012 Clint Savage <herlo1@gmail.com> - 0.9.4-6
- Missed some critical elements for SysV and rpmlint cleanup


@ -1,6 +1,7 @@
# pip requirements file for Salt
Jinja2
pyzmq
msgpack-python
M2Crypto
pycrypto
msgpack-python
PyCrypto
PyYAML
pyzmq >= 2.1.9


@ -19,6 +19,22 @@ except ImportError as e:
if e.message != 'No module named _msgpack':
raise
def set_pidfile(pidfile):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir):
os.makedirs(pdir)
try:
open(pidfile, 'w+').write(str(os.getpid()))
except IOError:
err = ('Failed to commit the pid file to location {0}, please verify'
' that the location is available').format(pidfile)
log.error(err)
def verify_env(dirs):
'''
Verify that the named directories are in place and that the environment
@ -81,6 +97,8 @@ class Master(object):
# command line overrides config
if self.cli['user']:
self.opts['user'] = self.cli['user']
# Send the pidfile location to the opts
self.opts['pidfile'] = self.cli['pidfile']
def __parse_cli(self):
'''
@ -103,6 +121,11 @@ class Master(object):
'--user',
dest='user',
help='Specify user to run minion')
parser.add_option('--pid-file',
dest='pidfile',
default='/var/run/salt-master.pid',
help=('Specify the location of the pidfile. Default'
' %default'))
parser.add_option('-l',
'--log-level',
dest='log_level',
@ -118,7 +141,8 @@ class Master(object):
cli = {'daemon': options.daemon,
'config': options.config,
'user': options.user}
'user': options.user,
'pidfile': options.pidfile}
return cli
@ -128,6 +152,7 @@ class Master(object):
'''
verify_env([os.path.join(self.opts['pki_dir'], 'minions'),
os.path.join(self.opts['pki_dir'], 'minions_pre'),
os.path.join(self.opts['pki_dir'], 'minions_rejected'),
os.path.join(self.opts['cachedir'], 'jobs'),
os.path.dirname(self.opts['log_file']),
self.opts['sock_dir'],
@ -148,6 +173,7 @@ class Master(object):
# Late import so logging works correctly
import salt.utils
salt.utils.daemonize()
set_pidfile(self.cli['pidfile'])
master.start()
@ -183,6 +209,11 @@ class Minion(object):
'--user',
dest='user',
help='Specify user to run minion')
parser.add_option('--pid-file',
dest='pidfile',
default='/var/run/salt-minion.pid',
help=('Specify the location of the pidfile. Default'
' %default'))
parser.add_option('-l',
'--log-level',
dest='log_level',
@ -197,7 +228,8 @@ class Minion(object):
salt.log.setup_console_logger(options.log_level, log_format=log_format)
cli = {'daemon': options.daemon,
'config': options.config,
'user': options.user}
'user': options.user,
'pidfile': options.pidfile}
return cli
@ -226,6 +258,7 @@ class Minion(object):
# Late import so logging works correctly
import salt.utils
salt.utils.daemonize()
set_pidfile(self.cli['pidfile'])
minion = salt.minion.Minion(self.opts)
minion.tune_in()
except KeyboardInterrupt:
@ -291,6 +324,11 @@ class Syndic(object):
'--user',
dest='user',
help='Specify user to run minion')
parser.add_option('--pid-file',
dest='pidfile',
default='/var/run/salt-syndic.pid',
help=('Specify the location of the pidfile. Default'
' %default'))
parser.add_option('-l',
'--log-level',
dest='log_level',
@ -337,6 +375,7 @@ class Syndic(object):
# Late import so logging works correctly
import salt.utils
salt.utils.daemonize()
set_pidfile(self.cli['pidfile'])
syndic.tune_in()
except KeyboardInterrupt:
log.warn('Stopping the Salt Syndic Minion')


@ -6,8 +6,6 @@ The management of salt command line utilities are stored in here
import optparse
import os
import sys
import yaml
import json
# Import salt components
import salt.cli.caller
@ -41,8 +39,7 @@ class SaltCMD(object):
parser.add_option('-t',
'--timeout',
default=5,
type=int,
default=None,
dest='timeout',
help=('Set the return timeout for batch jobs; '
'default=5 seconds'))
@ -146,7 +143,8 @@ class SaltCMD(object):
opts = {}
opts['timeout'] = options.timeout
if not options.timeout is None:
opts['timeout'] = int(options.timeout)
opts['pcre'] = options.pcre
opts['list'] = options.list_
opts['grain'] = options.grain
@ -160,10 +158,6 @@ class SaltCMD(object):
opts['yaml_out'] = options.yaml_out
opts['json_out'] = options.json_out
if opts['return']:
if opts['timeout'] == 5:
opts['timeout'] = 0
if options.query:
opts['query'] = options.query
if len(args) < 1:
@ -231,6 +225,8 @@ class SaltCMD(object):
print ''
else:
if not 'timeout' in self.opts:
self.opts['timeout'] = local.opts['timeout']
args = [self.opts['tgt'],
self.opts['fun'],
self.opts['arg'],
@ -390,6 +386,10 @@ class SaltCP(object):
opts['nodegroup'] = options.nodegroup
opts['conf_file'] = options.conf_file
if len(args) <= 1:
parser.print_help()
parser.exit()
if opts['list']:
opts['tgt'] = args[0].split(',')
else:
@ -447,6 +447,19 @@ class SaltKey(object):
action='store_true',
help='Accept all pending keys')
parser.add_option('-r',
'--reject',
dest='reject',
default='',
help='Reject the specified public key')
parser.add_option('-R',
'--reject-all',
dest='reject_all',
default=False,
action='store_true',
help='Reject all pending keys')
parser.add_option('-p',
'--print',
dest='print_',
@ -466,6 +479,19 @@ class SaltKey(object):
default='',
help='Delete the named key')
parser.add_option('-q',
'--quiet',
dest='quiet',
default=False,
action='store_true',
help='Suppress output')
parser.add_option('--logfile',
dest='logfile',
default='/var/log/salt/key.log',
help=('Send all output to a file. '
'Default is /var/log/salt/key.log'))
parser.add_option('--gen-keys',
dest='gen_keys',
default='',
@ -496,10 +522,17 @@ class SaltKey(object):
opts = {}
opts['quiet'] = options.quiet
opts['logfile'] = options.logfile
# I decided to always set this to info, since it really all is info or
# error.
opts['loglevel'] = 'info'
opts['list'] = options.list_
opts['list_all'] = options.list_all
opts['accept'] = options.accept
opts['accept_all'] = options.accept_all
opts['reject'] = options.reject
opts['reject_all'] = options.reject_all
opts['print'] = options.print_
opts['print_all'] = options.print_all
opts['delete'] = options.delete
@ -518,6 +551,9 @@ class SaltKey(object):
'''
Execute saltkey
'''
import salt.log
salt.log.setup_logfile_logger(self.opts['logfile'],
self.opts['loglevel'])
key = salt.cli.key.Key(self.opts)
key.run()


@ -8,11 +8,12 @@ import sys
# Import salt libs
import salt
import salt.utils
import salt.loader
import salt.minion
# Custom exceptions
from salt.exceptions import CommandExecutionError
from salt.exceptions import CommandExecutionError, CommandNotFoundError
class Caller(object):
'''
@ -31,18 +32,25 @@ class Caller(object):
Call the module
'''
ret = {}
if self.opts['fun'] not in self.minion.functions:
sys.stderr.write('Function {0} is not available\n'.format(self.opts['fun']))
fun = self.opts['fun']
if fun not in self.minion.functions:
sys.stderr.write('Function {0} is not available\n'.format(fun))
sys.exit(1)
try:
ret['return'] = self.minion.functions[self.opts['fun']](
ret['return'] = self.minion.functions[fun](
*self.opts['arg']
)
except (TypeError, CommandExecutionError) as exc:
sys.stderr.write('Error running \'{0}\': {1}\n'.format(self.opts['fun'], str(exc)))
msg = 'Error running \'{0}\': {1}\n'
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(1)
if hasattr(self.minion.functions[self.opts['fun']], '__outputter__'):
oput = self.minion.functions[self.opts['fun']].__outputter__
except CommandNotFoundError as exc:
msg = 'Command not found in \'{0}\': {1}\n'
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(1)
if hasattr(self.minion.functions[fun], '__outputter__'):
oput = self.minion.functions[fun].__outputter__
if isinstance(oput, str):
ret['out'] = oput
return ret


@ -6,11 +6,12 @@ The actual saltkey functional code
import os
import shutil
import sys
import logging
# Import salt modules
import salt.crypt
import salt.utils as utils
log = logging.getLogger(__name__)
class Key(object):
'''
@ -28,13 +29,15 @@ class Key(object):
subdir = ''
if key_type == 'pre':
subdir = 'minions_pre'
elif key_type == 'rej':
subdir = 'minions_rejected'
elif key_type == 'acc':
subdir = 'minions'
dir_ = os.path.join(self.opts['pki_dir'], subdir)
if not os.path.isdir(dir_):
err = ('The ' + subdir + ' directory is not present, ensure that '
'the master server has been started')
sys.stderr.write(err + '\n')
self._log(err, level='error')
sys.exit(42)
keys = os.listdir(dir_)
if full_path:
@ -43,22 +46,38 @@ class Key(object):
else:
ret = set(keys)
return ret
def _log(self, message, level=''):
if hasattr(log, level):
log_msg = getattr(log, level)
log_msg(message)
if not self.opts['quiet']:
print message
def _list_pre(self):
'''
List the unaccepted keys
'''
print utils.LIGHT_RED + 'Unaccepted Keys:' + utils.ENDC
self._log(utils.LIGHT_RED + 'Unaccepted Keys:' + utils.ENDC)
for key in sorted(self._keys('pre')):
print utils.RED + key + utils.ENDC
output = utils.RED + key + utils.ENDC
self._log(output)
def _list_accepted(self):
'''
List the accepted public keys
'''
print utils.LIGHT_GREEN + 'Accepted Keys:' + utils.ENDC
self._log(utils.LIGHT_GREEN + 'Accepted Keys:' + utils.ENDC)
for key in sorted(self._keys('acc')):
print utils.GREEN + key + utils.ENDC
self._log(utils.GREEN + key + utils.ENDC)
def _list_rejected(self):
'''
List the rejected keys
'''
self._log(utils.LIGHT_BLUE + 'Rejected:' + utils.ENDC)
for key in sorted(self._keys('rej')):
self._log(utils.BLUE + key + utils.ENDC)
def _list_all(self):
'''
@ -66,6 +85,7 @@ class Key(object):
'''
self._list_pre()
self._list_accepted()
self._list_rejected()
def _print_key(self, name):
'''
@ -74,88 +94,117 @@ class Key(object):
keys = self._keys('pre', True).union(self._keys('acc', True))
for key in sorted(keys):
if key.endswith(name):
print open(key, 'r').read()
self._log(open(key, 'r').read())
def _print_all(self):
'''
Print out the public keys, all of em'
'''
print utils.LIGHT_RED + 'Unaccepted keys:' + utils.ENDC
self._log(utils.LIGHT_RED + 'Unaccepted keys:' + utils.ENDC)
for key in sorted(self._keys('pre', True)):
print ' ' + utils.RED + os.path.basename(key) + utils.ENDC
print open(key, 'r').read()
print utils.LIGHT_GREEN + 'Accepted keys:' + utils.ENDC
self._log(' ' + utils.RED + os.path.basename(key) + utils.ENDC)
self._log(open(key, 'r').read())
self._log(utils.LIGHT_GREEN + 'Accepted keys:' + utils.ENDC)
for key in sorted(self._keys('acc', True)):
print ' ' + utils.GREEN + os.path.basename(key) + utils.ENDC
print open(key, 'r').read()
self._log(' ' + utils.GREEN + os.path.basename(key) +
utils.ENDC)
self._log(open(key, 'r').read())
self._log(utils.LIGHT_BLUE + 'Rejected keys:' + utils.ENDC)
for key in sorted(self._keys('rej', True)):
self._log(' ' + utils.BLUE + os.path.basename(key) +
utils.ENDC)
self._log(open(key, 'r').read())
def _accept(self, key):
'''
Accept a specified host's public key
'''
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions = os.path.join(self.opts['pki_dir'], 'minions')
if not os.path.isdir(minions):
err = ('The minions directory is not present, ensure that the '
'master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
if not os.path.isdir(pre_dir):
err = ('The minions_pre directory is not present, ensure '
'that the master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
pre = os.listdir(pre_dir)
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if not pre.count(key):
err = ('The named host is unavailable, please accept an '
'available key')
sys.stderr.write(err + '\n')
err = ('The key named %s does not exist, please accept an '
'available key' %(key))
#log.error(err)
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(pre_dir, key), os.path.join(minions, key))
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_accepted, key))
self._log('Key for %s accepted.' %(key), level='info')
def _accept_all(self):
'''
Accept all keys in pre
'''
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions = os.path.join(self.opts['pki_dir'], 'minions')
if not os.path.isdir(minions):
err = ('The minions directory is not present, ensure that the '
'master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
if not os.path.isdir(pre_dir):
err = ('The minions_pre directory is not present, ensure that the '
'master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
for key in os.listdir(pre_dir):
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._accept(key)
def _delete_key(self):
'''
Delete a key
'''
pre_dir = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions = os.path.join(self.opts['pki_dir'], 'minions')
if not os.path.isdir(minions):
err = ('The minions directory is not present, ensure that the '
'master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
if not os.path.isdir(pre_dir):
err = ('The minions_pre directory is not present, ensure that the '
'master server has been started')
sys.stderr.write(err + '\n')
sys.exit(42)
pre = os.path.join(pre_dir, self.opts['delete'])
acc = os.path.join(minions, self.opts['delete'])
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.path.join(minions_pre, self.opts['delete'])
acc = os.path.join(minions_accepted, self.opts['delete'])
rej = os.path.join(minions_rejected, self.opts['delete'])
if os.path.exists(pre):
os.remove(pre)
print 'Removed pending key %s' % self.opts['delete']
self._log('Removed pending key %s' % self.opts['delete'],
level='info')
if os.path.exists(acc):
os.remove(acc)
print 'Removed accepted key %s' % self.opts['delete']
self._log('Removed accepted key %s' % self.opts['delete'],
level='info')
if os.path.exists(rej):
os.remove(rej)
self._log('Removed rejected key %s' % self.opts['delete'],
level='info')
def _reject(self, key):
'''
Reject a specified host's public key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if not pre.count(key):
err = ('The host named %s is unavailable, please accept an '
'available key' %(key))
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_rejected, key))
self._log('%s key rejected.' %(key), level='info')
def _reject_all(self):
'''
Reject all keys in pre
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._reject(key)
def _check_minions_directories(self):
minions_accepted = os.path.join(self.opts['pki_dir'], 'minions')
minions_pre = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected')
for dir in [minions_accepted, minions_pre, minions_rejected]:
if not os.path.isdir(dir):
err = ('The minions directory {0} is not present, ensure '
'that the master server has been started'.format(dir))
self._log(err, level='error')
sys.exit(42)
return minions_accepted, minions_pre, minions_rejected
def run(self):
'''
@ -179,7 +228,11 @@ class Key(object):
self._accept(self.opts['accept'])
elif self.opts['accept_all']:
self._accept_all()
elif self.opts['reject']:
self._reject(self.opts['reject'])
elif self.opts['reject_all']:
self._reject_all()
elif self.opts['delete']:
self._delete_key()
else:
self._list_all()
self._list_all()


@ -77,7 +77,8 @@ class LocalClient(object):
key = open(keyfile, 'r').read()
return key
except (OSError, IOError):
raise SaltClientError('Problem reading the salt root key. Are you root?')
raise SaltClientError(('Problem reading the salt root key. Are'
' you root?'))
def _check_glob_minions(self, expr):
'''
@ -131,12 +132,14 @@ class LocalClient(object):
tgt,
fun,
arg=(),
timeout=5,
timeout=None,
expr_form='glob',
ret=''):
'''
Execute a salt command and return.
'''
if timeout is None:
timeout = self.opts['timeout']
jid = prep_jid(self.opts['cachedir'])
pub_data = self.pub(
tgt,
@ -153,12 +156,14 @@ class LocalClient(object):
tgt,
fun,
arg=(),
timeout=5,
timeout=None,
expr_form='glob',
ret=''):
'''
Execute a salt command and return
'''
if timeout is None:
timeout = self.opts['timeout']
jid = prep_jid(self.opts['cachedir'])
pub_data = self.pub(
tgt,
@ -171,11 +176,13 @@ class LocalClient(object):
return (self.get_full_returns(pub_data['jid'],
pub_data['minions'], timeout))
def get_returns(self, jid, minions, timeout=5):
def get_returns(self, jid, minions, timeout=None):
'''
This method starts off a watcher looking at the return data for a
specified jid
'''
if timeout is None:
timeout = self.opts['timeout']
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', jid)
start = 999999999999
gstart = int(time.time())
@ -209,11 +216,13 @@ class LocalClient(object):
return ret
time.sleep(0.02)
def get_full_returns(self, jid, minions, timeout=5):
def get_full_returns(self, jid, minions, timeout=None):
'''
This method starts off a watcher looking at the return data for a
specified jid, it returns all of the information for the jid
'''
if timeout is None:
timeout = self.opts['timeout']
jid_dir = os.path.join(self.opts['cachedir'], 'jobs', jid)
start = 999999999999
gstart = int(time.time())


@ -4,9 +4,10 @@ All salt configuration loading and defaults should be in this module
# Import python modules
import os
import tempfile
import socket
import sys
import socket
import logging
import tempfile
# import third party libs
import yaml
@ -22,6 +23,8 @@ import salt.crypt
import salt.loader
import salt.utils
log = logging.getLogger(__name__)
def load_config(opts, path, env_var):
'''
@ -54,9 +57,10 @@ def load_config(opts, path, env_var):
opts.update(conf_opts)
opts['conf_file'] = path
except Exception, e:
print 'Error parsing configuration file: {0} - {1}'.format(path, e)
msg = 'Error parsing configuration file: {0} - {1}'
log.warn(msg.format(path, e))
else:
print 'Missing configuration file: {0}'.format(path)
log.debug('Missing configuration file: {0}'.format(path))
def prepend_root_dir(opts, path_options):
@ -86,6 +90,7 @@ def minion_config(path):
'renderer': 'yaml_jinja',
'failhard': False,
'autoload_dynamic_modules': True,
'environment': None,
'disable_modules': [],
'disable_returners': [],
'module_dirs': [],
@ -137,6 +142,7 @@ def master_config(path):
'worker_threads': 5,
'sock_dir': os.path.join(tempfile.gettempdir(), '.salt-unix'),
'ret_port': '4506',
'timeout': 5,
'keep_jobs': 24,
'root_dir': '/',
'pki_dir': '/etc/salt/pki',
@ -152,6 +158,7 @@ def master_config(path):
'renderer': 'yaml_jinja',
'failhard': False,
'state_top': 'top.sls',
'external_nodes': '',
'order_masters': False,
'log_file': '/var/log/salt/master',
'log_level': 'warning',


@ -5,16 +5,16 @@ authenticating peers
'''
# Import python libs
import hashlib
import hmac
import logging
import os
import sys
import hmac
import hashlib
import logging
import tempfile
# Import Cryptography libs
from Crypto.Cipher import AES
from M2Crypto import RSA
from Crypto.Cipher import AES
# Import zeromq libs
import zmq
@ -320,8 +320,8 @@ class SAuth(Auth):
'''
creds = self.sign_in()
if creds == 'retry':
print 'Failed to authenticate with the master, verify that this'\
+ ' minion\'s public key has been accepted on the salt master'
log.error('Failed to authenticate with the master, verify that this'\
+ ' minion\'s public key has been accepted on the salt master')
sys.exit(2)
return Crypticle(self.opts, creds['aes'])

View File

@ -241,6 +241,9 @@ def _virtual(osdata):
sysctl = salt.utils.which('sysctl')
if sysctl:
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl)).strip()
jail = __salt__['cmd.run']('{0} -n security.jail.jailed'.format(sysctl)).strip()
if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
return grains
@ -492,6 +495,14 @@ def saltpath():
path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt import __version__
return {'saltversion': __version__}
# Relatively complex mini-algorithm to iterate over the various
# sections of dmidecode output and return matches for specific

View File

@ -247,6 +247,10 @@ class Loader(object):
log.debug(('Failed to import module {0}, this is most likely'
' NOT a problem: {1}').format(name, exc))
continue
except Exception as exc:
log.warning(('Failed to import module {0}, this is due most'
' likely to a syntax error: {1}').format(name, exc))
continue
modules.append(mod)
for mod in modules:
virtual = ''

View File

@ -13,11 +13,15 @@ import hashlib
import tempfile
import datetime
import multiprocessing
import subprocess
# Import zeromq
import zmq
from M2Crypto import RSA
# Import Third Party Libs
import yaml
# Import salt modules
import salt.crypt
import salt.utils
@ -42,7 +46,7 @@ def prep_jid(opts, load):
os.makedirs(jid_dir)
serial.dump(load, open(os.path.join(jid_dir, '.load.p'), 'w+'))
else:
return prep_jid(cachedir, load)
return prep_jid(opts, load)
return jid
@ -97,6 +101,7 @@ class Master(SMaster):
'''
Clean out the old jobs
'''
salt.utils.append_pid(self.opts['pidfile'])
while True:
cur = "{0:%Y%m%d%H}".format(datetime.datetime.now())
@ -153,6 +158,7 @@ class Publisher(multiprocessing.Process):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.append_pid(self.opts['pidfile'])
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUB)
pull_sock = context.socket(zmq.PULL)
@ -160,14 +166,13 @@ class Publisher(multiprocessing.Process):
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
log.info('Starting the Salt Publisher on %s', pub_uri)
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
pull_sock.bind(pull_uri)
try:
while True:
package = pull_sock.recv()
log.info('Publishing command')
pub_sock.send(package)
except KeyboardInterrupt:
pub_sock.close()
@ -228,6 +233,7 @@ class ReqServer(object):
'''
Start up the ReqServer
'''
salt.utils.append_pid(self.opts['pidfile'])
self.__bind()
@ -316,6 +322,7 @@ class MWorker(multiprocessing.Process):
'''
Start a Master Worker
'''
salt.utils.append_pid(self.opts['pidfile'])
self.__bind()
@ -372,6 +379,44 @@ class AESFuncs(object):
.format(id_))
return False
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
if 'id' not in load:
log.error('Received call for external nodes without an id')
return {}
if not self.opts['external_nodes']:
return {}
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
ret = {}
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = ndata['classes'].keys()
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
return ret
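For reference, a minimal sketch of what an external_nodes classifier could look like; the script and class names are hypothetical, and the only contract _ext_nodes() relies on is that the configured command prints YAML with optional 'classes' and 'environment' keys to stdout:

#!/usr/bin/env python
# Hypothetical ENC script; the master runs "<external_nodes> <minion id>"
# and parses the YAML printed on stdout.
import sys
import yaml

def classify(minion_id):
    # Static data purely for illustration; a real classifier would look the
    # minion up in a CMDB, LDAP, etc.
    return {'environment': 'base',
            'classes': ['common', 'webserver']}

if __name__ == '__main__':
    sys.stdout.write(
        yaml.safe_dump(classify(sys.argv[1]), default_flow_style=False))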
def _serve_file(self, load):
'''
Return a chunk from a file based on the data received
@ -426,6 +471,19 @@ class AESFuncs(object):
)
return ret
def _file_list_emptydirs(self, load):
'''
Return a list of all empty directories on the master
'''
ret = []
if load['env'] not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][load['env']]:
for root, dirs, files in os.walk(path):
if not dirs and not files:
ret.append(os.path.relpath(root, path))
return ret
def _master_opts(self, load):
'''
Return the master options to the minion
@ -536,7 +594,9 @@ class AESFuncs(object):
'ret': clear_load['ret'],
}
expr_form = 'glob'
timeout = 0
timeout = 5
if 'tmo' in clear_load:
timeout = int(clear_load['tmo'])
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
expr_form = load['tgt_type']
@ -551,6 +611,8 @@ class AESFuncs(object):
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
log.info(('Publishing minion job: #{0[jid]}, func: "{0[fun]}", args:'
' "{0[arg]}", target: "{0[tgt]}"').format(load))
pub_sock.send(self.serial.dumps(payload))
# Run the client get_returns method
return self.local.get_returns(
@ -646,6 +708,9 @@ class ClearFuncs(object):
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
@ -661,6 +726,12 @@ class ClearFuncs(object):
ret = {'enc': 'clear',
'load': {'ret': False}}
return ret
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for %(id)s', load)
ret = {'enc': 'clear',
'load': {'ret': False}}
return ret
elif not os.path.isfile(pubfn_pend)\
and not self.opts['auto_accept']:
# This is a new key, stick it in pre

View File

@ -12,6 +12,7 @@ import hashlib
import os
import re
import shutil
import string
import tempfile
import threading
import time
@ -24,7 +25,7 @@ import zmq
# Import salt libs
from salt.exceptions import AuthenticationError, MinionError, \
CommandExecutionError, SaltInvocationError
CommandExecutionError, CommandNotFoundError, SaltInvocationError
import salt.client
import salt.crypt
import salt.loader
@ -44,6 +45,21 @@ log = logging.getLogger(__name__)
# 6. handle publications
def get_proc_dir(cachedir):
'''
Return the directory that process data is stored in
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
else:
# proc_dir is present, clean out old proc files
for proc_fn in os.listdir(fn_):
os.remove(os.path.join(fn_, proc_fn))
return fn_
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
@ -81,6 +97,7 @@ class Minion(object):
self.mod_opts = self.__prep_mod_opts()
self.functions, self.returners = self.__load_modules()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
if hasattr(self,'_syndic') and self._syndic:
log.warn('Starting the Salt Syndic Minion')
else:
@ -102,6 +119,7 @@ class Minion(object):
'''
Return the functions and the returners loaded up from the loader module
'''
self.opts['grains'] = salt.loader.grains(self.opts)
functions = salt.loader.minion_mods(self.opts)
returners = salt.loader.returners(self.opts)
return functions, returners
@ -168,7 +186,7 @@ class Minion(object):
self.functions, self.returners = self.__load_modules()
if self.opts['multiprocessing']:
if isinstance(data['fun'], list):
if isinstance(data['fun'], (tuple, list)):
multiprocessing.Process(
target=lambda: self._thread_multi_return(data)
).start()
@ -177,7 +195,7 @@ class Minion(object):
target=lambda: self._thread_return(data)
).start()
else:
if isinstance(data['fun'], list):
if isinstance(data['fun'], (tuple, list)):
threading.Thread(
target=lambda: self._thread_multi_return(data)
).start()
@ -191,6 +209,11 @@ class Minion(object):
This method should be used as a threading target, start the actual
minion side execution.
'''
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
open(fn_, 'w+').write(self.serial.dumps(sdata))
ret = {}
for ind in range(0, len(data['arg'])):
try:
@ -208,6 +231,10 @@ class Minion(object):
if function_name in self.functions:
try:
ret['return'] = self.functions[data['fun']](*data['arg'])
except CommandNotFoundError as exc:
msg = 'Command not found in \'{0}\': {1}'
log.debug(msg.format(function_name, str(exc)))
ret['return'] = msg.format(function_name, str(exc))
except CommandExecutionError as exc:
msg = 'A command in {0} had a problem: {1}'
log.error(msg.format(function_name, str(exc)))
@ -289,6 +316,10 @@ class Minion(object):
'''
Return the data from the executed command to the master server
'''
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, ret['jid'])
if os.path.isfile(fn_):
os.remove(fn_)
log.info('Returning information for job: {0}'.format(ret['jid']))
context = zmq.Context()
socket = context.socket(zmq.REQ)
@ -356,19 +387,10 @@ class Minion(object):
Check to see if the salt refresh file has been laid down, if it has,
refresh the functions and returners.
'''
if os.path.isfile(
os.path.join(
self.opts['cachedir'],
'.module_refresh'
)
):
fn_ = os.path.join(self.opts['cachedir'], 'module_refresh')
if os.path.isfile(fn_):
os.remove(fn_)
self.functions, self.returners = self.__load_modules()
os.remove(
os.path.join(
self.opts['cachedir'],
'.module_refresh'
)
)
def tune_in(self):
'''
@ -540,7 +562,7 @@ class Matcher(object):
'''
Determines if this host is on the list
'''
return bool(tgt in self.opts['id'])
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt):
'''
@ -553,7 +575,7 @@ class Matcher(object):
if comps[0] not in self.opts['grains']:
log.error('Got unknown grain from master: {0}'.format(comps[0]))
return False
return bool(re.match(comps[1], self.opts['grains'][comps[0]]))
return bool(re.match(comps[1], str(self.opts['grains'][comps[0]])))
def exsel_match(self, tgt):
'''
@ -716,6 +738,43 @@ class FileClient(object):
fn_.write(data['data'])
return dest
def get_dir(self, path, dest='', env='base'):
'''
Get a directory recursively from the salt-master
'''
ret = []
# Strip trailing slash
path = string.rstrip(self._check_proto(path), '/')
# Break up the path into a list containing the bottom-level directory
# (the one being recursively copied) and the directories preceding it
separated = string.rsplit(path, '/', 1)
if len(separated) != 2:
# No slashes in path. (This means all files in env will be copied)
prefix = ''
else:
prefix = separated[0]
# Copy files from master
for fn_ in self.file_list(env):
if fn_.startswith(path):
# Remove the leading directories from path to derive
# the relative path on the minion.
minion_relpath = string.lstrip(fn_[len(prefix):], '/')
ret.append(self.get_file('salt://{0}'.format(fn_),
'%s/%s' % (dest, minion_relpath),
True, env))
# Replicate empty dirs from master
for fn_ in self.file_list_emptydirs(env):
if fn_.startswith(path):
# Remove the leading directories from path to derive
# the relative path on the minion.
minion_relpath = string.lstrip(fn_[len(prefix):], '/')
minion_mkdir = '%s/%s' % (dest, minion_relpath)
os.makedirs(minion_mkdir)
ret.append(minion_mkdir)
ret.sort()
return ret
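A quick worked example of the prefix handling above, with made-up paths: for path 'files/app' the rsplit leaves prefix 'files', so a master file 'files/app/conf/site.conf' lands at '<dest>/app/conf/site.conf' on the minion. A standalone sketch of that arithmetic:

# Standalone sketch of get_dir's prefix math; the paths are hypothetical.
path = 'files/app'
dest = '/tmp/target'
separated = path.rsplit('/', 1)
prefix = separated[0] if len(separated) == 2 else ''
fn_ = 'files/app/conf/site.conf'            # as it would come back from file_list()
minion_relpath = fn_[len(prefix):].lstrip('/')
print('%s/%s' % (dest, minion_relpath))     # -> /tmp/target/app/conf/site.conf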
def get_url(self, url, dest, makedirs=False, env='base'):
'''
Get a single file from a URL.
@ -729,7 +788,7 @@ class FileClient(object):
if makedirs:
os.makedirs(destdir)
else:
return False
return ''
else:
dest = os.path.join(
self.opts['cachedir'],
@ -754,7 +813,7 @@ class FileClient(object):
*BaseHTTPServer.BaseHTTPRequestHandler.responses[ex.code]))
except urllib2.URLError, ex:
raise MinionError('Error reading {0}: {1}'.format(url, ex.reason))
return False
return ''
def cache_file(self, path, env='base'):
'''
@ -790,7 +849,10 @@ class FileClient(object):
path = self._check_proto(path)
for fn_ in self.file_list(env):
if fn_.startswith(path):
ret.append(self.cache_file('salt://{0}'.format(fn_), env))
if not fn_.strip():
continue
local = self.cache_file('salt://{0}'.format(fn_), env)
ret.append(local)
return ret
def cache_local_file(self, path, **kwargs):
@ -818,6 +880,17 @@ class FileClient(object):
self.socket.send(self.serial.dumps(payload))
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
def file_list_emptydirs(self, env='base'):
'''
List the empty dirs on the master
'''
payload = {'enc': 'aes'}
load = {'env': env,
'cmd': '_file_list_emptydirs'}
payload['load'] = self.auth.crypticle.dumps(load)
self.socket.send(self.serial.dumps(payload))
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
def file_local_list(self, env='base'):
'''
List files in the local minion files and localfiles caches
@ -906,3 +979,15 @@ class FileClient(object):
payload['load'] = self.auth.crypticle.dumps(load)
self.socket.send(self.serial.dumps(payload))
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))
def ext_nodes(self):
'''
Return the metadata derived from the external nodes system on the
master.
'''
payload = {'enc': 'aes'}
load = {'cmd': '_ext_nodes',
'id': self.opts['id']}
payload['load'] = self.auth.crypticle.dumps(load)
self.socket.send(self.serial.dumps(payload))
return self.auth.crypticle.loads(self.serial.loads(self.socket.recv()))

View File

@ -10,93 +10,96 @@ import os
import subprocess
import tempfile
import salt.utils
import pwd
from salt.exceptions import CommandExecutionError
try:
import pwd
except:
pass
# Set up logging
log = logging.getLogger(__name__)
# Set the default working directory to the home directory
# of the user salt-minion is running as. Default: /root
DEFAULT_CWD = os.path.expanduser('~')
# Set up the default outputters
__outputter__ = {
'run': 'txt',
}
def _run(cmd,
cwd=DEFAULT_CWD,
cwd=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
quiet=False,
runas=None):
runas=None,
with_env=True):
'''
Do the DRY thing and only call subprocess.Popen() once
'''
ret = {}
uid = os.getuid()
euid = os.geteuid()
# Set the default working directory to the home directory
# of the user salt-minion is running as. Default: /root
if not cwd:
cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
def su():
os.setuid(runas_uid)
os.seteuid(runas_uid)
# TODO: Figure out the proper way to do this in windows
disable_runas = [
'Windows',
]
ret = {}
if runas and __grains__['os'] in disable_runas:
msg = 'Sorry, {0} does not support runas functionality'
raise CommandExecutionError(msg.format(__grains__['os']))
if runas:
# Save the original command before munging it
orig_cmd = cmd
try:
p = pwd.getpwnam(runas)
except KeyError:
stderr_str = 'The user {0} is not available'.format(runas)
if stderr == subprocess.STDOUT:
ret['stdout'] = stderr_str
else:
ret['stdout'] = ''
ret['stderr'] = stderr_str
ret['retcode'] = 1
return ret
runas_uid = p.pw_uid
preexec = su
else:
preexec = None
msg = 'User \'{0}\' is not available'.format(runas)
raise CommandExecutionError(msg)
cmd_prefix = 'su'
# Load the 'nix environment
if with_env:
cmd_prefix += ' - '
cmd_prefix += runas + ' -c'
cmd = '{0} "{1}"'.format(cmd_prefix, cmd)
if not quiet:
if runas:
log.info('Executing command {0} as user {1} in directory {2}'.format(
cmd, runas, cwd))
else:
# Put the most common case first
if not runas:
log.info('Executing command {0} in directory {1}'.format(cmd, cwd))
try:
proc = subprocess.Popen(cmd,
cwd=cwd,
shell=True,
stdout=stdout,
stderr=stderr,
preexec_fn=preexec
)
out = proc.communicate()
ret['stdout'] = out[0]
ret['stderr'] = out[1]
ret['retcode'] = proc.returncode
ret['pid'] = proc.pid
except OSError:
stderr_str = 'Unable to change to user {0}: permission denied'.format(runas)
if stderr == subprocess.STDOUT:
ret['stdout'] = stderr_str
else:
ret['stdout'] = ''
ret['stderr'] = stderr_str
ret['retcode'] = 2
log.info('Executing command {0} as user {1} in directory {2}'.format(
orig_cmd, runas, cwd))
# This is where the magic happens
proc = subprocess.Popen(cmd,
cwd=cwd,
shell=True,
stdout=stdout,
stderr=stderr
)
out = proc.communicate()
ret['stdout'] = out[0]
ret['stderr'] = out[1]
ret['pid'] = proc.pid
ret['retcode'] = proc.returncode
return ret
def _run_quiet(cmd, cwd=DEFAULT_CWD, runas=None):
def _run_quiet(cmd, cwd=None, runas=None):
'''
Helper for running commands quietly for minion startup
'''
return _run(cmd, runas=runas, cwd=cwd, stderr=subprocess.STDOUT, quiet=True)['stdout']
def run(cmd, cwd=DEFAULT_CWD, runas=None):
def run(cmd, cwd=None, runas=None):
'''
Execute the passed command and return the output as a string
@ -109,7 +112,7 @@ def run(cmd, cwd=DEFAULT_CWD, runas=None):
return out
def run_stdout(cmd, cwd=DEFAULT_CWD, runas=None):
def run_stdout(cmd, cwd=None, runas=None):
'''
Execute a command, and only return the standard out
@ -122,7 +125,7 @@ def run_stdout(cmd, cwd=DEFAULT_CWD, runas=None):
return stdout
def run_stderr(cmd, cwd=DEFAULT_CWD, runas=None):
def run_stderr(cmd, cwd=None, runas=None):
'''
Execute a command and only return the standard error
@ -135,7 +138,7 @@ def run_stderr(cmd, cwd=DEFAULT_CWD, runas=None):
return stderr
def run_all(cmd, cwd=DEFAULT_CWD, runas=None):
def run_all(cmd, cwd=None, runas=None):
'''
Execute the passed command and return a dict of return data
@ -155,7 +158,7 @@ def run_all(cmd, cwd=DEFAULT_CWD, runas=None):
return ret
def retcode(cmd, cwd=DEFAULT_CWD, runas=None):
def retcode(cmd, cwd=None, runas=None):
'''
Execute a shell command and return the command's return code.
@ -186,7 +189,7 @@ def which(cmd):
'''
return salt.utils.which(cmd)
def exec_code(lang, code, cwd=DEFAULT_CWD):
def exec_code(lang, code, cwd=None):
'''
Pass in two strings, the first naming the executable language, aka -
python2, python3, ruby, perl, lua, etc. the second string containing

View File

@ -38,19 +38,35 @@ def recv(files, dest):
def get_file(path, dest, env='base'):
'''
Used to get a single file from the salt master
CLI Example::
salt '*' cp.get_file salt://path/to/file /minion/dest
'''
client = salt.minion.FileClient(__opts__)
return client.get_file(path, dest, False, env)
def get_dir(path, dest, env='base'):
'''
Used to recursively copy a directory from the salt master
CLI Example::
salt '*' cp.get_dir salt://path/to/dir/ /minion/dest
'''
client = salt.minion.FileClient(__opts__)
return client.get_dir(path, dest, env)
def get_url(path, dest, env='base'):
'''
Used to get a single file from a URL.
For example::
CLI Example::
cp.get_url salt://my/file /tmp/mine
cp.get_url http://www.slashdot.org /tmp/index.html
salt '*' cp.get_url salt://my/file /tmp/mine
salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
'''
client = salt.minion.FileClient(__opts__)
return client.get_url(path, dest, False, env)
@ -59,6 +75,10 @@ def get_url(path, dest, env='base'):
def cache_file(path, env='base'):
'''
Used to cache a single file in the local salt-master file cache.
CLI Example::
salt '*' cp.cache_file salt://path/to/file
'''
client = salt.minion.FileClient(__opts__)
return client.cache_file(path, env)
@ -69,6 +89,10 @@ def cache_files(paths, env='base'):
Used to gather many files from the master, the gathered files will be
saved in the minion cachedir reflective to the paths retrieved from the
master.
CLI Example::
salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1
'''
client = salt.minion.FileClient(__opts__)
return client.cache_files(paths, env)
@ -77,6 +101,10 @@ def cache_files(paths, env='base'):
def cache_dir(path, env='base'):
'''
Download and cache everything under a directory from the master
CLI Example::
salt '*' cp.cache_dir salt://path/to/dir
'''
client = salt.minion.FileClient(__opts__)
return client.cache_dir(path, env)
@ -85,6 +113,10 @@ def cache_dir(path, env='base'):
def cache_master(env='base'):
'''
Retrieve all of the files on the master and cache them locally
CLI Example::
salt '*' cp.cache_master
'''
client = salt.minion.FileClient(__opts__)
return client.cache_master(env)
@ -93,6 +125,10 @@ def cache_master(env='base'):
def cache_local_file(path):
'''
Cache a local file on the minion in the localfiles cache
CLI Example::
salt '*' cp.cache_local_file /etc/hosts
'''
if not os.path.exists(path):
return ''
@ -115,6 +151,10 @@ def cache_local_file(path):
def list_master(env='base'):
'''
List all of the files stored on the master
CLI Example::
salt '*' cp.list_master
'''
client = salt.minion.FileClient(__opts__)
return client.file_list(env)
@ -123,6 +163,10 @@ def list_master(env='base'):
def list_minion(env='base'):
'''
List all of the files cached on the minion
CLI Example::
salt '*' cp.list_minion
'''
client = salt.minion.FileClient(__opts__)
return client.file_local_list(env)
@ -132,6 +176,10 @@ def is_cached(path, env='base'):
'''
Return a boolean if the given path on the master has been cached on the
minion
CLI Example::
salt '*' cp.is_cached salt://path/to/file
'''
client = salt.minion.FileClient(__opts__)
return client.is_cached(path, env)
@ -142,6 +190,10 @@ def hash_file(path, env='base'):
Return the hash of a file, to get the hash of a file on the
salt master file server prepend the path with salt://<file on server>
otherwise, prepend the file with / for a local file.
CLI Example::
salt '*' cp.hash_file salt://path/to/file
'''
client = salt.minion.FileClient(__opts__)
return client.hash_file(path, env)

View File

@ -12,7 +12,6 @@ def echo(text):
CLI Example:
salt '*' test.echo 'foo bar baz quo qux'
'''
print 'Echo got called!'
return text

View File

@ -2,6 +2,11 @@
Module for gathering disk information
'''
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
@ -34,13 +39,17 @@ def usage():
if line.startswith('Filesystem'):
continue
comps = line.split()
ret[comps[5]] = {
'filesystem': comps[0],
'1K-blocks': comps[1],
'used': comps[2],
'available': comps[3],
'capacity': comps[4],
}
try:
ret[comps[5]] = {
'filesystem': comps[0],
'1K-blocks': comps[1],
'used': comps[2],
'available': comps[3],
'capacity': comps[4],
}
except IndexError:
log.warn("Problem parsing disk usage information")
ret = {}
return ret
def inodeusage():
@ -71,5 +80,6 @@ def inodeusage():
'filesystem': comps[0],
}
except IndexError:
print "DEBUG: comps='%s'" % comps
log.warn("Problem parsing inode usage information")
ret = {}
return ret

View File

@ -0,0 +1,115 @@
'''
The service module for FreeBSD
'''
import os
def __virtual__():
'''
Only work on FreeBSD
'''
# Provide the service module only on FreeBSD; other platforms ship their own
if __grains__['os'] == 'FreeBSD':
return 'service'
return False
def get_enabled():
'''
Return what services are set to run on boot
'''
ret = []
for rcfn in ('/etc/rc.conf', '/etc/rc.conf.local'):
if os.path.isfile(rcfn):
for line in open(rcfn, 'r').readlines():
if not line.strip():
continue
if line.startswith('#'):
continue
if '_enable' not in line:
continue
if '=' not in line:
continue
comps = line.split('=')
if 'YES' in comps[1]:
# Is enabled!
ret.append(comps[0].split('_')[0])
return ret
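To illustrate the parsing above, a line such as sshd_enable="YES" in /etc/rc.conf yields the service name 'sshd'. A standalone sketch with made-up sample lines:

# Standalone sketch of the rc.conf parsing in get_enabled(); sample lines are made up.
sample = ['sshd_enable="YES"', 'ntpd_enable="NO"', '# comment', '']
enabled = []
for line in sample:
    if not line.strip() or line.startswith('#'):
        continue
    if '_enable' not in line or '=' not in line:
        continue
    comps = line.split('=')
    if 'YES' in comps[1]:
        enabled.append(comps[0].split('_')[0])
print(enabled)   # -> ['sshd']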
def get_disabled():
'''
Return what services are available but not enabled to start at boot
'''
en_ = get_enabled()
all_ = get_all()
return sorted(set(all_).difference(set(en_)))
def get_all():
'''
Return a list of all available services
'''
ret = set()
for rcdir in ('/etc/rc.d/', '/usr/local/etc/rc.d/'):
ret.update(os.listdir(rcdir))
rm_ = set()
for srv in ret:
if srv.isupper():
rm_.add(srv)
ret.difference_update(rm_)
return sorted(ret)
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = 'service {0} onestart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = 'service {0} onestop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
cmd = 'service {0} onerestart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service: the PID if the service is running, or an
empty string if it is not. Pass a signature to use to find the service via
ps.
CLI Example::
salt '*' service.status <service name> [service signature]
'''
sig = name if not sig else sig
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
__grains__, sig)
return __salt__['cmd.run'](cmd).strip()

456
salt/modules/kvm_hyper.py Normal file
View File

@ -0,0 +1,456 @@
'''
Provide the hyper module for kvm hypervisors. This is the interface used to
interact with kvm on behalf of the salt-virt interface
'''
# This is a test interface for the salt-virt system. The api in this file is
# VERY likely to change.
# Import Python Libs
from xml.dom import minidom
import StringIO
import os
import shutil
import subprocess
# Import libvirt
import libvirt
# Import Third party modules
import yaml
# Import Salt Modules
import salt.utils
VIRT_STATE_NAME_MAP = {0: "running",
1: "running",
2: "running",
3: "paused",
4: "shutdown",
5: "shutdown",
6: "crashed"}
def __virtual__():
'''
Apply this module as the hyper module if the minion is a kvm hypervisor
'''
if __grains__['virtual'] != 'physical':
return False
if 'kvm_' not in open('/proc/modules').read():
return False
#libvirt_ret = __salt__['cmd.run'](__grains__['ps']).count('libvirtd')
try:
libvirt_conn = libvirt.open('qemu:///system')
libvirt_conn.close()
return 'hyper'
except:
return False
def __get_conn():
'''
Connects to the libvirt daemon for qemu/kvm
'''
return libvirt.open("qemu:///system")
def _get_dom(vm_):
'''
Return a domain object for the named vm
'''
conn = __get_conn()
if vm_ not in list_virts():
raise Exception('The specified vm is not present')
return conn.lookupByName(vm_)
# Define tier 1 Virt functions, all hyper interfaces have these:
# hyper_type
# list_virts
# hyper_info
# get_conf
def hyper_type():
'''
Return the type of hypervisor this is
'''
return 'kvm'
def freemem():
'''
Return an int representing the amount of memory that has not been given
to virtual machines on this node
CLI Example::
salt '*' virt.freemem
'''
conn = __get_conn()
mem = conn.getInfo()[1]
# Take off just enough to sustain the hypervisor
mem -= 256
for vm_ in list_virts():
dom = _get_dom(vm_)
if dom.ID() > 0:
mem -= dom.info()[2] / 1024
return mem
def freecpu():
'''
Return an int representing the number of unallocated cpus on this
hypervisor
CLI Example::
salt '*' virt.freecpu
'''
conn = __get_conn()
cpus = conn.getInfo()[2]
for vm_ in list_virts():
dom = _get_dom(vm_)
if dom.ID() > 0:
cpus -= dom.info()[3]
return cpus
def list_virts():
'''
Return a list of virtual machine names on the minion
CLI Example::
salt '*' virt.list_virts
'''
# Expand to include down vms
conn = __get_conn()
vms = []
for id_ in conn.listDomainsID():
vms.append(conn.lookupByID(id_).name())
return vms
def virt_info():
'''
Return detailed information about the vms on this hyper in a dict::
{'cpu': <int>,
'maxMem': <int>,
'mem': <int>,
'state': '<state>',
'cputime' <int>}
CLI Example::
salt '*' virt.virt_info
'''
info = {}
for vm_ in list_virts():
dom = _get_dom(vm_)
raw = dom.info()
info[vm_] = {
'state': VIRT_STATE_NAME_MAP.get(raw[0], 'unknown'),
'maxMem': int(raw[1]),
'mem': int(raw[2]),
'cpu': raw[3],
'cputime': int(raw[4]),
'disks': get_disks(vm_),
}
return info
def hyper_info():
'''
Return a dict with information about this hypervisor
CLI Example::
salt '*' virt.hyper_info
'''
conn = __get_conn()
raw = conn.getInfo()
info = {
'phymemory': raw[1],
'cpus': raw[2],
'cpumhz': raw[3],
'cpucores': raw[6],
'cputhreads': raw[7],
'type': hyper_type(),
'freecpu': freecpu(),
'freemem': freemem(),
'virt_info': virt_info(),
}
return info
# Level 2 - vm class specific
# init - Create a vm from options
# start - Start a down vm
# halt
# purge
# pause
# resume
# set_autostart
# get_disks
# get_conf
def _get_image(image, vda):
'''
Copy the image into place
'''
if ':' in image:
if not os.path.isabs(image) or not image.startswith('file://'):
# The image is on a network resource
env = 'base'
if not image.rindex(':') == 4:
env = image.split(':')[-1]
image = image[:image.rindex(':')]
__salt__['cp.get_url'](image, vda, env)
if os.path.isabs(image) or image.startswith('file://'):
# This is a local file, copy it into place
if image.startswith('file://'):
# Condition this into a standard path
for ind in range(6, len(image)):
if image[ind].isalpha():
image = os.path.join('/', image[ind:])
break
shutil.copy2(image, vda)
def _gen_xml(name,
cpus,
mem,
vmdir,
disks,
network,
desc,
opts):
'''
Generate the xml used for the libvirt configuration
'''
# Don't generate the libvirt config if it already exists
vda = os.path.join(vmdir, 'vda')
data = '''
<domain type='kvm'>
<name>%%NAME%%</name>
<vcpu>%%CPU%%</vcpu>
<memory>%%MEM%%</memory>
<os>
<type>hvm</type>
<boot dev='hd'/>
</os>
<devices>
<emulator>/usr/bin/kvm</emulator>
<disk type='file' device='disk'>
<source file='%%VDA%%'/>
<target dev='vda' bus='virtio'/>
<driver name='qemu' cache='writeback' io='native'/>
</disk>
%%DISK%%
%%NICS%%
<graphics type='vnc' listen='0.0.0.0' autoport='yes'/>
</devices>
<features>
<acpi/>
</features>
</domain>
'''
data = data.replace('%%NAME%%', name)
data = data.replace('%%CPU%%', str(cpus))
data = data.replace('%%MEM%%', str(int(mem) * 1024))
data = data.replace('%%VDA%%', vda)
nics = ''
# Use a distinct loop variable so the XML template held in 'data' is not clobbered
for interface, iface_data in network.items():
for bridge, mac in iface_data.items():
if not mac:
# Generate this interface's mac addr, use the qemu default
# prefix, 52:54
mac = salt.utils.gen_mac('52:54:')
nic = '''
<interface type='bridge'>
<source bridge='%%BRIDGE%%'/>
<mac address='%%MAC%%'/>
<model type='virtio'/>
</interface>\n'''
nic = nic.replace('%%BRIDGE%%', bridge)
nic = nic.replace('%%MAC%%', mac)
nics += nic
data = data.replace('%%NICS%%', nics)
if disks:
letters = salt.utils.gen_letters()
disk_str = ''
for ind in range(0, len(disks)):
disk = disks[ind]
disk_d = '''
<disk type='file' device='disk'>
<source file='%%DISK_PATH%%'/>
<target dev='%%VD%%' bus='virtio'/>
<driver name='qemu' type='%%TYPE%%' cache='writeback' io='native'/>
</disk>
'''
disk_d = disk_d.replace('%%DISK_PATH%%', disk['path'])
disk_d = disk_d.replace('%%TYPE%%', disk['format'])
disk_d = disk_d.replace('%%VD%%', 'vd' + letters[ind + 1])
disk_str += disk_d
data = data.replace('%%DISK%%', disk_str)
else:
data = data.replace('%%DISK%%', '')
return data
def init(
name,
cpus,
mem,
image,
storage_dir,
network={'eth0': {'bridge': 'br0', 'mac': ''}},
desc='',
opts={}):
'''
Create a KVM virtual machine based on these passed options, the virtual
machine will be started upon creation
CLI Example::
salt node1 hyper.init webserver 2 2048 salt://fedora/f16.img:virt /srv/vm/images
'''
vmdir = os.path.join(storage_dir, name)
if not os.path.exists(vmdir):
os.makedirs(vmdir)
vda = os.path.join(vmdir, 'vda')
_get_image(image, vda)
# The image is in place
# _gen_xml expects a disks argument; pass an empty list since init takes no extra data disks
xml = _gen_xml(name, cpus, mem, vmdir, [], network, desc, opts)
config = os.path.join(vmdir, 'config.xml')
open(config, 'w+').write(xml)
return start(config)
def start(config):
'''
Start an already defined virtual machine that has been shut down
'''
# change this to use the libvirt api and add more logging and a verbose
# return
cmd = 'virsh create {0}'.format(config)
return not __salt__['cmd.retcode'](cmd)
def halt(name):
'''
Hard power down a virtual machine
'''
try:
dom = _get_dom(name)
dom.destroy()
except:
return False
return True
def purge(name):
'''
Hard power down and purge a virtual machine, this will destroy a vm and
all associated vm data
'''
disks = get_disks(name)
halt(name)
directories = set()
for disk in disks:
os.remove(disks[disk]['file'])
directories.add(os.path.dirname(disks[disk]['file']))
if directories:
for dir_ in directories:
shutil.rmtree(dir_)
return True
def pause(name):
'''
Pause the named virtual machine
'''
dom = _get_dom(name)
dom.suspend()
return True
def resume(name):
'''
Resume the named virtual machine
'''
dom = _get_dom(name)
dom.resume()
return True
def set_autostart(name, state='on'):
'''
Set the named virtual machine to autostart when the hypervisor boots
'''
dom = _get_dom(name)
if state == 'on':
if dom.setAutostart(1) == 0:
return True
else:
return False
elif state == 'off':
if dom.setAutostart(0) == 0:
return True
else:
return False
else:
# return False if state is set to something other than on or off
return False
def get_disks(name):
'''
Return the disks of a named virt
CLI Example::
salt '*' virt.get_disks <vm name>
'''
disks = {}
doc = minidom.parse(StringIO.StringIO(get_conf(name)))
for elem in doc.getElementsByTagName('disk'):
sources = elem.getElementsByTagName('source')
targets = elem.getElementsByTagName('target')
if len(sources) > 0:
source = sources[0]
else:
continue
if len(targets) > 0:
target = targets[0]
else:
continue
if 'dev' in target.attributes.keys() \
and 'file' in source.attributes.keys():
disks[target.getAttribute('dev')] = \
{'file': source.getAttribute('file')}
for dev in disks:
disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info ' \
+ disks[dev]['file'],
shell=True,
stdout=subprocess.PIPE).communicate()[0]))
return disks
def get_conf(name):
'''
Returns the xml for a given vm
CLI Example::
salt '*' virt.get_conf <vm name>
'''
dom = _get_dom(name)
return dom.XMLDesc(0)

View File

@ -65,6 +65,8 @@ def assign(name, value):
cmd = 'sysctl -w {0}={1}'.format(name, value)
ret = {}
out = __salt__['cmd.run'](cmd).strip()
if ' = ' not in out:
raise CommandExecutionError('sysctl -w failed: {0}'.format(out))
comps = out.split(' = ')
ret[comps[0]] = comps[1]
return ret

144
salt/modules/mysql.py Normal file → Executable file
View File

@ -23,6 +23,15 @@ import MySQLdb.cursors
log = logging.getLogger(__name__)
__opts__ = {}
def __virtual__():
'''
Only load this module if the mysql config is set
'''
if 'mysql' in __opts__:
return 'mysql'
return False
def __check_table(name, table):
db = connect()
cur = db.cursor(MySQLdb.cursors.DictCursor)
@ -120,7 +129,7 @@ def slave_lag():
'''
Return the number of seconds that a slave SQL server is lagging behind the
master, if the host is not a slave it will return -1. If the server is
configured to be a slave but replication but slave IO is not running then
configured to be a slave for replication but slave IO is not running then
-2 will be returned.
CLI Example::
@ -189,7 +198,7 @@ def db_list():
CLI Example::
salt '*' mysqldb.db_list
salt '*' mysql.db_list
'''
ret = []
db = connect()
@ -208,7 +217,7 @@ def db_tables(name):
CLI Example::
salt '*' mysqldb.db_tables 'database'
salt '*' mysql.db_tables 'database'
'''
if not db_exists(name):
log.info("Database '{0}' does not exist".format(name,))
@ -233,7 +242,7 @@ def db_exists(name):
CLI Example::
salt '*' mysqldb.db_exists 'dbname'
salt '*' mysql.db_exists 'dbname'
'''
db = connect()
cur = db.cursor()
@ -252,7 +261,7 @@ def db_create(name):
CLI Example::
salt '*' mysqldb.db_create 'dbname'
salt '*' mysql.db_create 'dbname'
'''
# check if db exists
if db_exists(name):
@ -275,7 +284,7 @@ def db_remove(name):
CLI Example::
salt '*' mysqldb.db_remove 'dbname'
salt '*' mysql.db_remove 'dbname'
'''
# check if db exists
if not db_exists(name):
@ -309,7 +318,7 @@ def user_list():
CLI Example::
salt '*' mysqldb.user_list
salt '*' mysql.user_list
'''
db = connect()
cur = db.cursor(MySQLdb.cursors.DictCursor)
@ -325,7 +334,7 @@ def user_exists(user,
CLI Example::
salt '*' mysqldb.user_exists 'username' 'hostname'
salt '*' mysql.user_exists 'username' 'hostname'
'''
db = connect()
cur = db.cursor()
@ -343,7 +352,7 @@ def user_info(user,
CLI Example::
salt '*' mysqldb.user_info root localhost
salt '*' mysql.user_info root localhost
'''
db = connect()
cur = db.cursor (MySQLdb.cursors.DictCursor)
@ -362,7 +371,7 @@ def user_create(user,
CLI Example::
salt '*' mysqldb.user_create 'username' 'hostname' 'password'
salt '*' mysql.user_create 'username' 'hostname' 'password'
'''
if user_exists(user,host):
log.info("User '{0}'@'{1}' already exists".format(user,host,))
@ -392,7 +401,7 @@ def user_chpass(user,
CLI Example::
salt '*' mysqldb.user_chpass frank localhost newpassword
salt '*' mysql.user_chpass frank localhost newpassword
'''
if password is None:
log.error('No password provided')
@ -410,13 +419,13 @@ def user_chpass(user,
return False
def user_remove(user,
host='localhost'):
host='localhost'):
'''
Delete MySQL user
CLI Example::
salt '*' mysqldb.user_remove frank localhost
salt '*' mysql.user_remove frank localhost
'''
db = connect()
cur = db.cursor ()
@ -441,7 +450,7 @@ def db_check(name,
CLI Example::
salt '*' mysqldb.db_check dbname
salt '*' mysql.db_check dbname
'''
ret = []
if table is None:
@ -462,7 +471,7 @@ def db_repair(name,
CLI Example::
salt '*' mysqldb.db_repair dbname
salt '*' mysql.db_repair dbname
'''
ret = []
if table is None:
@ -483,7 +492,7 @@ def db_optimize(name,
CLI Example::
salt '*' mysqldb.db_optimize dbname
salt '*' mysql.db_optimize dbname
'''
ret = []
if table is None:
@ -496,3 +505,106 @@ def db_optimize(name,
log.info("Optimizing table '{0}' in db '{1}'..".format(name, table))
ret = __optimize_table(name,table)
return ret
# Grants
def __grant_generate(grant,
database,
user,
host='localhost'):
# todo: Re-order the grant so it is according to the SHOW GRANTS for xxx@yyy query (SELECT comes first, etc)
grant = grant.replace(',', ', ').upper()
db_part = database.partition('.')
db = db_part[0]
table = db_part[2]
query = "GRANT %s ON `%s`.`%s` TO '%s'@'%s'" % (grant, db, table, user, host,)
log.debug("Query generated: {0}".format(query,))
return query
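For example, __grant_generate('select,insert', 'testdb.*', 'frank') should assemble the query shown below; the database and user names are made up:

# Illustration of the query __grant_generate() assembles (hypothetical names).
grant, database, user, host = 'select,insert', 'testdb.*', 'frank', 'localhost'
db, _, table = database.partition('.')
print("GRANT %s ON `%s`.`%s` TO '%s'@'%s'" % (
    grant.replace(',', ', ').upper(), db, table, user, host))
# -> GRANT SELECT, INSERT ON `testdb`.`*` TO 'frank'@'localhost'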
def user_grants(user,
host='localhost'):
'''
Shows the grants for the given MySQL user (if it exists)
CLI Example::
salt '*' mysql.user_grants 'frank' 'localhost'
'''
if not user_exists(user):
log.info("User '{0}' does not exist".format(user,))
return False
ret = []
db = connect()
cur = db.cursor()
query = "SHOW GRANTS FOR '%s'@'%s'" % (user,host,)
log.debug("Doing query: {0}".format(query,))
cur.execute(query)
results = cur.fetchall()
for grant in results:
ret.append(grant[0])
log.debug(ret)
return ret
def grant_exists(grant,
database,
user,
host='localhost'):
# todo: This function is a bit tricky, since it requires the ordering to be exactly the same.
# perhaps should be replaced/reworked with a better/cleaner solution.
target = __grant_generate(grant, database, user, host)
if target in user_grants(user, host):
log.debug("Grant exists.")
return True
log.debug("Grant does not exist, or is perhaps not ordered properly?")
return False
def grant_add(grant,
database,
user,
host='localhost'):
'''
Adds a grant to the MySQL server.
CLI Example::
salt '*' mysql.grant_add 'SELECT|INSERT|UPDATE|...' 'database.*' 'frank' 'localhost'
'''
# todo: validate grant
db = connect()
cur = db.cursor()
query = __grant_generate(grant, database, user, host)
log.debug("Query: {0}".format(query,))
if cur.execute(query):
log.info("Grant '{0}' created".format(grant))
return True
return False
def grant_revoke(grant,
database,
user,
host='localhost'):
'''
Removes a grant from the MySQL server.
CLI Example::
salt '*' mysql.grant_revoke 'SELECT,INSERT,UPDATE' 'database.*' 'frank' 'localhost'
'''
# todo: validate grant
db = connect()
cur = db.cursor()
query = "REVOKE %s ON %s FROM '%s'@'%s';" % (grant, database, user, host,)
log.debug("Query: {0}".format(query,))
if cur.execute(query):
log.info("Grant '{0}' revoked".format(grant))
return True
return False

View File

@ -29,7 +29,7 @@ def ping(host):
CLI Example::
salt '*' network.ping archlinux.org -c 4
salt '*' network.ping archlinux.org
'''
cmd = 'ping -c 4 %s' % _sanitize_host(host)
return __salt__['cmd.run'](cmd)

View File

@ -1,68 +1,68 @@
'''
Support for nginx
'''
import salt.utils
__outputter__ = {
'signal': 'txt',
}
def __virtual__():
'''
Only load the module if nginx is installed
'''
cmd = __detect_os()
if salt.utils.which(cmd):
return 'nginx'
return False
def __detect_os():
return 'nginx'
def version():
'''
Return server version from nginx -v
CLI Example::
salt '*' nginx.version
'''
cmd = __detect_os() + ' -v'
out = __salt__['cmd.run'](cmd).split('\n')
ret = out[0].split(': ')
return ret[2]
def signal(signal=None):
'''
Signals nginx to reopen, stop, quit or reload.
CLI Example::
salt '*' nginx.signal reload
'''
valid_signals = ('reopen', 'stop', 'quit', 'reload')
if signal not in valid_signals:
return
# Make sure you use the right arguments
if signal in valid_signals:
arguments = ' -s {0}'.format(signal)
else:
arguments = ' {0}'.format(signal)
cmd = __detect_os() + arguments
out = __salt__['cmd.run_all'](cmd)
# A non-zero return code means fail
if out['retcode'] and out['stderr']:
ret = out['stderr'].strip()
# 'nginxctl configtest' returns 'Syntax OK' to stderr
elif out['stderr']:
ret = out['stderr'].strip()
elif out['stdout']:
ret = out['stdout'].strip()
# No output for something like: nginxctl graceful
else:
ret = 'Command: "{0}" completed successfully!'.format(cmd)
return ret

View File

@ -224,3 +224,23 @@ def boot_time():
salt '*' ps.boot_time
'''
return psutil.BOOT_TIME
def network_io_counters():
'''
Return network I/O statistics.
CLI Example::
salt '*' ps.network_io_counters
'''
return dict(psutil.network_io_counters()._asdict())
def disk_io_counters():
'''
Return disk I/O statistics.
CLI Example::
salt '*' ps.disk_io_counters
'''
return dict(psutil.disk_io_counters()._asdict())

View File

@ -18,7 +18,7 @@ def _get_socket():
return socket
def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
def publish(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5):
'''
Publish a command from the minion out to other minions, publications need
to be enabled on the Salt master and the minion needs to have permission
@ -61,6 +61,7 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner=''):
'tgt': tgt,
'ret': returner,
'tok': tok,
'tmo': timeout,
'id': __opts__['id']}
payload['load'] = auth.crypticle.dumps(load)
socket = _get_socket()

View File

@ -2,6 +2,14 @@
Execute puppet routines
'''
from salt.exceptions import CommandNotFoundError
__outputter__ = {
'run': 'txt',
'noop': 'txt',
'fact': 'txt',
'facts': None,
}
def _check_puppet():
'''
@ -10,19 +18,106 @@ def _check_puppet():
# I thought about making this a virtual module, but then I realized that I
# would require the minion to restart if puppet was installed after the
# minion was started, and that would be rubbish
return __salt__['cmd.has_exec']('puppet')
return __salt__['cmd.has_exec']('puppetd')
def run():
def _check_facter():
'''
Execute a puppet run and return a dict with the stderr,stdout,return code
etc.
Checks if facter is installed
'''
return __salt__['cmd.has_exec']('facter')
def _format_fact(output):
try:
fact, value = output.split(' => ', 1)
value = value.strip()
except ValueError:
fact = None
value = None
return (fact, value)
def run(tags=None):
'''
Execute a puppet run and return a dict with the stderr, stdout,
return code, etc. If an argument is specified, it is treated as
a comma separated list of tags passed to puppetd --test --tags:
http://projects.puppetlabs.com/projects/1/wiki/Using_Tags
CLI Examples::
salt '*' puppet.run
salt '*' puppet.run basefiles::edit,apache::server
'''
if not _check_puppet():
raise CommandNotFoundError('puppetd not available')
if not tags:
cmd = 'puppetd --test'
else:
cmd = 'puppetd --test --tags "{0}"'.format(tags)
return __salt__['cmd.run_all'](cmd)
def noop(tags=None):
'''
Execute a puppet noop run and return a dict with the stderr, stdout,
return code, etc. If an argument is specified, it is treated as a
comma separated list of tags passed to puppetd --test --noop --tags
CLI Example::
salt '*' puppet.run
salt '*' puppet.noop
salt '*' puppet.noop web::server,django::base
'''
if _check_puppet():
return __salt__['cmd.run_all']('puppetd --test')
if not _check_puppet():
raise CommandNotFoundError('puppetd not available')
if not tags:
cmd = 'puppetd --test --noop'
else:
return {}
cmd = 'puppetd --test --tags "{0}" --noop'.format(tags)
return __salt__['cmd.run_all'](cmd)
def facts():
'''
Run facter and return the results
CLI Example::
salt '*' puppet.facts
'''
if not _check_facter():
raise CommandNotFoundError('facter not available')
ret = {}
output = __salt__['cmd.run']('facter')
# Loop over the facter output and properly
# parse it into a nice dictionary for using
# elsewhere
for line in output.split('\n'):
if not line: continue
fact, value = _format_fact(line)
if not fact:
continue
ret[fact] = value
return ret
def fact(name):
'''
Run facter for a specific fact
CLI Example::
salt '*' puppet.fact kernel
'''
if not _check_facter():
raise CommandNotFoundError('facter not available')
ret = __salt__['cmd.run']('facter {0}'.format(name))
if not ret:
return ''
return ret.rstrip()

View File

@ -127,10 +127,8 @@ def status(name, sig=None):
salt '*' service.status <service name> [service signature]
'''
sig = name if not sig else sig
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
__grains__, sig)
return __salt__['cmd.run'](cmd).strip()
cmd = 'service {0} status'.format(name)
return not __salt__['cmd.retcode'](cmd)
def enable(name):

View File

@ -3,11 +3,16 @@ The Saltutil module is used to manage the state of the salt minion itself. It is
used to manage minion modules as well as automate updates to the salt minion
'''
# Import Python libs
import os
import hashlib
import shutil
import signal
import logging
# Import Salt libs
import salt.payload
log = logging.getLogger(__name__)
def _sync(form, env):
@ -42,7 +47,7 @@ def _sync(form, env):
shutil.copyfile(fn_, dest)
ret.append('{0}.{1}'.format(form, os.path.basename(fn_)))
if ret:
open(os.path.join(__opts__['cachedir'], '.module_refresh'), 'w+').write('')
open(os.path.join(__opts__['cachedir'], 'module_refresh'), 'w+').write('')
if __opts__.get('clean_dynamic_modules', True):
current = set(os.listdir(mod_dir))
for fn_ in current.difference(remote):
@ -138,3 +143,94 @@ def sync_all(env='base'):
ret.append(sync_renderers(env))
ret.append(sync_returners(env))
return ret
def running():
'''
Return the data on all running salt processes on the minion
CLI Example::
salt '*' saltutil.running
'''
procs = __salt__['status.procs']()
ret = []
serial = salt.payload.Serial(__opts__)
pid = os.getpid()
proc_dir = os.path.join(__opts__['cachedir'], 'proc')
if not os.path.isdir(proc_dir):
return []
for fn_ in os.listdir(proc_dir):
path = os.path.join(proc_dir, fn_)
data = serial.loads(open(path, 'rb').read())
if not procs.get(str(data['pid'])):
# The process is no longer running, clear out the file and
# continue
os.remove(path)
continue
if data.get('pid') == pid:
continue
ret.append(data)
return ret
def find_job(jid):
'''
Return the data for a specific job id
CLI Example::
salt '*' saltutil.find_job <job id>
'''
for data in running():
if data['jid'] == jid:
return data
return {}
def signal_job(jid, sig):
'''
Sends a signal to the named salt job's process
CLI Example::
salt '*' saltutil.signal_job <job id> 15
'''
for data in running():
if data['jid'] == jid:
try:
os.kill(int(data['pid']), sig)
return 'Signal {0} sent to job {1} at pid {2}'.format(
int(sig),
jid,
data['pid']
)
except OSError:
path = os.path.join(__opts__['cachedir'], 'proc', str(jid))
if os.path.isfile(path):
os.remove(path)
return ('Job {0} was not running and job data has been '
'cleaned up').format(jid)
return ''
def term_job(jid):
'''
Sends a termination signal (SIGTERM 15) to the named salt job's process
CLI Example::
salt '*' saltutil.term_job <job id>
'''
return signal_job(jid, signal.SIGTERM)
def kill_job(jid):
'''
Sends a kill signal (SIGKILL 9) to the named salt job's process
CLI Example::
salt '*' saltutil.kill_job <job id>
'''
return signal_job(jid, signal.SIGKILL)

View File

@ -25,6 +25,7 @@ def __virtual__():
'CentOS',
'Fedora',
'Gentoo',
'FreeBSD',
'Windows',
]
if __grains__['os'] in disable:

View File

@ -800,9 +800,7 @@ def backup(host=None, core_name=None, append_core_to_path=False):
salt '*' solr.backup music
'''
path = __opts__['solr.backup_path']
print path
numBackups = __opts__['solr.num_backups']
print numBackups
if path is not None:
if not path.endswith(os.path.sep):
path += os.path.sep
@ -1027,7 +1025,6 @@ def core_status(host=None, core_name=None):
return ret
extra = ['action=STATUS', 'core={0}'.format(core_name)]
url = _format_url('admin/cores', host=host, core_name=None, extra=extra)
print url
return _http_request(url)
################### DIH (Direct Import Handler) COMMANDS #####################

View File

@ -31,12 +31,10 @@ def _format_auth_line(
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}'.format(enc, key, comment)
line += '{0} {1} {2}\n'.format(enc, key, comment)
return line
# FIXME: mutable types as default parameter values, NO!
# http://goo.gl/ToU2z
def _replace_auth_key(
user,
key,
@ -188,8 +186,6 @@ def rm_auth_key(user, key, config='.ssh/authorized_keys'):
return 'Key not present'
# FIXME: mutable types as default parameter values, NO!
# http://goo.gl/ToU2z
def set_auth_key(
user,
key,
@ -205,7 +201,6 @@ def set_auth_key(
salt '*' ssh.set_auth_key <user> <key> dsa '[]' .ssh/authorized_keys
'''
enc = _refine_enc(enc)
ret = '' # FIXME: where is ret used?
replace = False
uinfo = __salt__['user.info'](user)
current = auth_keys(user, config)
@ -236,6 +231,15 @@ def set_auth_key(
options)
fconfig = os.path.join(uinfo['home'], config)
if not os.path.isdir(os.path.dirname(fconfig)):
os.makedirs(os.path.dirname(fconfig))
open(fconfig, 'a+').write('\n{0}'.format(auth_line))
dpath = os.path.dirname(fconfig)
os.makedirs(dpath)
os.chown(dpath, uinfo['uid'], uinfo['gid'])
os.chmod(dpath, 448)
if not os.path.isfile(fconfig):
open(fconfig, 'a+').write('{0}'.format(auth_line))
os.chown(fconfig, uinfo['uid'], uinfo['gid'])
os.chmod(fconfig, 384)
else:
open(fconfig, 'a+').write('{0}'.format(auth_line))
return 'new'

View File

@ -26,6 +26,40 @@ def _number(text):
return text
def procs():
'''
Return the process data
CLI Example::
salt '*' status.procs
'''
# Get the user, pid and cmd
ret = {}
uind = 0
pind = 0
cind = 0
plines = __salt__['cmd.run'](__grains__['ps']).split('\n')
guide = plines.pop(0).split()
if 'USER' in guide:
uind = guide.index('USER')
elif 'UID' in guide:
uind = guide.index('UID')
if 'PID' in guide:
pind = guide.index('PID')
if 'COMMAND' in guide:
cind = guide.index('COMMAND')
elif 'CMD' in guide:
cind = guide.index('CMD')
for line in plines:
if not line:
continue
comps = line.split()
ret[comps[pind]] = {'user': comps[uind],
'cmd': ' '.join(comps[cind:])}
return ret
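For instance, with a typical 'ps aux' header of USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND, the indexes resolve to uind=0, pind=1 and cind=10, so each entry is keyed on the PID column. A standalone sketch with a made-up process line:

# Standalone sketch of the column-index logic in procs(); the sample line is made up.
guide = 'USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND'.split()
uind, pind, cind = guide.index('USER'), guide.index('PID'), guide.index('COMMAND')
comps = 'root 1234 0.0 0.1 1000 2000 ? Ss 10:00 0:01 /usr/bin/python salt-minion'.split()
print({comps[pind]: {'user': comps[uind], 'cmd': ' '.join(comps[cind:])}})
# -> {'1234': {'user': 'root', 'cmd': '/usr/bin/python salt-minion'}} (key order may vary)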
def custom():
'''
Return a custom composite of status data and info for this minion,
@ -344,7 +378,7 @@ def netdev():
# Fix lines like eth0:9999..'
comps[0] = line.split(':')[0].strip()
#Support lines both like eth0:999 and eth0: 9999
comps[1] = line.split(':')[1].strip().split()[0]
comps.insert(1, line.split(':')[1].strip().split()[0])
ret[comps[0]] = {'iface': comps[0],
'rx_bytes': _number(comps[1]),
'rx_compressed': _number(comps[7]),

View File

@ -23,7 +23,6 @@ def echo(text):
salt '*' test.echo 'foo bar baz quo qux'
'''
print 'Echo got called!'
return text
@ -76,8 +75,9 @@ def get_opts():
# FIXME: mutable types as default parameter values
def cross_test(func, args=[]):
'''
Execute a minion function via the __salt__ object in the test module, used
to verify that the minion functions can be called via the __salt__module
Execute a minion function via the __salt__ object in the test
module, used to verify that the minion functions can be called
via the __salt__ module.
CLI Example::
@ -88,8 +88,8 @@ def cross_test(func, args=[]):
def fib(num):
'''
Return a Fibonacci sequence up to the passed number, and the time it took
to compute in seconds. Used for performance tests
Return a Fibonacci sequence up to the passed number, and the
time it took to compute in seconds. Used for performance tests
CLI Example::
@ -106,8 +106,9 @@ def fib(num):
def collatz(start):
'''
Execute the collatz conjecture from the passed starting number, returns
the sequence and the time it took to compute. Used for performance tests.
Execute the collatz conjecture from the passed starting number,
returns the sequence and the time it took to compute. Used for
performance tests.
CLI Example::

View File

@ -1,10 +1,19 @@
'''
Create virtualenv environments
'''
import salt.utils
__opts__ = {
'venv_bin': 'virtualenv',
}
def __virtual__():
'''
Only load the module if virtualenv is installed
'''
cmd = __opts__.get('venv_bin', 'virtualenv')
return 'virtualenv' if salt.utils.which(cmd) else False
def create(path,
venv_bin='',
no_site_packages=False,

View File

@ -76,7 +76,7 @@ def available_version(name):
# here we can, but for now its exact match only.
versions_list = []
for pkgtype in ['available', 'updates']:
pl = yb.doPackageLists(pkgtype)
exactmatch, matched, unmatched = yum.packages.parsePackages(pl, [name])
# build a list of available packages from either available or updates
@ -94,7 +94,6 @@ def available_version(name):
# already and return a message saying 'up to date' or something along
# those lines.
return ''
# remove the duplicate items from the list and return the first one
return list(set(versions_list))[0]
@ -205,7 +204,6 @@ def install(pkgs, refresh=False, repo='', skip_verify=False):
if repo:
yb.repos.enableRepo(repo)
for pkg in pkgs:
try:
yb.install(name=pkg)

View File

@ -24,6 +24,8 @@ def __virtual__():
if __grains__['os'] in dists:
if int(__grains__['osrelease'].split('.')[0]) <= 5:
return 'pkg'
else:
return False
else:
return False
@ -68,7 +70,7 @@ def available_version(name):
salt '*' pkg.available_version <package name>
'''
out = _parse_yum('list {0}'.format(name))
out = _parse_yum('list updates {0}'.format(name))
return out[0].version if out else ''

View File

@ -71,14 +71,12 @@ class HighStateOutputter(Outputter):
if not ret['result']:
hcolor = colors['RED']
tcolor = colors['RED']
comps = tname.split('.')
comps = tname.split('_|-')
hstrs.append(('{0}----------\n State: - {1}{2[ENDC]}'
.format(tcolor, comps[0], colors)))
# FIXME: string formating below should match the
# style above
hstrs.append(' {0}Name: {1}{2[ENDC]}'.format(
tcolor,
'.'.join(comps[2:-1]),
comps[2],
colors
))
hstrs.append(' {0}Function: {1}{2[ENDC]}'.format(

104
salt/runners/jobs.py Normal file
View File

@ -0,0 +1,104 @@
'''
A convenience system to manage jobs, both active and already run
'''
# Import Python Modules
import os
# Import Salt Modules
import salt.client
import salt.output
import salt.payload
import salt.utils
from salt.exceptions import SaltException
# Import Third party libs
import yaml
def active():
'''
Return a report on all actively running jobs from a job id centric
perspective
'''
ret = {}
job_dir = os.path.join(__opts__['cachedir'], 'jobs')
client = salt.client.LocalClient(__opts__['config'])
active = client.cmd('*', 'saltutil.running', timeout=1)
for minion, data in active.items():
if not isinstance(data, tuple):
continue
for job in data:
if not job['jid'] in ret:
ret[job['jid']] = {'Running': [],
'Returned': [],
'Function': job['fun'],
'Arguments': list(job['arg']),
'Target': job['tgt'],
'Target-type': job['tgt_type']}
else:
ret[job['jid']]['Running'].append({minion: job['pid']})
if os.path.isdir(job_dir):
for jid in os.listdir(job_dir):
if not jid in ret:
continue
jid_dir = os.path.join(job_dir, jid)
if not os.path.isdir(jid_dir):
continue
for minion in os.listdir(jid_dir):
if minion.startswith('.'):
continue
if os.path.exists(os.path.join(jid_dir, minion)):
ret[jid]['Returned'].append(minion)
print yaml.dump(ret)
def lookup_jid(jid):
'''
Return the printout from a previously executed job
'''
def _format_ret(full_ret):
'''
Take the full return data and format it to simple output
'''
ret = {}
out = ''
for key, data in full_ret.items():
ret[key] = data['ret']
if 'out' in data:
out = data['out']
return ret, out
client = salt.client.LocalClient(__opts__['config'])
full_ret = client.get_full_returns(jid, [], 0)
ret, out = _format_ret(full_ret)
# Determine the proper output method and run it
get_outputter = salt.output.get_outputter
if isinstance(ret, list) or isinstance(ret, dict):
if out:
printout = get_outputter(out)
else:
printout = get_outputter(None)
# Pretty print any salt exceptions
elif isinstance(ret, SaltException):
printout = get_outputter("txt")
printout(ret)
return ret
def list_jobs():
'''
List all detectable jobs and associated functions
'''
serial = salt.payload.Serial(__opts__)
ret = {}
job_dir = os.path.join(__opts__['cachedir'], 'jobs')
for jid in os.listdir(job_dir):
loadpath = os.path.join(job_dir, jid, '.load.p')
if not os.path.isfile(loadpath):
continue
load = serial.load(open(loadpath, 'rb'))
ret[jid] = {'Start Time': salt.utils.jid_to_time(jid),
'Function': load['fun'],
'Arguments': list(load['arg']),
'Target': load['tgt'],
'Target-type': load['tgt_type']}
print yaml.dump(ret)
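These runner functions are typically driven from the ``salt-run`` command (e.g. ``salt-run jobs.active`` or ``salt-run jobs.lookup_jid <jid>``). For illustration only, the report that ``active()`` assembles and prints is keyed by job id and looks roughly like this (values are made up):
# Illustrative only -- the dict shape built by active() above.
import yaml
example = {
    '20120208153000123456': {
        'Running': [{'web1': 1234}],   # minion -> pid of the running job
        'Returned': ['web2'],          # minions whose return is already cached
        'Function': 'test.sleep',
        'Arguments': [60],
        'Target': '*',
        'Target-type': 'glob',
    },
}
print yaml.dump(example)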

View File

@ -23,6 +23,12 @@ import salt.minion
log = logging.getLogger(__name__)
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
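A quick stand-alone illustration of the tag format this helper produces; the highstate outputter change earlier in this commit splits the same string on ``_|-`` to recover the parts (values below are examples):
# Illustrative only: the tag produced for a typical low chunk.
low = {'state': 'pkg', '__id__': 'vim', 'name': 'vim', 'fun': 'installed'}
tag = '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
print tag                  # pkg_|-vim_|-vim_|-installed
print tag.split('_|-')[2]  # vim -- the name field, as read back by the outputter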
def _getargs(func):
'''
@ -41,6 +47,7 @@ def _getargs(func):
return aspec
def format_log(ret):
'''
Format the state into a log message
@ -96,6 +103,21 @@ class State(object):
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self.load_modules()
self.mod_init = set()
def _mod_init(self, low):
'''
Check the module initialization function; if this is the first run of
a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
minit = '{0}.mod_init'.format(low['state'])
if not low['state'] in self.mod_init:
if minit in self.states:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def load_modules(self):
'''
@ -125,13 +147,13 @@ class State(object):
self.load_modules()
open(os.path.join(
self.opts['cachedir'],
'.module_refresh'),
'module_refresh'),
'w+').write('')
elif data['fun'] == 'recurse':
self.load_modules()
open(os.path.join(
self.opts['cachedir'],
'.module_refresh'),
'module_refresh'),
'w+').write('')
def format_verbosity(self, returns):
@ -266,12 +288,12 @@ class State(object):
req,
body['__sls__'])
errors.append(err)
# Make sure that there is only one key in the dict
if len(arg.keys()) != 1:
errors.append(('Multiple dictionaries defined'
' in argument of state {0} in sls {1}').format(
name,
body['__sls__']))
# Make sure that there is only one key in the dict
if len(arg.keys()) != 1:
errors.append(('Multiple dictionaries defined'
' in argument of state {0} in sls {1}').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
@ -526,7 +548,7 @@ class State(object):
if '__FAILHARD__' in running:
running.pop('__FAILHARD__')
return running
tag = '{0[state]}.{0[__id__]}.{0[name]}.{0[fun]}'.format(low)
tag = _gen_tag(low)
if tag not in running:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
@ -537,7 +559,7 @@ class State(object):
'''
Check if the low data chunk should send a failhard signal
'''
tag = '{0[state]}.{0[__id__]}.{0[name]}.{0[fun]}'.format(low)
tag = _gen_tag(low)
if low.get('failhard', False) \
or self.opts['failhard'] \
and tag in running:
@ -545,129 +567,118 @@ class State(object):
return True
return False
def check_requires(self, low, running, chunks):
def check_requisite(self, low, running, chunks):
'''
Look into the running data to see if the requirement has been met
Look into the running data to check the status of all requisite states
'''
if 'require' not in low:
present = False
if 'watch' in low:
present = True
if 'require' in low:
present = True
if not present:
return 'met'
reqs = []
reqs = {'require': [],
'watch': []}
status = 'unmet'
for req in low['require']:
for r_state in reqs.keys():
if r_state in low:
for req in low[r_state]:
found = False
for chunk in chunks:
if chunk['__id__'] == req[req.keys()[0]] or \
chunk['name'] == req[req.keys()[0]]:
if chunk['state'] == req.keys()[0]:
found = True
reqs[r_state].append(chunk)
if not found:
return 'unmet'
fun_stats = set()
for r_state, chunks in reqs.items():
for chunk in chunks:
if chunk['__id__'] == req[req.keys()[0]] or \
chunk['name'] == req[req.keys()[0]]:
if chunk['state'] == req.keys()[0]:
reqs.append(chunk)
fun_stats = []
for req in reqs:
tag = '{0[state]}.{0[__id__]}.{0[name]}.{0[fun]}'.format(req)
if tag not in running:
fun_stats.append('unmet')
else:
fun_stats.append('met' if running[tag]['result'] else 'fail')
for stat in fun_stats:
if stat == 'unmet':
return stat
elif stat == 'fail':
return stat
return 'met'
tag = _gen_tag(chunk)
if tag not in running:
fun_stats.add('unmet')
continue
if not running[tag]['result']:
fun_stats.add('fail')
continue
if r_state == 'watch' and running[tag]['changes']:
fun_stats.add('change')
continue
else:
fun_stats.add('met')
def check_watchers(self, low, running, chunks):
'''
Look into the running data to see if the watched states have been run
'''
if 'watch' not in low:
return 'nochange'
reqs = []
status = 'unmet'
for req in low['watch']:
for chunk in chunks:
if chunk['__id__'] == req[req.keys()[0]] or \
chunk['name'] == req[req.keys()[0]]:
if chunk['state'] == req.keys()[0]:
reqs.append(chunk)
fun_stats = []
for req in reqs:
tag = '{0[state]}.{0[__id__]}.{0[name]}.{0[fun]}'.format(req)
if tag not in running:
fun_stats.append('unmet')
else:
(fun_stats.append('change' if running[tag]['changes']
else 'nochange'))
for stat in fun_stats:
if stat == 'change':
return stat
elif stat == 'unmet':
return stat
return 'nochange'
if 'unmet' in fun_stats:
return 'unmet'
elif 'fail' in fun_stats:
return 'fail'
elif 'change' in fun_stats:
return 'change'
return 'met'
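To spell out the aggregation above: 'unmet' takes priority over 'fail', which takes priority over 'change', which takes priority over 'met', so one unrun requisite defers the chunk, one failed requisite fails it, and a watched change only flags it for the watcher call. A stand-alone sketch of that collapse:
# Illustrative only: how mixed requisite results resolve to a single status.
for fun_stats in (set(['met']), set(['met', 'change']), set(['fail', 'change'])):
    if 'unmet' in fun_stats:
        status = 'unmet'
    elif 'fail' in fun_stats:
        status = 'fail'
    elif 'change' in fun_stats:
        status = 'change'
    else:
        status = 'met'
    print sorted(fun_stats), '->', status
# ['met'] -> met
# ['change', 'met'] -> change
# ['change', 'fail'] -> fail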
def call_chunk(self, low, running, chunks):
'''
Check if a chunk has any requisites; execute the requisites and then
the chunk
'''
tag = '{0[state]}.{0[__id__]}.{0[name]}.{0[fun]}'.format(low)
if 'require' in low:
status = self.check_requires(low, running, chunks)
if status == 'unmet':
reqs = []
for req in low['require']:
self._mod_init(low)
tag = _gen_tag(low)
requisites = ('require', 'watch')
status = self.check_requisite(low, running, chunks)
if status == 'unmet':
lost = {'require': [],
'watch': []}
reqs = []
for requisite in requisites:
if not requisite in low:
continue
for req in low[requisite]:
found = False
for chunk in chunks:
if chunk['name'] == req[req.keys()[0]] \
or chunk['__id__'] == req[req.keys()[0]]:
if chunk['state'] == req.keys()[0]:
reqs.append(chunk)
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
if (chunk['state'] + '.' + chunk['name'] +
'.' + chunk['fun'] not in running):
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
running[tag] = self.call(low)
elif status == 'fail':
found = True
if not found:
lost[requisite].append(req)
if lost['require'] or lost['watch']:
comment = 'The following requisites were not found:\n'
for requisite, lreqs in lost.items():
for lreq in lreqs:
comment += '{0}{1}: {2}\n'.format(' '*19,
requisite,
lreq)
running[tag] = {'changes': {},
'result': False,
'comment': 'One or more require failed'}
elif 'watch' in low:
status = self.check_watchers(low, running, chunks)
if status == 'unmet':
reqs = []
for req in low['watch']:
for chunk in chunks:
if chunk['name'] == req[req.keys()[0]] \
or chunk['__id__'] == req[req.keys()[0]]:
if chunk['state'] == req.keys()[0]:
reqs.append(chunk)
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
if (chunk['state'] + '.' + chunk['name'] +
'.' + chunk['fun'] not in running):
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'nochange':
running[tag] = self.call(low)
elif status == 'change':
'comment': comment}
return running
for chunk in reqs:
# Check to see if the chunk has been run; only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
running[tag] = self.call(low)
elif status == 'fail':
running[tag] = {'changes': {},
'result': False,
'comment': 'One or more requisite failed'}
elif status == 'change':
ret = self.call(low)
if not ret['changes']:
low['fun'] = 'watcher'
ret = self.call(low)
if not ret['changes']:
low['fun'] = 'watcher'
ret = self.call(low)
running[tag] = ret
running[tag] = ret
else:
running[tag] = self.call(low)
return running
@ -766,18 +777,30 @@ class HighState(object):
include = {}
done = {}
# Gather initial top files
for env in self._get_envs():
if not env in tops:
tops[env] = []
tops[env].append(
if self.opts['environment']:
tops[self.opts['environment']] = [
self.state.compile_template(
self.client.cache_file(
self.opts['state_top'],
env
self.opts['environment']
),
env
self.opts['environment']
)
)
]
else:
for env in self._get_envs():
if not env in tops:
tops[env] = []
tops[env].append(
self.state.compile_template(
self.client.cache_file(
self.opts['state_top'],
env
),
env
)
)
# Search initial top files for includes
for env, ctops in tops.items():
for ctop in ctops:
@ -865,6 +888,9 @@ class HighState(object):
'''
matches = {}
for env, body in top.items():
if self.opts['environment']:
if not env == self.opts['environment']:
continue
for match, data in body.items():
if self.matcher.confirm_top(match, data):
if env not in matches:
@ -928,7 +954,11 @@ class HighState(object):
else:
for sub_sls in state.pop('include'):
if not list(mods).count(sub_sls):
nstate, mods, err = self.render_state(sub_sls, env, mods)
nstate, mods, err = self.render_state(
sub_sls,
env,
mods
)
if nstate:
state.update(nstate)
if err:

View File

@ -126,7 +126,6 @@ def run(name,
ret['comment'] = 'Desired working directory is not available'
return ret
puid = os.geteuid()
pgid = os.getegid()
if group:
@ -136,19 +135,17 @@ def run(name,
except KeyError:
ret['comment'] = 'The group ' + group + ' is not available'
return ret
if user:
try:
euid = pwd.getpwnam(user).pw_uid
os.seteuid(euid)
except KeyError:
ret['comment'] = 'The user ' + user + ' is not available'
return ret
# Wow, we passed the test, run this sucker!
cmd_all = __salt__['cmd.run_all'](name, cwd)
try:
cmd_all = __salt__['cmd.run_all'](name, cwd, runas=user)
except CommandExecutionError as e:
ret['comment'] = e
return ret
ret['changes'] = cmd_all
ret['result'] = not bool(cmd_all['retcode'])
ret['comment'] = 'Command "' + name + '" run'
os.seteuid(puid)
os.setegid(pgid)
return ret

View File

@ -65,6 +65,7 @@ something like this:
- source: salt://code/flask
'''
import codecs
import os
import shutil
import difflib
@ -145,7 +146,6 @@ def _clean_dir(root, keep):
if fn_ == '/':
break
rm_files = []
print real_keep
for roots, dirs, files in os.walk(root):
for name in files:
nfn = os.path.join(roots, name)
@ -204,6 +204,9 @@ def _jinja(sfn, name, source, user, group, mode, env, context=None):
return {'result': False,
'data': 'Failed to import jinja'}
try:
newline = False
if open(sfn, 'rb').read().endswith('\n'):
newline = True
tgt = tempfile.mkstemp()[1]
passthrough = context if context else {}
passthrough['salt'] = __salt__
@ -215,7 +218,12 @@ def _jinja(sfn, name, source, user, group, mode, env, context=None):
passthrough['mode'] = mode
passthrough['env'] = env
template = get_template(sfn, __opts__, env)
open(tgt, 'w+').write(template.render(**passthrough))
try:
open(tgt, 'w+').write(template.render(**passthrough))
except UnicodeEncodeError:
codecs.open(tgt, encoding='utf-8', mode='w+').write(template.render(**passthrough))
if newline:
open(tgt, 'a').write('\n')
return {'result': True,
'data': tgt}
except:
@ -259,7 +267,7 @@ def _py(sfn, name, source, user, group, mode, env, context=None):
trb = traceback.format_exc()
return {'result': False,
'data': trb}
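A side note on the newline bookkeeping added to ``_jinja`` above: Jinja drops a trailing newline when rendering, which is why the original file's final newline is recorded up front and re-appended afterwards. A tiny stand-alone demonstration (assuming jinja2 is importable):
# Illustrative only: jinja2 strips the trailing newline on render.
from jinja2 import Template
print repr(Template('hello\n').render())   # u'hello' -- the newline is gone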
def symlink(name, target, force=False, makedirs=False):
'''
@ -285,6 +293,11 @@ def symlink(name, target, force=False, makedirs=False):
'changes': {},
'result': True,
'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
_makedirs(name)
@ -340,6 +353,11 @@ def absent(name):
'changes': {},
'result': True,
'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
if os.path.isfile(name) or os.path.islink(name):
try:
os.remove(name)
@ -433,9 +451,44 @@ def managed(name,
'comment': '',
'name': name,
'result': True}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
# Gather the source file from the server
sfn = ''
source_sum = {}
# If the source is a list then find which file exists
if isinstance(source, list):
# get the master file list
mfiles = __salt__['cp.list_master'](__env__)
for single in source:
if isinstance(single, dict):
# check the proto: if it is http or ftp then download the file
# to check; if it is salt then check the master list
if len(single) != 1:
continue
single_src = single.keys()[0]
single_hash = single[single_src]
proto = urlparse.urlparse(single_src).scheme
if proto == 'salt':
if single_src in mfiles:
source = single_src
break
elif proto.startswith('http') or proto == 'ftp':
dest = tempfile.mkstemp()[1]
fn_ = __salt__['cp.get_url'](single_src, dest)
os.remove(fn_)
if fn_:
source = single_src
source_hash = single_hash
break
elif isinstance(single, str):
if single in mfiles:
source = single
break
# If the file is a template and the contents are managed
# then make sure to copy it down and templatize things.
@ -731,6 +784,11 @@ def directory(name,
'changes': {},
'result': True,
'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
if os.path.isfile(name):
ret['result'] = False
ret['comment'] = ('Specified location {0} exists and is a file'
@ -838,6 +896,11 @@ def recurse(name,
'changes': {},
'result': True,
'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
keep = set()
# Verify the target directory
if not os.path.isdir(name):
@ -849,6 +912,8 @@ def recurse(name,
return ret
os.makedirs(name)
for fn_ in __salt__['cp.cache_dir'](source, __env__):
if not fn_.strip():
continue
dest = os.path.join(name,
os.path.relpath(
fn_,
@ -892,6 +957,12 @@ def sed(name, before, after, limit='', backup='.bak', options='-r -e',
'''
Maintain a simple edit to a file.
The file will be searched for the ``before`` pattern before making the edit
and then searched for the ``after`` pattern to verify the edit was
successful using :mod:`salt.modules.file.contains`. In general the
``limit`` pattern should be as specific as possible and ``before`` and
``after`` should contain the minimal text to be changed.
Usage::
# Disable the epel repo by default
@ -902,10 +973,23 @@ def sed(name, before, after, limit='', backup='.bak', options='-r -e',
- after: 0
- limit: ^enabled=
# Remove ldap from nsswitch
/etc/nsswitch.conf:
file:
- sed
- before: 'ldap'
- after: ''
- limit: '^passwd:'
.. versionadded:: 0.9.5
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
if not os.path.exists(name):
ret['comment'] = "File '{0}' not found".format(name)
return ret
@ -913,14 +997,16 @@ def sed(name, before, after, limit='', backup='.bak', options='-r -e',
# sed returns no output if the edit matches anything or not so we'll have
# to look for ourselves
# make sure the pattern(s) match
if __salt__['file.contains'](name, after, limit):
ret['comment'] = "Edit already performed"
ret['result'] = True
return ret
elif not __salt__['file.contains'](name, before, limit):
ret['comment'] = "Pattern not matched"
return ret
# Look for the pattern before attempting the edit
if not __salt__['file.contains'](name, before, limit):
# Pattern not found; try to guess why
if __salt__['file.contains'](name, after, limit):
ret['comment'] = "Edit already performed"
ret['result'] = True
return ret
else:
ret['comment'] = "Pattern not matched"
return ret
# should be ok now; perform the edit
__salt__['file.sed'](name, before, after, limit, backup, options, flags)
@ -949,6 +1035,12 @@ def comment(name, regex, char='#', backup='.bak'):
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
unanchor_regex = regex.lstrip('^').rstrip('$')
if not os.path.exists(name):
@ -996,6 +1088,11 @@ def uncomment(name, regex, char='#', backup='.bak'):
.. versionadded:: 0.9.5
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
unanchor_regex = regex.lstrip('^')
if not os.path.exists(name):
@ -1058,11 +1155,25 @@ def append(name, text):
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
if isinstance(text, basestring):
text = (text,)
for chunk in text:
for line in chunk.split('\n'):
try:
lines = chunk.split('\n')
except AttributeError:
logger.debug("Error appending text to %s; given object is: %s",
name, type(chunk))
ret['comment'] = "Given text is not a string"
return ret
for line in lines:
if __salt__['file.contains'](name, line):
continue
else:
@ -1093,6 +1204,11 @@ def touch(name, atime=None, mtime=None):
'name': name,
'changes': {},
}
if not os.path.isabs(name):
ret['result'] = False
ret['comment'] = ('Specified file {0} is not an absolute'
' path').format(name)
return ret
exists = os.path.exists(name)
ret['result'] = __salt__['file.touch'](name, atime, mtime)
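Returning to the ``managed`` change earlier in this file: ``source`` may now be a list that mixes plain ``salt://`` paths with single-entry dicts mapping an http/ftp URL to its hash, and the first entry that can be located wins. The shapes below are purely illustrative (the hash value is a placeholder for whatever ``source_hash`` normally accepts):
# Illustrative only: a list-valued source as handled by the new lookup loop.
source = [
    'salt://files/httpd.conf',                               # checked against cp.list_master
    {'http://example.com/httpd.conf': 'md5=<md5sum here>'},  # fetched via cp.get_url to verify
]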

148
salt/states/mysql_grants.py Executable file
View File

@ -0,0 +1,148 @@
'''
MySQL Grant Management
======================
The mysql_grants module is used to grant and revoke MySQL permissions.
The ``name`` you pass in is purely symbolic and does not have anything to do
with the grant itself.
The ``database`` parameter needs to specify a 'priv_level' in the same
format as defined in the MySQL documentation:
- *
- *.*
- db_name.*
- db_name.tbl_name
- etc...
.. code-block:: yaml
frank_exampledb:
mysql_grants:
- present
- grant: select,insert,update
- database: exampledb.*
- user: frank
- host: localhost
frank_otherdb:
mysql_grants:
- present
- grant: all privileges
- database: otherdb.*
- user: frank
restricted_singletable:
mysql_grants:
- present
- grant: select
- database: somedb.sometable
- user: joe
'''
def present(name,
grant=None,
database=None,
user=None,
host='localhost'):
'''
Ensure that the grant is present with the specified properties
name
The name (key) of the grant to add
grant
The grant priv_type (i.e. select, insert, update OR all privileges)
database
The database priv_level (ie. db.tbl OR db.*)
user
The user to apply the grant to
host
The MySQL server
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Grant {0} for {1}@{2} on {3} is already present'.format(
grant,
user,
host,
database
)
}
# check if grant exists
if __salt__['mysql.grant_exists'](grant,database,user,host):
return ret
# The grant is not present, make it!
if __salt__['mysql.grant_add'](grant,database,user,host):
ret['comment'] = 'Grant {0} for {1}@{2} on {3} has been added'.format(
grant,
user,
host,
database
)
ret['changes'][name] = 'Present'
else:
ret['comment'] = 'Failed to grant {0} for {1}@{2} on {3}'.format(
grant,
user,
host,
database
)
ret['result'] = False
return ret
def absent(name,
grant=None,
database=None,
user=None,
host='localhost'):
'''
Ensure that the grant is absent
name
The name (key) of the grant to remove
grant
The grant priv_type (i.e. select, insert, update OR all privileges)
database
The database priv_level (ie. db.tbl OR db.*)
user
The user to apply the grant to
host
The MySQL server
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# check if the grant exists and revoke it
if __salt__['mysql.grant_exists'](grant,database,user,host,):
if __salt__['mysql.grant_revoke'](grant,database,user,host):
ret['comment'] = ('Grant {0} for {1}@{2} on {3} has been'
' revoked').format(
grant,
user,
host,
database
)
ret['changes'][name] = 'Absent'
return ret
# fallback
ret['comment'] = ('Grant {0} for {1}@{2} on {3} is not present, so it'
' cannot be revoked').format(
grant,
user,
host,
database
)
return ret

View File

@ -11,12 +11,14 @@ declarations are typically rather simple:
pkg:
- installed
'''
# Import python libs
import logging
import os
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
def installed(name, repo='', skip_verify=False):
def installed(name, version=None, refresh=False, repo='', skip_verify=False):
'''
Verify that the package is installed, and only that it is installed. This
state will not upgrade an existing package and only verify that it is
@ -37,27 +39,45 @@ def installed(name, repo='', skip_verify=False):
- repo: mycustomrepo
- skip_verify: True
'''
if __salt__['pkg.version'](name):
rtag = __gen_rtag()
cver = __salt__['pkg.version'](name)
if cver == version:
# The package is installed and is the correct version
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is already installed'}
changes = __salt__['pkg.install'](name,
True,
repo=repo,
skip_verify=skip_verify)
'comment': 'Package {0} is already installed and is the correct version'.format(name)}
elif cver:
# The package is installed
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already installed'.format(name)}
if refresh or os.path.isfile(rtag):
changes = __salt__['pkg.install'](name,
True,
version=version,
repo=repo,
skip_verify=skip_verify)
if os.path.isfile(rtag):
os.remove(rtag)
else:
changes = __salt__['pkg.install'](name,
version=version,
repo=repo,
skip_verify=skip_verify)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to install'}
'comment': 'Package {0} failed to install'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' installed'}
'comment': 'Package {0} installed'.format(name)}
def latest(name, repo='', skip_verify=False):
def latest(name, refresh=False, repo='', skip_verify=False):
'''
Verify that the named package is installed and the latest available
package. If the package can be updated this state function will update
@ -72,28 +92,40 @@ def latest(name, repo='', skip_verify=False):
skip_verify : False
Skip the GPG verification check for the package to be installed
'''
rtag = __gen_rtag()
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
version = __salt__['pkg.version'](name)
avail = __salt__['pkg.available_version'](name)
try:
has_newer = LooseVersion(avail) > LooseVersion(version)
except AttributeError:
# Not yet installed
if not version:
has_newer = True
else:
if not version:
# Not yet installed
has_newer = True
elif not avail:
# Already at latest
has_newer = False
else:
try:
has_newer = LooseVersion(avail) > LooseVersion(version)
except AttributeError:
logger.debug("Error comparing versions for '%s' (%s > %s)",
name, avail, version)
ret['comment'] = "No version could be retrieved for '{0}'".format(name)
return ret
if has_newer:
ret['changes'] = __salt__['pkg.install'](name,
True,
repo=repo,
skip_verify=skip_verify)
if refresh or os.path.isfile(rtag):
ret['changes'] = __salt__['pkg.install'](name,
True,
repo=repo,
skip_verify=skip_verify)
if os.path.isfile(rtag):
os.remove(rtag)
else:
ret['changes'] = __salt__['pkg.install'](name,
repo=repo,
skip_verify=skip_verify)
if ret['changes']:
ret['comment'] = 'Package {0} upgraded to latest'.format(name)
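An aside on the comparison used above: ``distutils.version.LooseVersion`` compares version strings component-wise rather than lexically, which is the reason it is used instead of a plain string comparison. A stand-alone example:
# Illustrative only: numeric-aware comparison of version strings.
from distutils.version import LooseVersion
print '1.0.10' > '1.0.9'                               # False -- plain string compare
print LooseVersion('1.0.10') > LooseVersion('1.0.9')   # True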
@ -122,18 +154,18 @@ def removed(name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is not installed'}
'comment': 'Package {0} is not installed'.format(name)}
else:
changes['removed'] = __salt__['pkg.remove'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to remove'}
'comment': 'Package {0} failed to remove'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' removed'}
'comment': 'Package {0} removed'.format(name)}
def purged(name):
@ -149,7 +181,7 @@ def purged(name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is not installed'}
'comment': 'Package {0} is not installed'.format(name)}
else:
changes['removed'] = __salt__['pkg.purge'](name)
@ -157,8 +189,27 @@ def purged(name):
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to purge'}
'comment': 'Package {0} failed to purge'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' purged'}
'comment': 'Package {0} purged'.format(name)}
def mod_init(low):
'''
Refresh the package database here so that it only needs to happen once
'''
if low['fun'] == 'installed' or low['fun'] == 'latest':
rtag = __gen_rtag()
if not os.path.exists(rtag):
open(rtag, 'w+').write('')
return True
else:
return False
def __gen_rtag():
'''
Return the location of the refresh tag
'''
return os.path.join(__opts__['cachedir'], 'pkg_refresh')
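Putting the pieces together (a sketch only, using a temp dir in place of the real cachedir): ``mod_init`` drops the ``pkg_refresh`` tag once per state run, and the first ``installed``/``latest`` call that finds it performs a refreshing install and removes it, so the package database is refreshed at most once per run.
# Minimal sketch of the refresh-tag handshake described above.
import os
import tempfile
rtag = os.path.join(tempfile.mkdtemp(), 'pkg_refresh')   # stands in for __gen_rtag()
open(rtag, 'w+').write('')        # mod_init: request a single refresh
refresh = os.path.isfile(rtag)    # the first installed()/latest() call sees the tag...
if refresh:
    os.remove(rtag)               # ...does a refreshing install, then clears it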

View File

@ -18,6 +18,25 @@ def __virtual__():
return 'service'
def _get_stat(name, sig):
'''
Return the status of a service based on signature and status; if the
signature is used then the status option will be ignored
'''
stat = False
if sig:
if sig == 'detect':
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
__grains__, name)
else:
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(
__grains__, sig)
stat = bool(__salt__['cmd.run'](cmd))
else:
stat = __salt__['service.status'](name)
return stat
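For illustration, the detection pipeline ``_get_stat`` builds when a signature is supplied looks like the following; a non-empty result is treated as running. The ``ps`` grain value shown is an assumption and varies by platform:
# Illustrative only: the command built from the ps grain and a signature.
grains = {'ps': 'ps -efH'}   # assumed value of __grains__['ps']
sig = 'httpd'
cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format(grains, sig)
print cmd   # ps -efH | grep httpd | grep -v grep | awk '{print $2}'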
def _enable(name, started):
'''
Enable the service
@ -95,7 +114,8 @@ def _enable(name, started):
ret['comment'] = ('Failed when setting service {0} to start at boot,'
' and the service is dead').format(name)
return ret
def _disable(name, started):
'''
Disable the service
@ -194,7 +214,7 @@ def running(name, enable=None, sig=None):
'changes': {},
'result': True,
'comment': ''}
if __salt__['service.status'](name, sig):
if _get_stat(name, sig):
ret['comment'] = 'The service {0} is already running'.format(name)
if enable is True:
return _enable(name, None)
@ -242,7 +262,7 @@ def dead(name, enable=None, sig=None):
'changes': {},
'result': True,
'comment': ''}
if not __salt__['service.status'](name, sig):
if not _get_stat(name, sig):
ret['comment'] = 'The service {0} is already dead'.format(name)
if enable is True:
return _enable(name, None)

View File

@ -59,7 +59,6 @@ def present(
'result': True,
'comment': 'User {0} is present and up to date'.format(name)}
print password
if __grains__['os'] != 'FreeBSD':
lshad = __salt__['shadow.info'](name)

View File

@ -30,6 +30,19 @@ DEFAULT_COLOR = '\033[00m'
RED_BOLD = '\033[01;31m'
ENDC = '\033[0m'
months = {'01': 'Jan',
'02': 'Feb',
'03': 'Mar',
'04': 'Apr',
'05': 'May',
'06': 'Jun',
'07': 'Jul',
'08': 'Aug',
'09': 'Sep',
'10': 'Oct',
'11': 'Nov',
'12': 'Dec'}
def get_colors(use=True):
'''
@ -65,6 +78,18 @@ def get_colors(use=True):
return colors
def append_pid(pidfile):
'''
Append the current process id to the given pidfile
'''
try:
open(pidfile, 'a').write('\n{0}'.format(str(os.getpid())))
except IOError:
err = ('Failed to commit the pid to location {0}, please verify'
' that the location is available').format(pidfile)
log.error(err)
def daemonize():
'''
Daemonize a process
@ -107,8 +132,9 @@ def daemonize():
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
except OSError as exc:
msg = 'fork #1 failed: {0} ({1})'.format(exc.errno, exc.strerror)
log.error(msg)
sys.exit(1)
# decouple from parent environment
@ -120,10 +146,10 @@ def daemonize():
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
except OSError as exc:
msg = 'fork #2 failed: {0} ({1})'
log.error(msg.format(exc.errno, exc.strerror))
sys.exit(1)
dev_null = open('/dev/null', 'rw')
@ -153,6 +179,7 @@ def profile_func(filename=None):
return profiled_func
return proffunc
def which(exe=None):
'''
Python clone of POSIX's /usr/bin/which
@ -167,6 +194,7 @@ def which(exe=None):
return full_path
return None
def list_files(directory):
'''
Return a list of all files found under directory
@ -181,3 +209,44 @@ def list_files(directory):
return list(ret)
def jid_to_time(jid):
'''
Convert a salt job id into the time when the job was invoked
'''
jid = str(jid)
if not len(jid) == 20:
return ''
year = jid[:4]
month = jid[4:6]
day = jid[6:8]
hour = jid[8:10]
minute = jid[10:12]
second = jid[12:14]
micro = jid[14:]
ret = '{0}, {1} {2} {3}:{4}:{5}.{6}'.format(
year,
months[month],
day,
hour,
minute,
second,
micro
)
return ret
def gen_mac(prefix='52:54:'):
'''
Generate a MAC address with the defined prefix
'''
src = ['1','2','3','4','5','6','7','8','9','0','a','b','c','d','e','f']
mac = prefix
while len(mac) < 18:
if len(mac) < 3:
mac = random.choice(src) + random.choice(src) + ':'
if mac.endswith(':'):
mac += random.choice(src) + random.choice(src) + ':'
return mac[:-1]
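Two quick stand-alone illustrations of the helpers added above (assuming they live in ``salt.utils`` as the diff suggests; the jid and MAC shown are examples):
# Illustrative only: jid_to_time decodes the 20-character job id timestamp,
# gen_mac builds a random MAC with the 52:54: prefix.
import salt.utils
print salt.utils.jid_to_time('20120208153000123456')   # 2012, Feb 08 15:30:00.123456
print salt.utils.gen_mac()                              # e.g. 52:54:3a:9b:01:f2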

View File

@ -23,4 +23,4 @@ def run():
for func in __all__:
if func == "run": continue
if not globals().get(func)():
sys.exit(1)
continue

View File

@ -36,7 +36,7 @@ NAME = 'salt'
VER = __version__
DESC = ('Portable, distributed, remote execution and '
'configuration management system')
mod_path = os.path.join(get_python_lib(1), 'salt/modules')
mod_path = os.path.join(get_python_lib(), 'salt/modules')
doc_path = os.path.join(PREFIX, 'share/doc', NAME + '-' + VER)
example_path = os.path.join(doc_path, 'examples')
template_path = os.path.join(example_path, 'templates')

View File

@ -1,14 +1,15 @@
import unittest
from salt.modules.hosts import list_hosts, get_ip, get_alias, has_pair, add_host,\
set_host, rm_host
from os import path
import os
import shutil
import sys
import saltunittest
TEMPLATES_DIR = path.dirname(path.abspath(__file__))
monkey_pathed = (list_hosts, set_host, add_host, rm_host)
class HostsModuleTest(unittest.TestCase):
class HostsModuleTest(saltunittest.TestCase):
def setUp(self):
self._hfn = [f.hosts_filename for f in monkey_pathed]
self.files = path.join(TEMPLATES_DIR, 'files')

View File

@ -1,7 +1,7 @@
import unittest
from modules import run_module
import saltunittest
class TestModuleTest(unittest.TestCase):
class TestModuleTest(saltunittest.TestCase):
def test_ping(self):
ret = run_module('test.ping')
assert ret == {'return': True}

View File

@ -4,7 +4,7 @@ Discover all instances of unittest.TestCase in this directory.
The current working directory must be set to the build of the salt you want to test.
'''
from unittest import TestLoader, TextTestRunner
from saltunittest import TestLoader, TextTestRunner
from os.path import dirname, abspath, relpath, splitext, normpath
import sys, os, fnmatch

22
tests/saltunittest.py Normal file
View File

@ -0,0 +1,22 @@
"""
This file provides a single interface to unittest objects for our
tests while supporting python < 2.7 via unittest2.
If you need something from the unittest namespace it should be
imported here from the relevant module and then imported into your
test from here
"""
import sys
# support python < 2.7 via unittest2
if sys.version_info[0:2] < (2,7):
try:
from unittest2 import TestLoader, TextTestRunner,\
TestCase, expectedFailure
except ImportError:
print "You need to install unittest2 to run the salt tests"
sys.exit(1)
else:
from unittest import TestLoader, TextTestRunner,\
TestCase, expectedFailure

View File

@ -1,8 +1,8 @@
import unittest
from saltunittest import TestCase, expectedFailure
class SimpleTest(unittest.TestCase):
class SimpleTest(TestCase):
def test_success(self):
assert True
@unittest.expectedFailure
@expectedFailure
def test_fail(self):
assert False

View File

@ -1,8 +1,9 @@
import unittest
from os import path
from salt.utils.jinja import SaltCacheLoader, get_template
from jinja2 import Environment
from saltunittest import TestCase
TEMPLATES_DIR = path.dirname(path.abspath(__file__))
class MockFileClient(object):
@ -20,7 +21,7 @@ class MockFileClient(object):
'env': env
})
class TestSaltCacheLoader(unittest.TestCase):
class TestSaltCacheLoader(TestCase):
def test_searchpath(self):
'''
The searchpath is based on the cachedir option and the env parameter
@ -77,7 +78,7 @@ class TestSaltCacheLoader(unittest.TestCase):
result = jinja.get_template('hello_include').render(a='Hi', b='Salt')
self.assertEqual(result, 'Hey world !Hi Salt !')
class TestGetTemplate(unittest.TestCase):
class TestGetTemplate(TestCase):
def test_fallback(self):
'''
A Template without loader is returned as fallback