Merge remote-tracking branch 'upstream/develop' into sam_raet_47

This commit is contained in:
Samuel M Smith 2014-06-23 17:54:06 -06:00
commit 1ace8ba719
44 changed files with 1535 additions and 2883 deletions

View File

@ -4,6 +4,7 @@ include LICENSE
include README.rst
include _requirements.txt
include raet-requirements.txt
include cloud-requirements.txt
include zeromq-requirements.txt
include tests/*.py
recursive-include tests *

View File

@ -4424,3 +4424,123 @@ source_file = _build/locale/ref/states/all/salt.states.serverdensity_device.pot
source_lang = en
source_name = ref/states/all/salt.states.serverdensity_device.rst
[salt.ref--cli--salt-api]
file_filter = locale/<lang>/LC_MESSAGES/ref/cli/salt-api.po
source_file = _build/locale/ref/cli/salt-api.pot
source_lang = en
source_name = ref/cli/salt-api.rst
[salt.ref--netapi--all--index]
file_filter = locale/<lang>/LC_MESSAGES/ref/netapi/all/index.po
source_file = _build/locale/ref/netapi/all/index.pot
source_lang = en
source_name = ref/netapi/all/index.rst
[salt.ref--netapi--all--salt_netapi_rest_cherrypy]
file_filter = locale/<lang>/LC_MESSAGES/ref/netapi/all/salt.netapi.rest_cherrypy.po
source_file = _build/locale/ref/netapi/all/salt.netapi.rest_cherrypy.pot
source_lang = en
source_name = ref/netapi/all/salt.netapi.rest_cherrypy.rst
[salt.ref--netapi--all--salt_netapi_rest_tornado]
file_filter = locale/<lang>/LC_MESSAGES/ref/netapi/all/salt.netapi.rest_tornado.po
source_file = _build/locale/ref/netapi/all/salt.netapi.rest_tornado.pot
source_lang = en
source_name = ref/netapi/all/salt.netapi.rest_tornado.rst
[salt.ref--netapi--all--salt_netapi_rest_wsgi]
file_filter = locale/<lang>/LC_MESSAGES/ref/netapi/all/salt.netapi.rest_wsgi.po
source_file = _build/locale/ref/netapi/all/salt.netapi.rest_wsgi.pot
source_lang = en
source_name = ref/netapi/all/salt.netapi.rest_wsgi.rst
[salt.ref--pillar--all--salt_pillar_foreman]
file_filter = locale/<lang>/LC_MESSAGES/ref/pillar/all/salt.pillar.foreman.po
source_file = _build/locale/ref/pillar/all/salt.pillar.foreman.pot
source_lang = en
source_name = ref/pillar/all/salt.pillar.foreman.rst
[salt.topics--netapi--index]
file_filter = locale/<lang>/LC_MESSAGES/topics/netapi/index.po
source_file = _build/locale/topics/netapi/index.pot
source_lang = en
source_name = topics/netapi/index.rst
[salt.topics--netapi--writing]
file_filter = locale/<lang>/LC_MESSAGES/topics/netapi/writing.po
source_file = _build/locale/topics/netapi/writing.pot
source_lang = en
source_name = topics/netapi/writing.rst
[salt.topics--releases--2014_1_5]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/2014.1.5.po
source_file = _build/locale/topics/releases/2014.1.5.pot
source_lang = en
source_name = topics/releases/2014.1.5.rst
[salt.topics--releases--saltapi--0_5_0]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.5.0.po
source_file = _build/locale/topics/releases/saltapi/0.5.0.pot
source_lang = en
source_name = topics/releases/saltapi/0.5.0.rst
[salt.topics--releases--saltapi--0_6_0]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.6.0.po
source_file = _build/locale/topics/releases/saltapi/0.6.0.pot
source_lang = en
source_name = topics/releases/saltapi/0.6.0.rst
[salt.topics--releases--saltapi--0_7_0]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.7.0.po
source_file = _build/locale/topics/releases/saltapi/0.7.0.pot
source_lang = en
source_name = topics/releases/saltapi/0.7.0.rst
[salt.topics--releases--saltapi--0_7_5]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.7.5.po
source_file = _build/locale/topics/releases/saltapi/0.7.5.pot
source_lang = en
source_name = topics/releases/saltapi/0.7.5.rst
[salt.topics--releases--saltapi--0_8_0]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.8.0.po
source_file = _build/locale/topics/releases/saltapi/0.8.0.pot
source_lang = en
source_name = topics/releases/saltapi/0.8.0.rst
[salt.topics--releases--saltapi--0_8_2]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.8.2.po
source_file = _build/locale/topics/releases/saltapi/0.8.2.pot
source_lang = en
source_name = topics/releases/saltapi/0.8.2.rst
[salt.topics--releases--saltapi--0_8_3]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.8.3.po
source_file = _build/locale/topics/releases/saltapi/0.8.3.pot
source_lang = en
source_name = topics/releases/saltapi/0.8.3.rst
[salt.topics--releases--saltapi--0_8_4]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/0.8.4.po
source_file = _build/locale/topics/releases/saltapi/0.8.4.pot
source_lang = en
source_name = topics/releases/saltapi/0.8.4.rst
[salt.topics--releases--saltapi--index]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/saltapi/index.po
source_file = _build/locale/topics/releases/saltapi/index.pot
source_lang = en
source_name = topics/releases/saltapi/index.rst
[salt.topics--sdb--index]
file_filter = locale/<lang>/LC_MESSAGES/topics/sdb/index.po
source_file = _build/locale/topics/sdb/index.pot
source_lang = en
source_name = topics/sdb/index.rst
[salt.topics--targeting--ipcidr]
file_filter = locale/<lang>/LC_MESSAGES/topics/targeting/ipcidr.po
source_file = _build/locale/topics/targeting/ipcidr.pot
source_lang = en
source_name = topics/targeting/ipcidr.rst

File diff suppressed because it is too large Load Diff

View File

@ -4,4 +4,6 @@ rest_tornado
.. automodule:: salt.netapi.rest_tornado.saltnado
.. automodule:: salt.netapi.rest_tornado.saltnado_websockets
.. ............................................................................

View File

@ -5,7 +5,7 @@ Release notes
.. releasestree::
:maxdepth: 1
2014.1.4
2014.1.5
Archive
=======

View File

@ -13,6 +13,10 @@
#
# It will fetch libzmq and build it as a pyzmq extension.
#
# IMPORTANT: Unpacking the shar requires uudecode, which is distributed along
# with sharutils. Thus, you should have sharutils installed on any host which
# will need to unpack the shar archive.
#
# The script is capable of building a shar archive using several methods:
#
# 1. Using a custom pip requirements file
@ -27,8 +31,10 @@
# option can be used to specify directory from which dependencies will be
# sourced. Any missing dependencies will be retrieved with pip.
#
# It is recommended to run this script on a machine which does not have any of
# the Salt dependencies already installed.
# It is strongly recommended to run this script on a machine which does not
# have any of the Salt dependencies already installed, because if the script
# detects that ZeroMQ is already installed, then pyzmq's setup.py will not
# build a bundled ZeroMQ.
#
# Run the script with -h for usage details.
#

View File

@ -31,7 +31,12 @@
<service_fmri value="svc:/system/filesystem/local"/>
</dependency>
<method_context/>
<method_context>
<method_environment>
<envvar name='PATH'
value='/usr/local/sbin:/usr/local/bin:/opt/local/sbin:/opt/local/bin:/usr/sbin:/usr/bin:/sbin'/>
</method_environment>
</method_context>
<exec_method type="method"
name="start"

View File

@ -5,6 +5,7 @@ The main entry point for salt-api
# Import python libs
import logging
import multiprocessing
import signal
# Import salt-api libs
import salt.loader
@ -18,6 +19,7 @@ class NetapiClient(object):
'''
def __init__(self, opts):
self.opts = opts
self.processes = []
def run(self):
'''
@ -27,4 +29,17 @@ class NetapiClient(object):
for fun in netapi:
if fun.endswith('.start'):
logger.info("Starting '{0}' api module".format(fun))
multiprocessing.Process(target=netapi[fun]).start()
p = multiprocessing.Process(target=netapi[fun])
p.start()
self.processes.append(p)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
def kill_children(self, *args):
    '''
    Terminate every child process previously spawned by ``run``.

    Registered as a SIGTERM handler, hence the unused ``*args`` that
    absorbs the ``(signum, frame)`` pair the signal machinery passes in.
    '''
    for child in self.processes:
        child.terminate()
        child.join()

View File

@ -1722,7 +1722,7 @@ class Map(Cloud):
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if 'minion' in overrides:
if 'minion' in overrides and 'minion' in nodedata:
if 'grains' in overrides['minion']:
if 'grains' in nodedata['minion']:
nodedata['minion']['grains'].update(

View File

@ -216,6 +216,14 @@ def avail_sizes(call=None):
'name': 'A7',
'description': '8 cores, 56GB RAM',
},
'A8': {
'name': 'A8',
'description': '8 cores, 56GB RAM, 40 Gbit/s InfiniBand',
},
'A9': {
'name': 'A9',
'description': '16 cores, 112GB RAM, 40 Gbit/s InfiniBand',
},
}

View File

@ -17,7 +17,7 @@
# CREATED: 10/15/2012 09:49:37 PM WEST
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2014.06.19"
__ScriptVersion="2014.06.21"
__ScriptName="bootstrap-salt.sh"
#======================================================================================================================
@ -418,6 +418,7 @@ fi
# Export the http_proxy configuration to our current environment
if [ "x${_HTTP_PROXY}" != "x" ]; then
export http_proxy="$_HTTP_PROXY"
export https_proxy="$_HTTP_PROXY"
fi
# Let's discover how we're being called
@ -680,6 +681,9 @@ __gather_linux_system_info() {
elif [ "${DISTRO_NAME}" = "EnterpriseEnterpriseServer" ]; then
# This is the Oracle Linux Enterprise ID before ORACLE LINUX 5 UPDATE 3
DISTRO_NAME="Oracle Linux"
elif [ "${DISTRO_NAME}" = "OracleServer" ]; then
# This is the Oracle Linux Server 6.5
DISTRO_NAME="Oracle Linux"
fi
rv=$(lsb_release -sr)
[ "${rv}x" != "x" ] && DISTRO_VERSION=$(__parse_version_string "$rv")
@ -1055,7 +1059,7 @@ fi
# Only RedHat based distros have testing support
if [ ${ITYPE} = "testing" ]; then
if [ "$(echo ${DISTRO_NAME_L} | egrep '(centos|red_hat|amazon)')x" = "x" ]; then
if [ "$(echo ${DISTRO_NAME_L} | egrep '(centos|red_hat|amazon|oracle)')x" = "x" ]; then
echoerror "${DISTRO_NAME} does not have testing packages support"
exit 1
fi
@ -2380,7 +2384,14 @@ install_centos_stable_deps() {
fi
fi
yum -y install ${packages} --enablerepo=${_EPEL_REPO} || return 1
if [ $DISTRO_NAME_L = "oracle_linux" ]; then
# We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!!
for package in ${packages}; do
yum -y install ${package} || yum -y install ${package} --enablerepo=${_EPEL_REPO} || return 1
done
else
yum -y install ${packages} --enablerepo=${_EPEL_REPO} || return 1
fi
if [ $_INSTALL_CLOUD -eq $BS_TRUE ]; then
check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud"
@ -2393,7 +2404,14 @@ install_centos_stable_deps() {
if [ "x${_EXTRA_PACKAGES}" != "x" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
yum install -y ${_EXTRA_PACKAGES} --enablerepo=${_EPEL_REPO} || return 1
if [ $DISTRO_NAME_L = "oracle_linux" ]; then
# We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!!
for package in ${_EXTRA_PACKAGES}; do
yum -y install ${package} || yum -y install ${package} --enablerepo=${_EPEL_REPO} || return 1
done
else
yum install -y ${_EXTRA_PACKAGES} --enablerepo=${_EPEL_REPO} || return 1
fi
fi
return 0
@ -2407,7 +2425,14 @@ install_centos_stable() {
if [ $_INSTALL_MASTER -eq $BS_TRUE ] || [ $_INSTALL_SYNDIC -eq $BS_TRUE ]; then
packages="${packages} salt-master"
fi
yum -y install ${packages} --enablerepo=${_EPEL_REPO} || return 1
if [ $DISTRO_NAME_L = "oracle_linux" ]; then
# We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!!
for package in ${packages}; do
yum -y install ${package} || yum -y install ${package} --enablerepo=${_EPEL_REPO} || return 1
done
else
yum -y install ${packages} --enablerepo=${_EPEL_REPO} || return 1
fi
return 0
}
@ -2427,7 +2452,12 @@ install_centos_stable_post() {
install_centos_git_deps() {
install_centos_stable_deps || return 1
yum -y install git --enablerepo=${_EPEL_REPO} || return 1
if [ $DISTRO_NAME_L = "oracle_linux" ]; then
# try both ways --enablerepo=X disables ALL OTHER REPOS!!!!
yum -y install git || yum -y install git --enablerepo=${_EPEL_REPO} || return 1
else
yum -y install git --enablerepo=${_EPEL_REPO} || return 1
fi
__git_clone_and_checkout || return 1
@ -3409,11 +3439,11 @@ install_smartos_deps() {
# Let's download, since they were not provided, the default configuration files
if [ ! -f $_SALT_ETC_DIR/minion ] && [ ! -f $_TEMP_CONFIG_DIR/minion ]; then
curl $_CURL_ARGS -s -o $_TEMP_CONFIG_DIR/minion -L \
https://raw.github.com/saltstack/salt/develop/conf/minion || return 1
https://raw.githubusercontent.com/saltstack/salt/develop/conf/minion || return 1
fi
if [ ! -f $_SALT_ETC_DIR/master ] && [ ! -f $_TEMP_CONFIG_DIR/master ]; then
curl $_CURL_ARGS -s -o $_TEMP_CONFIG_DIR/master -L \
https://raw.github.com/saltstack/salt/develop/conf/master || return 1
https://raw.githubusercontent.com/saltstack/salt/develop/conf/master || return 1
fi
fi
@ -3465,7 +3495,7 @@ install_smartos_post() {
if [ $? -eq 1 ]; then
if [ ! -f $_TEMP_CONFIG_DIR/salt-$fname.xml ]; then
curl $_CURL_ARGS -s -o $_TEMP_CONFIG_DIR/salt-$fname.xml -L \
https://raw.github.com/saltstack/salt/develop/pkg/smartos/salt-$fname.xml
https://raw.githubusercontent.com/saltstack/salt/develop/pkg/smartos/salt-$fname.xml
fi
svccfg import $_TEMP_CONFIG_DIR/salt-$fname.xml
if [ "${VIRTUAL_TYPE}" = "global" ]; then
@ -3761,7 +3791,7 @@ install_suse_11_stable_deps() {
# Let's download, since they were not provided, the default configuration files
if [ ! -f $_SALT_ETC_DIR/$fname ] && [ ! -f $_TEMP_CONFIG_DIR/$fname ]; then
curl $_CURL_ARGS -s -o $_TEMP_CONFIG_DIR/$fname -L \
https://raw.github.com/saltstack/salt/develop/conf/$fname || return 1
https://raw.githubusercontent.com/saltstack/salt/develop/conf/$fname || return 1
fi
done
fi

View File

@ -34,7 +34,7 @@ import salt.utils.xdg
from salt._compat import string_types
import sys
#can't use salt.utils.is_windows, because config.py is included from salt.utils
# can't use salt.utils.is_windows, because config.py is included from salt.utils
if not sys.platform.lower().startswith('win'):
import salt.cloud.exceptions
@ -651,6 +651,27 @@ def _read_conf_file(path):
return conf_opts
def _absolute_path(path, relative_to=None):
'''
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
not an absolute path, we try to prepend ``relative_to`` to ``path``and if
that path exists, return that one
'''
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path {0!r} converted to existing absolute path {1!r}'.format(
path, _abspath
)
)
return _abspath
return path
def load_config(path, env_var, default_path=None):
'''
Returns configuration dict from parsing either the file described by
@ -1017,6 +1038,9 @@ def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
# configuration file, and
master_config_path = os.path.join(config_dir, 'master')
# Convert relative to absolute paths if necessary
master_config_path = _absolute_path(master_config_path, config_dir)
if 'providers_config' in overrides and providers_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
@ -1025,6 +1049,9 @@ def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
and not providers_config_path:
providers_config_path = os.path.join(config_dir, 'cloud.providers')
# Convert relative to absolute paths if necessary
providers_config_path = _absolute_path(providers_config_path, config_dir)
if 'profiles_config' in overrides and profiles_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
@ -1033,6 +1060,9 @@ def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
and not profiles_config_path:
profiles_config_path = os.path.join(config_dir, 'cloud.profiles')
# Convert relative to absolute paths if necessary
profiles_config_path = _absolute_path(profiles_config_path, config_dir)
# Prepare the deploy scripts search path
deploy_scripts_search_path = overrides.get(
'deploy_scripts_search_path',

View File

@ -51,6 +51,7 @@ except ImportError:
pass
log = logging.getLogger(__name__)
class SaltRaetRoadStack(ioflo.base.deeding.Deed):
'''
Initialize and run raet udp stack for Salt
@ -225,7 +226,7 @@ class SaltRaetRoadStackRejected(ioflo.base.deeding.Deed):
if stack.remotes:
rejected = (stack.remotes.values()[0].acceptance
== raeting.acceptances.rejected)
else: #no remotes so assume rejected
else: # no remotes so assume rejected
rejected = True
self.status.update(rejected=rejected)
@ -282,6 +283,7 @@ class SaltRaetRoadStackAllowed(ioflo.base.deeding.Deed):
allowed = stack.remotes.values()[0].allowed
self.status.update(allowed=allowed)
class SaltRaetRoadStackManager(ioflo.base.deeding.Deed):
'''
Runs the manage method of RoadStack

View File

@ -474,6 +474,7 @@ def _run(cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
env=env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,

View File

@ -8,15 +8,15 @@ Support for RFC 2136 dynamic DNS updates.
support this (the keyname is only needed if the keyring contains more
than one key)::
ddns.keyring: keyring file (default=None)
ddns.keyname: key name in file (default=None)
keyring: keyring file (default=None)
keyname: key name in file (default=None)
The keyring file needs to be in json format and the key name needs to end
with an extra period in the file, similar to this:
.. code-block:: bash
{'keyname.': 'keycontent'}
{"keyname.": "keycontent"}
'''
# Import python libs
import logging

View File

@ -876,7 +876,8 @@ def start(container,
links=None,
privileged=False,
dns=None,
volumes_from=None):
volumes_from=None,
network_mode=None):
'''
Restart the specified container
@ -923,7 +924,8 @@ def start(container,
links=links,
privileged=privileged,
dns=dns,
volumes_from=volumes_from)
volumes_from=volumes_from,
network_mode=network_mode)
except TypeError:
# maybe older version of docker-py <= 0.3.1 dns and
# volumes_from are not accepted

View File

@ -280,7 +280,7 @@ def append(key, val, convert=False):
salt '*' grains.append key val
'''
grains = get(key)
grains = get(key, [])
if not isinstance(grains, list) and convert is True:
grains = [grains]
if not isinstance(grains, list):

View File

@ -240,8 +240,8 @@ def psql_query(query, user=None, host=None, port=None, maintenance_db=None,
Run an SQL-Query and return the results as a list. This command
only supports SELECT statements. This limitation can be worked around
with a query like this:
WITH updated AS (UPDATE pg_authid SET rolconnlimit = 2000 WHERE
WITH updated AS (UPDATE pg_authid SET rolconnlimit = 2000 WHERE
rolname = 'rolename' RETURNING rolconnlimit) SELECT * FROM updated;
CLI Example:

View File

@ -7,10 +7,12 @@ Module for managing the Salt schedule on a minion
'''
# Import Python libs
import difflib
import os
import yaml
import salt.utils
import salt.utils.odict
__proxyenabled__ = ['*']
@ -23,6 +25,8 @@ __func_alias__ = {
}
SCHEDULE_CONF = [
'name',
'maxrunning',
'function',
'splay',
'range',
@ -40,7 +44,7 @@ SCHEDULE_CONF = [
]
def list_(show_all=False):
def list_(show_all=False, return_yaml=True):
'''
List the jobs currently scheduled on the minion
@ -81,9 +85,12 @@ def list_(show_all=False):
del schedule[job]['_seconds']
if schedule:
tmp = {'schedule': schedule}
yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
return yaml_out
if return_yaml:
tmp = {'schedule': schedule}
yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
return yaml_out
else:
return schedule
else:
return None
@ -147,7 +154,6 @@ def delete(name):
ret['comment'] = 'Failed to delete job {0} from schedule.'.format(name)
ret['result'] = False
elif 'schedule' in __pillar__ and name in __pillar__['schedule']:
log.debug('found job in pillar')
out = __salt__['event.fire']({'name': name, 'where': 'pillar', 'func': 'delete'}, 'manage_schedule')
if out:
ret['comment'] = 'Deleted Job {0} from schedule.'.format(name)
@ -160,6 +166,75 @@ def delete(name):
return ret
def build_schedule_item(name, **kwargs):
    '''
    Build a schedule job

    Returns an ordered dict describing the job keyed fields, or an error
    string when "when" is combined with an interval option, or an error
    dict when ``name`` is missing.

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
    '''
    ret = {'comment': [],
           'result': True}

    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        # Bail out early: without a name there is nothing to key the
        # schedule entry on.  (Previously execution fell through and built
        # a bogus entry keyed on the falsy name.)
        return ret

    schedule = {}
    schedule[name] = salt.utils.odict.OrderedDict()
    schedule[name]['function'] = kwargs['function']

    # "when" is mutually exclusive with the interval options.
    time_conflict = False
    for item in ['seconds', 'minutes', 'hours', 'days']:
        if item in kwargs and 'when' in kwargs:
            time_conflict = True

    if time_conflict:
        return 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'

    for item in ['seconds', 'minutes', 'hours', 'days']:
        if item in kwargs:
            schedule[name][item] = kwargs[item]

    if 'job_args' in kwargs:
        schedule[name]['args'] = kwargs['job_args']

    if 'job_kwargs' in kwargs:
        schedule[name]['kwargs'] = kwargs['job_kwargs']

    # Default maxrunning to 1 so a slow job cannot pile up concurrent runs.
    if 'maxrunning' in kwargs:
        schedule[name]['maxrunning'] = kwargs['maxrunning']
    else:
        schedule[name]['maxrunning'] = 1

    if 'name' in kwargs:
        schedule[name]['name'] = kwargs['name']
    else:
        schedule[name]['name'] = name

    # jid_include defaults to True unless the caller explicitly disables it.
    if 'jid_include' not in kwargs or kwargs['jid_include']:
        schedule[name]['jid_include'] = True

    if 'splay' in kwargs:
        if isinstance(kwargs['splay'], dict):
            # Ensure ordering of start and end arguments
            schedule[name]['splay'] = salt.utils.odict.OrderedDict()
            schedule[name]['splay']['start'] = kwargs['splay']['start']
            schedule[name]['splay']['end'] = kwargs['splay']['end']
        else:
            schedule[name]['splay'] = kwargs['splay']

    for item in ['range', 'when', 'returner']:
        if item in kwargs:
            schedule[name][item] = kwargs[item]

    return schedule[name]
def add(name, **kwargs):
'''
Add a job to the schedule
@ -187,30 +262,19 @@ def add(name, **kwargs):
ret['comment'] = 'Job name is required.'
ret['result'] = False
schedule = {}
schedule[name] = {'function': kwargs['function']}
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if time_conflict:
return 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
ret['result'] = False
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs:
schedule[name][item] = kwargs[item]
_new = build_schedule_item(name, **kwargs)
if 'job_args' in kwargs:
schedule[name]['args'] = kwargs['job_args']
if 'job_kwargs' in kwargs:
schedule[name]['kwargs'] = kwargs['job_kwargs']
for item in ['splay', 'range', 'when', 'returner', 'jid_include']:
if item in kwargs:
schedule[name][item] = kwargs[item]
schedule = {}
schedule[name] = _new
out = __salt__['event.fire']({'name': name, 'schedule': schedule, 'func': 'add'}, 'manage_schedule')
if out:
@ -232,9 +296,19 @@ def modify(name, **kwargs):
salt '*' schedule.modify job1 function='test.ping' seconds=3600
'''
ret = {'comment': [],
ret = {'comment': '',
'changes': {},
'result': True}
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if time_conflict:
ret['result'] = False
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
current_schedule = __opts__['schedule'].copy()
if 'schedule' in __pillar__:
current_schedule.update(__pillar__['schedule'])
@ -244,39 +318,33 @@ def modify(name, **kwargs):
ret['result'] = False
return ret
schedule = {'function': kwargs['function']}
_current = current_schedule[name]
if '_seconds' in _current:
_current['seconds'] = _current['_seconds']
del _current['_seconds']
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
_new = build_schedule_item(name, **kwargs)
if _new == _current:
ret['comment'] = 'Job {0} in correct state'.format(name)
return ret
if time_conflict:
return 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
_current_lines = ['%s:%s\n' % (key, value)
for (key, value) in sorted(_current.items())]
_new_lines = ['%s:%s\n' % (key, value)
for (key, value) in sorted(_new.items())]
_diff = difflib.unified_diff(_current_lines, _new_lines)
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs:
schedule[item] = kwargs[item]
if 'job_args' in kwargs:
schedule['args'] = kwargs['job_args']
if 'job_kwargs' in kwargs:
schedule['kwargs'] = kwargs['job_kwargs']
for item in ['splay', 'range', 'when', 'returner', 'jid_include']:
if item in kwargs:
schedule[item] = kwargs[item]
ret['changes']['diff'] = ''.join(_diff)
if name in __opts__['schedule']:
out = __salt__['event.fire']({'name': name, 'schedule': schedule, 'func': 'modify'}, 'manage_schedule')
out = __salt__['event.fire']({'name': name, 'schedule': _new, 'func': 'modify'}, 'manage_schedule')
if out:
ret['comment'] = 'Modified job: {0} in schedule.'.format(name)
else:
ret['comment'] = 'Failed to modify job {0} in schedule.'.format(name)
ret['result'] = False
elif 'schedule' in __pillar__ and name in __pillar__['schedule']:
out = __salt__['event.fire']({'name': name, 'schedule': schedule, 'where': 'pillar', 'func': 'modify'}, 'manage_schedule')
out = __salt__['event.fire']({'name': name, 'schedule': _new, 'where': 'pillar', 'func': 'modify'}, 'manage_schedule')
if out:
ret['comment'] = 'Modified job: {0} in schedule.'.format(name)
else:
@ -424,26 +492,7 @@ def save():
ret = {'comment': [],
'result': True}
schedule = __opts__['schedule']
for job in schedule.keys():
if job == 'enabled':
continue
if job.startswith('_'):
del schedule[job]
continue
for item in schedule[job].keys():
if item not in SCHEDULE_CONF:
del schedule[job][item]
continue
if schedule[job][item] == 'true':
schedule[job][item] = True
if schedule[job][item] == 'false':
schedule[job][item] = False
if '_seconds' in schedule[job].keys():
schedule[job]['seconds'] = schedule[job]['_seconds']
del schedule[job]['_seconds']
schedule = list_(return_yaml=False)
# move this file into an configurable opt
sfn = '{0}/{1}/schedule.conf'.format(__opts__['config_dir'], os.path.dirname(__opts__['default_include']))

View File

@ -39,8 +39,10 @@ def __virtual__():
# Import python libs
import os
import re
import logging
import sys
import traceback
import copy
import urllib2
# Import salt libs
@ -67,12 +69,18 @@ _URL_VERSIONS = {
2: u'http://downloads.buildout.org/2/bootstrap.py',
}
DEFAULT_VER = 2
_logger = logging.getLogger(__name__)
def _salt_callback(func):
def _salt_callback(func, **kwargs):
LOG.clear()
def _call_callback(*a, **kw):
# cleanup the module kwargs before calling it from the
# decorator
kw = copy.deepcopy(kw)
for k in [ar for ar in kw if '__pub' in ar]:
kw.pop(k, None)
st = BASE_STATUS.copy()
directory = kw.get('directory', '.')
onlyif = kw.get('onlyif', None)
@ -252,7 +260,9 @@ def _Popen(command,
directory='.',
runas=None,
env=(),
exitcode=0):
exitcode=0,
use_vt=False,
loglevel=None):
'''
Run a command.
@ -272,13 +282,20 @@ def _Popen(command,
fails if cmd does not return this exit code
(set to None to disable check)
use_vt
Use the new salt VT to stream output [experimental]
'''
ret = None
directory = os.path.abspath(directory)
if isinstance(command, list):
command = ' '.join(command)
LOG.debug(u'Running {0}'.format(command))
ret = __salt__['cmd.run_all'](command, cwd=directory, runas=runas, env=env)
if not loglevel:
loglevel = 'debug'
ret = __salt__['cmd.run_all'](
command, cwd=directory, output_loglevel=loglevel,
runas=runas, env=env, use_vt=use_vt)
out = ret['stdout'] + '\n\n' + ret['stderr']
if (exitcode is not None) and (ret['retcode'] != exitcode):
raise _BuildoutError(out)
@ -543,7 +560,9 @@ def bootstrap(directory='.',
buildout_ver=None,
test_release=False,
offline=False,
new_st=None):
new_st=None,
use_vt=False,
loglevel=None):
'''
Run the buildout bootstrap dance (python bootstrap.py).
@ -583,6 +602,9 @@ def bootstrap(directory='.',
unless
Do not execute cmd if statement on the host return 0
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
.. code-block:: bash
@ -721,8 +743,20 @@ def bootstrap(directory='.',
bootstrap_args += ' --accept-buildout-test-releases'
if config and '"-c"' in content:
bootstrap_args += ' -c {0}'.format(config)
# be sure that the bootstrap belongs to the running user
try:
if runas:
uid = __salt__['user.info'](runas)['uid']
gid = __salt__['user.info'](runas)['gid']
os.chown('bootstrap.py', uid, gid)
except (IOError, OSError) as exc:
# dont block here, try to execute it if can pass
_logger.error('BUILDOUT bootstrap permissions error:'
' {0}'.format(exc),
exc_info=_logger.isEnabledFor(logging.DEBUG))
cmd = '{0} bootstrap.py {1}'.format(python, bootstrap_args)
ret = _Popen(cmd, directory=directory, runas=runas, env=env)
ret = _Popen(cmd, directory=directory, runas=runas, loglevel=loglevel,
env=env, use_vt=use_vt)
output = ret['output']
return {'comment': cmd, 'out': output}
@ -738,7 +772,9 @@ def run_buildout(directory='.',
runas=None,
env=(),
verbose=False,
debug=False):
debug=False,
use_vt=False,
loglevel=None):
'''
Run a buildout in a directory.
@ -772,6 +808,9 @@ def run_buildout(directory='.',
verbose
run buildout in verbose mode (-vvvvv)
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
.. code-block:: bash
@ -809,7 +848,9 @@ def run_buildout(directory='.',
cmd, directory=directory,
runas=runas,
env=env,
output=True)
output=True,
loglevel=loglevel,
use_vt=use_vt)
)
else:
LOG.info(u'Installing all buildout parts')
@ -817,7 +858,9 @@ def run_buildout(directory='.',
bcmd, config, ' '.join(argv))
cmds.append(cmd)
outputs.append(
_Popen(cmd, directory=directory, runas=runas, env=env, output=True)
_Popen(
cmd, directory=directory, runas=runas, loglevel=loglevel,
env=env, output=True, use_vt=use_vt)
)
return {'comment': '\n'.join(cmds),
@ -888,7 +931,9 @@ def buildout(directory='.',
debug=False,
verbose=False,
onlyif=None,
unless=None):
unless=None,
use_vt=False,
loglevel=None):
'''
Run buildout in a directory.
@ -939,6 +984,8 @@ def buildout(directory='.',
verbose
run buildout in verbose mode (-vvvvv)
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
@ -956,7 +1003,9 @@ def buildout(directory='.',
env=env,
runas=runas,
distribute=distribute,
python=python)
python=python,
use_vt=use_vt,
loglevel=loglevel)
buildout_ret = run_buildout(directory=directory,
config=config,
parts=parts,
@ -965,7 +1014,9 @@ def buildout(directory='.',
runas=runas,
env=env,
verbose=verbose,
debug=debug)
debug=debug,
use_vt=use_vt,
loglevel=loglevel)
# signal the decorator or our return
return _merge_statuses([boot_ret, buildout_ret])

View File

@ -34,7 +34,7 @@ class NetapiClient(object):
if 'client' not in low:
raise SaltException('No client specified')
if 'token' not in low or 'eauth' not in low:
if not ('token' in low or 'eauth' in low):
raise EauthAuthenticationError(
'No authentication credentials given')

View File

@ -37,6 +37,9 @@ def start():
mod_opts = __opts__.get(__virtualname__, {})
if mod_opts.get('websockets', False):
from . import saltnado_websockets
if 'num_processes' not in mod_opts:
mod_opts['num_processes'] = 1
@ -46,7 +49,7 @@ def start():
formatted_events_pattern = r"/formatted_events/{0}".format(token_pattern)
logger.debug("All events URL pattern is {0}".format(all_events_pattern))
application = tornado.web.Application([
paths = [
(r"/", saltnado.SaltAPIHandler),
(r"/login", saltnado.SaltAuthHandler),
(r"/minions/(.*)", saltnado.MinionSaltAPIHandler),
@ -56,14 +59,21 @@ def start():
(r"/run", saltnado.RunSaltAPIHandler),
(r"/events", saltnado.EventsSaltAPIHandler),
(r"/hook(/.*)?", saltnado.WebhookSaltAPIHandler),
# Matches /all_events/[0-9A-Fa-f]{n}
# Where n is the length of hexdigest
# for the current hashing algorithm.
# This algorithm is specified in the
# salt master config file.
(all_events_pattern, saltnado.AllEventsHandler),
(formatted_events_pattern, saltnado.FormattedEventsHandler),
], debug=mod_opts.get('debug', False))
]
# if you have enabled websockets, add them!
if mod_opts.get('websockets', False):
paths += [
# Matches /all_events/[0-9A-Fa-f]{n}
# Where n is the length of hexdigest
# for the current hashing algorithm.
# This algorithm is specified in the
# salt master config file.
(all_events_pattern, saltnado_websockets.AllEventsHandler),
(formatted_events_pattern, saltnado_websockets.FormattedEventsHandler),
]
application = tornado.web.Application(paths, debug=mod_opts.get('debug', False))
application.opts = __opts__
application.mod_opts = mod_opts

View File

@ -1,6 +1,7 @@
# encoding: utf-8
import json
import logging
import threading
import salt.netapi
@ -201,7 +202,6 @@ class SaltInfo(object):
'''
Process events and publish data
'''
import threading
logger.debug('In process {0}'.format(threading.current_thread()))
logger.debug(salt_data['tag'])
logger.debug(salt_data)

View File

@ -7,271 +7,7 @@ A REST API for Salt
:depends: - tornado Python module
All Events
----------
Exposes ``all`` "real-time" events from Salt's event bus on a websocket connection.
It should be noted that "Real-time" here means these events are made available
to the server as soon as any salt related action (changes to minions, new jobs etc) happens.
Clients are however assumed to be able to tolerate any network transport related latencies.
Functionality provided by this endpoint is similar to the ``/events`` end point.
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
Exposes GET method to return websocket connections.
All requests should include an auth token.
A way to obtain authentication tokens is shown below.
.. code-block:: bash
% curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='salt' \\
-d password='salt' \\
-d eauth='pam'
Which results in the response
.. code-block:: json
{
"return": [{
"perms": [".*", "@runner", "@wheel"],
"start": 1400556492.277421,
"token": "d0ce6c1a37e99dcc0374392f272fe19c0090cca7",
"expire": 1400599692.277422,
"user": "salt",
"eauth": "pam"
}]
}
In this example the ``token`` returned is ``d0ce6c1a37e99dcc0374392f272fe19c0090cca7`` and can be included
in subsequent websocket requests (as part of the URL).
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
// Get the Websocket connection to Salt
var source = new Websocket('wss://localhost:8000/all_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7');
// Get Salt's "real time" event stream.
source.onopen = function() { source.send('websocket client ready'); };
// Other handlers
source.onerror = function(e) { console.debug('error!', e); };
// e.data represents Salt's "real time" event data as serialized JSON.
source.onmessage = function(e) { console.debug(e.data); };
// Terminates websocket connection and Salt's "real time" event stream on the server.
source.close();
Or via Python, using the Python module
`websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example.
Or the tornado
`client <http://tornado.readthedocs.org/en/latest/websocket.html#client-side-support>`_.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
# Get the Websocket connection to Salt
ws = create_connection('wss://localhost:8000/all_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7')
# Get Salt's "real time" event stream.
ws.send('websocket client ready')
# Simple listener to print results of Salt's "real time" event stream.
# Look at https://pypi.python.org/pypi/websocket-client/ for more examples.
while listening_to_events:
print ws.recv() # Salt's "real time" event data as serialized JSON.
# Terminates websocket connection and Salt's "real time" event stream on the server.
ws.close()
# Please refer to https://github.com/liris/websocket-client/issues/81 when using a self signed cert
Above examples show how to establish a websocket connection to Salt and activate
real time updates from Salt's event stream by signaling ``websocket client ready``.
Formatted Events
-----------------
Exposes ``formatted`` "real-time" events from Salt's event bus on a websocket connection.
It should be noted that "Real-time" here means these events are made available
to the server as soon as any salt related action (changes to minions, new jobs etc) happens.
Clients are however assumed to be able to tolerate any network transport related latencies.
Functionality provided by this endpoint is similar to the ``/events`` end point.
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
Formatted events parses the raw "real time" event stream and maintains
a current view of the following:
- minions
- jobs
A change to the minions (such as addition, removal of keys or connection drops)
or jobs is processed and clients are updated.
Since we use salt's presence events to track minions,
please enable ``presence_events``
and set a small value for the ``loop_interval``
in the salt master config file.
Exposes GET method to return websocket connections.
All requests should include an auth token.
A way to obtain authentication tokens is shown below.
.. code-block:: bash
% curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='salt' \\
-d password='salt' \\
-d eauth='pam'
Which results in the response
.. code-block:: json
{
"return": [{
"perms": [".*", "@runner", "@wheel"],
"start": 1400556492.277421,
"token": "d0ce6c1a37e99dcc0374392f272fe19c0090cca7",
"expire": 1400599692.277422,
"user": "salt",
"eauth": "pam"
}]
}
In this example the ``token`` returned is ``d0ce6c1a37e99dcc0374392f272fe19c0090cca7`` and can be included
in subsequent websocket requests (as part of the URL).
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
// Get the Websocket connection to Salt
var source = new Websocket('wss://localhost:8000/formatted_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7');
// Get Salt's "real time" event stream.
source.onopen = function() { source.send('websocket client ready'); };
// Other handlers
source.onerror = function(e) { console.debug('error!', e); };
// e.data represents Salt's "real time" event data as serialized JSON.
source.onmessage = function(e) { console.debug(e.data); };
// Terminates websocket connection and Salt's "real time" event stream on the server.
source.close();
Or via Python, using the Python module
`websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example.
Or the tornado
`client <http://tornado.readthedocs.org/en/latest/websocket.html#client-side-support>`_.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
# Get the Websocket connection to Salt
ws = create_connection('wss://localhost:8000/formatted_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7')
# Get Salt's "real time" event stream.
ws.send('websocket client ready')
# Simple listener to print results of Salt's "real time" event stream.
# Look at https://pypi.python.org/pypi/websocket-client/ for more examples.
while listening_to_events:
print ws.recv() # Salt's "real time" event data as serialized JSON.
# Terminates websocket connection and Salt's "real time" event stream on the server.
ws.close()
# Please refer to https://github.com/liris/websocket-client/issues/81 when using a self signed cert
Above examples show how to establish a websocket connection to Salt and activate
real time updates from Salt's event stream by signaling ``websocket client ready``.
Example responses
-----------------
``Minion information`` is a dictionary keyed by each connected minion's ``id`` (``mid``),
grains information for each minion is also included.
Minion information is sent in response to the following minion events:
- connection drops
- requires running ``manage.present`` periodically every ``loop_interval`` seconds
- minion addition
- minion removal
.. code-block:: python
# Not all grains are shown
data: {
"minions": {
"minion1": {
"id": "minion1",
"grains": {
"kernel": "Darwin",
"domain": "local",
"zmqversion": "4.0.3",
"kernelrelease": "13.2.0"
}
}
}
}
``Job information`` is also tracked and delivered.
Job information is also a dictionary
in which each job's information is keyed by salt's ``jid``.
.. code-block:: python
data: {
"jobs": {
"20140609153646699137": {
"tgt_type": "glob",
"jid": "20140609153646699137",
"tgt": "*",
"start_time": "2014-06-09T15:36:46.700315",
"state": "complete",
"fun": "test.ping",
"minions": {
"minion1": {
"return": true,
"retcode": 0,
"success": true
}
}
}
}
}
Setup
=====
In order to run rest_tornado with the salt-master
add the following to your salt master config file.
@ -320,15 +56,13 @@ import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.gen
import tornado.websocket
from tornado.concurrent import Future
from . import event_processor
from collections import defaultdict
import math
import functools
import json
import yaml
import zmq
import fnmatch
@ -343,6 +77,8 @@ import salt.runner
import salt.auth
from salt import syspaths
json = salt.utils.import_json()
logger = logging.getLogger()
# The clients rest_cherrypi supports. We want to mimic the interface, but not
@ -358,7 +94,9 @@ logger = logging.getLogger()
class SaltClientsMixIn(object):
'''
MixIn class to contain all of the salt clients that the API needs
'''
@property
def saltclients(self):
if not hasattr(self, '__saltclients'):
@ -398,13 +136,19 @@ class Any(Future):
class EventListener(object):
'''
Class responsible for listening to the salt master event bus and updating
futures. This is the core of what makes this async, this allows us to do
non-blocking work in the main processes and "wait" for an event to happen
'''
def __init__(self, mod_opts, opts):
self.mod_opts = mod_opts
self.opts = opts
self.event = salt.utils.event.get_event(
'master',
opts['sock_dir'],
opts['transport'])
'master',
opts['sock_dir'],
opts['transport'])
# tag -> list of futures
self.tag_map = defaultdict(list)
@ -430,9 +174,11 @@ class EventListener(object):
if len(self.tag_map[tag]) == 0:
del self.tag_map[tag]
def get_event(self, request,
tag='',
callback=None):
def get_event(self,
request,
tag='',
callback=None,
):
'''
Get an event (async of course) return a future that will get it later
'''
@ -680,8 +426,8 @@ class SaltAuthHandler(BaseSaltAPIHandler):
perms = self.application.opts['external_auth'][token['eauth']][token['name']]
except (AttributeError, IndexError):
logging.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
# TODO better error -- 'Configuration for external_auth could not be read.'
self.send_error(500)
@ -771,9 +517,9 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):
# ping all the minions (to see who we have to talk to)
# TODO: actually ping them all? this just gets the pub data
minions = self.saltclients['local'](chunk['tgt'],
'test.ping',
[],
expr_form=f_call['kwargs']['expr_form'])['minions']
'test.ping',
[],
expr_form=f_call['kwargs']['expr_form'])['minions']
chunk_ret = {}
maxflight = get_batch_size(f_call['kwargs']['batch'], len(minions))
@ -992,108 +738,6 @@ class EventsSaltAPIHandler(SaltAPIHandler):
self.finish()
class AllEventsHandler(tornado.websocket.WebSocketHandler):
'''
Server side websocket handler.
'''
def open(self, token):
'''
Return a websocket connection to Salt
representing Salt's "real time" event stream.
'''
logger.debug('In the websocket open method')
self.token = token
# close the connection, if not authenticated
if not self.application.auth.get_tok(token):
logger.debug('Refusing websocket connection, bad token!')
self.close()
return
self.connected = False
@tornado.gen.coroutine
def on_message(self, message):
"""Listens for a "websocket client ready" message.
Once that message is received an asynchronous job
is started that yields messages to the client.
These messages make up salt's
"real time" event stream.
"""
logger.debug('Got websocket message {0}'.format(message))
if message == 'websocket client ready':
if self.connected:
# TBD: Add ability to run commands in this branch
logger.debug('Websocket already connected, returning')
return
self.connected = True
while True:
try:
event = yield self.application.event_listener.get_event(self)
self.write_message(u'data: {0}\n\n'.format(json.dumps(event)))
except Exception as err:
logger.info('Error! Ending server side websocket connection. Reason = {0}'.format(str(err)))
break
self.close()
else:
# TBD: Add logic to run salt commands here
pass
def on_close(self, *args, **kwargs):
'''Cleanup.
'''
logger.debug('In the websocket close method')
self.close()
class FormattedEventsHandler(AllEventsHandler):
@tornado.gen.coroutine
def on_message(self, message):
"""Listens for a "websocket client ready" message.
Once that message is received an asynchronous job
is started that yields messages to the client.
These messages make up salt's
"real time" event stream.
"""
logger.debug('Got websocket message {0}'.format(message))
if message == 'websocket client ready':
if self.connected:
# TBD: Add ability to run commands in this branch
logger.debug('Websocket already connected, returning')
return
self.connected = True
evt_processor = event_processor.SaltInfo(self)
client = salt.netapi.NetapiClient(self.application.opts)
client.run({
'fun': 'grains.items',
'tgt': '*',
'token': self.token,
'mode': 'client',
'async': 'local_async',
'client': 'local'
})
while True:
try:
event = yield self.application.event_listener.get_event(self)
evt_processor.process(event, self.token, self.application.opts)
# self.write_message(u'data: {0}\n\n'.format(json.dumps(event)))
except Exception as err:
logger.debug('Error! Ending server side websocket connection. Reason = {0}'.format(str(err)))
break
self.close()
else:
# TBD: Add logic to run salt commands here
pass
class WebhookSaltAPIHandler(SaltAPIHandler):
'''
Handler for /run requests
@ -1110,9 +754,9 @@ class WebhookSaltAPIHandler(SaltAPIHandler):
# TODO: consolidate??
self.event = salt.utils.event.get_event(
'master',
self.application.opts['sock_dir'],
self.application.opts['transport'])
'master',
self.application.opts['sock_dir'],
self.application.opts['transport'])
ret = self.event.fire_event({
'post': self.raw_data,

View File

@ -0,0 +1,406 @@
# encoding: utf-8
'''
A Websockets add-on to saltnado
===================
.. py:currentmodule:: salt.netapi.rest_tornado.saltnado
:depends: - tornado Python module
In order to enable saltnado_websockets you must add websockets: True to your
saltnado config block.
.. code-block:: yaml
rest_tornado:
# can be any port
port: 8000
ssl_crt: /etc/pki/api/certs/server.crt
# no need to specify ssl_key if cert and key
# are in one single file
ssl_key: /etc/pki/api/certs/server.key
debug: False
disable_ssl: False
websockets: True
All Events
----------
Exposes ``all`` "real-time" events from Salt's event bus on a websocket connection.
It should be noted that "Real-time" here means these events are made available
to the server as soon as any salt related action (changes to minions, new jobs etc) happens.
Clients are however assumed to be able to tolerate any network transport related latencies.
Functionality provided by this endpoint is similar to the ``/events`` end point.
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
Exposes GET method to return websocket connections.
All requests should include an auth token.
A way to obtain authentication tokens is shown below.
.. code-block:: bash
% curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='salt' \\
-d password='salt' \\
-d eauth='pam'
Which results in the response
.. code-block:: json
{
"return": [{
"perms": [".*", "@runner", "@wheel"],
"start": 1400556492.277421,
"token": "d0ce6c1a37e99dcc0374392f272fe19c0090cca7",
"expire": 1400599692.277422,
"user": "salt",
"eauth": "pam"
}]
}
In this example the ``token`` returned is ``d0ce6c1a37e99dcc0374392f272fe19c0090cca7`` and can be included
in subsequent websocket requests (as part of the URL).
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
// Get the Websocket connection to Salt
var source = new Websocket('wss://localhost:8000/all_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7');
// Get Salt's "real time" event stream.
source.onopen = function() { source.send('websocket client ready'); };
// Other handlers
source.onerror = function(e) { console.debug('error!', e); };
// e.data represents Salt's "real time" event data as serialized JSON.
source.onmessage = function(e) { console.debug(e.data); };
// Terminates websocket connection and Salt's "real time" event stream on the server.
source.close();
Or via Python, using the Python module
`websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example.
Or the tornado
`client <http://tornado.readthedocs.org/en/latest/websocket.html#client-side-support>`_.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
# Get the Websocket connection to Salt
ws = create_connection('wss://localhost:8000/all_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7')
# Get Salt's "real time" event stream.
ws.send('websocket client ready')
# Simple listener to print results of Salt's "real time" event stream.
# Look at https://pypi.python.org/pypi/websocket-client/ for more examples.
while listening_to_events:
print ws.recv() # Salt's "real time" event data as serialized JSON.
# Terminates websocket connection and Salt's "real time" event stream on the server.
ws.close()
# Please refer to https://github.com/liris/websocket-client/issues/81 when using a self signed cert
Above examples show how to establish a websocket connection to Salt and activate
real time updates from Salt's event stream by signaling ``websocket client ready``.
Formatted Events
-----------------
Exposes ``formatted`` "real-time" events from Salt's event bus on a websocket connection.
It should be noted that "Real-time" here means these events are made available
to the server as soon as any salt related action (changes to minions, new jobs etc) happens.
Clients are however assumed to be able to tolerate any network transport related latencies.
Functionality provided by this endpoint is similar to the ``/events`` end point.
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
Formatted events parses the raw "real time" event stream and maintains
a current view of the following:
- minions
- jobs
A change to the minions (such as addition, removal of keys or connection drops)
or jobs is processed and clients are updated.
Since we use salt's presence events to track minions,
please enable ``presence_events``
and set a small value for the ``loop_interval``
in the salt master config file.
Exposes GET method to return websocket connections.
All requests should include an auth token.
A way to obtain authentication tokens is shown below.
.. code-block:: bash
% curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='salt' \\
-d password='salt' \\
-d eauth='pam'
Which results in the response
.. code-block:: json
{
"return": [{
"perms": [".*", "@runner", "@wheel"],
"start": 1400556492.277421,
"token": "d0ce6c1a37e99dcc0374392f272fe19c0090cca7",
"expire": 1400599692.277422,
"user": "salt",
"eauth": "pam"
}]
}
In this example the ``token`` returned is ``d0ce6c1a37e99dcc0374392f272fe19c0090cca7`` and can be included
in subsequent websocket requests (as part of the URL).
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
// Get the Websocket connection to Salt
var source = new Websocket('wss://localhost:8000/formatted_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7');
// Get Salt's "real time" event stream.
source.onopen = function() { source.send('websocket client ready'); };
// Other handlers
source.onerror = function(e) { console.debug('error!', e); };
// e.data represents Salt's "real time" event data as serialized JSON.
source.onmessage = function(e) { console.debug(e.data); };
// Terminates websocket connection and Salt's "real time" event stream on the server.
source.close();
Or via Python, using the Python module
`websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example.
Or the tornado
`client <http://tornado.readthedocs.org/en/latest/websocket.html#client-side-support>`_.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
# Get the Websocket connection to Salt
ws = create_connection('wss://localhost:8000/formatted_events/d0ce6c1a37e99dcc0374392f272fe19c0090cca7')
# Get Salt's "real time" event stream.
ws.send('websocket client ready')
# Simple listener to print results of Salt's "real time" event stream.
# Look at https://pypi.python.org/pypi/websocket-client/ for more examples.
while listening_to_events:
print ws.recv() # Salt's "real time" event data as serialized JSON.
# Terminates websocket connection and Salt's "real time" event stream on the server.
ws.close()
# Please refer to https://github.com/liris/websocket-client/issues/81 when using a self signed cert
Above examples show how to establish a websocket connection to Salt and activate
real time updates from Salt's event stream by signaling ``websocket client ready``.
Example responses
-----------------
``Minion information`` is a dictionary keyed by each connected minion's ``id`` (``mid``),
grains information for each minion is also included.
Minion information is sent in response to the following minion events:
- connection drops
- requires running ``manage.present`` periodically every ``loop_interval`` seconds
- minion addition
- minion removal
.. code-block:: python
# Not all grains are shown
data: {
"minions": {
"minion1": {
"id": "minion1",
"grains": {
"kernel": "Darwin",
"domain": "local",
"zmqversion": "4.0.3",
"kernelrelease": "13.2.0"
}
}
}
}
``Job information`` is also tracked and delivered.
Job information is also a dictionary
in which each job's information is keyed by salt's ``jid``.
.. code-block:: python
data: {
"jobs": {
"20140609153646699137": {
"tgt_type": "glob",
"jid": "20140609153646699137",
"tgt": "*",
"start_time": "2014-06-09T15:36:46.700315",
"state": "complete",
"fun": "test.ping",
"minions": {
"minion1": {
"return": true,
"retcode": 0,
"success": true
}
}
}
}
}
Setup
=====
'''
import tornado.websocket
from . import event_processor
import tornado.gen
import salt.utils
import salt.netapi
json = salt.utils.import_json()
import logging
logger = logging.getLogger()
class AllEventsHandler(tornado.websocket.WebSocketHandler):  # pylint: disable=W0232
    '''
    Server side websocket handler.

    Streams Salt's "real time" event bus to an authenticated websocket
    client, one serialized-JSON message per event.
    '''

    def open(self, token):
        '''
        Return a websocket connection to Salt
        representing Salt's "real time" event stream.

        ``token`` is the eauth token embedded in the websocket URL; the
        connection is refused and closed if it does not validate against
        the application's auth store.
        '''
        logger.debug('In the websocket open method')

        self.token = token
        # close the connection, if not authenticated
        if not self.application.auth.get_tok(token):
            logger.debug('Refusing websocket connection, bad token!')
            self.close()
            return

        # Flipped to True once the client signals readiness; guards
        # against starting a second event-streaming loop on re-sends.
        self.connected = False

    @tornado.gen.coroutine
    def on_message(self, message):
        """Listens for a "websocket client ready" message.
        Once that message is received an asynchronous job
        is started that yields messages to the client.
        These messages make up salt's
        "real time" event stream.
        """
        logger.debug('Got websocket message {0}'.format(message))
        if message == 'websocket client ready':
            if self.connected:
                # TBD: Add ability to run commands in this branch
                logger.debug('Websocket already connected, returning')
                return
            self.connected = True

            while True:
                try:
                    # Wait (without blocking the IOLoop) for the next
                    # event off the master event bus, then push it out.
                    event = yield self.application.event_listener.get_event(self)
                    self.write_message(u'data: {0}\n\n'.format(json.dumps(event)))
                except Exception as err:
                    # Any failure (client gone, bus error) ends the stream.
                    logger.info('Error! Ending server side websocket connection. Reason = {0}'.format(str(err)))
                    break

            self.close()
        else:
            # TBD: Add logic to run salt commands here
            pass

    def on_close(self, *args, **kwargs):
        '''Cleanup.
        '''
        logger.debug('In the websocket close method')
        self.close()
class FormattedEventsHandler(AllEventsHandler):  # pylint: disable=W0232
    '''
    Websocket handler that, instead of raw events, pushes a processed
    view (minions and jobs) maintained by ``event_processor.SaltInfo``.
    '''

    @tornado.gen.coroutine
    def on_message(self, message):
        """Listens for a "websocket client ready" message.
        Once that message is received an asynchronous job
        is started that yields formatted messages to the client.
        These messages make up salt's
        "real time" event stream.
        """
        logger.debug('Got websocket message {0}'.format(message))
        if message == 'websocket client ready':
            if self.connected:
                # TBD: Add ability to run commands in this branch
                logger.debug('Websocket already connected, returning')
                return
            self.connected = True

            # Processor that turns raw bus events into the formatted
            # minions/jobs view pushed to this websocket.
            evt_processor = event_processor.SaltInfo(self)
            client = salt.netapi.NetapiClient(self.application.opts)
            # Kick off an async grains.items run against all minions so
            # the processor can seed its minion information.
            client.run({
                'fun': 'grains.items',
                'tgt': '*',
                'token': self.token,
                'mode': 'client',
                'async': 'local_async',
                'client': 'local'
            })
            while True:
                try:
                    event = yield self.application.event_listener.get_event(self)
                    evt_processor.process(event, self.token, self.application.opts)
                    # self.write_message(u'data: {0}\n\n'.format(json.dumps(event)))
                except Exception as err:
                    logger.debug('Error! Ending server side websocket connection. Reason = {0}'.format(str(err)))
                    break

            self.close()
        else:
            # TBD: Add logic to run salt commands here
            pass

View File

@ -102,9 +102,9 @@ class GitPillar(object):
self.working_dir = ''
self.repo = None
needle = '{0} {1}'.format(self.branch, self.rp_location)
for idx, opts_dict in enumerate(self.opts['ext_pillar']):
if opts_dict.get('git', '').startswith(needle):
lopts = opts_dict.get('git', '').split()
if len(lopts) >= 2 and lopts[:2] == [self.branch, self.rp_location]:
rp_ = os.path.join(self.opts['cachedir'],
'pillar_gitfs', str(idx))

View File

@ -55,14 +55,8 @@ def __virtual__():
return False
# try to load some faster json libraries. In order of fastest to slowest
for fast_json in ('ujson', 'yajl'):
try:
mod = __import__(fast_json)
couchbase.set_json_converters(mod.dumps, mod.loads)
log.info('loaded {0} json lib'.format(fast_json))
break
except ImportError:
continue
json = salt.utils.import_json()
couchbase.set_json_converters(json.dumps, json.loads)
return __virtualname__

View File

@ -158,7 +158,7 @@ executed when the state it is watching changes. Example:
- require:
- file: /usr/local/bin/postinstall.sh
How do I create a environment from a pillar map?
How do I create an environment from a pillar map?
-------------------------------------------------------------------------------
The map that comes from a pillar cannot be directly consumed by the env option.

View File

@ -12,6 +12,9 @@ type dynamic updates. Requires dnspython module.
ddns.present:
- zone: example.com
- ttl: 60
- data: 111.222.333.444
- nameserver: 123.234.345.456
- keyfile: /srv/salt/tsig_key.txt
'''
@ -39,7 +42,7 @@ def present(name, zone, ttl, data, rdtype='A', **kwargs):
DNS resource type. Default 'A'.
``**kwargs``
Additional arguments the ddns.update function may need (e.g. keyfile).
Additional arguments the ddns.update function may need (e.g. nameserver, keyfile, keyname).
'''
ret = {'name': name,
'changes': {},
@ -90,7 +93,7 @@ def absent(name, zone, data=None, rdtype=None, **kwargs):
DNS resource type. If omitted, all types will be purged.
``**kwargs``
Additional arguments the ddns.update function may need (e.g. keyfile).
Additional arguments the ddns.delete function may need (e.g. nameserver, keyfile, keyname).
'''
ret = {'name': name,
'changes': {},
@ -110,7 +113,10 @@ def absent(name, zone, data=None, rdtype=None, **kwargs):
elif status:
ret['result'] = True
ret['comment'] = 'Deleted DNS record(s)'
ret['changes'] = True
ret['changes'] = {'Deleted': {'name': name,
'zone': zone
}
}
else:
ret['result'] = False
ret['comment'] = 'Failed to delete DNS record(s)'

View File

@ -556,7 +556,7 @@ def script(*args, **kw):
def running(name, container=None, port_bindings=None, binds=None,
publish_all_ports=False, links=None, lxc_conf=None,
privileged=False, dns=None, volumes_from=None,
check_is_running=True):
network_mode=None, check_is_running=True):
'''
Ensure that a container is running. (`docker inspect`)
@ -623,6 +623,16 @@ def running(name, container=None, port_bindings=None, binds=None,
- dns:
- name_other_container
network_mode
- 'bridge': creates a new network stack for the container on the docker bridge
- 'none': no networking for this container
- 'container:[name|id]': reuses another container network stack)
- 'host': use the host network stack inside the container
.. code-block:: yaml
- network_mode: host
check_is_running
Enable checking if a container should run or not.
Useful for data-only containers that must be linked to another one.
@ -639,7 +649,7 @@ def running(name, container=None, port_bindings=None, binds=None,
container, binds=binds, port_bindings=port_bindings,
lxc_conf=lxc_conf, publish_all_ports=publish_all_ports,
links=links, privileged=privileged,
dns=dns, volumes_from=volumes_from,
dns=dns, volumes_from=volumes_from, network_mode=network_mode,
)
if check_is_running:
is_running = __salt__['docker.is_running'](container)
@ -651,9 +661,14 @@ def running(name, container=None, port_bindings=None, binds=None,
changes={name: True})
else:
return _invalid(
comment=('Container {0!r}'
' cannot be started\n{0!s}').format(container,
started['out']))
comment=(
'Container {0!r} cannot be started\n{0!s}'
.format(
container,
started['out'],
)
)
)
else:
return _valid(
comment='Container {0!r} started.\n'.format(container),

View File

@ -6,11 +6,12 @@ Operations on regular files, special files, directories, and symlinks
Salt States can aggressively manipulate files on a system. There are a number
of ways in which files can be managed.
Regular files can be enforced with the ``managed`` function. This function
downloads files from the salt master and places them on the target system.
The downloaded files can be rendered as a jinja, mako, or wempy template,
adding a dynamic component to file management. An example of ``file.managed``
which makes use of the jinja templating system would look like this:
Regular files can be enforced with the :mod:`file.managed
<salt.states.file.managed>` state. This state downloads files from the salt
master and places them on the target system. Managed files can be rendered as a
jinja, mako, or wempy template, adding a dynamic component to file management.
An example of :mod:`file.managed <salt.states.file.managed>` which makes use of
the jinja templating system would look like this:
.. code-block:: yaml
@ -29,6 +30,17 @@ which makes use of the jinja templating system would look like this:
custom_var: "override"
{% endif %}
It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a
templating option. The template would be a python script which would need to
contain a function called ``run()``, which returns a string. The returned
string will be the contents of the managed file. For example:
.. code-block:: python
def run():
lines = ('foo', 'bar', 'baz')
return '\n\n'.join(lines)
.. note::
When using both the ``defaults`` and ``context`` arguments, note the extra

View File

@ -484,7 +484,7 @@ def config(name,
cwd=repo,
user=user)
except CommandExecutionError:
oval = 'None'
oval = None
if value == oval:
ret['comment'] = 'No changes made'
@ -504,6 +504,9 @@ def config(name,
cwd=repo,
user=user)
if oval is None:
oval = 'None'
ret['changes'][name] = '{0} => {1}'.format(oval, nval)
return ret

169
salt/states/schedule.py Normal file
View File

@ -0,0 +1,169 @@
# -*- coding: utf-8 -*-
'''
Management of the Salt scheduler
==============================================
.. code-block:: yaml
job3:
schedule.present:
- function: test.ping
- seconds: 3600
- splay: 10
This will schedule the command: test.ping every 3600 seconds
(every hour) splaying the time between 0 and 10 seconds
job2:
schedule.present:
- function: test.ping
- seconds: 15
- splay:
- start: 10
- end: 20
This will schedule the command: test.ping every 15 seconds
splaying the time between 10 and 20 seconds
job1:
schedule.present:
- function: state.sls
- args:
- httpd
- kwargs:
test: True
- when:
- Monday 5:00pm
- Tuesday 3:00pm
- Wednesday 5:00pm
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday.
'''
import logging
log = logging.getLogger(__name__)
def present(name,
            **kwargs):
    '''
    Ensure a job is present in the schedule

    name
        The unique name that is given to the scheduled job.

    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.

    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.

    hours
        The scheduled job will be executed after the specified
        number of hours have passed.

    days
        The scheduled job will be executed after the specified
        number of days have passed.

    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.

    function
        The function that should be executed by the scheduled job.

    job_args
        The arguments that will be used by the scheduled job.

    job_kwargs
        The keyword arguments that will be used by the scheduled job.

    maxrunning
        Ensure that there are no more than N copies of a particular job running.

    jid_include
        Include the job into the job cache.

    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.

    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name in current_schedule:
        # The job already exists; rebuild it from the requested kwargs and
        # only modify the live schedule if the two differ.
        new_item = __salt__['schedule.build_schedule_item'](name, **kwargs)
        if new_item == current_schedule[name]:
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            result = __salt__['schedule.modify'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'].append(result['comment'])
                return ret
            else:
                ret['comment'].append('Modifying job {0} in schedule'.format(name))
                ret['changes'] = result['changes']
    else:
        result = __salt__['schedule.add'](name, **kwargs)
        if not result['result']:
            ret['result'] = result['result']
            ret['comment'].append(result['comment'])
            return ret
        else:
            ret['comment'].append('Adding new job {0} to schedule'.format(name))
            # Record the addition in the changes dict so that state output
            # (and test runs) report the change, mirroring the modify branch.
            ret['changes'][name] = 'added'

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def absent(name, **kwargs):
    '''
    Ensure a job is absent from the schedule

    name
        The unique name that is given to the scheduled job.
    '''
    ### NOTE: The keyword arguments in **kwargs are ignored in this state, but
    ### cannot be removed from the function definition, otherwise the use
    ### of unsupported arguments will result in a traceback.

    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name not in current_schedule:
        # Nothing to do -- the job is already gone.
        ret['comment'].append('Job {0} not present in schedule'.format(name))
    else:
        result = __salt__['schedule.delete'](name)
        if result['result']:
            ret['comment'].append('Removed job {0} from schedule'.format(name))
        else:
            ret['result'] = result['result']
            ret['comment'].append(result['comment'])

    ret['comment'] = '\n'.join(ret['comment'])
    return ret

View File

@ -133,7 +133,9 @@ def installed(name,
debug=False,
verbose=False,
unless=None,
onlyif=None):
onlyif=None,
use_vt=False,
loglevel='debug'):
'''
Install buildout in a specific directory
@ -198,6 +200,12 @@ def installed(name,
verbose
run buildout in verbose mode (-vvvvv)
use_vt
Use the new salt VT to stream output [experimental]
loglevel
loglevel for buildout commands
'''
ret = {}
@ -248,6 +256,7 @@ def installed(name,
verbose=verbose,
onlyif=onlyif,
unless=unless,
use_vt=use_vt
)
ret.update(_ret_status(func(**kwargs), name, quiet=quiet))
return ret

View File

@ -2333,3 +2333,16 @@ def total_seconds(td):
method which does not exist in versions of Python < 2.7.
'''
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def import_json():
    '''
    Return the first importable JSON module, preferring the fast
    third-party implementations (ujson, yajl) over the stdlib json.
    Returns None only if none of the candidates can be imported.
    '''
    for candidate in ('ujson', 'yajl', 'json'):
        try:
            json_mod = __import__(candidate)
        except ImportError:
            continue
        log.info('loaded {0} json lib'.format(candidate))
        return json_mod

View File

@ -77,8 +77,8 @@ localtime.
- Thursday 3:00pm
- Friday 5:00pm
This will schedule the command: state.sls httpd test=True at 5pm on Monday, Wednesday
and Friday, and 3pm on Tuesday and Thursday.
This will schedule the command: state.sls httpd test=True at 5pm on Monday,
Wednesday and Friday, and 3pm on Tuesday and Thursday.
schedule:
job1:
@ -92,7 +92,6 @@ and Friday, and 3pm on Tuesday and Thursday.
start: 8:00am
end: 5:00pm
w
This will schedule the command: state.sls httpd test=True every 3600 seconds
(every hour) between the hours of 8am and 5pm. The range parameter must be a
dictionary with the date strings using the dateutil format.

View File

@ -49,7 +49,7 @@ BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION = os.environ.get(
'BOOTSTRAP_SCRIPT_VERSION',
# If no bootstrap-script version was provided from the environment, let's
# provide the one we define.
'v2014.06.19'
'v2014.06.21'
)
# Store a reference to the executing platform
@ -407,19 +407,13 @@ class Install(install):
self.salt_transport
)
)
if self.salt_transport == 'none':
elif self.salt_transport == 'none':
for requirement in _parse_requirements_file(SALT_ZEROMQ_REQS):
if requirement not in self.distribution.install_requires:
continue
self.distribution.install_requires.remove(requirement)
return
if self.salt_transport in ('zeromq', 'both'):
self.distribution.install_requires.extend(
_parse_requirements_file(SALT_ZEROMQ_REQS)
)
if self.salt_transport in ('raet', 'both'):
elif self.salt_transport in ('raet', 'both'):
self.distribution.install_requires.extend(
_parse_requirements_file(SALT_RAET_REQS)
)
@ -550,7 +544,9 @@ SETUP_KWARGS = {'name': NAME,
],
# Required for esky builds, ZeroMQ or RAET deps will be added
# at install time
'install_requires': _parse_requirements_file(SALT_REQS),
'install_requires':
_parse_requirements_file(SALT_REQS) +
_parse_requirements_file(SALT_ZEROMQ_REQS),
'extras_require': {
'RAET': _parse_requirements_file(SALT_RAET_REQS),
'Cloud': _parse_requirements_file(SALT_CLOUD_REQS)
@ -570,6 +566,7 @@ if IS_WINDOWS_PLATFORM is False:
'doc/man/salt-master.1',
'doc/man/salt-key.1',
'doc/man/salt.1',
'doc/man/salt-api.1',
'doc/man/salt-syndic.1',
'doc/man/salt-run.1',
'doc/man/salt-ssh.1',

View File

@ -6,15 +6,14 @@
# Import Python Libs
import os
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
@ -38,6 +37,7 @@ class DigitalOceanTest(integration.ShellCase):
Integration tests for the Digital Ocean cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
@ -70,7 +70,6 @@ class DigitalOceanTest(integration.ShellCase):
.format(provider)
)
@expensiveTest
def test_instance(self):
'''
Test creating an instance on Digital Ocean

View File

@ -6,16 +6,16 @@
# Import Python Libs
import os
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
import libcloud # pylint: disable=W0611
@ -31,6 +31,7 @@ class GoGridTest(integration.ShellCase):
Integration tests for the GoGrid cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
@ -63,7 +64,6 @@ class GoGridTest(integration.ShellCase):
.format(provider)
)
@expensiveTest
def test_instance(self):
'''
Test creating an instance on GoGrid

View File

@ -6,16 +6,16 @@
# Import Python Libs
import os
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
import libcloud # pylint: disable=W0611
@ -30,6 +30,7 @@ class LinodeTest(integration.ShellCase):
Integration tests for the Linode cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
@ -63,7 +64,6 @@ class LinodeTest(integration.ShellCase):
)
)
@expensiveTest
def test_instance(self):
'''
Test creating an instance on Linode

View File

@ -6,16 +6,16 @@
# Import Python Libs
import os
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
import libcloud # pylint: disable=W0611
@ -30,6 +30,7 @@ class RackspaceTest(integration.ShellCase):
Integration tests for the Rackspace cloud provider using the Openstack driver
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
@ -63,7 +64,6 @@ class RackspaceTest(integration.ShellCase):
.format(provider)
)
@expensiveTest
def test_instance(self):
'''
Test creating an instance on rackspace with the openstack driver

View File

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
'''
integration.loader.globals
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test Salt's loader regarding globals that it should pack in
'''
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
# Import salt libs
import integration
import salt.loader
import inspect
import yaml
class LoaderGlobalsTest(integration.ModuleCase):
    '''
    Test all of the globals that the loader is responsible for adding to modules

    This shouldn't be done here, but should rather be done per module type (in the cases where they are used)
    so they can check ALL globals that they have (or should have) access to.

    This is intended as a shorter term way of testing these so we don't break the loader

    NOTE: the docstring of each test_* method below doubles as test data --
    _verify_globals parses the caller's docstring as YAML to obtain the list
    of expected global names, so the formatting of those docstrings is
    significant and must not be changed casually.
    '''
    def _verify_globals(self, mod_dict):
        '''
        Verify that the globals listed in the doc string (from the test) are in these modules
        '''
        # find the globals
        global_vars = None
        for val in mod_dict.itervalues():  # Py2-style dict iteration
            if hasattr(val, '__globals__'):
                # NOTE(review): only the first function found is inspected --
                # assumes the loader packs the same globals into every
                # function of the module.
                global_vars = val.__globals__
                break

        # if we couldn't find any, then we have no modules-- so something is broken
        if global_vars is None:
            # TODO: log or something? Skip however we do that
            return

        # get the names of the globals you should have
        # inspect.stack()[1][3] is the *caller's* function name, so this only
        # works when invoked directly from the test method whose docstring
        # holds the expected names (as YAML).
        func_name = inspect.stack()[1][3]
        names = yaml.load(getattr(self, func_name).__doc__).values()[0]

        for name in names:
            assert name in global_vars

    def test_auth(self):
        '''
        Test that auth mods have:
            - __pillar__
            - __grains__
            - __salt__
        '''
        self._verify_globals(salt.loader.auth(self.master_opts))

    def test_runners(self):
        '''
        Test that runners have:
            - __pillar__
            - __salt__
            - __opts__
            - __grains__
        '''
        self._verify_globals(salt.loader.runner(self.master_opts))

    def test_returners(self):
        '''
        Test that returners have:
            - __salt__
            - __opts__
            - __pillar__
            - __grains__
        '''
        self._verify_globals(salt.loader.returners(self.master_opts, {}))

    def test_pillars(self):
        '''
        Test that pillars have:
            - __salt__
            - __opts__
            - __pillar__
            - __grains__
        '''
        self._verify_globals(salt.loader.pillars(self.master_opts, {}))

    def test_tops(self):
        '''
        Test that tops have: []
        '''
        self._verify_globals(salt.loader.tops(self.master_opts))

    def test_outputters(self):
        '''
        Test that outputters have:
            - __opts__
            - __pillar__
            - __grains__
        '''
        self._verify_globals(salt.loader.outputters(self.master_opts))

    def test_states(self):
        '''
        Test that states:
            - __pillar__
            - __salt__
            - __opts__
            - __grains__
        '''
        self._verify_globals(salt.loader.states(self.master_opts, {}))

    def test_log_handlers(self):
        '''
        Test that log_handlers have:
            - __path__
        '''
        self._verify_globals(salt.loader.log_handlers(self.master_opts))

    def test_renderers(self):
        '''
        Test that renderers have:
            - __salt__      # Execution functions (i.e. __salt__['test.echo']('foo'))
            - __grains__    # Grains (i.e. __grains__['os'])
            - __pillar__    # Pillar data (i.e. __pillar__['foo'])
            - __opts__      # Minion configuration options
        '''
        self._verify_globals(salt.loader.render(self.master_opts, {}))
# Allow running this module directly; the loader checks above do not
# require a running salt daemon.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(LoaderGlobalsTest, needs_daemon=False)

View File

@ -66,6 +66,10 @@ def parse():
dest='root_dir',
default=None,
help='Override the minion root_dir config')
parser.add_option('--transport',
dest='transport',
default='zeromq',
help='Declare which transport to use, default is zeromq')
parser.add_option(
'-c', '--config-dir', default='/etc/salt',
help=('Pass in an alternative configuration directory. Default: '
@ -99,7 +103,8 @@ class Swarm(object):
self.swarm_root = tempfile.mkdtemp(prefix='mswarm-root', suffix='.d',
dir=tmpdir)
self.pki = self._pki_dir()
if self.opts['transport'] == 'zeromq':
self.pki = self._pki_dir()
self.__zfill = len(str(self.opts['minions']))
self.confs = set()
@ -133,22 +138,25 @@ class Swarm(object):
dpath = os.path.join(self.swarm_root, minion_id)
os.makedirs(dpath)
minion_pkidir = os.path.join(dpath, 'pki')
os.makedirs(minion_pkidir)
minion_pem = os.path.join(self.pki, 'minion.pem')
minion_pub = os.path.join(self.pki, 'minion.pub')
shutil.copy(minion_pem, minion_pkidir)
shutil.copy(minion_pub, minion_pkidir)
data = {
'id': minion_id,
'user': self.opts['user'],
'pki_dir': minion_pkidir,
'cachedir': os.path.join(dpath, 'cache'),
'master': self.opts['master'],
'log_file': os.path.join(dpath, 'minion.log')
}
if self.opts['transport'] == 'zeromq':
minion_pkidir = os.path.join(dpath, 'pki')
os.makedirs(minion_pkidir)
minion_pem = os.path.join(self.pki, 'minion.pem')
minion_pub = os.path.join(self.pki, 'minion.pub')
shutil.copy(minion_pem, minion_pkidir)
shutil.copy(minion_pub, minion_pkidir)
data['pki_dir'] = minion_pkidir
elif self.opts['transport'] == 'raet':
data['transport'] = 'raet'
if self.opts['root_dir']:
data['root_dir'] = self.opts['root_dir']

View File

@ -474,6 +474,10 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
sconfig.get_id(cache=False), (MOCK_HOSTNAME, False)
)
# <---- Salt Cloud Configuration Tests ---------------------------------------------
# cloud_config tests
def test_cloud_config_vm_profiles_config(self):
'''
Tests passing in vm_config and profiles_config.
@ -524,7 +528,8 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
profiles_config_path='foo', profiles_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
@patch('salt.config.apply_cloud_config', MagicMock(return_value={'providers': 'foo'}))
@patch('salt.config.apply_cloud_config',
MagicMock(return_value={'providers': 'foo'}))
def test_cloud_config_providers_in_opts(self):
'''
Tests mixing old cloud providers with pre-configured providers configurations
@ -534,7 +539,8 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
providers_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
@patch('salt.config.apply_cloud_config', MagicMock(return_value={'providers': 'foo'}))
@patch('salt.config.apply_cloud_config',
MagicMock(return_value={'providers': 'foo'}))
@patch('os.path.isfile', MagicMock(return_value=True))
def test_cloud_config_providers_in_opts_path(self):
'''
@ -544,88 +550,65 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
providers_config_path='bar')
def test_load_cloud_config_from_environ_var(self):
original_environ = os.environ.copy()
# apply_vm_profiles_config tests
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = sconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
# file path is not the default one, ie, the user has passed an
# alternative configuration file form the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = sconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
finally:
# Reset the environ
os.environ.clear()
os.environ.update(original_environ)
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_deploy_search_path_as_string(self):
temp_conf_dir = os.path.join(integration.TMP, 'issue-8863')
config_file_path = os.path.join(temp_conf_dir, 'cloud')
deploy_dir_path = os.path.join(temp_conf_dir, 'test-deploy.d')
try:
for directory in (temp_conf_dir, deploy_dir_path):
if not os.path.isdir(directory):
os.makedirs(directory)
default_config = sconfig.cloud_config(config_file_path)
default_config['deploy_scripts_search_path'] = deploy_dir_path
with salt.utils.fopen(config_file_path, 'w') as cfd:
cfd.write(yaml.dump(default_config))
default_config = sconfig.cloud_config(config_file_path)
# Our custom deploy scripts path was correctly added to the list
self.assertIn(
deploy_dir_path,
default_config['deploy_scripts_search_path']
)
# And it's even the first occurrence as it should
self.assertEqual(
deploy_dir_path,
default_config['deploy_scripts_search_path'][0]
)
finally:
if os.path.isdir(temp_conf_dir):
shutil.rmtree(temp_conf_dir)
def test_includes_load(self):
def test_apply_vm_profiles_config_bad_profile_format(self):
'''
Tests that cloud.{providers,profiles}.d directories are loaded, even if not
directly passed in through path
Tests passing in a bad profile format in overrides
'''
config = sconfig.cloud_config(self.get_config_file_path('cloud'))
self.assertIn('ec2-config', config['providers'])
self.assertIn('Ubuntu-13.04-AMD64', config['profiles'])
overrides = {'foo': 'bar', 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_vm_profiles_config,
PATH, overrides, defaults=DEFAULT)
def test_apply_vm_profiles_config_success(self):
'''
Tests passing in valid provider and profile config files successfully
'''
providers = {'test-provider':
{'digital_ocean':
{'provider': 'digital_ocean', 'profiles': {}}}}
overrides = {'test-profile':
{'provider': 'test-provider',
'image': 'Ubuntu 12.10 x64',
'size': '512MB'},
'conf_file': PATH}
ret = {'test-profile':
{'profile': 'test-profile',
'provider': 'test-provider:digital_ocean',
'image': 'Ubuntu 12.10 x64',
'size': '512MB'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers,
overrides,
defaults=DEFAULT), ret)
def test_apply_vm_profiles_config_extend_success(self):
'''
Tests profile extends functionality with valid provider and profile configs
'''
providers = {'test-config': {'ec2': {'profiles': {}, 'provider': 'ec2'}}}
overrides = {'Amazon': {'image': 'test-image-1',
'extends': 'dev-instances'},
'Fedora': {'image': 'test-image-2',
'extends': 'dev-instances'},
'conf_file': PATH,
'dev-instances': {'ssh_username': 'test_user',
'provider': 'test-config'}}
ret = {'Amazon': {'profile': 'Amazon',
'ssh_username': 'test_user',
'image': 'test-image-1',
'provider': 'test-config:ec2'},
'Fedora': {'profile': 'Fedora',
'ssh_username': 'test_user',
'image': 'test-image-2',
'provider': 'test-config:ec2'},
'dev-instances': {'profile': 'dev-instances',
'ssh_username': 'test_user',
'provider': 'test-config:ec2'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers,
overrides,
defaults=DEFAULT), ret)
# apply_cloud_providers_config tests
def test_apply_cloud_providers_config_same_providers(self):
'''
@ -686,7 +669,10 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
'provider': 'ec2',
'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com'}}}
self.assertEqual(ret, sconfig.apply_cloud_providers_config(overrides, defaults=DEFAULT))
self.assertEqual(ret,
sconfig.apply_cloud_providers_config(
overrides,
defaults=DEFAULT))
def test_apply_cloud_providers_config_extend_multiple(self):
'''
@ -821,6 +807,168 @@ class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
overrides,
DEFAULT)
# is_provider_configured tests
def test_is_provider_configured_no_alias(self):
'''
Tests when provider alias is not in opts
'''
opts = {'providers': 'test'}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_no_driver(self):
'''
Tests when provider driver is not in opts
'''
opts = {'providers': {'foo': 'baz'}}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_key_is_none(self):
'''
Tests when a required configuration key is not set
'''
opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'foo:bar'
self.assertFalse(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)))
def test_is_provider_configured_success(self):
'''
Tests successful cloud provider configuration
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo:bar'
ret = {'api_key': 'baz'}
self.assertEqual(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)), ret)
def test_is_provider_configured_multiple_driver_not_provider(self):
'''
Tests when the drive is not the same as the provider when
searching through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_multiple_key_is_none(self):
'''
Tests when a required configuration key is not set when
searching through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'bar'
self.assertFalse(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)))
def test_is_provider_configured_multiple_success(self):
'''
Tests successful cloud provider configuration when searching
through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'bar'
ret = {'api_key': 'baz'}
self.assertEqual(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)), ret)
# other cloud configuration tests
    def test_load_cloud_config_from_environ_var(self):
        '''
        Ensure SALT_CLOUD_CONFIG is honored when the default config path is
        requested, and ignored when an explicit (non-default) path is given.
        '''
        # Snapshot the environment so it can be fully restored afterwards.
        original_environ = os.environ.copy()

        tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
        try:
            env_root_dir = os.path.join(tempdir, 'foo', 'env')
            os.makedirs(env_root_dir)
            env_fpath = os.path.join(env_root_dir, 'config-env')

            salt.utils.fopen(env_fpath, 'w').write(
                'root_dir: {0}\n'
                'log_file: {1}\n'.format(env_root_dir, env_fpath)
            )

            os.environ['SALT_CLOUD_CONFIG'] = env_fpath
            # Should load from env variable, not the default configuration file
            config = sconfig.cloud_config('/etc/salt/cloud')
            self.assertEqual(config['log_file'], env_fpath)
            # Restore the environment before exercising the second scenario.
            os.environ.clear()
            os.environ.update(original_environ)

            root_dir = os.path.join(tempdir, 'foo', 'bar')
            os.makedirs(root_dir)
            fpath = os.path.join(root_dir, 'config')
            salt.utils.fopen(fpath, 'w').write(
                'root_dir: {0}\n'
                'log_file: {1}\n'.format(root_dir, fpath)
            )
            # Let's set the environment variable, yet, since the configuration
            # file path is not the default one, ie, the user has passed an
            # alternative configuration file form the CLI parser, the
            # environment variable will be ignored.
            os.environ['SALT_CLOUD_CONFIG'] = env_fpath
            config = sconfig.cloud_config(fpath)
            self.assertEqual(config['log_file'], fpath)
        finally:
            # Reset the environ
            os.environ.clear()
            os.environ.update(original_environ)

            if os.path.isdir(tempdir):
                shutil.rmtree(tempdir)
    def test_deploy_search_path_as_string(self):
        '''
        Verify that a custom deploy scripts directory written to the cloud
        config ends up first in deploy_scripts_search_path (regression test,
        presumably for issue #8863 going by the directory name -- confirm).
        '''
        temp_conf_dir = os.path.join(integration.TMP, 'issue-8863')
        config_file_path = os.path.join(temp_conf_dir, 'cloud')
        deploy_dir_path = os.path.join(temp_conf_dir, 'test-deploy.d')

        try:
            for directory in (temp_conf_dir, deploy_dir_path):
                if not os.path.isdir(directory):
                    os.makedirs(directory)

            # Write a config that points the search path at our temp dir,
            # then re-load it to see how the path is merged in.
            default_config = sconfig.cloud_config(config_file_path)
            default_config['deploy_scripts_search_path'] = deploy_dir_path
            with salt.utils.fopen(config_file_path, 'w') as cfd:
                cfd.write(yaml.dump(default_config))

            default_config = sconfig.cloud_config(config_file_path)

            # Our custom deploy scripts path was correctly added to the list
            self.assertIn(
                deploy_dir_path,
                default_config['deploy_scripts_search_path']
            )

            # And it's even the first occurrence as it should
            self.assertEqual(
                deploy_dir_path,
                default_config['deploy_scripts_search_path'][0]
            )
        finally:
            if os.path.isdir(temp_conf_dir):
                shutil.rmtree(temp_conf_dir)
def test_includes_load(self):
'''
Tests that cloud.{providers,profiles}.d directories are loaded, even if not
directly passed in through path
'''
config = sconfig.cloud_config(self.get_config_file_path('cloud'))
self.assertIn('ec2-config', config['providers'])
self.assertIn('Ubuntu-13.04-AMD64', config['profiles'])
# <---- Salt Cloud Configuration Tests ---------------------------------------------
# Allow invoking this test module directly; the config tests above do not
# need a running salt daemon.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(ConfigTestCase, needs_daemon=False)