Merge pull request #20230 from rallytime/merge_forward_develop

Merge 2015.2 into develop
This commit is contained in:
Thomas S Hatch 2015-01-30 10:33:59 -07:00
commit 267d16505f
54 changed files with 2108 additions and 299 deletions

3
.gitignore vendored
View File

@ -70,3 +70,6 @@ tests/integration/files/conf/grains
# ignore the local root
/root/**
# Ignore file cache created by jinja tests
tests/unit/templates/roots

View File

@ -27,10 +27,10 @@ persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=salttesting.pylintplugins.pep8,
salttesting.pylintplugins.pep263,
salttesting.pylintplugins.strings,
salttesting.pylintplugins.fileperms
load-plugins=saltpylint.pep8,
saltpylint.pep263,
saltpylint.strings,
saltpylint.fileperms
# Fileperms Lint Plugin Settings

View File

@ -14,10 +14,10 @@ init-hook="
# Pickle collected data for later comparisons.
persistent=no
load-plugins=salttesting.pylintplugins.pep8,
salttesting.pylintplugins.pep263,
salttesting.pylintplugins.strings,
salttesting.pylintplugins.fileperms
load-plugins=saltpylint.pep8,
saltpylint.pep263,
saltpylint.strings,
saltpylint.fileperms
# Fileperms Lint Plugin Settings
fileperms-default=0644

View File

@ -151,7 +151,6 @@
# auth_tries: 10
# auth_safemode: False
# ping_interval: 90
# restart_on_error: True
#
# Minions won't know the master is missing until a ping fails. After the ping
# fails, the minion will attempt authentication and will likely fail, causing a restart.

View File

@ -0,0 +1,13 @@
.. _all-salt.beacons:
===================================
Full list of builtin beacon modules
===================================
.. currentmodule:: salt.beacons
.. autosummary::
:toctree:
:template: autosummary.rst.tmpl
inotify

View File

@ -0,0 +1,6 @@
====================
salt.beacons.inotify
====================
.. automodule:: salt.beacons.inotify
:members:

View File

@ -163,7 +163,7 @@ at the ``debug`` level:
.. code-block:: yaml
log_granular_levels:
'salt': 'warning',
'salt': 'warning'
'salt.modules': 'debug'
@ -172,4 +172,4 @@ External Logging Handlers
Besides the internal logging handlers used by salt, there are some external
which can be used, see the :doc:`external logging handlers<handlers/index>`
document.
document.

View File

@ -29,4 +29,5 @@ Reference
modules/index
tops/index
tops/all/index
wheel/all/index
wheel/all/index
beacons/all/index

View File

@ -0,0 +1,42 @@
.. _beacons:
=======
Beacons
=======
The beacon system allows the minion to hook into system processes and
continually translate external events into the salt event bus. The
primary example of this is the :py:mod:`~salt.beacons.inotify` beacon. This
beacon uses inotify to watch configured files or directories on the minion for
changes, creation, deletion etc.
This allows for the changes to be sent up to the master where the
reactor can respond to changes.
Configuring The Beacons
=======================
The beacon system, like many others in Salt, can be configured via the
minion pillar, grains, or local config file:
.. code-block:: yaml
beacons:
inotify:
/etc/httpd/conf.d: {}
/opt: {}
Writing Beacon Plugins
======================
Beacon plugins use the standard Salt loader system, meaning that many of the
constructs from other plugin systems hold true, such as the ``__virtual__``
function.
The important function in the Beacon Plugin is the ``beacon`` function. When
the beacon is configured to run, this function will be executed repeatedly
by the minion. The ``beacon`` function therefore cannot block and should be
as lightweight as possible. The ``beacon`` also must return a list of dicts,
each dict in the list will be translated into an event on the master.
Please see the :py:mod:`~salt.beacons.inotify` beacon as an example.

View File

@ -28,7 +28,7 @@ minion exe>` should match the contents of the corresponding md5 file.
* Salt-Minion-2014.7.0-1-win32-Setup.exe | md5
* Salt-Minion-2014.7.0-AMD64-Setup.exe | md5
.. note::
The 2014.7.0 exe's have been removed because of a regression. Please use the 2014.7.1 release instead.
The 2014.7.0 installers have been removed because of a regression. Please use the 2014.7.1 release instead.
* 2014.1.13
* `Salt-Minion-2014.1.13-x86-Setup.exe <http://docs.saltstack.com/downloads/Salt-Minion-2014.1.13-x86-Setup.exe>`__ | `md5 <http://docs.saltstack.com/downloads/Salt-Minion-2014.1.13-x86-Setup.exe.md5>`__
@ -380,4 +380,4 @@ this, salt-minion can't report some installed packages.
.. _pywin32: http://sourceforge.net/projects/pywin32/files/pywin32
.. _Cython: http://www.lfd.uci.edu/~gohlke/pythonlibs/#cython
.. _jinja2: http://www.lfd.uci.edu/~gohlke/pythonlibs/#jinja2
.. _msgpack: http://www.lfd.uci.edu/~gohlke/pythonlibs/#msgpack
.. _msgpack: http://www.lfd.uci.edu/~gohlke/pythonlibs/#msgpack

View File

@ -2,6 +2,18 @@
Salt 2014.12.0 Release Notes - Codename Lithium
===============================================
Beacons
=======
The beacon system allows the minion to hook into system processes and
continually translate external events into the salt event bus. The
primary example of this is the :py:mod:`~salt.beacons.inotify` beacon. This
beacon uses inotify to watch configured files or directories on the minion for
changes, creation, deletion etc.
This allows for the changes to be sent up to the master where the
reactor can respond to changes.
Salt SSH
========

View File

@ -275,6 +275,35 @@ as required. However, each must individually be turned on.
The return from these will be found in the return dict as ``status``,
``headers`` and ``text``, respectively.
Writing Return Data to Files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to write either the return data or headers to files, as soon as
the response is received from the server, by specifying file locations via the
``text_out`` or ``headers_out`` arguments. ``text`` and ``headers`` do not need
to be returned to the user in order to do this.
.. code-block:: python
salt.utils.http.query(
'http://example.com',
text=False,
headers=False,
text_out='/path/to/url_download.txt',
headers_out='/path/to/headers_download.txt',
)
SSL Verification
~~~~~~~~~~~~~~~~
By default, this function will verify SSL certificates. However, for testing or
debugging purposes, SSL verification can be turned off.
.. code-block:: python
salt.utils.http.query(
'https://example.com',
ssl_verify=False,
)
CA Bundles
~~~~~~~~~~
The ``requests`` library has its own method of detecting which CA (certificate
@ -292,19 +321,58 @@ using the ``ca_bundle`` variable.
ca_bundle='/path/to/ca_bundle.pem',
)
SSL Verification
~~~~~~~~~~~~~~~~
Updating CA Bundles
+++++++++++++++++++
The ``update_ca_bundle()`` function can be used to update the bundle file at a
specified location. If the target location is not specified, then it will
attempt to auto-detect the location of the bundle file. If the URL to download
the bundle from does not exist, a bundle will be downloaded from the cURL
website.
By default, this function will verify SSL certificates. However, for testing or
debugging purposes, SSL verification can be turned off.
CAUTION: The ``target`` and the ``source`` should always be specified! Failure
to specify the ``target`` may result in the file being written to the wrong
location on the local system. Failure to specify the ``source`` may cause the
upstream URL to receive excess unnecessary traffic, and may cause a file to be
downloaded which is hazardous or does not meet the needs of the user.
.. code-block:: python
salt.utils.http.query(
'https://example.com',
ssl_verify=False,
salt.utils.http.update_ca_bundle(
target='/path/to/ca-bundle.crt',
source='https://example.com/path/to/ca-bundle.crt',
opts=__opts__,
)
The ``opts`` parameter should also always be specified. If it is, then the
``target`` and the ``source`` may be specified in the relevant configuration
file (master or minion) as ``ca_bundle`` and ``ca_bundle_url``, respectively.
.. code-block:: yaml
ca_bundle: /path/to/ca-bundle.crt
ca_bundle_url: https://example.com/path/to/ca-bundle.crt
If Salt is unable to auto-detect the location of the CA bundle, it will raise
an error.
The ``update_ca_bundle()`` function can also be passed a string or a list of
strings which represent files on the local system, which should be appended (in
the specified order) to the end of the CA bundle file. This is useful in
environments where private certs need to be made available, and are not
otherwise reasonable to add to the bundle file.
.. code-block:: python
salt.utils.http.update_ca_bundle(
opts=__opts__,
merge_files=[
'/etc/ssl/private_cert_1.pem',
'/etc/ssl/private_cert_2.pem',
'/etc/ssl/private_cert_3.pem',
]
)
Test Mode
~~~~~~~~~

40
salt/beacons/__init__.py Normal file
View File

@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
    '''
    This class is used to evaluate and execute on the beacon system
    '''
    def __init__(self, opts):
        self.opts = opts
        self.beacons = salt.loader.beacons(opts)

    def process(self, config):
        '''
        Process the configured beacons

        The config must be a dict and looks like this in yaml

        code_block:: yaml

            beacons:
                inotify:
                    - /etc/fstab
                    - /var/cache/foo/*

        Returns a list of ``{'tag': ..., 'data': ...}`` dicts, one per event
        emitted by the configured beacon modules.
        '''
        ret = []
        for mod in config:
            fun_str = '{0}.beacon'.format(mod)
            if fun_str not in self.beacons:
                # No beacon module loaded under this name; skip it
                continue
            # Base event tag for this beacon module
            b_tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
            raw = self.beacons[fun_str](config[mod])
            for data in raw:
                # Build each event's tag from the base tag so that one
                # event's 'tag' suffix does not leak into the next event
                # (the previous += on a shared variable accumulated suffixes)
                tag = b_tag
                if 'tag' in data:
                    tag += data.pop('tag')
                ret.append({'tag': tag, 'data': data})
        return ret

106
salt/beacons/inotify.py Normal file
View File

@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
'''
Watch files and translate the changes into salt events
'''
# Import python libs
import collections
# Import third party libs
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = 'inotify'
def __virtual__():
    '''
    Only load this beacon when the pyinotify library imported successfully.
    '''
    return __virtualname__ if HAS_PYINOTIFY else False
def _enqueue(revent):
    '''
    Enqueue the event

    Used as the processing callback for the pyinotify Notifier: raw events
    are buffered in ``__context__['inotify.que']`` until the beacon function
    drains them.
    '''
    __context__['inotify.que'].append(revent)
def _get_notifier():
    '''
    Check the context for the notifier and construct it if not present

    The Notifier (and its event queue) is cached in ``__context__`` so that
    repeated beacon invocations reuse the same inotify watches.
    '''
    if 'inotify.notifier' in __context__:
        return __context__['inotify.notifier']
    # First call: create the event queue and a Notifier whose callback
    # (_enqueue) buffers events into that queue
    __context__['inotify.que'] = collections.deque()
    wm = pyinotify.WatchManager()
    __context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue)
    return __context__['inotify.notifier']
def beacon(config):
    '''
    Watch the configured files

    Called repeatedly by the minion's beacon loop. ``config`` maps each
    watched path to either an empty value or a dict of per-path options
    (``mask``, ``rec``, ``auto_add``).

    Returns a list of event dicts, one per inotify event seen since the
    last call, each with ``tag`` (watched path), ``path`` (full pathname)
    and ``change`` (inotify mask name).
    '''
    ret = []
    notifier = _get_notifier()
    # NOTE(review): _watch_manager is a private pyinotify attribute --
    # confirm it is stable across the supported pyinotify versions.
    wm = notifier._watch_manager
    # Read in existing events
    # remove watcher files that are not in the config
    # update all existing files with watcher settings
    # return original data
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        # Drain the queue that the _enqueue callback filled
        while __context__['inotify.que']:
            sub = {}
            event = __context__['inotify.que'].popleft()
            sub['tag'] = event.path
            sub['path'] = event.pathname
            sub['change'] = event.maskname
            ret.append(sub)
    # Collect the set of paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)
    need = set(config)
    for path in current.difference(need):
        # These need to be removed
        for wd in wm.watches:
            if path == wm.watches[wd].path:
                wm.rm_watch(wd)
    for path in config:
        if isinstance(config[path], dict):
            mask = config[path].get('mask', DEFAULT_MASK)
            rec = config[path].get('rec', False)
            auto_add = config[path].get('auto_add', False)
        else:
            # Non-dict config entry: watch with default settings
            mask = DEFAULT_MASK
            rec = False
            auto_add = False
        # TODO: make the config handle more options
        if path not in current:
            wm.add_watch(
                path,
                mask,
                rec=rec,
                auto_add=auto_add)
        else:
            # Path already watched: refresh the watch if settings changed
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(
                            wd,
                            mask=mask,
                            rec=rec,
                            auto_add=auto_add)
    return ret

71
salt/beacons/journald.py Normal file
View File

@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
'''
A simple beacon to watch journald for specific entries
'''
# Import salt libs
import salt.utils
import salt.utils.cloud
import salt.ext.six
# Import third party libs
try:
import systemd.journal
HAS_SYSTEMD = True
except ImportError:
HAS_SYSTEMD = False
__virtualname__ = 'journald'
def __virtual__():
    '''
    Only load this beacon when the systemd journal bindings are available.
    '''
    return __virtualname__ if HAS_SYSTEMD else False
def _get_journal():
    '''
    Return the active running journal object

    The Reader is cached in ``__context__`` so repeated beacon invocations
    reuse a single cursor; on first construction the cursor is advanced to
    the tail so only entries newer than beacon startup are reported.
    '''
    if 'systemd.journald' in __context__:
        return __context__['systemd.journald']
    __context__['systemd.journald'] = systemd.journal.Reader()
    # get to the end of the journal
    __context__['systemd.journald'].seek_tail()
    __context__['systemd.journald'].get_previous()
    return __context__['systemd.journald']
def beacon(config):
    '''
    The journald beacon allows for the systemd journal to be parsed and linked
    objects to be turned into events.

    This beacon's config will return all sshd journal entries

    .. code-block:: yaml

        beacons:
          journald:
            sshd:
              SYSLOG_IDENTIFIER: sshd
              PRIORITY: 6

    An entry is emitted only when every configured field under a name
    matches the journal record exactly.
    '''
    ret = []
    journal = _get_journal()
    while True:
        cur = journal.get_next()
        if not cur:
            # No more new journal entries since the last call
            break
        for name in config:
            # Count how many of this rule's fields match the entry
            n_flag = 0
            for key in config[name]:
                if isinstance(key, salt.ext.six.string_types):
                    key = salt.utils.sdecode(key)
                if key in cur:
                    if config[name][key] == cur[key]:
                        n_flag += 1
            if n_flag == len(config[name]):
                # Match!
                ret.append(salt.utils.cloud.simple_types_filter(cur))
    return ret

67
salt/beacons/wtmp.py Normal file
View File

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
'''
Beacon to fire events at login of users as registered in the wtmp file
'''
# Import python libs
import os
import struct
__virtualname__ = 'wtmp'
WTMP = '/var/log/wtmp'
FMT = '<hI32s4s32s256siili4l20s'
FIELDS = [
'type',
'PID',
'line',
'inittab',
'user',
'hostname',
'exit_status',
'session',
'time',
'addr'
]
SIZE = struct.calcsize(FMT)
LOC_KEY = 'wtmp.loc'
def __virtual__():
    '''
    Only load on hosts where the wtmp log file exists.
    '''
    return __virtualname__ if os.path.isfile(WTMP) else False
def _get_loc():
    '''
    return the active file location

    Returns None implicitly when no location has been recorded yet.

    NOTE(review): beacon() reads ``__context__.get(LOC_KEY, 0)`` directly and
    never calls this helper -- confirm whether it is still needed.
    '''
    if LOC_KEY in __context__:
        return __context__[LOC_KEY]
def beacon(config):
    '''
    Read the last wtmp file and return information on the logins

    On the first call the file offset is initialized to the end of the wtmp
    file (so only logins after beacon startup are reported) and an empty
    list is returned.  Subsequent calls read any newly appended fixed-size
    records and return one dict per record, keyed by FIELDS.
    '''
    ret = []
    with open(WTMP, 'rb') as fp_:
        loc = __context__.get(LOC_KEY, 0)
        if loc == 0:
            # First run: remember the current end of file, report nothing
            fp_.seek(0, 2)
            __context__[LOC_KEY] = fp_.tell()
            return ret
        else:
            fp_.seek(loc)
        while True:
            raw = fp_.read(SIZE)
            if len(raw) != SIZE:
                # Partial or no record remaining; stop here
                return ret
            __context__[LOC_KEY] = fp_.tell()
            pack = struct.unpack(FMT, raw)
            event = {}
            for ind in range(len(FIELDS)):
                event[FIELDS[ind]] = pack[ind]
                if isinstance(event[FIELDS[ind]], str):
                    # Strip the fixed-width NUL padding from string fields.
                    # NOTE(review): under Python 3 unpacked fields are bytes,
                    # not str, so this strip would be skipped -- confirm the
                    # target Python version.
                    event[FIELDS[ind]] = event[FIELDS[ind]].strip('\x00')
            ret.append(event)
    return ret

View File

@ -43,8 +43,6 @@ included:
.. code-block:: yaml
api_host_suffix: .api.myhostname.com
:depends: requests
'''
from __future__ import absolute_import
# pylint: disable=E0102
@ -55,7 +53,6 @@ from __future__ import absolute_import
import os
import copy
import salt.ext.six.moves.http_client # pylint: disable=E0611
import requests
import json
import logging
import base64
@ -67,6 +64,7 @@ import yaml
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
# Import salt.cloud libs
import salt.utils.http
import salt.utils.cloud
import salt.config as config
from salt.utils import namespaced_function
@ -884,8 +882,21 @@ def avail_images(call=None):
if not img_url.startswith('http://') and not img_url.startswith('https://'):
img_url = '{0}://{1}'.format(_get_proto(), img_url)
result = requests.get(img_url)
content = result.text
verify_ssl = config.get_cloud_config_value(
'verify_ssl', get_configured_provider(), __opts__,
search_global=False, default=True
)
result = salt.utils.http.query(
img_url,
decode=False,
text=True,
status=True,
headers=True,
verify=verify_ssl,
opts=__opts__,
)
content = result['text']
ret = {}
for image in yaml.safe_load(content):
@ -1046,8 +1057,12 @@ def get_location_path(location=DEFAULT_LOCATION, api_host_suffix=JOYENT_API_HOST
return '{0}://{1}{2}'.format(_get_proto(), location, api_host_suffix)
def query(action=None, command=None, args=None, method='GET', location=None,
data=None):
def query(action=None,
command=None,
args=None,
method='GET',
location=None,
data=None):
'''
Make a web call to Joyent
'''
@ -1088,7 +1103,8 @@ def query(action=None, command=None, args=None, method='GET', location=None,
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Api-Version': JOYENT_API_VERSION,
'Authorization': 'Basic {0}'.format(auth_key)}
'Authorization': 'Basic {0}'.format(auth_key)
}
if not isinstance(args, dict):
args = {}
@ -1098,32 +1114,26 @@ def query(action=None, command=None, args=None, method='GET', location=None,
data = json.dumps({})
return_content = None
try:
result = requests.request(
method,
path,
params=args,
headers=headers,
data=data,
verify=verify_ssl,
result = salt.utils.http.query(
path,
method,
params=args,
header_dict=headers,
data=data,
decode=False,
text=True,
status=True,
headers=True,
verify=verify_ssl,
opts=__opts__,
)
log.debug(
'Joyent Response Status Code: {0}'.format(
result['status']
)
log.debug(
'Joyent Response Status Code: {0}'.format(
result.status_code
)
)
if 'content-length' in result.headers:
content = result.text
return_content = yaml.safe_load(content)
)
if 'Content-Length' in result['headers']:
content = result['text']
return_content = yaml.safe_load(content)
return [result.status_code, return_content]
except requests.exceptions.HTTPError as exc:
log.error(
'Joyent Response Status Code: {0}'.format(
str(exc)
)
)
log.error(exc)
return [0, {'error': exc}]
return [result['status'], return_content]

View File

@ -34,7 +34,6 @@ Example ``/etc/salt/cloud.providers`` or
'''
# pylint: disable=E0102
from __future__ import absolute_import
from __future__ import absolute_import
# Import python libs
@ -287,9 +286,9 @@ def list_nodes_full(conn=None, call=None):
ret[role].update(role_instances[role])
ret[role]['id'] = role
ret[role]['hosted_service'] = service
if role_instances[role]['power_state'] == "Started":
if role_instances[role]['power_state'] == 'Started':
ret[role]['state'] = 'running'
elif role_instances[role]['power_state'] == "Stopped":
elif role_instances[role]['power_state'] == 'Stopped':
ret[role]['state'] = 'stopped'
else:
ret[role]['state'] = 'pending'
@ -555,7 +554,7 @@ def create(vm_):
try:
conn.create_hosted_service(**service_kwargs)
except WindowsAzureConflictError:
log.debug("Cloud service already exists")
log.debug('Cloud service already exists')
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in str(exc):
@ -581,13 +580,15 @@ def create(vm_):
)
return False
try:
conn.create_virtual_machine_deployment(**vm_kwargs)
result = conn.create_virtual_machine_deployment(**vm_kwargs)
_wait_for_async(conn, result.request_id)
except WindowsAzureConflictError:
log.debug("Conflict error. The deployment may already exist, trying add_role")
log.debug('Conflict error. The deployment may already exist, trying add_role')
# Deleting two useless keywords
del vm_kwargs['deployment_slot']
del vm_kwargs['label']
conn.add_role(**vm_kwargs)
result = conn.add_role(**vm_kwargs)
_wait_for_async(conn, result.request_id)
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in str(exc):
@ -622,8 +623,8 @@ def create(vm_):
Wait for the IP address to become available
'''
try:
conn.get_role(service_name, service_name, vm_["name"])
data = show_instance(vm_["name"], call='action')
conn.get_role(service_name, service_name, vm_['name'])
data = show_instance(vm_['name'], call='action')
if 'url' in data and data['url'] != str(''):
return data['url']
except WindowsAzureMissingResourceError:
@ -656,7 +657,7 @@ def create(vm_):
deploy_kwargs = {
'opts': __opts__,
'host': hostname,
'port': ssh_port,
'port': int(ssh_port),
'username': ssh_username,
'password': ssh_password,
'script': deploy_script,
@ -905,11 +906,101 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
return ret
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
    '''
    Create and attach volumes to created node

    name
        Name of the created node (used in the log message only)
    kwargs
        Must contain ``volumes`` (a list of volume definitions, or a YAML
        string of one) plus ``media_link``, ``role_name``, ``service_name``
        and ``deployment_name`` for the Azure API call
    call
        Must be ``'action'``; raises SaltCloudSystemExit otherwise
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )
    if isinstance(kwargs['volumes'], str):
        volumes = yaml.safe_load(kwargs['volumes'])
    else:
        volumes = kwargs['volumes']
    # From the Azure .NET SDK doc
    #
    # The Create Data Disk operation adds a data disk to a virtual
    # machine. There are three ways to create the data disk using the
    # Add Data Disk operation.
    # Option 1 - Attach an empty data disk to
    # the role by specifying the disk label and location of the disk
    # image. Do not include the DiskName and SourceMediaLink elements in
    # the request body. Include the MediaLink element and reference a
    # blob that is in the same geographical region as the role. You can
    # also omit the MediaLink element. In this usage, Azure will create
    # the data disk in the storage account configured as default for the
    # role.
    # Option 2 - Attach an existing data disk that is in the image
    # repository. Do not include the DiskName and SourceMediaLink
    # elements in the request body. Specify the data disk to use by
    # including the DiskName element. Note: If included in the
    # response body, the MediaLink and LogicalDiskSizeInGB elements are
    # ignored.
    # Option 3 - Specify the location of a blob in your storage
    # account that contain a disk image to use. Include the
    # SourceMediaLink element. Note: If the MediaLink element
    # is included, it is ignored. (see
    # http://msdn.microsoft.com/en-us/library/windowsazure/jj157199.aspx
    # for more information)
    #
    # Here only option 1 is implemented
    conn = get_conn()
    ret = []
    for volume in volumes:
        if "disk_name" in volume:
            log.error("You cannot specify a disk_name. Only new volumes are allowed")
            return False
        # Use the size keyword to set a size, but you can use the
        # azure name too. If neither is set, the disk has size 100GB
        volume.setdefault("logical_disk_size_in_gb", volume.get("size", 100))
        volume.setdefault("host_caching", "ReadOnly")
        volume.setdefault("lun", 0)
        # The media link is vm_name-disk-[0-15].vhd
        volume.setdefault("media_link",
                          kwargs["media_link"][:-4] + "-disk-{0}.vhd".format(volume["lun"]))
        volume.setdefault("disk_label",
                          kwargs["role_name"] + "-disk-{0}".format(volume["lun"]))
        volume_dict = {
            'volume_name': volume["lun"],
            'disk_label': volume["disk_label"]
        }
        # Preparing the volume dict to be passed with **
        kwargs_add_data_disk = ["lun", "host_caching", "media_link",
                                "disk_label", "disk_name",
                                "logical_disk_size_in_gb",
                                "source_media_link"]
        for key in set(volume.keys()) - set(kwargs_add_data_disk):
            del volume[key]
        result = conn.add_data_disk(kwargs["service_name"],
                                    kwargs["deployment_name"],
                                    kwargs["role_name"],
                                    **volume)
        # Block until Azure reports the async add-disk operation finished
        _wait_for_async(conn, result.request_id)
        msg = (
            '{0} attached to {1} (aka {2})'.format(
                volume_dict['volume_name'],
                kwargs['role_name'],
                name)
        )
        log.info(msg)
        ret.append(msg)
    return ret
# Helper function for azure tests
def _wait_for_async(conn, request_id):
'''
Helper function for azure tests
'''
count = 0
log.debug('Waiting for asynchronous operation to complete')
result = conn.get_operation_status(request_id)
while result.status == 'InProgress':
count = count + 1
@ -919,7 +1010,9 @@ def _wait_for_async(conn, request_id):
result = conn.get_operation_status(request_id)
if result.status != 'Succeeded':
raise ValueError('Asynchronous operation did not succeed.')
raise WindowsAzureError('Operation failed. {message} ({code})'
.format(message=result.error.message,
code=result.error.code))
def destroy(name, conn=None, call=None, kwargs=None):

View File

@ -281,6 +281,7 @@ VALID_OPTS = {
'con_cache': bool,
'rotate_aes_key': bool,
'cache_sreqs': bool,
'cmd_safe': bool,
}
# default configurations
@ -435,6 +436,7 @@ DEFAULT_MINION_OPTS = {
'password': None,
'zmq_filtering': False,
'cache_sreqs': True,
'cmd_safe': True,
}
DEFAULT_MASTER_OPTS = {
@ -2043,7 +2045,7 @@ def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None):
# out or not present.
if opts.get('nodegroups') is None:
opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
if opts.get('transport') == 'raet' and not opts.get('zmq_behavior') and 'aes' in opts:
if opts.get('transport') == 'raet' and 'aes' in opts:
opts.pop('aes')
return opts

View File

@ -1509,6 +1509,72 @@ class SaltRaetPublisher(ioflo.base.deeding.Deed):
)
class SaltRaetSetupBeacon(ioflo.base.deeding.Deed):
    '''
    Create the Beacon subsystem
    '''
    Ioinits = {'opts': '.salt.opts',
               'beacon': '.salt.beacon'}

    def action(self):
        '''
        Construct the Beacon object and share it at '.salt.beacon' for the
        SaltRaetBeacon deed to use
        '''
        self.beacon.value = salt.beacons.Beacon(self.opts.value)
class SaltRaetBeacon(ioflo.base.deeding.Deed):
    '''
    Run the beacons
    '''
    Ioinits = {'opts': '.salt.opts',
               'modules': '.salt.loader.modules',
               'master_events': '.salt.var.master_events',
               'beacon': '.salt.beacon'}

    def action(self):
        '''
        Evaluate the configured beacons (config is re-merged on every pass so
        pillar/grain changes take effect) and append any resulting events to
        the shared master_events queue
        '''
        if 'config.merge' in self.modules.value:
            b_conf = self.modules.value['config.merge']('beacons')
            if b_conf:
                try:
                    self.master_events.value.extend(self.beacon.value.process(b_conf))
                except Exception:
                    # Never let a broken beacon module kill the minion loop
                    log.error('Error in the beacon system: ', exc_info=True)
        return []
class SaltRaetMasterEvents(ioflo.base.deeding.Deed):
    '''
    Take the events off the master event que and send them to the master to
    be fired
    '''
    Ioinits = {'opts': '.salt.opts',
               'road_stack': '.salt.road.manor.stack',
               'master_events': '.salt.var.master_events'}

    def postinitio(self):
        # Shared queue that SaltRaetBeacon (and others) append events to
        self.master_events.value = deque()

    def action(self):
        if not self.master_events.value:
            return
        events = []
        # NOTE(review): this loop leaves master_uid set to the *last* remote,
        # while the 'dst' below uses the *first* remote's name -- confirm the
        # intended target when multiple masters are connected.
        for master in self.road_stack.value.remotes:
            master_uid = master
        # Drain the queue into a single batched '_minion_event' payload
        while self.master_events.value:
            events.append(self.master_events.value.popleft())
        route = {'src': (self.road_stack.value.local.name, None, None),
                 'dst': (next(six.itervalues(self.road_stack.value.remotes)).name, None, 'remote_cmd')}
        load = {'id': self.opts.value['id'],
                'events': events,
                'cmd': '_minion_event'}
        self.road_stack.value.transmit({'route': route, 'load': load},
                                       uid=master_uid)
class SaltRaetNixJobber(ioflo.base.deeding.Deed):
'''
Execute a function call job on a minion on a *nix based system

View File

@ -14,6 +14,10 @@ framer minionudpstack be active first setup
# Load the minion mods
frame loadmodules
do salt load modules at enter
go setupbeacon
frame setupbeacon
do salt raet setup beacon at enter
go start
# OK, let's start the minion up
@ -28,6 +32,7 @@ framer minionudpstack be active first setup
bid start functionmanager
# Start the outbound framer
bid start outbound
# Start the scheduler
bid start scheduler
# Cleanup on exit
@ -102,6 +107,14 @@ framer bootstrap be inactive first setup
print Loading
enter
do salt load modules
go latestart
frame latestart
# Start late frames that need the pillar/modules to be available
# Start the master events loop
bid start masterevents
# Start Beacon
bid start beacon
go router
frame router
@ -124,7 +137,15 @@ framer functionmanager be inactive first checkexec
framer manager be inactive first start at 10.0
frame start
do salt raet road stack manager per inode ".salt.road.manor"
do salt raet road stack manager per inode ".salt.road.manor"
framer beacon be inactive first start
frame start
do salt raet beacon
framer masterevents be inactive first start
frame start
do salt raet master events
# Framer for handling outbound traffic
framer outbound be inactive first start

View File

@ -242,17 +242,16 @@ def _verify_dulwich(quiet=False):
log.error(_RECOMMEND_PYGIT2)
return False
dulwich_ver = distutils.version.LooseVersion(dulwich.__version__)
dulwich_minver_str = '0.9.4'
dulwich_minver = distutils.version.LooseVersion(dulwich_minver_str)
dulwich_version = dulwich.__version__
dulwich_min_version = (0, 9, 4)
errors = []
if dulwich_ver < dulwich_minver:
if dulwich_version < dulwich_min_version:
errors.append(
'Git fileserver backend is enabled in the master config file, but '
'the installed version of Dulwich is earlier than {0}. Version {1} '
'detected.'.format(dulwich_minver_str, dulwich.__version__)
'detected.'.format(dulwich_min_version, dulwich_version)
)
if HAS_PYGIT2 and not quiet:

View File

@ -284,6 +284,19 @@ def states(opts, functions, whitelist=None):
return load.gen_functions(pack, whitelist=whitelist)
def beacons(opts, context=None):
    '''
    Load the beacon modules

    opts
        The minion configuration dict, passed through to the loader
    context
        Optional dict packed into each beacon module as ``__context__``;
        a fresh empty dict is used when not supplied
    '''
    load = _create_loader(opts, 'beacons', 'beacons')
    if context is None:
        context = {}
    # Expose the shared context to the loaded modules as __context__
    pack = {'name': '__context__',
            'value': context}
    functions = load.gen_functions(pack)
    return functions
def search(opts, returners, whitelist=None):
'''
Returns the search modules

View File

@ -62,6 +62,7 @@ import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.beacons
import salt.utils
import salt.utils.jid
import salt.pillar
@ -295,6 +296,7 @@ class SMinion(object):
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
self.beacons = salt.beacons.Beacon(opts)
def _init_context_and_poller(self):
self.context = zmq.Context()
@ -408,6 +410,17 @@ class MinionBase(object):
)
return loop_interval
def process_beacons(self, functions):
    '''
    Evaluate all of the configured beacons, grab the config again in case
    the pillar or grains changed

    functions
        The minion's loaded execution modules; ``config.merge`` is used to
        re-read the ``beacons`` config on every call

    Returns the list of event dicts produced by the Beacon object, or an
    empty list when no beacons are configured or ``config.merge`` is
    unavailable.
    '''
    if 'config.merge' in functions:
        b_conf = functions['config.merge']('beacons')
        if b_conf:
            return self.beacons.process(b_conf)
    return []
class MasterMinion(object):
'''
@ -865,14 +878,13 @@ class Minion(MinionBase):
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load)
except Exception:
log.info("fire_master failed: {0}".format(traceback.format_exc()))
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
@ -1750,6 +1762,13 @@ class Minion(MinionBase):
'An exception occurred while polling the minion',
exc_info=True
)
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exec_info=True)
if beacons:
self._fire_master(events=beacons)
def tune_in_no_block(self):
'''

View File

@ -446,6 +446,11 @@ def install(name=None,
.. versionadded:: Lithium
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
.. versionadded:: Lithium
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
@ -530,6 +535,8 @@ def install(name=None,
cmd = cmd + ['-o', 'DPkg::Options::=--force-confdef']
if 'install_recommends' in kwargs and not kwargs['install_recommends']:
cmd.append('--no-install-recommends')
if 'only_upgrade' in kwargs and kwargs['only_upgrade']:
cmd.append('--only-upgrade')
if skip_verify:
cmd.append('--allow-unauthenticated')
if fromrepo:

View File

@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
'''
Connection module for Amazon DynamoDB
.. versionadded:: 2015.2
:configuration: This module accepts explicit DynamoDB credentials but can also
    utilize IAM roles assigned to the instance through Instance Profiles.
Dynamic credentials are then automatically obtained from AWS API and no
further configuration is necessary. More Information available at::
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration::
region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
# Import Python libs
import logging
import time
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
# Import third party libs
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.fields import AllIndex, GlobalAllIndex
from boto.dynamodb2.table import Table
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from salt._compat import string_types
def __virtual__():
    '''
    Load this module only when the boto libraries imported successfully.
    '''
    return HAS_BOTO
def _create_connection(region=None, key=None, keyid=None, profile=None):
    '''
    Get a boto connection to DynamoDB.

    region / key / keyid
        Explicit AWS region/credentials. Any that are unset fall back to
        the ``dynamodb.region`` / ``dynamodb.key`` / ``dynamodb.keyid``
        minion config options; the region finally defaults to us-east-1.
    profile
        Either a dict of credentials or the name of a config option that
        holds such a dict. When given, it overrides key/keyid/region.

    Returns the connection object, or None when no credentials are found.
    '''
    if profile:
        if isinstance(profile, string_types):
            _profile = __salt__['config.option'](profile)
        elif isinstance(profile, dict):
            _profile = profile
        else:
            # The original code would hit a NameError here; fail clearly.
            raise TypeError(
                'profile must be a dict or the name of a config option'
            )
        key = _profile.get('key', None)
        keyid = _profile.get('keyid', None)
        region = _profile.get('region', None)

    # Config fallbacks -- each option is looked up at most once.
    if not region:
        region = __salt__['config.option']('dynamodb.region') or 'us-east-1'
    if not key:
        key = __salt__['config.option']('dynamodb.key') or key
    if not keyid:
        keyid = __salt__['config.option']('dynamodb.keyid') or keyid

    try:
        conn = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=keyid,
            aws_secret_access_key=key
        )
    except boto.exception.NoAuthHandlerFound:
        logger.error('No authentication credentials found when attempting to'
                     ' make boto dynamodb connection.')
        return None
    return conn
def create_table(table_name, region=None, key=None, keyid=None, profile=None,
                 read_capacity_units=None, write_capacity_units=None,
                 hash_key=None, hash_key_data_type=None, range_key=None,
                 range_key_data_type=None, local_indexes=None,
                 global_indexes=None):
    '''
    Creates a DynamoDB table and waits for it to become available.

    Returns True once the new table is visible via ``list_tables``, or
    False if it has not appeared after roughly 30 seconds of polling.

    CLI example::

        salt myminion boto_dynamodb.create_table table_name \\
        region=us-east-1 \\
        hash_key=id \\
        hash_key_data_type=N \\
        range_key=created_at \\
        range_key_data_type=N \\
        read_capacity_units=1 \\
        write_capacity_units=1
    '''
    schema = []
    primary_index_fields = []
    primary_index_name = ''
    if hash_key:
        hash_key_obj = HashKey(hash_key, data_type=hash_key_data_type)
        schema.append(hash_key_obj)
        primary_index_fields.append(hash_key_obj)
        primary_index_name += hash_key
    if range_key:
        range_key_obj = RangeKey(range_key, data_type=range_key_data_type)
        schema.append(range_key_obj)
        primary_index_fields.append(range_key_obj)
        primary_index_name += '_'
        primary_index_name += range_key
    primary_index_name += '_index'
    throughput = {
        'read': read_capacity_units,
        'write': write_capacity_units
    }
    local_table_indexes = []
    # Add the table's own key as a local index.
    local_table_indexes.append(
        AllIndex(primary_index_name, parts=primary_index_fields)
    )
    if local_indexes:
        for index in local_indexes:
            local_table_indexes.append(_extract_index(index))
    global_table_indexes = []
    if global_indexes:
        for index in global_indexes:
            global_table_indexes.append(
                _extract_index(index, global_index=True)
            )

    conn = _create_connection(region, key, keyid, profile)
    Table.create(
        table_name,
        schema=schema,
        throughput=throughput,
        indexes=local_table_indexes,
        global_indexes=global_table_indexes,
        connection=conn
    )

    # Table creation can take several seconds to propagate; poll once per
    # second for up to max_attempts seconds before giving up.
    max_attempts = 30
    for _ in range(max_attempts):
        if exists(
            table_name,
            region,
            key,
            keyid,
            profile
        ):
            return True
        time.sleep(1)  # sleep for one second and try again
    return False
def exists(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check whether a DynamoDB table with the given name exists.

    CLI example::

        salt myminion boto_dynamodb.exists table_name region=us-east-1
    '''
    conn = _create_connection(region, key, keyid, profile)
    table_listing = conn.list_tables()
    if not table_listing:
        # Preserve the falsy listing itself, exactly as before.
        return table_listing
    return table_name in table_listing['TableNames']
def delete(table_name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a DynamoDB table and wait for the deletion to propagate.

    Returns True once the table is gone, or False if it is still listed
    after roughly 30 seconds of polling.

    CLI example::

        salt myminion boto_dynamodb.delete table_name region=us-east-1
    '''
    conn = _create_connection(region, key, keyid, profile)
    Table(table_name, connection=conn).delete()

    # Deletion propagates asynchronously; poll once per second.
    max_attempts = 30
    attempts = 0
    while attempts < max_attempts:
        if not exists(table_name, region, key, keyid, profile):
            return True
        time.sleep(1)  # wait one second before polling again
        attempts += 1
    return False
def _extract_index(index_data, global_index=False):
    '''
    Instantiate and return an AllIndex (or GlobalAllIndex when
    ``global_index`` is True) object from a valid index configuration.

    index_data
        Mapping of the index name to a list of single-key dicts describing
        the index fields (hash_key, range_key, data types, capacity units).
    global_index
        When True, build a GlobalAllIndex and attach provisioned
        throughput when both capacity units are supplied.

    Returns None when the configuration lacks a name or any key fields.
    '''
    parsed_data = {}
    keys = []

    # Flatten the nested {name: [{field: value}, ...]} structure into a
    # flat dict, keeping only the recognized fields.
    recognized = ('hash_key', 'hash_key_data_type', 'range_key',
                  'range_key_data_type', 'name', 'read_capacity_units',
                  'write_capacity_units')
    for value in index_data.values():
        for item in value:
            for field, data in item.items():
                if field in recognized:
                    parsed_data[field] = data

    # Use .get() throughout: the original indexed parsed_data directly and
    # raised KeyError whenever an optional field was omitted.
    if parsed_data.get('hash_key'):
        keys.append(
            HashKey(
                parsed_data['hash_key'],
                data_type=parsed_data.get('hash_key_data_type')
            )
        )
    if parsed_data.get('range_key'):
        keys.append(
            RangeKey(
                parsed_data['range_key'],
                data_type=parsed_data.get('range_key_data_type')
            )
        )
    if (
            global_index and
            parsed_data.get('read_capacity_units') and
            parsed_data.get('write_capacity_units')):
        parsed_data['throughput'] = {
            'read': parsed_data['read_capacity_units'],
            'write': parsed_data['write_capacity_units']
        }
    if parsed_data.get('name') and keys:
        if global_index:
            # NOTE(review): boto's GlobalAllIndex appears to apply its own
            # default throughput when passed None -- confirm against the
            # installed boto version.
            return GlobalAllIndex(
                parsed_data['name'],
                parts=keys,
                throughput=parsed_data.get('throughput')
            )
        return AllIndex(
            parsed_data['name'],
            parts=keys
        )

View File

@ -163,7 +163,7 @@ def _run(cmd,
output_loglevel='debug',
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=False,
env=None,
clean_env=False,
rstrip=True,
@ -313,6 +313,9 @@ def _run(cmd,
run_env = os.environ.copy()
run_env.update(env)
if python_shell is None:
python_shell = False
kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env,
@ -471,7 +474,7 @@ def _run_quiet(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=False,
env=None,
template=None,
umask=None,
@ -502,7 +505,7 @@ def _run_all_quiet(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=False,
env=None,
template=None,
umask=None,
@ -533,7 +536,7 @@ def run(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
clean_env=False,
template=None,
@ -552,6 +555,19 @@ def run(cmd,
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
*************************************************************************
WARNING: This function does not process commands through a shell
unless the python_shell flag is set to True. This means that any
shell-specific functionality such as 'echo' or the use of pipes,
redirection or &&, should either be migrated to cmd.shell or
have the python_shell=True flag set here.
The use of python_shell=True means that the shell will accept _any_ input
including potentially malicious commands such as 'good_command;rm -rf /'.
Be absolutely certain that you have sanitized your input prior to using
python_shell=True
*************************************************************************
CLI Example:
.. code-block:: bash
@ -589,6 +605,12 @@ def run(cmd,
salt '*' cmd.run cmd='sed -e s/=/:/g'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
ret = _run(cmd,
runas=runas,
shell=shell,
@ -637,12 +659,101 @@ def run(cmd,
return ret['stdout']
def shell(cmd,
          cwd=None,
          stdin=None,
          runas=None,
          shell=DEFAULT_SHELL,
          env=None,
          clean_env=False,
          template=None,
          rstrip=True,
          umask=None,
          output_loglevel='debug',
          quiet=False,
          timeout=None,
          reset_system_locale=True,
          ignore_retcode=False,
          saltenv='base',
          use_vt=False,
          **kwargs):
    '''
    Execute the passed command through the shell and return the output as
    a string.

    ************************************************************
    WARNING: This passes the cmd argument directly to the shell
    without any further processing! Be absolutely sure that you
    have properly sanitized the command passed to this function
    and do not use untrusted inputs.
    ************************************************************

    Note that ``env`` represents the environment variables for the command, and
    should be formatted as a dict, or a YAML string which resolves to a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"

    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:

    .. code-block:: bash

        salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"

    Specify an alternate shell with the shell parameter:

    .. code-block:: bash

        salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'

    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:

    .. code-block:: bash

        salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' cmd.shell cmd='sed -e s/=/:/g'
    '''
    # The original body called run() but discarded its result, so
    # cmd.shell always returned None; propagate the output instead.
    return run(cmd,
               cwd=cwd,
               stdin=stdin,
               runas=runas,
               shell=shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               quiet=quiet,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               python_shell=True,
               **kwargs)
def run_stdout(cmd,
cwd=None,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
clean_env=False,
template=None,
@ -683,6 +794,12 @@ def run_stdout(cmd,
salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
ret = _run(cmd,
runas=runas,
cwd=cwd,
@ -723,7 +840,7 @@ def run_stderr(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
clean_env=False,
template=None,
@ -764,6 +881,12 @@ def run_stderr(cmd,
salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
ret = _run(cmd,
runas=runas,
cwd=cwd,
@ -804,7 +927,7 @@ def run_all(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
clean_env=False,
template=None,
@ -845,6 +968,12 @@ def run_all(cmd,
salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
ret = _run(cmd,
runas=runas,
cwd=cwd,
@ -885,7 +1014,7 @@ def retcode(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
clean_env=False,
template=None,
@ -964,7 +1093,7 @@ def _retcode_quiet(cmd,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=False,
env=None,
clean_env=False,
template=None,
@ -1005,7 +1134,7 @@ def script(source,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
template=None,
umask=None,
@ -1044,6 +1173,13 @@ def script(source,
salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
def _cleanup_tempfile(path):
try:
os.remove(path)
@ -1110,7 +1246,7 @@ def script_retcode(source,
stdin=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=True,
python_shell=None,
env=None,
template='jinja',
umask=None,
@ -1147,6 +1283,12 @@ def script_retcode(source,
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
try:
if __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
python_shell = True
except NameError:
pass
if isinstance(__env__, string_types):
salt.utils.warn_until(
'Boron',

View File

@ -24,3 +24,45 @@ def query(url, **kwargs):
data='<xml>somecontent</xml>'
'''
return salt.utils.http.query(url=url, opts=__opts__, **kwargs)
def update_ca_bundle(target=None, source=None, merge_files=None):
    '''
    Update the local CA bundle file from a URL

    CLI Example:

    .. code-block:: bash

        salt '*' http.update_ca_bundle
        salt '*' http.update_ca_bundle target=/path/to/cacerts.pem
        salt '*' http.update_ca_bundle source=https://example.com/cacerts.pem

    If the ``target`` is not specified, it will be pulled from the
    ``ca_bundle`` configuration variable available to the minion. If it
    cannot be found there, it will be placed at
    ``<<FILE_ROOTS>>/cacerts.pem``.

    If the ``source`` is not specified, it will be pulled from the
    ``ca_bundle_url`` configuration variable available to the minion. If it
    cannot be found, it will be downloaded from the cURL website, using an
    http (not https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED!

    ``merge_files`` may also be specified, which includes a string or list
    of strings representing a file or files to be appended to the end of
    the CA bundle, once it is downloaded.

    CLI Example:

    .. code-block:: bash

        salt '*' http.update_ca_bundle merge_files=/path/to/mycert.pem
    '''
    if target is None:
        # Fall back to the minion's ``ca_bundle`` config option.
        target = __salt__['config.get']('ca_bundle', None)
    if source is None:
        # Fall back to the minion's ``ca_bundle_url`` config option.
        source = __salt__['config.get']('ca_bundle_url', None)
    # NOTE(review): the default target placement and default download URL
    # described above are implemented inside
    # salt.utils.http.update_ca_bundle -- confirm against that helper.
    return salt.utils.http.update_ca_bundle(
        target, source, __opts__, merge_files
    )

View File

@ -7,10 +7,12 @@ from __future__ import absolute_import
# Import python libs
import logging
import re
import os
# Import salt libs
import salt.utils
import salt.ext.six as six
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
@ -173,41 +175,62 @@ def avail(locale):
salt '*' locale.avail 'en_US.UTF-8'
'''
normalized_locale = _normalize_locale(locale)
try:
normalized_locale = _normalize_locale(locale)
except IndexError:
log.error('Unable to validate locale "{0}"'.format(locale))
return False
avail_locales = __salt__['locale.list_avail']()
locale_exists = next((True for x in avail_locales
if _normalize_locale(x.strip()) == normalized_locale), False)
return locale_exists
def gen_locale(locale):
@decorators.which('locale-gen')
def gen_locale(locale, charmap=None):
'''
Generate a locale.
.. versionadded:: 2014.7.0
:param locale: Any locale listed in /usr/share/i18n/locales or
/usr/share/i18n/SUPPORTED for debian and gentoo based distros
:param charmap: debian and gentoo based systems require the charmap to be
specified independently of the locale.
CLI Example:
.. code-block:: bash
salt '*' locale.gen_locale 'en_US.UTF-8'
salt '*' locale.gen_locale en_US.UTF-8
salt '*' locale.gen_locale en_US.UTF-8 UTF-8 # debian and gentoo only
'''
# validate the supplied locale
valid = __salt__['file.replace'](
'/usr/share/i18n/SUPPORTED',
'^{0}$'.format(locale),
'^{0}$'.format(locale),
search_only=True
)
on_debian = __grains__.get('os') == 'Debian'
on_gentoo = __grains__.get('os_family') == 'Gentoo'
if on_debian or on_gentoo:
if not charmap:
log.error('On debian and gentoo systems you must provide a charmap')
return False
search = '/usr/share/i18n/SUPPORTED'
locale_format = '{0} {1}'.format(locale, charmap)
valid = __salt__['file.search'](search, '^{0}$'.format(locale_format))
else:
search = '/usr/share/i18n/locales'
locale_format = locale
valid = locale_format in os.listdir(search)
if not valid:
log.error('The provided locale "{0}" is invalid'.format(locale))
log.error('The provided locale "{0}" is not found in {1}'.format(locale, search))
return False
if __grains__.get('os') == 'Debian' or __grains__.get('os_family') == 'Gentoo':
if on_debian or on_gentoo:
__salt__['file.replace'](
'/etc/locale.gen',
'# {0} '.format(locale),
'{0} '.format(locale),
r'^#\s*{0}$'.format(locale_format),
'{0}'.format(locale_format),
append_if_not_found=True
)
elif __grains__.get('os') == 'Ubuntu':
@ -222,13 +245,9 @@ def gen_locale(locale):
'locale-gen'
)
if __grains__.get('os_family') == 'Gentoo':
return __salt__['cmd.retcode'](
'locale-gen --generate "{0}"'.format(locale),
python_shell=False
)
else:
return __salt__['cmd.retcode'](
'locale-gen "{0}"'.format(locale),
python_shell=False
)
cmd = ['locale-gen']
if on_gentoo:
cmd.append('--generate')
cmd.append(locale_format)
return __salt__['cmd.retcode'](cmd, python_shell=False)

View File

@ -156,33 +156,65 @@ def cpustats():
salt '*' status.cpustats
'''
procf = '/proc/stat'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split()
if comps[0] == 'cpu':
ret[comps[0]] = {'idle': _number(comps[4]),
'iowait': _number(comps[5]),
'irq': _number(comps[6]),
'nice': _number(comps[2]),
'softirq': _number(comps[7]),
'steal': _number(comps[8]),
'system': _number(comps[3]),
'user': _number(comps[1])}
elif comps[0] == 'intr':
ret[comps[0]] = {'total': _number(comps[1]),
'irqs': [_number(x) for x in comps[2:]]}
elif comps[0] == 'softirq':
ret[comps[0]] = {'total': _number(comps[1]),
'softirqs': [_number(x) for x in comps[2:]]}
else:
ret[comps[0]] = _number(comps[1])
return ret
def linux_cpustats():
'''
linux specific implementation of cpustats
'''
procf = '/proc/stat'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split()
if comps[0] == 'cpu':
ret[comps[0]] = {'idle': _number(comps[4]),
'iowait': _number(comps[5]),
'irq': _number(comps[6]),
'nice': _number(comps[2]),
'softirq': _number(comps[7]),
'steal': _number(comps[8]),
'system': _number(comps[3]),
'user': _number(comps[1])}
elif comps[0] == 'intr':
ret[comps[0]] = {'total': _number(comps[1]),
'irqs': [_number(x) for x in comps[2:]]}
elif comps[0] == 'softirq':
ret[comps[0]] = {'total': _number(comps[1]),
'softirqs': [_number(x) for x in comps[2:]]}
else:
ret[comps[0]] = _number(comps[1])
return ret
def freebsd_cpustats():
'''
freebsd specific implementation of cpustats
'''
vmstat = __salt__['cmd.run']('vmstat -P').splitlines()
vm0 = vmstat[0].split()
cpu0loc = vm0.index('cpu0')
vm1 = vmstat[1].split()
usloc = vm1.index('us')
vm2 = vmstat[2].split()
cpuctr = 0
ret = {}
for cpu in vm0[cpu0loc:]:
ret[cpu] = {'us': _number(vm2[usloc + 3 * cpuctr]),
'sy': _number(vm2[usloc + 1 + 3 * cpuctr]),
'id': _number(vm2[usloc + 2 + 3 * cpuctr]), }
cpuctr += 1
return ret
# dict that return a function that does the right thing per platform
get_version = {
'Linux': linux_cpustats,
'FreeBSD': freebsd_cpustats,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
def meminfo():
@ -195,22 +227,53 @@ def meminfo():
salt '*' status.meminfo
'''
procf = '/proc/meminfo'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split()
comps[0] = comps[0].replace(':', '')
ret[comps[0]] = {
'value': comps[1],
}
if len(comps) > 2:
ret[comps[0]]['unit'] = comps[2]
return ret
def linux_meminfo():
'''
linux specific implementation of meminfo
'''
procf = '/proc/meminfo'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split()
comps[0] = comps[0].replace(':', '')
ret[comps[0]] = {
'value': comps[1],
}
if len(comps) > 2:
ret[comps[0]]['unit'] = comps[2]
return ret
def freebsd_meminfo():
'''
freebsd specific implementation of meminfo
'''
sysctlvm = __salt__['cmd.run']('sysctl vm').splitlines()
sysctlvm = [x for x in sysctlvm if x.startswith('vm')]
sysctlvm = [x.split(':') for x in sysctlvm]
sysctlvm = [[y.strip() for y in x] for x in sysctlvm]
sysctlvm = [x for x in sysctlvm if x[1]] # If x[1] not empty
ret = {}
for line in sysctlvm:
ret[line[0]] = line[1]
# Special handling for vm.total as it's especially important
sysctlvmtot = __salt__['cmd.run']('sysctl -n vm.vmtotal').splitlines()
sysctlvmtot = [x for x in sysctlvmtot if x]
ret['vm.vmtotal'] = sysctlvmtot
return ret
# dict that return a function that does the right thing per platform
get_version = {
'Linux': linux_meminfo,
'FreeBSD': freebsd_meminfo,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
def cpuinfo():
@ -223,21 +286,48 @@ def cpuinfo():
salt '*' status.cpuinfo
'''
procf = '/proc/cpuinfo'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split(':')
comps[0] = comps[0].strip()
if comps[0] == 'flags':
ret[comps[0]] = comps[1].split()
else:
def linux_cpuinfo():
'''
linux specific cpuinfo implementation
'''
procf = '/proc/cpuinfo'
if not os.path.isfile(procf):
return {}
stats = salt.utils.fopen(procf, 'r').read().splitlines()
ret = {}
for line in stats:
if not line:
continue
comps = line.split(':')
comps[0] = comps[0].strip()
if comps[0] == 'flags':
ret[comps[0]] = comps[1].split()
else:
ret[comps[0]] = comps[1].strip()
return ret
def freebsd_cpuinfo():
'''
freebsd specific cpuinfo implementation
'''
freebsd_cmd = 'sysctl hw.model hw.ncpu'
ret = {}
for line in __salt__['cmd.run'](freebsd_cmd).splitlines():
if not line:
continue
comps = line.split(':')
comps[0] = comps[0].strip()
ret[comps[0]] = comps[1].strip()
return ret
return ret
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_cpuinfo,
'FreeBSD': freebsd_cpuinfo,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
def diskstats():
@ -370,12 +460,7 @@ def nproc():
salt '*' status.nproc
'''
data = __salt__['cmd.run']('nproc')
try:
ret = int(data.strip())
except Exception:
return 0
return ret
return __grains__.get('num_cpus', 0)
def netstats():
@ -436,7 +521,7 @@ def netdev():
comps = line.split()
# Fix lines like eth0:9999..'
comps[0] = line.split(':')[0].strip()
#Support lines both like eth0:999 and eth0: 9999
# Support lines both like eth0:999 and eth0: 9999
comps.insert(1, line.split(':')[1].strip().split()[0])
ret[comps[0]] = {'iface': comps[0],
'rx_bytes': _number(comps[1]),
@ -548,12 +633,23 @@ def version():
salt '*' status.version
'''
procf = '/proc/version'
if not os.path.isfile(procf):
return {}
ret = salt.utils.fopen(procf, 'r').read().strip()
def linux_version():
'''
linux specific implementation of version
'''
procf = '/proc/version'
if not os.path.isfile(procf):
return {}
return salt.utils.fopen(procf, 'r').read().strip()
return ret
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_version,
'FreeBSD': lambda: __salt__['cmd.run']('sysctl -n kern.version'),
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)()
def master(master=None, connected=True):

View File

@ -1473,47 +1473,30 @@ class Events(object):
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_salt_token(self, salt_token):
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt master token
More on salt master token generation can
be found at
http://docs.saltstack.com/en/latest/topics/eauth/index.html#tokens
Check if this is a valid salt-api token or valid Salt token
Returns
True if this token is a valid salt token
False otherwise
'''
if salt_token and self.resolver.get_token(salt_token):
return True
return False
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
def _is_valid_salt_api_token(self, salt_api_token):
:return bool: True if valid, False if not valid.
'''
Check if this is a valid salt api token
Salt API tokens are generated on Login
Returns
True if this token is a valid salt api token
False otherwise
'''
if not salt_api_token:
if auth_token is None:
return False
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if salt_api_token:
orig_sesion, _ = cherrypy.session.cache.get(salt_api_token,
({}, None))
salt_token = orig_sesion.get('token')
else:
salt_token = cherrypy.session.get('token')
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_sesion.get('token', auth_token)
# Manually verify the token
if salt_token and self.auth.get_tok(salt_token):
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
@ -1530,21 +1513,21 @@ class Events(object):
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035
Or you can pass the token sent by cherrypy's
`/login` endpoint (these are different tokens).
:ref:`salt-token-generation` describes the process of obtaining a
Salt token.
.. code-block:: bash
curl -NsS localhost:8000/events?token=308650dbd728d8405a32ac9c2b2c1ed7705222bc
curl -NsS localhost:8000/events
.. code-block:: http
@ -1569,9 +1552,7 @@ class Events(object):
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75');
// Salt token works as well!
// var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035');
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
@ -1581,16 +1562,6 @@ class Events(object):
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
// You can supply the salt token as well
var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
clients may instead pass the :mailheader:`X-Auth-Token` value as an URL
parameter:
.. code-block:: bash
curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
@ -1605,7 +1576,7 @@ class Events(object):
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
@ -1614,7 +1585,7 @@ class Events(object):
.. code-block:: bash
curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
@ -1624,9 +1595,11 @@ class Events(object):
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
if (not (self._is_valid_salt_api_token(token) or
self._is_valid_salt_token(salt_token))):
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response

View File

@ -165,7 +165,6 @@ class Pillar(object):
saltenv = env
opts = dict(opts_in)
opts['file_roots'] = opts['pillar_roots']
opts['__pillar'] = True
opts['file_client'] = 'local'
if not grains:
opts['grains'] = {}

View File

@ -72,3 +72,25 @@ def execution():
ret = dict(list(i))
return ret
# Still need to modify some of the backend for auth checks to make this work
def __list_functions(user=None):
    '''
    List all of the functions reported by the connected minions,
    optionally passing in a user to evaluate permissions on.

    When no ``user`` is given, fires the collected function map on the
    event bus as a ``progress`` event and returns it. The per-user
    permission filtering at the bottom is unfinished and currently
    returns None implicitly.
    '''
    client = salt.client.get_local_client(__opts__['conf_file'])
    funcs = {}
    # Ask every minion for its available functions and merge the replies.
    gener = client.cmd_iter(
        '*',
        'sys.list_functions',
        timeout=__opts__['timeout'])
    for ret in gener:
        funcs.update(ret)
    if not user:
        __jid_event__.fire_event({'message': funcs}, 'progress')
        return funcs
    # TODO: unfinished -- intended to filter ``funcs`` down to what the
    # given user's external_auth entries permit; currently a no-op that
    # falls through and returns None.
    for _, val in __opts__['external_auth'].items():
        if user in val:
            pass

View File

@ -34,3 +34,39 @@ def query(url, output=True, **kwargs):
ret = salt.utils.http.query(url=url, opts=__opts__, **kwargs)
return ret
def update_ca_bundle(target=None, source=None, merge_files=None):
    '''
    Update the local CA bundle file from a URL

    CLI Example:

    .. code-block:: bash

        salt-run http.update_ca_bundle
        salt-run http.update_ca_bundle target=/path/to/cacerts.pem
        salt-run http.update_ca_bundle source=https://example.com/cacerts.pem

    This runner passes ``target`` and ``source`` through unchanged; any
    fallback to master configuration values and the default download
    location are handled inside ``salt.utils.http.update_ca_bundle`` --
    presumably via the ``ca_bundle`` and ``ca_bundle_url`` options, but
    confirm against that helper.

    ``merge_files`` may also be specified, which includes a string or list
    of strings representing a file or files to be appended to the end of
    the CA bundle, once it is downloaded.

    CLI Example:

    .. code-block:: bash

        salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem
    '''
    return salt.utils.http.update_ca_bundle(
        target, source, __opts__, merge_files
    )

View File

@ -572,6 +572,7 @@ class State(object):
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
def _gather_pillar(self):
'''
@ -1505,6 +1506,7 @@ class State(object):
# state module to change these at runtime.
'__low__': immutabletypes.freeze(low),
'__running__': immutabletypes.freeze(running) if running else {},
'__instance_id__': self.instance_id,
'__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
}
@ -2046,6 +2048,23 @@ class State(object):
return errors
ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items()))
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
salt.utils.get_accumulator_dir(self.opts['cachedir']),
self.instance_id
)
try:
os.remove(accum_data_path)
log.debug('Deleted accumulator data file {0}'.format(
accum_data_path)
)
except OSError:
log.debug('File {0} does not exist, no need to cleanup.'.format(
accum_data_path)
)
_cleanup_accumulator_data()
return ret
def render_template(self, high, template):

View File

@ -0,0 +1,268 @@
# -*- coding: utf-8 -*-
'''
Manage DynamoDB Tables
======================
.. versionadded:: 2015.2
Create and destroy DynamoDB tables. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit DynamoDB credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
It's also possible to specify ``key``, ``keyid`` and ``region`` via a
profile, either passed in as a dict, or as a string to pull from
pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure DynamoDB table does not exist:
boto_dynamodb.absent:
- table_name: new_table
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
- region: us-east-1
Ensure DynamoDB table exists:
boto_dynamodb.present:
- table_name: new_table
- read_capacity_units: 1
- write_capacity_units: 2
- hash_key: primary_id
- hash_key_data_type: N
- range_key: start_timestamp
- range_key_data_type: N
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
- region: us-east-1
- local_indexes:
- index:
- name: "primary_id_end_timestamp_index"
- hash_key: primary_id
- hash_key_data_type: N
- range_key: end_timestamp
- range_key_data_type: N
- global_indexes:
- index:
- name: "name_end_timestamp_index"
- hash_key: name
- hash_key_data_type: S
- range_key: end_timestamp
- range_key_data_type: N
- read_capacity_units: 3
- write_capacity_units: 4
'''
# Import Python libs
import sys
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
stream=sys.stdout
)
log = logging.getLogger()
def __virtual__():
    '''
    Load this state module only when the boto_dynamodb execution module
    has been loaded (i.e. boto is installed and usable).
    '''
    if 'boto_dynamodb.exists' in __salt__:
        return 'boto_dynamodb'
    return False
def present(table_name,
            region=None,
            key=None,
            keyid=None,
            profile=None,
            read_capacity_units=None,
            write_capacity_units=None,
            hash_key=None,
            hash_key_data_type=None,
            range_key=None,
            range_key_data_type=None,
            local_indexes=None,
            global_indexes=None):
    '''
    Ensure the DynamoDB table exists. Note: all properties of the table
    can only be set during table creation. Adding or changing
    indexes or key schema cannot be done after table creation

    table_name
        Name of the DynamoDB table

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    read_capacity_units
        The read throughput for this table

    write_capacity_units
        The write throughput for this table

    hash_key
        The name of the attribute that will be used as the hash key
        for this table

    hash_key_data_type
        The DynamoDB datatype of the hash key

    range_key
        The name of the attribute that will be used as the range key
        for this table

    range_key_data_type
        The DynamoDB datatype of the range key

    local_indexes
        The local indexes you would like to create

    global_indexes
        The global indexes you would like to create
    '''
    ret = {'name': table_name, 'result': None, 'comment': '', 'changes': {}}
    exists = __salt__['boto_dynamodb.exists'](
        table_name,
        region,
        key,
        keyid,
        profile
    )
    if exists:
        # Table properties are immutable after creation, so an existing
        # table means there is nothing to reconcile.
        ret['comment'] = ('DynamoDB table {0} already exists. '
                         'Nothing to change.'.format(table_name))
        ret['result'] = True
        return ret

    if __opts__['test']:
        # result stays None: "would make changes" in test mode
        ret['comment'] = 'DynamoDB table {0} is set to be created'.format(
            table_name)
        return ret

    is_created = __salt__['boto_dynamodb.create_table'](
        table_name,
        region,
        key,
        keyid,
        profile,
        read_capacity_units,
        write_capacity_units,
        hash_key,
        hash_key_data_type,
        range_key,
        range_key_data_type,
        local_indexes,
        global_indexes
    )
    ret['result'] = is_created
    if is_created:
        ret['comment'] = 'DynamoDB table {0} created successfully'.format(
            table_name)
        ret['changes'].setdefault('old', None)
        # BUGFIX: the previous implementation had stray trailing commas on
        # most of these assignments, which silently turned the values into
        # one-element tuples in the reported changes.
        ret['changes']['new'] = {
            'table': table_name,
            'read_capacity_units': read_capacity_units,
            'write_capacity_units': write_capacity_units,
            'hash_key': hash_key,
            'hash_key_data_type': hash_key_data_type,
            'range_key': range_key,
            'range_key_data_type': range_key_data_type,
            'local_indexes': local_indexes,
            'global_indexes': global_indexes,
        }
    else:
        ret['comment'] = 'Failed to create table {0}'.format(table_name)
    return ret
def absent(table_name,
           region=None,
           key=None,
           keyid=None,
           profile=None):
    '''
    Ensure the DynamoDB table does not exist.

    table_name
        Name of the DynamoDB table.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': table_name, 'result': None, 'comment': '', 'changes': {}}
    exists = __salt__['boto_dynamodb.exists'](
        table_name,
        region,
        key,
        keyid,
        profile
    )
    if not exists:
        ret['comment'] = 'DynamoDB table {0} does not exist'.format(table_name)
        ret['result'] = True
        return ret

    if __opts__['test']:
        # BUGFIX: messages were built with backslash line continuations,
        # which embedded runs of indentation whitespace into the
        # user-visible comment text.
        ret['comment'] = 'DynamoDB table {0} is set to be deleted'.format(
            table_name)
        ret['result'] = None
        return ret

    is_deleted = __salt__['boto_dynamodb.delete'](table_name, region, key,
                                                  keyid, profile)
    if is_deleted:
        ret['comment'] = 'Deleted DynamoDB table {0}'.format(table_name)
        ret['changes'].setdefault('old', 'Table {0} exists'.format(table_name))
        ret['changes'].setdefault('new', 'Table {0} deleted'.format(table_name))
        ret['result'] = True
    else:
        ret['comment'] = 'Failed to delete DynamoDB table {0}'.format(
            table_name)
        ret['result'] = False
    return ret

View File

@ -245,6 +245,7 @@ import traceback
import yaml
# Import salt libs
import salt.payload
import salt.utils
import salt.utils.templates
from salt.exceptions import CommandExecutionError
@ -258,8 +259,39 @@ log = logging.getLogger(__name__)
COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?'
_ACCUMULATORS = {}
_ACCUMULATORS_DEPS = {}
def _get_accumulator_filepath():
    '''
    Return the path of the file holding persisted accumulator data for the
    current state run, named after the run's unique instance id.
    '''
    accum_dir = salt.utils.get_accumulator_dir(__opts__['cachedir'])
    return os.path.join(accum_dir, __instance_id__)
def _load_accumulators():
    '''
    Load persisted accumulator state from the cache file for this run.

    Returns a two-tuple ``(accumulators, accumulators_deps)``; both are
    empty dicts when the file is missing, unreadable, or empty.
    '''
    def _read(path):
        # Missing/unreadable file or an empty payload both fall back to
        # fresh, empty state rather than raising.
        empty = {'accumulators': {}, 'accumulators_deps': {}}
        try:
            with open(path, 'rb') as fp_:
                data = salt.payload.Serial(__opts__).load(fp_)
        except IOError:
            return empty
        return data if data else empty

    data = _read(_get_accumulator_filepath())
    return data['accumulators'], data['accumulators_deps']
def _persist_accummulators(accumulators, accumulators_deps):
    '''
    Serialize the given accumulator data and dependency map to this run's
    accumulator cache file, overwriting any previous contents.
    '''
    payload = {
        'accumulators': accumulators,
        'accumulators_deps': accumulators_deps,
    }
    serial = salt.payload.Serial(__opts__)
    with open(_get_accumulator_filepath(), 'w+b') as fp_:
        serial.dump(payload, fp_)
def _check_user(user, group):
@ -961,7 +993,7 @@ def exists(name):
if not name:
return _error(ret, 'Must provide name to file.exists')
if not os.path.exists(name):
return _error(ret, ('Specified path {0} does not exist').format(name))
return _error(ret, 'Specified path {0} does not exist'.format(name))
ret['comment'] = 'Path {0} exists'.format(name)
return ret
@ -984,7 +1016,7 @@ def missing(name):
if not name:
return _error(ret, 'Must provide name to file.missing')
if os.path.exists(name):
return _error(ret, ('Specified path {0} exists').format(name))
return _error(ret, 'Specified path {0} exists'.format(name))
ret['comment'] = 'Path {0} is missing'.format(name)
return ret
@ -1348,10 +1380,11 @@ def managed(name,
'No changes made.'.format(name))
return ret
if name in _ACCUMULATORS:
accum_data, _ = _load_accumulators()
if name in accum_data:
if not context:
context = {}
context['accumulator'] = _ACCUMULATORS[name]
context['accumulator'] = accum_data[name]
try:
if __opts__['test']:
@ -2381,13 +2414,15 @@ def replace(name,
if changes:
ret['changes'] = {'diff': changes}
ret['comment'] = ('Changes were made'
if not __opts__['test'] else 'Changes would have been made')
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made'
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['comment'] = ('No changes were made'
if not __opts__['test'] else 'No changes would have been made')
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
@ -2507,11 +2542,12 @@ def blockreplace(
if not check_res:
return _error(ret, check_msg)
if name in _ACCUMULATORS:
accumulator = _ACCUMULATORS[name]
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = _ACCUMULATORS_DEPS.get(name, [])
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
@ -3728,21 +3764,23 @@ def accumulated(name, filename, text, **kwargs):
return ret
if isinstance(text, string_types):
text = (text,)
if filename not in _ACCUMULATORS:
_ACCUMULATORS[filename] = {}
if filename not in _ACCUMULATORS_DEPS:
_ACCUMULATORS_DEPS[filename] = {}
if name not in _ACCUMULATORS_DEPS[filename]:
_ACCUMULATORS_DEPS[filename][name] = []
accum_data, accum_deps = _load_accumulators()
if filename not in accum_data:
accum_data[filename] = {}
if filename not in accum_deps:
accum_deps[filename] = {}
if name not in accum_deps[filename]:
accum_deps[filename][name] = []
for accumulator in deps:
_ACCUMULATORS_DEPS[filename][name].extend(six.itervalues(accumulator))
if name not in _ACCUMULATORS[filename]:
_ACCUMULATORS[filename][name] = []
accum_deps[filename][name].extend(six.itervalues(accumulator))
if name not in accum_data[filename]:
accum_data[filename][name] = []
for chunk in text:
if chunk not in _ACCUMULATORS[filename][name]:
_ACCUMULATORS[filename][name].append(chunk)
if chunk not in accum_data[filename][name]:
accum_data[filename][name].append(chunk)
ret['comment'] = ('Accumulator {0} for file {1} '
'was charged by text'.format(name, filename))
_persist_accummulators(accum_data, accum_deps)
return ret

View File

@ -782,6 +782,17 @@ def installed(
pkg.installed:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
Currently only works with APT based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.installed:
- only_upgrade: True
'''
if isinstance(pkgs, list) and len(pkgs) == 0:
@ -1187,6 +1198,18 @@ def latest(
pkg.latest:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
Currently only works with APT based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.latest:
- only_upgrade: True
'''
rtag = __gen_rtag()
refresh = bool(

View File

@ -80,7 +80,7 @@ def present(name, value, config=None):
'The value {0} is set to be changed to {1} '
return ret
elif name in configured and name in current:
if str(value) == __salt__['sysctl.get'](name):
if str(value).split() == __salt__['sysctl.get'](name).split():
ret['result'] = True
ret['comment'] = 'Sysctl value {0} = {1} is already set'.format(
name,

View File

@ -550,6 +550,18 @@ def required_modules_error(name, docstring):
return msg.format(filename, ', '.join(modules))
def get_accumulator_dir(cachedir):
    '''
    Return the directory that accumulator data is stored in, creating it if it
    doesn't exist.
    '''
    fn_ = os.path.join(cachedir, 'accumulator')
    try:
        # EAFP: attempt creation directly instead of isdir-then-makedirs,
        # which raced with concurrent callers creating the same directory.
        os.makedirs(fn_)
    except OSError:
        # Re-raise only if the path still isn't a directory (real failure,
        # e.g. permissions), not when another process created it first.
        if not os.path.isdir(fn_):
            raise
    return fn_
def check_or_die(command):
'''
Simple convenience function for modules to use for gracefully blowing up

View File

@ -2025,7 +2025,7 @@ def simple_types_filter(data):
simpletypes_keys = (str, six.text_type, int, long, float, bool)
simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple])
if isinstance(data, list):
if isinstance(data, (list, tuple)):
simplearray = []
for value in data:
if value is not None:
@ -2041,7 +2041,7 @@ def simple_types_filter(data):
for key, value in data.items():
if key is not None and not isinstance(key, simpletypes_keys):
key = repr(key)
if value is not None and isinstance(value, (dict, list)):
if value is not None and isinstance(value, (dict, list, tuple)):
value = simple_types_filter(value)
elif value is not None and not isinstance(value, simpletypes_values):
value = repr(value)

View File

@ -130,7 +130,8 @@ def get_master_event(opts, sock_dir, listen=True):
elif opts['transport'] == 'raet':
import salt.utils.raetevent
return salt.utils.raetevent.MasterEvent(
opts=opts, sock_dir=sock_dir, listen=listen)
opts=opts, sock_dir=sock_dir, listen=listen
)
def tagify(suffix='', prefix='', base=SALT):

View File

@ -1,14 +1,12 @@
# -*- coding: utf-8 -*-
'''
Connection library for GitHub
:depends: requests
'''
from __future__ import absolute_import
# Import Python libs
import json
import requests
import salt.utils.http
import logging
log = logging.getLogger(__name__)
@ -48,8 +46,14 @@ def get_user_pubkeys(users):
user = tmp_user
url = 'https://api.github.com/users/{0}/keys'.format(user)
result = requests.request('GET', url)
keys = json.loads(result.text)
result = salt.utils.http.query(
url,
'GET',
decode=False,
text=True,
)
keys = json.loads(result['text'])
ret[user] = {}
for key in keys:

View File

@ -11,6 +11,7 @@ import os.path
import json
import logging
import salt.ext.six.moves.http_cookiejar # pylint: disable=E0611
from salt.ext.six import string_types
from salt._compat import ElementTree as ET
import ssl
@ -82,6 +83,9 @@ def query(url,
requests_lib=None,
ca_bundle=None,
verify_ssl=None,
text_out=None,
headers_out=None,
decode_out=None,
**kwargs):
'''
Query a resource, and decode the return data
@ -123,7 +127,10 @@ def query(url,
data = _render(
data_file, data_render, data_renderer, template_dict, opts
)
log.trace('POST Data: {0}'.format(pprint.pformat(data)))
log.debug('Using {0} Method'.format(method))
if method == 'POST':
log.trace('POST Data: {0}'.format(pprint.pformat(data)))
if header_file is not None:
header_tpl = _render(
@ -205,7 +212,7 @@ def query(url,
if url.startswith('https') or port == 443:
if not HAS_MATCHHOSTNAME:
log.warn(('match_hostname() not available, SSL hostname checking'
log.warn(('match_hostname() not available, SSL hostname checking '
'not available. THIS CONNECTION MAY NOT BE SECURE!'))
elif verify_ssl is False:
log.warn(('SSL certificate verification has been explicitly '
@ -243,6 +250,13 @@ def query(url,
result_headers = result.headers.headers
result_text = result.read()
if isinstance(result_headers, list):
result_headers_dict = {}
for header in result_headers:
comps = header.split(':')
result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
result_headers = result_headers_dict
log.debug('Response Status Code: {0}'.format(result_status_code))
log.trace('Response Headers: {0}'.format(result_headers))
log.trace('Response Cookies: {0}'.format(sess_cookies))
@ -252,6 +266,14 @@ def query(url,
log.trace(('Cannot Trace Log Response Text: {0}. This may be due to '
'incompatibilities between requests and logging.').format(exc))
if os.path.exists(text_out):
with salt.utils.fopen(text_out, 'w') as tof:
tof.write(result_text)
if os.path.exists(headers_out):
with salt.utils.fopen(headers_out, 'w') as hof:
hof.write(result_headers)
if cookies is not None:
sess_cookies.save()
@ -304,13 +326,17 @@ def query(url,
else:
text = True
if os.path.exists(decode_out):
with salt.utils.fopen(decode_out, 'w') as dof:
dof.write(result_text)
if text is True:
ret['text'] = result_text
return ret
def get_ca_bundle(opts):
def get_ca_bundle(opts=None):
'''
Return the location of the ca bundle file. See the following article:
@ -319,16 +345,28 @@ def get_ca_bundle(opts):
if hasattr(get_ca_bundle, '__return_value__'):
return get_ca_bundle.__return_value__
if opts is None:
opts = {}
opts_bundle = opts.get('ca_bundle', None)
if opts_bundle is not None and os.path.exists(opts_bundle):
return opts_bundle
file_roots = opts.get('file_roots', '/srv/salt')
# Please do not change the order without good reason
for path in (
'/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem',
# Check Salt first
os.path.join(file_roots, 'cacert.pem'),
os.path.join(file_roots, 'ca-bundle.crt'),
# Debian has paths that often exist on other distros
'/etc/ssl/certs/ca-certificates.crt',
# RedHat is also very common
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/pki/tls/certs/ca-bundle.trust.crt',
# RedHat's link for Debian compatibility
'/etc/ssl/certs/ca-bundle.crt',
'/etc/ssl/certs/ca-certificates.crt',
# Suse has an unusual path
'/var/lib/ca-certificates/ca-bundle.pem',
):
if os.path.exists(path):
@ -337,6 +375,97 @@ def get_ca_bundle(opts):
return None
def update_ca_bundle(
        target=None,
        source=None,
        opts=None,
        merge_files=None,
    ):
    '''
    Attempt to update the CA bundle file from a URL

    If not specified, the local location on disk (``target``) will be
    auto-detected, if possible. If it is not found, then a new location on disk
    will be created and updated.

    The default ``source`` is:

        http://curl.haxx.se/ca/cacert.pem

    This is based on the information at:

        http://curl.haxx.se/docs/caextract.html

    A string or list of strings representing files to be appended to the end of
    the CA bundle file may also be passed through as ``merge_files``.
    '''
    if opts is None:
        opts = {}

    # NOTE: removed an unused ``file_roots`` local that was computed here
    # but never referenced.
    if target is None:
        target = get_ca_bundle(opts)

    if target is None:
        log.error('Unable to detect location to write CA bundle to')
        return

    if source is None:
        source = opts.get('ca_bundle_url', 'http://curl.haxx.se/ca/cacert.pem')

    log.debug('Attempting to download {0} to {1}'.format(source, target))
    # Stream the remote bundle straight to disk via text_out; no decoding.
    query(
        source,
        text=True,
        decode=False,
        headers=False,
        status=False,
        text_out=target
    )

    if merge_files is not None:
        if isinstance(merge_files, string_types):
            merge_files = [merge_files]

        if not isinstance(merge_files, list):
            log.error('A value was passed as merge_files which was not either '
                      'a string or a list')
            return

        merge_content = ''

        for cert_file in merge_files:
            if os.path.exists(cert_file):
                log.debug(
                    'Queueing up {0} to be appended to {1}'.format(
                        cert_file, target
                    )
                )
                try:
                    with salt.utils.fopen(cert_file, 'r') as fcf:
                        merge_content = '\n'.join((merge_content, fcf.read()))
                except IOError as exc:
                    # Best effort: log and continue with remaining files.
                    log.error(
                        'Reading from {0} caused the following error: {1}'.format(
                            cert_file, exc
                        )
                    )

        if merge_content:
            log.debug('Appending merge_files to {0}'.format(target))
            try:
                with salt.utils.fopen(target, 'a') as tfp:
                    tfp.write('\n')
                    tfp.write(merge_content)
            except IOError as exc:
                log.error(
                    'Writing to {0} caused the following error: {1}'.format(
                        target, exc
                    )
                )
def _render(template, render, renderer, template_dict, opts):
'''
Render a template

View File

@ -65,7 +65,7 @@ class SaltCacheLoader(BaseLoader):
self.opts = opts
self.saltenv = saltenv
self.encoding = encoding
if self.opts.get('__pillar', False):
if self.opts['file_roots'] is self.opts['pillar_roots']:
self.searchpath = opts['file_roots'][saltenv]
else:
self.searchpath = [path.join(opts['cachedir'], 'files', saltenv)]

View File

@ -380,16 +380,17 @@ class CkMinions(object):
matcher_args = ['@'.join(comps[1:])]
if comps[0] in ('G', 'P', 'I'):
matcher_args.append(delimiter)
matcher_args.append(True)
if not matcher:
# If an unknown matcher is called at any time, fail out
return []
if unmatched and unmatched[-1] == '-':
results.append(str(set(matcher('@'.join(comps[1:]), True))))
results.append(str(set(matcher(*matcher_args))))
results.append(')')
unmatched.pop()
else:
results.append(str(set(matcher('@'.join(comps[1:]), True))))
results.append(str(set(matcher(*matcher_args))))
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression

View File

@ -7,6 +7,7 @@ from __future__ import absolute_import
# Import python libs
import socket
import subprocess
import shlex
import re
import logging
import os
@ -1011,7 +1012,8 @@ def _freebsd_remotes_on(port, which_end):
remotes = set()
try:
data = subprocess.check_output(['sockstat', '-4', '-c', '-p {0}'.format(port)])
cmd = shlex.split('sockstat -4 -c -p {0}'.format(port))
data = subprocess.check_output(cmd)
except subprocess.CalledProcessError as ex:
log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode))
raise

View File

@ -19,8 +19,8 @@ Library for interacting with PagerDuty API
from __future__ import absolute_import
import json
import requests
import logging
import salt.utils.http
from salt.version import __version__
log = logging.getLogger(__name__)
@ -90,16 +90,18 @@ def query(method='GET', profile=None, url=None, path='api/v1',
else:
headers['Content-type'] = 'application/json'
result = requests.request(
method,
result = salt.utils.http.query(
url,
headers=headers,
method,
params=params,
header_dict=headers,
data=json.dumps(data),
verify=verify_ssl
decode=False,
text=True,
opts=opts,
)
return result.text
return result['text']
def list_items(action, key, profile=None, api_key=None, opts=None):

View File

@ -233,6 +233,7 @@ try:
_CRON_SUPPORTED = True
except ImportError:
_CRON_SUPPORTED = False
import yaml
# Import Salt libs
import salt.utils
@ -241,6 +242,7 @@ import salt.utils.process
from salt.utils.odict import OrderedDict
from salt.utils.process import os_is_running
import salt.payload
import salt.syspaths
log = logging.getLogger(__name__)
@ -260,6 +262,7 @@ class Schedule(object):
self.returners = returners
else:
self.returners = returners.loader.gen_functions()
self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
self.schedule_returner = self.option('schedule_returner')
# Keep track of the lowest loop interval needed in this variable
self.loop_interval = sys.maxint
@ -273,11 +276,24 @@ class Schedule(object):
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/minion.d/_schedule.conf
    '''
    conf_path = os.path.join(
        salt.syspaths.CONFIG_DIR, 'minion.d', '_schedule.conf')
    serialized = yaml.dump({'schedule': self.opts['schedule']})
    try:
        with salt.utils.fopen(conf_path, 'w+') as fp_:
            fp_.write(serialized)
    except (IOError, OSError):
        # Best effort: a failed write must not take down the scheduler.
        log.error('Failed to persist the updated schedule')
def delete_job(self, name, where=None):
'''
Deletes a job from the scheduler.
'''
if where is None or where != 'pillar':
# ensure job exists, then delete it
if name in self.opts['schedule']:
@ -314,6 +330,7 @@ class Schedule(object):
else:
log.info('Added new job {0} to scheduler'.format(new_job))
self.opts['schedule'].update(data)
self.persist()
def enable_job(self, name, where=None):
'''
@ -429,6 +446,8 @@ class Schedule(object):
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
ret['metadata']['_TOS'] = self.time_offset
ret['metadata']['_TS'] = time.ctime()
else:
log.warning('schedule: The metadata parameter must be '
'specified as a dictionary. Ignoring.')

View File

@ -1,4 +1,4 @@
rackspace-test:
provider: rackspace-config
size: 2 GB Performance
image: Ubuntu 14.04 LTS (Trusty Tahr)
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)

View File

@ -39,7 +39,8 @@ class MinionTestCase(TestCase):
opts = {
'id': 'salt-testing',
'hash_type': 'sha512',
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion')
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'extension_modules': ''
}
with patch.dict(__opts__, opts):
testminion = minion.MinionBase(__opts__)

View File

@ -215,7 +215,7 @@ class CMDMODTestCase(TestCase):
'''
Tests end result when a command is not found
'''
ret = cmdmod._run('foo').get('stderr')
ret = cmdmod._run('foo', use_vt=True).get('stderr')
self.assertIn('foo', ret)
@patch('salt.utils.is_windows', MagicMock(return_value=True))

View File

@ -18,7 +18,8 @@ import salt.states.file as filestate
filestate.__env__ = 'base'
filestate.__salt__ = {'file.manage_file': False}
filestate.__opts__ = {'test': False}
filestate.__opts__ = {'test': False, 'cachedir': ''}
filestate.__instance_id__ = ''
@skipIf(NO_MOCK, NO_MOCK_REASON)

View File

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Import python libs
import copy
import os
import tempfile
import json
@ -57,12 +58,26 @@ class MockFileClient(object):
class TestSaltCacheLoader(TestCase):
def __init__(self, *args, **kws):
TestCase.__init__(self, *args, **kws)
self.opts = {
'cachedir': TEMPLATES_DIR,
'file_roots': {
'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')]
},
'pillar_roots': {
'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')]
}
}
def test_searchpath(self):
'''
The searchpath is based on the cachedir option and the saltenv parameter
'''
tmp = tempfile.gettempdir()
loader = SaltCacheLoader({'cachedir': tmp}, saltenv='test')
opts = copy.deepcopy(self.opts)
opts.update({'cachedir': tmp})
loader = SaltCacheLoader(opts, saltenv='test')
assert loader.searchpath == [os.path.join(tmp, 'files', 'test')]
def test_mockclient(self):
@ -70,7 +85,7 @@ class TestSaltCacheLoader(TestCase):
A MockFileClient is used that records all file requests normally sent
to the master.
'''
loader = SaltCacheLoader({'cachedir': TEMPLATES_DIR}, 'test')
loader = SaltCacheLoader(self.opts, 'test')
fc = MockFileClient(loader)
res = loader.get_source(None, 'hello_simple')
assert len(res) == 3
@ -86,7 +101,7 @@ class TestSaltCacheLoader(TestCase):
'''
Setup a simple jinja test environment
'''
loader = SaltCacheLoader({'cachedir': TEMPLATES_DIR}, 'test')
loader = SaltCacheLoader(self.opts, 'test')
fc = MockFileClient(loader)
jinja = Environment(loader=loader)
return fc, jinja
@ -134,6 +149,9 @@ class TestGetTemplate(TestCase):
'file_roots': {
'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')]
},
'pillar_roots': {
'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')]
},
'fileserver_backend': ['roots'],
'hash_type': 'md5',
'extension_modules': os.path.join(
@ -178,7 +196,9 @@ class TestGetTemplate(TestCase):
filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'hello_import')
out = render_jinja_tmpl(
salt.utils.fopen(filename).read(),
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'},
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Salt', saltenv='test'))
self.assertEqual(out, 'Hey world !Hi Salt !\n')
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
@ -267,7 +287,9 @@ class TestGetTemplate(TestCase):
filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'hello_import')
out = render_jinja_tmpl(
salt.utils.fopen(filename).read(),
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'},
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test'))
self.assertEqual(out, u'Hey world !Hi Sàlt !\n')
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
@ -278,7 +300,9 @@ class TestGetTemplate(TestCase):
filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii')
out = render_jinja_tmpl(
salt.utils.fopen(filename).read(),
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'},
dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test'))
self.assertEqual(u'Assunção\n', out)
self.assertEqual(fc.requests[0]['path'], 'salt://macro')