Merge branch 'develop' into develop

This commit is contained in:
rares-pop 2017-08-16 10:52:48 +03:00 committed by GitHub
commit 1bbd1565d1
1061 changed files with 21979 additions and 14079 deletions

.github/stale.yml vendored
View File

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 1130 is approximately 3 years and 1 month
daysUntilStale: 1130
# 1115 is approximately 3 years and 1 month
daysUntilStale: 1115
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7

View File

@ -1,4 +1,11 @@
{
"alwaysNotifyForPaths": [
{
"name": "ryan-lane",
"files": ["salt/**/*boto*.py"],
"skipTeamPrs": false
}
],
"skipTitle": "Merge forward",
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"]
}

View File

@ -245,8 +245,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2017.7.0' # latest release
previous_release = '2016.11.6' # latest release from previous branch
latest_release = '2017.7.1' # latest release
previous_release = '2016.11.7' # latest release from previous branch
previous_release_dir = '2016.11' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
@ -258,8 +258,8 @@ if on_saltstack:
copyright = time.strftime("%Y")
# < --- START do not merge these settings to other branches START ---> #
build_type = 'latest' # latest, previous, develop, next
release = latest_release # version, latest_release, previous_release
build_type = 'develop' # latest, previous, develop, next
release = version # version, latest_release, previous_release
# < --- END do not merge these settings to other branches END ---> #
# Set google custom search engine

View File

@ -19,5 +19,4 @@ auth modules
pki
rest
sharedsecret
stormpath
yubico

View File

@ -1,6 +0,0 @@
===================
salt.auth.stormpath
===================
.. automodule:: salt.auth.stormpath
:members:

View File

@ -33,6 +33,10 @@ Output Options
Write the output to the specified file.
.. option:: --out-file-append, --output-file-append
Append the output to the specified file.
.. option:: --no-color
Disable all colored output
@ -46,3 +50,14 @@ Output Options
``green`` denotes success, ``red`` denotes failure, ``blue`` denotes
changes and success and ``yellow`` denotes an expected future change in configuration.
.. option:: --state-output=STATE_OUTPUT, --state_output=STATE_OUTPUT
Override the configured state_output value for minion
output. One of 'full', 'terse', 'mixed', 'changes' or
'filter'. Default: 'none'.
.. option:: --state-verbose=STATE_VERBOSE, --state_verbose=STATE_VERBOSE
Override the configured state_verbose value for minion
output. Set to True or False. Default: none.
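As a quick illustration, these two options can be combined on a single command line; the target and the state function below are placeholders, not part of the documented options:
.. code-block:: bash

    salt --state-output=changes --state-verbose=False '*' state.apply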

View File

@ -81,7 +81,7 @@ Options
Pass in an external authentication medium to validate against. The
credentials will be prompted for. The options are `auto`,
`keystone`, `ldap`, `pam`, and `stormpath`. Can be used with the -T
`keystone`, `ldap`, and `pam`. Can be used with the -T
option.
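For example, to authenticate against PAM (the target and function here are placeholders; the credentials are prompted for interactively):
.. code-block:: bash

    salt -a pam '*' test.ping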
.. option:: -T, --make-token

View File

@ -97,6 +97,7 @@ execution modules
cytest
daemontools
data
datadog_api
ddns
deb_apache
deb_postgres
@ -399,7 +400,6 @@ execution modules
state
status
statuspage
stormpath
supervisord
suse_apache
svn

View File

@ -0,0 +1,6 @@
========================
salt.modules.datadog_api
========================
.. automodule:: salt.modules.datadog_api
:members:

View File

@ -1,6 +0,0 @@
======================
salt.modules.stormpath
======================
.. automodule:: salt.modules.stormpath
:members:

View File

@ -3,4 +3,5 @@ salt.modules.test
=================
.. automodule:: salt.modules.test
:members:
:members:
:exclude-members: rand_str

View File

@ -429,7 +429,7 @@ similar to the following:
Confine this module to Mac OS with Homebrew.
'''
if salt.utils.which('brew') and __grains__['os'] == 'MacOS':
if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS':
return __virtualname__
return False

View File

@ -146,8 +146,10 @@ Here is a simple YAML renderer example:
import yaml
from salt.utils.yamlloader import SaltYamlSafeLoader
from salt.ext import six
def render(yaml_data, saltenv='', sls='', **kws):
if not isinstance(yaml_data, basestring):
if not isinstance(yaml_data, six.string_types):
yaml_data = yaml_data.read()
data = yaml.load(
yaml_data,

View File

@ -250,7 +250,6 @@ state modules
stateconf
status
statuspage
stormpath_account
supervisord
svn
sysctl

View File

@ -1,6 +1,6 @@
=======================
salt.modules.kubernetes
=======================
======================
salt.states.kubernetes
======================
.. automodule:: salt.modules.kubernetes
.. automodule:: salt.states.kubernetes
:members:

View File

@ -1,6 +0,0 @@
=============================
salt.states.stormpath_account
=============================
.. automodule:: salt.states.stormpath_account
:members:

View File

@ -135,19 +135,23 @@ A State Module must return a dict containing the following keys/values:
``test=True``, and changes would have been made if the state was not run in
test mode.
+--------------------+-----------+-----------+
| | live mode | test mode |
+====================+===========+===========+
| no changes | ``True`` | ``True`` |
+--------------------+-----------+-----------+
| successful changes | ``True`` | ``None`` |
+--------------------+-----------+-----------+
| failed changes | ``False`` | ``None`` |
+--------------------+-----------+-----------+
+--------------------+-----------+------------------------+
| | live mode | test mode |
+====================+===========+========================+
| no changes | ``True`` | ``True`` |
+--------------------+-----------+------------------------+
| successful changes | ``True`` | ``None`` |
+--------------------+-----------+------------------------+
| failed changes | ``False`` | ``False`` or ``None`` |
+--------------------+-----------+------------------------+
.. note::
Test mode does not predict if the changes will be successful or not.
Test mode does not predict if the changes will be successful or not,
and hence the result for pending changes is usually ``None``.
However, if a state is going to fail and this can be determined
in test mode without applying the change, ``False`` can be returned.
- **comment:** A string containing a summary of the result.
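The table above can be summarised with a small sketch of a state function assembling its return dict; the function name and the ``test`` handling are illustrative only, not taken from an existing Salt state module:
.. code-block:: python

    def example_present(name, test=False):
        '''
        Illustrative only: build the return dict described above.
        '''
        needs_change = True  # pretend a pending change was detected
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': '{0} is already in the desired state'.format(name)}
        if needs_change:
            if test:
                # changes are pending, but in test mode the result is None
                ret['result'] = None
                ret['comment'] = '{0} would be updated'.format(name)
            else:
                ret['changes'] = {'old': 'previous value', 'new': 'desired value'}
                ret['comment'] = '{0} was updated'.format(name)
        return ret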

View File

@ -777,8 +777,6 @@ Stateconf
stderr
stdin
stdout
stormpath
Stormpath
str
strftime
subfolder

View File

@ -146,24 +146,24 @@ library. The following two lines set up the imports:
.. code-block:: python
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.utils import namespaced_function
import salt.utils
And then a series of declarations will make the necessary functions available
within the cloud module.
.. code-block:: python
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
get_size = salt.utils.namespaced_function(get_size, globals())
get_image = salt.utils.namespaced_function(get_image, globals())
avail_locations = salt.utils.namespaced_function(avail_locations, globals())
avail_images = salt.utils.namespaced_function(avail_images, globals())
avail_sizes = salt.utils.namespaced_function(avail_sizes, globals())
script = salt.utils.namespaced_function(script, globals())
destroy = salt.utils.namespaced_function(destroy, globals())
list_nodes = salt.utils.namespaced_function(list_nodes, globals())
list_nodes_full = salt.utils.namespaced_function(list_nodes_full, globals())
list_nodes_select = salt.utils.namespaced_function(list_nodes_select, globals())
show_instance = salt.utils.namespaced_function(show_instance, globals())
If necessary, these functions may be replaced by removing the appropriate
declaration line, and then adding the function as normal.

View File

@ -49,7 +49,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the
.. code-block:: yaml
joyent_512
joyent_512:
provider: my-joyent-config
size: g4-highcpu-512M
image: ubuntu-16.04

View File

@ -18,10 +18,10 @@ on the significance and complexity of the changes required by the user.
Salt feature releases are based on the Periodic Table. Any new features going
into the develop branch will be named after the next element in the Periodic
Table. For example, Beryllium was the feature release name of the develop branch
before the 2015.8 branch was tagged. At that point in time, any new features going
into the develop branch after 2015.8 was branched were part of the Boron feature
release.
Table. For example, Beryllium was the feature release name of the develop
branch before the 2015.8 branch was tagged. At that point in time, any new
features going into the develop branch after 2015.8 was branched were part of
the Boron feature release.
A deprecation warning should be in place for at least two major releases before
the deprecated code and its accompanying deprecation warning are removed. More
@ -29,14 +29,14 @@ time should be given for more complex changes. For example, if the current
release under development is ``Sodium``, the deprecated code and associated
warnings should remain in place and warn for at least ``Aluminum``.
To help in this deprecation task, salt provides :func:`salt.utils.warn_until
<salt.utils.warn_until>`. The idea behind this helper function is to show the
deprecation warning to the user until salt reaches the provided version. Once
that provided version is equaled :func:`salt.utils.warn_until
<salt.utils.warn_until>` will raise a :py:exc:`RuntimeError` making salt stop
its execution. This stoppage is unpleasant and will remind the developer that
the deprecation limit has been reached and that the code can then be safely
removed.
To help in this deprecation task, salt provides
:func:`salt.utils.versions.warn_until <salt.utils.versions.warn_until>`. The
idea behind this helper function is to show the deprecation warning to the user
until salt reaches the provided version. Once that provided version is equaled
:func:`salt.utils.versions.warn_until <salt.utils.versions.warn_until>` will
raise a :py:exc:`RuntimeError` making salt stop its execution. This stoppage is
unpleasant and will remind the developer that the deprecation limit has been
reached and that the code can then be safely removed.
Consider the following example:
@ -44,7 +44,7 @@ Consider the following example:
def some_function(bar=False, foo=None):
if foo is not None:
salt.utils.warn_until(
salt.utils.versions.warn_until(
'Aluminum',
'The \'foo\' argument has been deprecated and its '
'functionality removed, as such, its usage is no longer '

View File

@ -0,0 +1,154 @@
=========================================
Arista EOS Salt minion installation guide
=========================================
The Salt minion for Arista EOS is distributed as a SWIX extension and can be installed directly on the switch. The EOS network operating system is based on old Fedora distributions, so installing the ``salt-minion`` requires backports. This SWIX extension contains the necessary backports, together with the Salt base code.
.. note::
This SWIX extension has been tested on Arista DCS-7280SE-68-R, running EOS 4.17.5M and vEOS 4.18.3F.
Important Notes
===============
This package is in beta; make sure to test it carefully before running it in production.
If it is confirmed to work correctly, please report back and add a note on this page with the platform model and EOS version.
If you want to uninstall this package, please refer to the uninstalling_ section.
Installation from the Official SaltStack Repository
===================================================
Download the swix package and save it to flash.
.. code-block:: bash
veos#copy https://salt-eos.netops.life/salt-eos-latest.swix flash:
veos#copy https://salt-eos.netops.life/startup.sh flash:
Install the Extension
=====================
Copy the Salt package to extension
.. code-block:: bash
veos#copy flash:salt-eos-latest.swix extension:
Install the SWIX
.. code-block:: bash
veos#extension salt-eos-latest.swix force
Verify the installation
.. code-block:: bash
veos#show extensions | include salt-eos
salt-eos-2017-07-19.swix 1.0.11/1.fc25 A, F 27
Change the Salt master IP address or FQDN by editing the variable (SALT_MASTER)
.. code-block:: bash
veos#bash vi /mnt/flash/startup.sh
Make sure you enable the eAPI with the unix-socket protocol
.. code-block:: bash
veos(config)#management api http-commands
protocol unix-socket
no shutdown
Post-installation tasks
=======================
Generate keys and the host record, and start the Salt minion
.. code-block:: bash
veos#bash
#sudo /mnt/flash/startup.sh
``salt-minion`` should be running
Copy the installed extensions to boot-extensions
.. code-block:: bash
veos#copy installed-extensions boot-extensions
Apply an event-handler to let EOS start the salt-minion during boot-up
.. code-block:: bash
veos(config)#event-handler boot-up-script
trigger on-boot
action bash sudo /mnt/flash/startup.sh
For more specific installation details of the ``salt-minion``, please refer to :ref:`Configuring Salt<configuring-salt>`.
.. _uninstalling:
Uninstalling
============
If you decide to uninstall this package, the following steps are recommended for safety:
1. Remove the extension from boot-extensions
.. code-block:: bash
veos#bash rm /mnt/flash/boot-extensions
2. Remove the extension from the extensions folder
.. code-block:: bash
veos#bash rm /mnt/flash/.extensions/salt-eos-latest.swix
3. Remove the boot-up script
.. code-block:: bash
veos(config)#no event-handler boot-up-script
Additional Information
======================
This SWIX extension contains the following RPM packages:
.. code-block:: text
libsodium-1.0.11-1.fc25.i686.rpm
libstdc++-6.2.1-2.fc25.i686.rpm
openpgm-5.2.122-6.fc24.i686.rpm
python-Jinja2-2.8-0.i686.rpm
python-PyYAML-3.12-0.i686.rpm
python-babel-0.9.6-5.fc18.noarch.rpm
python-backports-1.0-3.fc18.i686.rpm
python-backports-ssl_match_hostname-3.4.0.2-1.fc18.noarch.rpm
python-backports_abc-0.5-0.i686.rpm
python-certifi-2016.9.26-0.i686.rpm
python-chardet-2.0.1-5.fc18.noarch.rpm
python-crypto-1.4.1-1.noarch.rpm
python-crypto-2.6.1-1.fc18.i686.rpm
python-futures-3.1.1-1.noarch.rpm
python-jtextfsm-0.3.1-0.noarch.rpm
python-kitchen-1.1.1-2.fc18.noarch.rpm
python-markupsafe-0.18-1.fc18.i686.rpm
python-msgpack-python-0.4.8-0.i686.rpm
python-napalm-base-0.24.3-1.noarch.rpm
python-napalm-eos-0.6.0-1.noarch.rpm
python-netaddr-0.7.18-0.noarch.rpm
python-pyeapi-0.7.0-0.noarch.rpm
python-salt-2017.7.0_1414_g2fb986f-1.noarch.rpm
python-singledispatch-3.4.0.3-0.i686.rpm
python-six-1.10.0-0.i686.rpm
python-tornado-4.4.2-0.i686.rpm
python-urllib3-1.5-7.fc18.noarch.rpm
python2-zmq-15.3.0-2.fc25.i686.rpm
zeromq-4.1.4-5.fc25.i686.rpm

View File

@ -46,6 +46,7 @@ These guides go into detail how to install Salt on a given platform.
arch
debian
eos
fedora
freebsd
gentoo

View File

@ -828,12 +828,14 @@ Returns:
08.03.2017 17:00
.. jinja_ref:: str_to_num
.. jinja_ref:: to_num
``str_to_num``
--------------
``to_num``
----------
.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
Renamed from ``str_to_num`` to ``to_num``.
Converts a string to its numerical value.
@ -841,7 +843,7 @@ Example:
.. code-block:: jinja
{{ '5' | str_to_num }}
{{ '5' | to_num }}
Returns:
@ -917,10 +919,10 @@ Returns:
{'a': 'b'}
.. jinja_ref:: rand_str
.. jinja_ref:: random_hash
``rand_str``
------------
``random_hash``
---------------
.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
@ -937,8 +939,8 @@ Example:
.. code-block:: jinja
{% set num_range = 99999999 %}
{{ num_range | rand_str }}
{{ num_range | rand_str('sha512') }}
{{ num_range | random_hash }}
{{ num_range | random_hash('sha512') }}
Returns:

View File

@ -89,7 +89,7 @@ they are being loaded for the correct proxytype, example below:
Only work on proxy
'''
try:
if salt.utils.is_proxy() and \
if salt.utils.platform.is_proxy() and \
__opts__['proxy']['proxytype'] == 'ssh_sample':
return __virtualname__
except KeyError:
@ -156,20 +156,23 @@ will need to be restarted to pick up any changes. A corresponding utility funct
``saltutil.sync_proxymodules``, has been added to sync these modules to minions.
In addition, a salt.utils helper function called `is_proxy()` was added to make
it easier to tell when the running minion is a proxy minion.
it easier to tell when the running minion is a proxy minion. **NOTE: This
function was renamed to salt.utils.platform.is_proxy() for the Oxygen release**
New in 2015.8
-------------
Starting with the 2015.8 release of Salt, proxy processes are no longer forked off from a controlling minion.
Instead, they have their own script ``salt-proxy`` which takes mostly the same arguments that the
standard Salt minion does with the addition of ``--proxyid``. This is the id that the salt-proxy will
use to identify itself to the master. Proxy configurations are still best kept in Pillar and their format
has not changed.
Starting with the 2015.8 release of Salt, proxy processes are no longer forked
off from a controlling minion. Instead, they have their own script
``salt-proxy`` which takes mostly the same arguments that the standard Salt
minion does with the addition of ``--proxyid``. This is the id that the
salt-proxy will use to identify itself to the master. Proxy configurations are
still best kept in Pillar and their format has not changed.
This change allows for better process control and logging. Proxy processes can now be listed with standard
process management utilities (``ps`` from the command line). Also, a full Salt minion is no longer
required (though it is still strongly recommended) on machines hosting proxies.
This change allows for better process control and logging. Proxy processes can
now be listed with standard process management utilities (``ps`` from the
command line). Also, a full Salt minion is no longer required (though it is
still strongly recommended) on machines hosting proxies.
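For instance, a proxy process for a pillar-defined proxy ID can be started and then inspected with ordinary process tooling; the proxy ID ``p8000`` is a placeholder:
.. code-block:: bash

    salt-proxy --proxyid=p8000 -d   # start the proxy as a daemon
    ps aux | grep salt-proxy        # proxies appear as normal processes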
Getting Started
@ -619,9 +622,10 @@ in the proxymodule itself. This might be useful if a proxymodule author wants t
all the code for the proxy interface in the same place instead of splitting it between
the proxy and grains directories.
This function will only be called automatically if the configuration variable ``proxy_merge_grains_in_module``
is set to True in the proxy configuration file (default ``/etc/salt/proxy``). This
variable defaults to ``True`` in the release code-named *2017.7.0*.
This function will only be called automatically if the configuration variable
``proxy_merge_grains_in_module`` is set to True in the proxy configuration file
(default ``/etc/salt/proxy``). This variable defaults to ``True`` in the
release code-named *2017.7.0*.
.. code: python::
@ -640,7 +644,7 @@ variable defaults to ``True`` in the release code-named *2017.7.0*.
def __virtual__():
try:
if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
return __virtualname__
except KeyError:
pass
@ -708,7 +712,7 @@ Example from ``salt/grains/rest_sample.py``:
def __virtual__():
try:
if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample':
return __virtualname__
except KeyError:
pass

View File

@ -175,6 +175,10 @@ they are being loaded for the correct proxytype, example below:
return False
.. note::
``salt.utils.is_proxy()`` has been renamed to
``salt.utils.platform.is_proxy`` as of the Oxygen release.
The try/except block above exists because grains are processed very early
in the proxy minion startup process, sometimes earlier than the proxy
key in the ``__opts__`` dictionary is populated.

View File

@ -38,6 +38,14 @@ In previous releases the bind credentials would only be used to determine
the LDAP user's existence and group membership. The user's LDAP credentials
were used from then on.
Stormpath External Authentication Removed
-----------------------------------------
Per Stormpath's announcement, their API will be shutting down on 8/17/2017 at
noon PST, so the Stormpath external authentication module has been removed.
https://stormpath.com/oktaplusstormpath
New GitFS Features
------------------
@ -63,10 +71,10 @@ environments (i.e. ``saltenvs``) have been added:
available as saltenvs.
Salt Cloud Features
===================
-------------------
Pre-Flight Commands
-------------------
===================
Support has been added for specifying "preflight commands" to run on a VM before
the deploy script is run. These must be defined as a list in a cloud configuration
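For illustration, one plausible shape of such a profile; the profile name, provider reference, and the ``preflight_cmds`` option name here are assumptions for the sketch rather than values taken from this diff:
.. code-block:: yaml

    my-cloud-profile:
      provider: my-provider-config
      preflight_cmds:
        - whoami
        - echo 'hello world!'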
@ -98,23 +106,514 @@ running on T-Series SPARC hardware. The ``virtual_subtype`` grain is
populated as a list of domain roles.
Beacon configuration changes
----------------------------------------
In order to remain consistent and to align with other Salt components such as states,
support for configuring beacons using dictionary-based configuration has been deprecated
in favor of list-based configuration. All beacons have a validation function which will
check the configuration for the correct format and only load if the validation passes.
- ``avahi_announce`` beacon
Old behavior:
```
beacons:
avahi_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
New behavior:
```
beacons:
avahi_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
- ``bonjour_announce`` beacon
Old behavior:
```
beacons:
bonjour_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
New behavior:
```
beacons:
bonjour_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
- ``btmp`` beacon
Old behavior:
```
beacons:
btmp: {}
```
New behavior:
```
beacons:
btmp: []
```
- ``glxinfo`` beacon
Old behavior:
```
beacons:
glxinfo:
user: frank
screen_event: True
```
New behavior:
```
beacons:
glxinfo:
- user: frank
- screen_event: True
```
- ``haproxy`` beacon
Old behavior:
```
beacons:
haproxy:
- www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
```
New behavior:
```
beacons:
haproxy:
- backends:
www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
```
- ``inotify`` beacon
Old behavior:
```
beacons:
inotify:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
coalesce: True
```
New behavior:
```
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
```
- ``journald`` beacon
Old behavior:
```
beacons:
journald:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
```
New behavior:
```
beacons:
journald:
- services:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
```
- ``load`` beacon
Old behavior:
```
beacons:
load:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
emitatstartup: True
onchangeonly: False
```
New behavior:
```
beacons:
load:
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- emitatstartup: True
- onchangeonly: False
```
- ``log`` beacon
Old behavior:
```
beacons:
log:
file: <path>
<tag>:
regex: <pattern>
```
New behavior:
```
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
```
- ``network_info`` beacon
Old behavior:
```
beacons:
network_info:
- eth0:
type: equal
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
```
New behavior:
```
beacons:
network_info:
- interfaces:
eth0:
type: equal
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
```
- ``network_settings`` beacon
Old behavior:
```
beacons:
network_settings:
eth0:
ipaddr:
promiscuity:
onvalue: 1
eth1:
linkmode:
```
New behavior:
```
beacons:
network_settings:
- interfaces:
- eth0:
ipaddr:
promiscuity:
onvalue: 1
- eth1:
linkmode:
```
- ``proxy_example`` beacon
Old behavior:
```
beacons:
proxy_example:
endpoint: beacon
```
New behavior:
```
beacons:
proxy_example:
- endpoint: beacon
```
- ``ps`` beacon
Old behavior:
```
beacons:
ps:
- salt-master: running
- mysql: stopped
```
New behavior:
```
beacons:
ps:
- processes:
salt-master: running
mysql: stopped
```
- ``salt_proxy`` beacon
Old behavior:
```
beacons:
salt_proxy:
- p8000: {}
- p8001: {}
```
New behavior:
```
beacons:
salt_proxy:
- proxies:
p8000: {}
p8001: {}
```
- ``sensehat`` beacon
Old behavior:
```
beacons:
sensehat:
humidity: 70%
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
```
New behavior:
```
beacons:
sensehat:
- sensors:
humidity: 70%
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
```
- ``service`` beacon
Old behavior:
```
beacons:
service:
salt-master:
mysql:
```
New behavior:
```
beacons:
service:
- services:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
```
- ``sh`` beacon
Old behavior:
```
beacons:
sh: {}
```
New behavior:
```
beacons:
sh: []
```
- ``status`` beacon
Old behavior:
```
beacons:
status: {}
```
New behavior:
```
beacons:
status: []
```
- ``telegram_bot_msg`` beacon
Old behavior:
```
beacons:
telegram_bot_msg:
token: "<bot access token>"
accept_from:
- "<valid username>"
interval: 10
```
New behavior:
```
beacons:
telegram_bot_msg:
- token: "<bot access token>"
- accept_from:
- "<valid username>"
- interval: 10
```
- ``twilio_txt_msg`` beacon
Old behavior:
```
beacons:
twilio_txt_msg:
account_sid: "<account sid>"
auth_token: "<auth token>"
twilio_number: "+15555555555"
interval: 10
```
New behavior:
```
beacons:
twilio_txt_msg:
- account_sid: "<account sid>"
- auth_token: "<auth token>"
- twilio_number: "+15555555555"
- interval: 10
```
- ``wtmp`` beacon
Old behavior:
```
beacons:
wtmp: {}
```
New behavior:
```
beacons:
wtmp: []
```
Deprecations
============
------------
Configuration Option Deprecations
---------------------------------
=================================
- The ``requests_lib`` configuration option has been removed. Please use
``backend`` instead.
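For illustration, a minimal sketch of the replacement option in a master or minion configuration file, assuming the ``requests`` backend is wanted:
.. code-block:: yaml

    # previously: requests_lib: True
    backend: requests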
Profitbricks Cloud Updated Dependency
-------------------------------------
=====================================
The minimum version of the `profitbrick` python package for the `profitbricks`
The minimum version of the ``profitbrick`` python package for the ``profitbricks``
cloud driver has changed from 3.0.0 to 3.1.0.
Module Deprecations
-------------------
===================
The ``blockdev`` execution module has been removed. Its functions were merged
with the ``disk`` module. Please use the ``disk`` execution module instead.
@ -154,7 +653,7 @@ The ``win_service`` module had the following changes:
``service_type`` instead.
Runner Deprecations
-------------------
===================
The ``manage`` runner had the following changes:
@ -162,7 +661,7 @@ The ``manage`` runner had the following changes:
use ``salt-ssh`` roster entries for the host instead.
State Deprecations
------------------
==================
The ``archive`` state had the following changes:
@ -185,7 +684,7 @@ The ``file`` state had the following changes:
- The ``show_diff`` option was removed. Please use ``show_changes`` instead.
Grain Deprecations
------------------
==================
For ``smartos`` some grains have been deprecated. These grains will be removed in Neon.
@ -205,7 +704,7 @@ as well, by using special grains keys ``minion_blackout`` and
``minion_blackout_whitelist``.
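For illustration, a pillar sketch that enables a blackout with a small whitelist; the whitelisted functions are placeholders:
.. code-block:: yaml

    minion_blackout: True

    minion_blackout_whitelist:
      - test.ping
      - pillar.get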
Utils Deprecations
------------------
==================
The ``salt.utils.cloud.py`` file had the following change:
@ -213,7 +712,7 @@ The ``salt.utils.cloud.py`` file had the following change:
optional.
Other Miscellaneous Deprecations
--------------------------------
================================
The ``version.py`` file had the following changes:

View File

@ -166,13 +166,15 @@ Ubuntu 14.04 LTS and Debian Wheezy (7.x) also have a compatible version packaged
# apt-get install python-git
If your master is running an older version (such as Ubuntu 12.04 LTS or Debian
Squeeze), then you will need to install GitPython using either pip_ or
easy_install (it is recommended to use pip). Version 0.3.2.RC1 is now marked as
the stable release in PyPI, so it should be a simple matter of running ``pip
install GitPython`` (or ``easy_install GitPython``) as root.
GitPython_ requires the ``git`` CLI utility to work. If installed from a system
package, then git should already be installed, but if installed via pip_ then
it may still be necessary to install git separately. For MacOS users,
GitPython_ comes bundled in with the Salt installer, but git must still be
installed for it to work properly. Git can be installed in several ways,
including by installing XCode_.
.. _`pip`: http://www.pip-installer.org/
.. _pip: http://www.pip-installer.org/
.. _XCode: https://developer.apple.com/xcode/
.. warning::

View File

@ -110,7 +110,7 @@ To pass through a file that contains jinja + yaml templating (the default):
method='POST',
data_file='/srv/salt/somefile.jinja',
data_render=True,
template_data={'key1': 'value1', 'key2': 'value2'}
template_dict={'key1': 'value1', 'key2': 'value2'}
)
To pass through a file that contains mako templating:
@ -123,7 +123,7 @@ To pass through a file that contains mako templating:
data_file='/srv/salt/somefile.mako',
data_render=True,
data_renderer='mako',
template_data={'key1': 'value1', 'key2': 'value2'}
template_dict={'key1': 'value1', 'key2': 'value2'}
)
Because this function uses Salt's own rendering system, any Salt renderer can
@ -140,7 +140,7 @@ However, this can be changed to ``master`` if necessary.
method='POST',
data_file='/srv/salt/somefile.jinja',
data_render=True,
template_data={'key1': 'value1', 'key2': 'value2'},
template_dict={'key1': 'value1', 'key2': 'value2'},
opts=__opts__
)
@ -149,7 +149,7 @@ However, this can be changed to ``master`` if necessary.
method='POST',
data_file='/srv/salt/somefile.jinja',
data_render=True,
template_data={'key1': 'value1', 'key2': 'value2'},
template_dict={'key1': 'value1', 'key2': 'value2'},
node='master'
)
@ -170,11 +170,11 @@ a Python dict.
header_file='/srv/salt/headers.jinja',
header_render=True,
header_renderer='jinja',
template_data={'key1': 'value1', 'key2': 'value2'}
template_dict={'key1': 'value1', 'key2': 'value2'}
)
Because much of the data that would be templated between headers and data may be
the same, the ``template_data`` is the same for both. Correcting possible
the same, the ``template_dict`` is the same for both. Correcting possible
variable name collisions is up to the user.
Authentication

View File

@ -28,9 +28,8 @@ Tutorials Index
* :ref:`States tutorial, part 3 - Templating, Includes, Extends <tutorial-states-part-3>`
* :ref:`States tutorial, part 4 <tutorial-states-part-4>`
* :ref:`How to Convert Jinja Logic to an Execution Module <tutorial-jinja_to_execution-module>`
* :ref:`Using Salt with Stormpath <tutorial-stormpath>`
* :ref:`Syslog-ng usage <syslog-ng-sate-usage>`
* :ref:`The macOS (Maverick) Developer Step By Step Guide To Salt Installation <tutorial-macos-walk-through>`
* :ref:`SaltStack Walk-through <tutorial-salt-walk-through>`
* :ref:`Writing Salt Tests <tutorial-salt-testing>`
* :ref:`Multi-cloud orchestration with Apache Libcloud <tutorial-libcloud>`
* :ref:`Multi-cloud orchestration with Apache Libcloud <tutorial-libcloud>`

View File

@ -1,198 +0,0 @@
.. _tutorial-stormpath:
=========================
Using Salt with Stormpath
=========================
`Stormpath <https://stormpath.com/>`_ is a user management and authentication
service. This tutorial covers using SaltStack to manage and take advantage of
Stormpath's features.
External Authentication
-----------------------
Stormpath can be used for Salt's external authentication system. In order to do
this, the master should be configured with an ``apiid``, ``apikey``, and the ID
of the ``application`` that is associated with the users to be authenticated:
.. code-block:: yaml
stormpath:
apiid: 367DFSF4FRJ8767FSF4G34FGH
apikey: FEFREF43t3FEFRe/f323fwer4FWF3445gferWRWEer1
application: 786786FREFrefreg435fr1
.. note::
These values can be found in the `Stormpath dashboard
<https://api.stormpath.com/ui2/index.html#/>`_`.
Users that are to be authenticated should be set up under the ``stormpath``
dict under ``external_auth``:
.. code-block:: yaml
external_auth:
stormpath:
larry:
- .*
- '@runner'
- '@wheel'
Keep in mind that while Stormpath defaults the username associated with the
account to the email address, it is better to use a username without an ``@``
sign in it.
Configuring Stormpath Modules
-----------------------------
Stormpath accounts can be managed via either an execution or state module. In
order to use either, a minion must be configured with an API ID and key.
.. code-block:: yaml
stormpath:
apiid: 367DFSF4FRJ8767FSF4G34FGH
apikey: FEFREF43t3FEFRe/f323fwer4FWF3445gferWRWEer1
directory: efreg435fr1786786FREFr
application: 786786FREFrefreg435fr1
Some functions in the ``stormpath`` modules can make use of other options. The
following options are also available.
directory
`````````
The ID of the directory that is to be used with this minion. Many functions
require an ID to be specified to do their work. However, if the ID of a
``directory`` is specified, then Salt can often look up the resource in
question.
application
```````````
The ID of the application that is to be used with this minion. Many functions
require an ID to be specified to do their work. However, if the ID of a
``application`` is specified, then Salt can often look up the resource in
question.
Managing Stormpath Accounts
---------------------------
With the ``stormpath`` configuration in place, Salt can be used to configure
accounts (which may be thought of as users) on the Stormpath service. The
following functions are available.
stormpath.create_account
````````````````````````
Create an account on the Stormpath service. This requires a ``directory_id`` as
the first argument; it will not be retrieved from the minion configuration. An
``email`` address, ``password``, first name (``givenName``) and last name
(``surname``) are also required. For the full list of other parameters that may
be specified, see:
http://docs.stormpath.com/rest/product-guide/#account-resource
When executed with no errors, this function will return the information about
the account, from Stormpath.
.. code-block:: bash
salt myminion stormpath.create_account <directory_id> shemp@example.com letmein Shemp Howard
stormpath.list_accounts
```````````````````````
Show all accounts on the Stormpath service. This will return all accounts,
regardless of directory, application, or group.
.. code-block:: bash
salt myminion stormpath.list_accounts
'''
stormpath.show_account
``````````````````````
Show the details for a specific Stormpath account. An ``account_id`` is normally
required. However, if am ``email`` is provided instead, along with either a
``directory_id``, ``application_id``, or ``group_id``, then Salt will search the
specified resource to try and locate the ``account_id``.
.. code-block:: bash
salt myminion stormpath.show_account <account_id>
salt myminion stormpath.show_account email=<email> directory_id=<directory_id>
stormpath.update_account
````````````````````````
Update one or more items for this account. Specifying an empty value will clear
it for that account. This function may be used in one of two ways. In order to
update only one key/value pair, specify them in order:
.. code-block:: bash
salt myminion stormpath.update_account <account_id> givenName shemp
salt myminion stormpath.update_account <account_id> middleName ''
In order to specify multiple items, they need to be passed in as a dict. From
the command line, it is best to do this as a JSON string:
.. code-block:: bash
salt myminion stormpath.update_account <account_id> items='{"givenName": "Shemp"}
salt myminion stormpath.update_account <account_id> items='{"middlename": ""}
When executed with no errors, this function will return the information about
the account, from Stormpath.
stormpath.delete_account
````````````````````````
Delete an account from Stormpath.
.. code-block:: bash
salt myminion stormpath.delete_account <account_id>
stormpath.list_directories
``````````````````````````
Show all directories associated with this tenant.
.. code-block:: bash
salt myminion stormpath.list_directories
Using Stormpath States
----------------------
Stormpath resources may be managed using the state system. The following states
are available.
stormpath_account.present
`````````````````````````
Ensure that an account exists on the Stormpath service. All options that are
available with the ``stormpath.create_account`` function are available here.
If an account needs to be created, then this function will require the same
fields that ``stormpath.create_account`` requires, including the ``password``.
However, if a password changes for an existing account, it will NOT be updated
by this state.
.. code-block:: yaml
curly@example.com:
stormpath_account.present:
- directory_id: efreg435fr1786786FREFr
- password: badpass
- firstName: Curly
- surname: Howard
- nickname: curly
It is advisable to always set a ``nickname`` that is not also an email address,
so that it can be used by Salt's external authentication module.
stormpath_account.absent
````````````````````````
Ensure that an account does not exist on Stormpath. As with
``stormpath_account.present``, the ``name`` supplied to this state is the
``email`` address associated with this account. Salt will use this, with or
without the ``directory`` ID that is configured for the minion. However, lookups
will be much faster with a directory ID specified.

View File

@ -7,7 +7,7 @@ CherryPy==11.0.0
click==6.7
enum34==1.1.6
gitdb==0.6.4
GitPython==2.1.5
GitPython==2.1.1
idna==2.5
ipaddress==1.0.18
Jinja2==2.9.6

View File

@ -5,3 +5,4 @@ yappi>=0.8.2
--allow-unverified python-neutronclient>2.3.6
python-gnupg
cherrypy>=3.2.2
libnacl

View File

@ -7,6 +7,7 @@ Salt package
from __future__ import absolute_import
import warnings
# future lint: disable=non-unicode-string
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
'once', # Show once
@ -14,18 +15,19 @@ warnings.filterwarnings(
DeprecationWarning, # This filter is for DeprecationWarnings
r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.<whatever>'
)
# future lint: enable=non-unicode-string
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
'ignore',
'With-statements now directly support multiple context managers',
u'ignore',
u'With-statements now directly support multiple context managers',
DeprecationWarning
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
'ignore',
'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
u'ignore',
u'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
UserWarning
)
@ -37,7 +39,7 @@ def __define_global_system_encoding_variable__():
# and reset to None
encoding = None
if not sys.platform.startswith('win') and sys.stdin is not None:
if not sys.platform.startswith(u'win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -63,16 +65,16 @@ def __define_global_system_encoding_variable__():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith('darwin'):
if sys.platform.startswith(u'darwin'):
# Mac OS X uses UTF-8
encoding = 'utf-8'
elif sys.platform.startswith('win'):
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
# Windows uses a configurable encoding; on Windows, Python uses the name “mbcs”
# to refer to whatever the currently configured encoding is.
encoding = 'mbcs'
encoding = u'mbcs'
else:
# On linux default to ascii as a last resort
encoding = 'ascii'
encoding = u'ascii'
# We can't use six.moves.builtins because these builtins get deleted sooner
# than expected. See:
@ -83,7 +85,7 @@ def __define_global_system_encoding_variable__():
import builtins # pylint: disable=import-error
# Define the detected encoding as a built-in variable for ease of use
setattr(builtins, '__salt_system_encoding__', encoding)
setattr(builtins, u'__salt_system_encoding__', encoding)
# This is now garbage collectable
del sys

View File

@ -46,7 +46,7 @@ else:
if HAS_XML:
if not hasattr(ElementTree, 'ParseError'):
if not hasattr(ElementTree, u'ParseError'):
class ParseError(Exception):
'''
older versions of ElementTree do not have ParseError
@ -56,7 +56,7 @@ if HAS_XML:
ElementTree.ParseError = ParseError
def text_(s, encoding='latin-1', errors='strict'):
def text_(s, encoding=u'latin-1', errors=u'strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
@ -66,7 +66,7 @@ def text_(s, encoding='latin-1', errors='strict'):
return s
def bytes_(s, encoding='latin-1', errors='strict'):
def bytes_(s, encoding=u'latin-1', errors=u'strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
@ -79,25 +79,25 @@ def bytes_(s, encoding='latin-1', errors='strict'):
if PY3:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
return str(s, 'ascii', 'strict')
s = s.encode(u'ascii')
return str(s, u'ascii', u'strict')
else:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
s = s.encode(u'ascii')
return str(s)
ascii_native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
``s.encode(u'ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s)``
'''
``s.encode(u'ascii')``, otherwise return ``str(s)``
''' # future lint: disable=non-unicode-string
if PY3:
def native_(s, encoding='latin-1', errors='strict'):
def native_(s, encoding=u'latin-1', errors=u'strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s``, otherwise return ``str(s, encoding, errors)``
@ -106,7 +106,7 @@ if PY3:
return s
return str(s, encoding, errors)
else:
def native_(s, encoding='latin-1', errors='strict'):
def native_(s, encoding=u'latin-1', errors=u'strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
@ -121,7 +121,7 @@ return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
''' # future lint: disable=non-unicode-string
def string_io(data=None): # cStringIO can't handle unicode

View File

@ -10,8 +10,13 @@ found by reading the salt documentation:
# Import python libraries
from __future__ import absolute_import
# Import salt libs
import salt.utils
# Import 3rd-party libs
from salt.ext import six
class PublisherACL(object):
'''
@ -30,7 +35,7 @@ class PublisherACL(object):
def cmd_is_blacklisted(self, cmd):
# If this is a regular command, it is a single function
if isinstance(cmd, str):
if isinstance(cmd, six.string_types):
cmd = [cmd]
for fun in cmd:
if not salt.utils.check_whitelist_blacklist(fun, blacklist=self.blacklist.get('modules', [])):

View File

@ -55,7 +55,7 @@ import sys
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
# pylint: disable=import-error
try:
import django

View File

@ -101,8 +101,8 @@ import logging
import os
# Import salt utils
import salt.utils
import salt.utils.files
import salt.utils.versions
log = logging.getLogger(__name__)
@ -200,7 +200,7 @@ def _htpasswd(username, password, **kwargs):
pwfile = HtpasswdFile(kwargs['filename'])
# passlib below version 1.6 uses 'verify' function instead of 'check_password'
if salt.utils.version_cmp(kwargs['passlib_version'], '1.6') < 0:
if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0:
return pwfile.verify(username, password)
else:
return pwfile.check_password(username, password)
@ -222,7 +222,7 @@ def _htdigest(username, password, **kwargs):
pwfile = HtdigestFile(kwargs['filename'])
# passlib below version 1.6 uses 'verify' function instead of 'check_password'
if salt.utils.version_cmp(kwargs['passlib_version'], '1.6') < 0:
if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0:
return pwfile.verify(username, realm, password)
else:
return pwfile.check_password(username, realm, password)

View File

@ -8,7 +8,7 @@ Provide authentication using simple LDAP binds
# Import python libs
from __future__ import absolute_import
import logging
import salt.ext.six as six
from salt.ext import six
# Import salt libs
from salt.exceptions import CommandExecutionError, SaltInvocationError

View File

@ -42,11 +42,11 @@ from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library
# Import Salt libs
from salt.utils import get_group_list
import salt.utils # Can be removed once get_group_list is moved
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
LIBPAM = CDLL(find_library('pam'))
LIBC = CDLL(find_library('c'))
@ -214,4 +214,4 @@ def groups(username, *args, **kwargs):
Uses system groups
'''
return get_group_list(username)
return salt.utils.get_group_list(username)

View File

@ -1,71 +0,0 @@
# -*- coding: utf-8 -*-
'''
Provide authentication using Stormpath.
This driver requires some extra configuration beyond that which Stormpath
normally requires.
.. code-block:: yaml
stormpath:
apiid: 1234567890
apikey: 1234567890/ABCDEF
# Can use an application ID
application: 6789012345
# Or can use a directory ID
directory: 3456789012
# But not both
.. versionadded:: 2015.8.0
'''
from __future__ import absolute_import
import json
import base64
import urllib
import salt.utils.http
import logging
log = logging.getLogger(__name__)
def auth(username, password):
'''
Authenticate using a Stormpath directory or application
'''
apiid = __opts__.get('stormpath', {}).get('apiid', None)
apikey = __opts__.get('stormpath', {}).get('apikey', None)
application = __opts__.get('stormpath', {}).get('application', None)
path = 'https://api.stormpath.com/v1'
if application is not None:
path = '{0}/applications/{1}/loginAttempts'.format(path, application)
else:
return False
username = urllib.quote(username)
data = {
'type': 'basic',
'value': base64.b64encode('{0}:{1}'.format(username, password))
}
log.debug('{0}:{1}'.format(username, password))
log.debug(path)
log.debug(data)
log.debug(json.dumps(data))
result = salt.utils.http.query(
path,
method='POST',
username=apiid,
password=apikey,
data=json.dumps(data),
header_dict={'Content-type': 'application/json;charset=UTF-8'},
decode=False,
status=True,
opts=__opts__,
)
log.debug(result)
if result.get('status', 403) == 200:
return True
return False

View File

@ -37,8 +37,9 @@ class Beacon(object):
.. code_block:: yaml
beacons:
inotify:
- /etc/fstab: {}
- /var/cache/foo: {}
- files:
- /etc/fstab: {}
- /var/cache/foo: {}
'''
ret = []
b_config = copy.deepcopy(config)
@ -69,6 +70,7 @@ class Beacon(object):
log.trace('Beacon processing: {0}'.format(mod))
fun_str = '{0}.beacon'.format(mod)
validate_str = '{0}.validate'.format(mod)
if fun_str in self.beacons:
runonce = self._determine_beacon_config(current_beacon_config, 'run_once')
interval = self._determine_beacon_config(current_beacon_config, 'interval')
@ -95,6 +97,17 @@ class Beacon(object):
continue
# Update __grains__ on the beacon
self.beacons[fun_str].__globals__['__grains__'] = grains
# Run the validate function if it's available,
# otherwise there is a warning about it being missing
if validate_str in self.beacons:
valid, vcomment = self.beacons[validate_str](b_config[mod])
if not valid:
log.info('Beacon %s configuration invalid, '
'not running.\n%s', mod, vcomment)
continue
# Fire the beacon!
raw = self.beacons[fun_str](b_config[mod])
for data in raw:
@ -193,6 +206,8 @@ class Beacon(object):
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
b_conf = self.functions['config.merge']('beacons')
if not isinstance(self.opts['beacons'], dict):
self.opts['beacons'] = {}
self.opts['beacons'].update(b_conf)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_list_complete')

View File

@ -10,7 +10,8 @@ from __future__ import absolute_import
import logging
# Salt libs
import salt.utils
import salt.utils.path
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
@ -21,32 +22,41 @@ last_state_extra = {'value': False, 'no_devices': False}
def __virtual__():
which_result = salt.utils.which('adb')
which_result = salt.utils.path.which('adb')
if which_result is None:
return False
else:
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for adb beacon should be a dictionary with states array
if not isinstance(config, dict):
log.info('Configuration for adb beacon must be a dict.')
return False, ('Configuration for adb beacon must be a dict.')
elif 'states' not in config.keys():
if not isinstance(config, list):
log.info('Configuration for adb beacon must be a list.')
return False, ('Configuration for adb beacon must be a list.')
_config = {}
list(map(_config.update, config))
if 'states' not in _config:
log.info('Configuration for adb beacon must include a states array.')
return False, ('Configuration for adb beacon must include a states array.')
else:
states = ['offline', 'bootloader', 'device', 'host', 'recovery', 'no permissions',
'sideload', 'unauthorized', 'unknown', 'missing']
if any(s not in states for s in config['states']):
log.info('Need a one of the following adb '
'states: {0}'.format(', '.join(states)))
return False, ('Need a one of the following adb '
'states: {0}'.format(', '.join(states)))
if not isinstance(_config['states'], list):
log.info('Configuration for adb beacon must include a states array.')
return False, ('Configuration for adb beacon must include a states array.')
else:
states = ['offline', 'bootloader', 'device', 'host',
'recovery', 'no permissions',
'sideload', 'unauthorized', 'unknown', 'missing']
if any(s not in states for s in _config['states']):
log.info('Need a one of the following adb '
'states: {0}'.format(', '.join(states)))
return False, ('Need a one of the following adb '
'states: {0}'.format(', '.join(states)))
return True, 'Valid beacon configuration'
@ -74,11 +84,10 @@ def beacon(config):
log.trace('adb beacon starting')
ret = []
_validate = __validate__(config)
if not _validate[0]:
return ret
_config = {}
list(map(_config.update, config))
out = __salt__['cmd.run']('adb devices', runas=config.get('user', None))
out = __salt__['cmd.run']('adb devices', runas=_config.get('user', None))
lines = out.split('\n')[1:]
last_state_devices = list(last_state.keys())
@ -90,21 +99,21 @@ def beacon(config):
found_devices.append(device)
if device not in last_state_devices or \
('state' in last_state[device] and last_state[device]['state'] != state):
if state in config['states']:
if state in _config['states']:
ret.append({'device': device, 'state': state, 'tag': state})
last_state[device] = {'state': state}
if 'battery_low' in config:
if 'battery_low' in _config:
val = last_state.get(device, {})
cmd = 'adb -s {0} shell cat /sys/class/power_supply/*/capacity'.format(device)
battery_levels = __salt__['cmd.run'](cmd, runas=config.get('user', None)).split('\n')
battery_levels = __salt__['cmd.run'](cmd, runas=_config.get('user', None)).split('\n')
for l in battery_levels:
battery_level = int(l)
if 0 < battery_level < 100:
if 'battery' not in val or battery_level != val['battery']:
if ('battery' not in val or val['battery'] > config['battery_low']) and \
battery_level <= config['battery_low']:
if ('battery' not in val or val['battery'] > _config['battery_low']) and \
battery_level <= _config['battery_low']:
ret.append({'device': device, 'battery_level': battery_level, 'tag': 'battery_low'})
if device not in last_state:
@ -118,13 +127,13 @@ def beacon(config):
# Find missing devices and remove them / send an event
for device in last_state_devices:
if device not in found_devices:
if 'missing' in config['states']:
if 'missing' in _config['states']:
ret.append({'device': device, 'state': 'missing', 'tag': 'missing'})
del last_state[device]
# Maybe send an event if we don't have any devices
if 'no_devices_event' in config and config['no_devices_event'] is True:
if 'no_devices_event' in _config and _config['no_devices_event'] is True:
if len(found_devices) == 0 and not last_state_extra['no_devices']:
ret.append({'tag': 'no_devices'})

View File

@ -15,6 +15,7 @@ Dependencies
from __future__ import absolute_import
import logging
import time
from salt.ext.six.moves import map
# Import 3rd Party libs
try:
@ -54,17 +55,23 @@ def __virtual__():
'\'python-avahi\' dependency is missing.'.format(__virtualname__)
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
return False, ('Configuration for avahi_announcement '
'beacon must be a dictionary')
elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')):
_config = {}
list(map(_config.update, config))
if not isinstance(config, list):
return False, ('Configuration for avahi_announce '
'beacon must be a list.')
elif not all(x in _config for x in ('servicetype',
'port',
'txt')):
return False, ('Configuration for avahi_announce beacon '
'must contain servicetype, port and txt items')
return True, 'Valid beacon configuration'
'must contain servicetype, port and txt items.')
return True, 'Valid beacon configuration.'
def _enforce_txt_record_maxlen(key, value):
@ -138,13 +145,13 @@ def beacon(config):
beacons:
avahi_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
'''
ret = []
changes = {}
@ -152,30 +159,27 @@ def beacon(config):
global LAST_GRAINS
_validate = __validate__(config)
if not _validate[0]:
log.warning('Beacon {0} configuration invalid, '
'not adding. {1}'.format(__virtualname__, _validate[1]))
return ret
_config = {}
list(map(_config.update, config))
if 'servicename' in config:
servicename = config['servicename']
if 'servicename' in _config:
servicename = _config['servicename']
else:
servicename = __grains__['host']
# Check for hostname change
if LAST_GRAINS and LAST_GRAINS['host'] != servicename:
changes['servicename'] = servicename
if LAST_GRAINS and config.get('reset_on_change', False):
if LAST_GRAINS and _config.get('reset_on_change', False):
# Check for IP address change in the case when we reset on change
if LAST_GRAINS.get('ipv4', []) != __grains__.get('ipv4', []):
changes['ipv4'] = __grains__.get('ipv4', [])
if LAST_GRAINS.get('ipv6', []) != __grains__.get('ipv6', []):
changes['ipv6'] = __grains__.get('ipv6', [])
for item in config['txt']:
if config['txt'][item].startswith('grains.'):
grain = config['txt'][item][7:]
for item in _config['txt']:
if _config['txt'][item].startswith('grains.'):
grain = _config['txt'][item][7:]
grain_index = None
square_bracket = grain.find('[')
if square_bracket != -1 and grain[-1] == ']':
@ -192,7 +196,7 @@ def beacon(config):
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
changes[str('txt.' + item)] = txt[item]
else:
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
txt[item] = _enforce_txt_record_maxlen(item, _config['txt'][item])
if not LAST_GRAINS:
changes[str('txt.' + item)] = txt[item]
@ -200,33 +204,33 @@ def beacon(config):
if changes:
if not LAST_GRAINS:
changes['servicename'] = servicename
changes['servicetype'] = config['servicetype']
changes['port'] = config['port']
changes['servicetype'] = _config['servicetype']
changes['port'] = _config['port']
changes['ipv4'] = __grains__.get('ipv4', [])
changes['ipv6'] = __grains__.get('ipv6', [])
GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
servicename, config['servicetype'], '', '',
dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
servicename, _config['servicetype'], '', '',
dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt))
GROUP.Commit()
elif config.get('reset_on_change', False) or 'servicename' in changes:
elif _config.get('reset_on_change', False) or 'servicename' in changes:
# A change in 'servicename' requires a reset because we can only
# directly update TXT records
GROUP.Reset()
reset_wait = config.get('reset_wait', 0)
reset_wait = _config.get('reset_wait', 0)
if reset_wait > 0:
time.sleep(reset_wait)
GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
servicename, config['servicetype'], '', '',
dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
servicename, _config['servicetype'], '', '',
dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt))
GROUP.Commit()
else:
GROUP.UpdateServiceTxt(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
servicename, config['servicetype'], '',
servicename, _config['servicetype'], '',
avahi.dict_to_txt_array(txt))
ret.append({'tag': 'result', 'changes': changes})
if config.get('copy_grains', False):
if _config.get('copy_grains', False):
LAST_GRAINS = __grains__.copy()
else:
LAST_GRAINS = __grains__
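
The ``_config = {}`` / ``list(map(_config.update, config))`` idiom that now appears throughout these beacons collapses the new list-of-dicts configuration layout into a single dict. A minimal standalone sketch of the same transformation, using the built-in ``map`` (the diff imports ``salt.ext.six.moves.map`` for identical Python 2/3 behaviour); the option values are illustrative only:

.. code-block:: python

    # Beacon options now arrive as a list of single-key dicts (new YAML layout).
    config = [
        {'run_once': True},
        {'servicetype': '_demo._tcp'},
        {'port': 1234},
        {'txt': {'ProdName': 'grains.productname'}},
    ]

    # Merge every item into one mapping so the beacon keeps dict-style lookups.
    _config = {}
    list(map(_config.update, config))

    assert _config['port'] == 1234
    assert _config['txt']['ProdName'] == 'grains.productname'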

View File

@ -9,6 +9,7 @@ import atexit
import logging
import select
import time
from salt.ext.six.moves import map
# Import 3rd Party libs
try:
@ -47,17 +48,23 @@ def _register_callback(sdRef, flags, errorCode, name, regtype, domain): # pylin
log.error('Bonjour registration failed with error code {0}'.format(errorCode))
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
return False, ('Configuration for bonjour_announcement '
'beacon must be a dictionary')
elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')):
_config = {}
list(map(_config.update, config))
if not isinstance(config, list):
return False, ('Configuration for bonjour_announce '
'beacon must be a list.')
elif not all(x in _config for x in ('servicetype',
'port',
'txt')):
return False, ('Configuration for bonjour_announce beacon '
'must contain servicetype, port and txt items')
return True, 'Valid beacon configuration'
'must contain servicetype, port and txt items.')
return True, 'Valid beacon configuration.'
def _enforce_txt_record_maxlen(key, value):
@ -131,13 +138,13 @@ def beacon(config):
beacons:
bonjour_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
'''
ret = []
changes = {}
@ -146,30 +153,27 @@ def beacon(config):
global LAST_GRAINS
global SD_REF
_validate = __validate__(config)
if not _validate[0]:
log.warning('Beacon {0} configuration invalid, '
'not adding. {1}'.format(__virtualname__, _validate[1]))
return ret
_config = {}
list(map(_config.update, config))
if 'servicename' in config:
servicename = config['servicename']
if 'servicename' in _config:
servicename = _config['servicename']
else:
servicename = __grains__['host']
# Check for hostname change
if LAST_GRAINS and LAST_GRAINS['host'] != servicename:
changes['servicename'] = servicename
if LAST_GRAINS and config.get('reset_on_change', False):
if LAST_GRAINS and _config.get('reset_on_change', False):
# Check for IP address change in the case when we reset on change
if LAST_GRAINS.get('ipv4', []) != __grains__.get('ipv4', []):
changes['ipv4'] = __grains__.get('ipv4', [])
if LAST_GRAINS.get('ipv6', []) != __grains__.get('ipv6', []):
changes['ipv6'] = __grains__.get('ipv6', [])
for item in config['txt']:
if config['txt'][item].startswith('grains.'):
grain = config['txt'][item][7:]
for item in _config['txt']:
if _config['txt'][item].startswith('grains.'):
grain = _config['txt'][item][7:]
grain_index = None
square_bracket = grain.find('[')
if square_bracket != -1 and grain[-1] == ']':
@ -186,7 +190,7 @@ def beacon(config):
if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
changes[str('txt.' + item)] = txt[item]
else:
txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item])
txt[item] = _enforce_txt_record_maxlen(item, _config['txt'][item])
if not LAST_GRAINS:
changes[str('txt.' + item)] = txt[item]
@ -195,32 +199,32 @@ def beacon(config):
txt_record = pybonjour.TXTRecord(items=txt)
if not LAST_GRAINS:
changes['servicename'] = servicename
changes['servicetype'] = config['servicetype']
changes['port'] = config['port']
changes['servicetype'] = _config['servicetype']
changes['port'] = _config['port']
changes['ipv4'] = __grains__.get('ipv4', [])
changes['ipv6'] = __grains__.get('ipv6', [])
SD_REF = pybonjour.DNSServiceRegister(
name=servicename,
regtype=config['servicetype'],
port=config['port'],
regtype=_config['servicetype'],
port=_config['port'],
txtRecord=txt_record,
callBack=_register_callback)
atexit.register(_close_sd_ref)
ready = select.select([SD_REF], [], [])
if SD_REF in ready[0]:
pybonjour.DNSServiceProcessResult(SD_REF)
elif config.get('reset_on_change', False) or 'servicename' in changes:
elif _config.get('reset_on_change', False) or 'servicename' in changes:
# A change in 'servicename' requires a reset because we can only
# directly update TXT records
SD_REF.close()
SD_REF = None
reset_wait = config.get('reset_wait', 0)
reset_wait = _config.get('reset_wait', 0)
if reset_wait > 0:
time.sleep(reset_wait)
SD_REF = pybonjour.DNSServiceRegister(
name=servicename,
regtype=config['servicetype'],
port=config['port'],
regtype=_config['servicetype'],
port=_config['port'],
txtRecord=txt_record,
callBack=_register_callback)
ready = select.select([SD_REF], [], [])
@ -236,7 +240,7 @@ def beacon(config):
ret.append({'tag': 'result', 'changes': changes})
if config.get('copy_grains', False):
if _config.get('copy_grains', False):
LAST_GRAINS = __grains__.copy()
else:
LAST_GRAINS = __grains__

View File

@ -5,7 +5,7 @@ Beacon to fire events at failed login of users
.. code-block:: yaml
beacons:
btmp: {}
btmp: []
'''
# Import python libs
@ -16,6 +16,9 @@ import struct
# Import Salt Libs
import salt.utils.files
# Import 3rd-party libs
from salt.ext import six
__virtualname__ = 'btmp'
BTMP = '/var/log/btmp'
FMT = 'hi32s4s32s256shhiii4i20x'
@ -49,14 +52,14 @@ def _get_loc():
return __context__[LOC_KEY]
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for btmp beacon should be a list of dicts
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for btmp beacon must '
'be a list of dictionaries.')
'be a list.')
return True, 'Valid beacon configuration'
@ -68,7 +71,7 @@ def beacon(config):
.. code-block:: yaml
beacons:
btmp: {}
btmp: []
'''
ret = []
with salt.utils.files.fopen(BTMP, 'rb') as fp_:
@ -88,7 +91,7 @@ def beacon(config):
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], str):
if isinstance(event[field], six.string_types):
event[field] = event[field].strip('\x00')
ret.append(event)
return ret
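
The btmp beacon reads fixed-size utmp records with ``struct``. A small sketch of that decode step, assuming the same ``FMT`` layout and a representative field list (reading ``/var/log/btmp`` normally requires root):

.. code-block:: python

    import struct

    FMT = 'hi32s4s32s256shhiii4i20x'          # same record layout as the beacon
    SIZE = struct.calcsize(FMT)
    FIELDS = ['type', 'PID', 'line', 'inittab', 'user',
              'hostname', 'exit_status', 'session', 'time', 'addr']


    def read_records(path='/var/log/btmp'):
        '''Yield one event dict per fixed-size record in the file.'''
        with open(path, 'rb') as fp_:
            while True:
                raw = fp_.read(SIZE)
                if len(raw) < SIZE:
                    return
                pack = struct.unpack(FMT, raw)
                event = {}
                for ind, field in enumerate(FIELDS):
                    value = pack[ind]
                    if isinstance(value, bytes):
                        # byte fields are NUL padded, mirroring the strip above
                        value = value.rstrip(b'\x00')
                    event[field] = value
                yield event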

View File

@ -31,14 +31,14 @@ def __virtual__():
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for diskusage beacon should be a list of dicts
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for diskusage beacon '
'must be a dictionary.')
'must be a list.')
return True, 'Valid beacon configuration'
@ -86,25 +86,24 @@ def beacon(config):
parts = psutil.disk_partitions(all=False)
ret = []
for mounts in config:
mount = mounts.keys()[0]
mount = next(iter(mounts))
try:
_current_usage = psutil.disk_usage(mount)
except OSError:
# Ensure a valid mount point
log.warning('{0} is not a valid mount point, try regex.'.format(mount))
for part in parts:
if re.match(mount, part.mountpoint):
row = {}
row[part.mountpoint] = mounts[mount]
config.append(row)
continue
for part in parts:
if re.match(mount, part.mountpoint):
_mount = part.mountpoint
current_usage = _current_usage.percent
monitor_usage = mounts[mount]
if '%' in monitor_usage:
monitor_usage = re.sub('%', '', monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({'diskusage': current_usage, 'mount': mount})
try:
_current_usage = psutil.disk_usage(mount)
except OSError:
log.warning('{0} is not a valid mount point.'.format(mount))
continue
current_usage = _current_usage.percent
monitor_usage = mounts[mount]
log.info('current_usage {}'.format(current_usage))
if '%' in monitor_usage:
monitor_usage = re.sub('%', '', monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({'diskusage': current_usage, 'mount': _mount})
return ret
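
A condensed sketch of the threshold check performed by the rewritten diskusage loop: the configured mount entry is treated as a regex against the mounted filesystems, and a percentage string such as ``'63%'`` is normalised before comparison (``psutil`` assumed installed, as the beacon already requires):

.. code-block:: python

    import re
    import psutil


    def over_threshold(mount_pattern, threshold):
        '''Return (mountpoint, percent) pairs whose usage exceeds the threshold.'''
        if isinstance(threshold, str):
            threshold = threshold.replace('%', '')
        threshold = float(threshold)
        hits = []
        for part in psutil.disk_partitions(all=False):
            if re.match(mount_pattern, part.mountpoint):
                usage = psutil.disk_usage(part.mountpoint).percent
                if usage >= threshold:
                    hits.append((part.mountpoint, usage))
        return hits


    # e.g. over_threshold('/dev/sd.*|/', '63%')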

View File

@ -10,7 +10,8 @@ from __future__ import absolute_import
import logging
# Salt libs
import salt.utils
import salt.utils.path
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
@ -21,21 +22,25 @@ last_state = {}
def __virtual__():
which_result = salt.utils.which('glxinfo')
which_result = salt.utils.path.which('glxinfo')
if which_result is None:
return False
else:
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for glxinfo beacon should be a dictionary
if not isinstance(config, dict):
return False, ('Configuration for glxinfo beacon must be a dict.')
if 'user' not in config:
if not isinstance(config, list):
return False, ('Configuration for glxinfo beacon must be a list.')
_config = {}
list(map(_config.update, config))
if 'user' not in _config:
return False, ('Configuration for glxinfo beacon must '
'include a user as glxinfo is not available to root.')
return True, 'Valid beacon configuration'
@ -45,27 +50,28 @@ def beacon(config):
'''
Emit the status of a connected display to the minion
Mainly this is used to detect when the display fails to connect for whatever reason.
Mainly this is used to detect when the display fails to connect
for whatever reason.
.. code-block:: yaml
beacons:
glxinfo:
user: frank
screen_event: True
- user: frank
- screen_event: True
'''
log.trace('glxinfo beacon starting')
ret = []
_validate = __validate__(config)
if not _validate[0]:
return ret
_config = {}
list(map(_config.update, config))
retcode = __salt__['cmd.retcode']('DISPLAY=:0 glxinfo', runas=config['user'], python_shell=True)
retcode = __salt__['cmd.retcode']('DISPLAY=:0 glxinfo',
runas=_config['user'], python_shell=True)
if 'screen_event' in config and config['screen_event']:
if 'screen_event' in _config and _config['screen_event']:
last_value = last_state.get('screen_available', False)
screen_available = retcode == 0
if last_value != screen_available or 'screen_available' not in last_state:

View File

@ -9,7 +9,7 @@ Fire an event when over a specified threshold.
# Import Python libs
from __future__ import absolute_import
import logging
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
@ -23,17 +23,39 @@ def __virtual__():
if 'haproxy.get_sessions' in __salt__:
return __virtualname__
else:
log.debug('Not loading haproxy beacon')
return False
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
return False, ('Configuration for haproxy beacon must be a dictionary.')
if 'haproxy' not in config:
return False, ('Configuration for haproxy beacon requires a list of backends and servers')
if not isinstance(config, list):
return False, ('Configuration for haproxy beacon must '
'be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'backends' not in _config:
return False, ('Configuration for haproxy beacon '
'requires backends.')
else:
if not isinstance(_config['backends'], dict):
return False, ('Backends for haproxy beacon '
'must be a dictionary.')
else:
for backend in _config['backends']:
log.debug('_config {}'.format(_config['backends'][backend]))
if 'servers' not in _config['backends'][backend]:
return False, ('Backends for haproxy beacon '
'require servers.')
else:
_servers = _config['backends'][backend]['servers']
if not isinstance(_servers, list):
return False, ('Servers for haproxy beacon '
'must be a list.')
return True, 'Valid beacon configuration'
@ -46,22 +68,23 @@ def beacon(config):
beacons:
haproxy:
- www-backend:
threshold: 45
servers:
- web1
- web2
- backends:
www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
'''
log.debug('haproxy beacon starting')
ret = []
_validate = __validate__(config)
if not _validate:
log.debug('haproxy beacon unable to validate')
return ret
for backend in config:
threshold = config[backend]['threshold']
for server in config[backend]['servers']:
_config = {}
list(map(_config.update, config))
for backend in _config.get('backends', ()):
backend_config = _config['backends'][backend]
threshold = backend_config['threshold']
for server in backend_config['servers']:
scur = __salt__['haproxy.get_sessions'](server, backend)
if scur:
if int(scur) > int(threshold):
@ -69,6 +92,10 @@ def beacon(config):
'scur': scur,
'threshold': threshold,
}
log.debug('Emit because {0} > {1} for {2} in {3}'.format(scur, threshold, server, backend))
log.debug('Emit because {0} > {1}'
' for {2} in {3}'.format(scur,
threshold,
server,
backend))
ret.append(_server)
return ret
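
A sketch of how the flattened ``backends`` structure is walked once validation passes; ``get_sessions`` below is only a stand-in for ``__salt__['haproxy.get_sessions']``, and the session counts are invented for illustration:

.. code-block:: python

    def get_sessions(server, backend):
        # hypothetical stub for __salt__['haproxy.get_sessions']
        return {'web1': 50, 'web2': 10}.get(server, 0)


    _config = {
        'backends': {
            'www-backend': {
                'threshold': 45,
                'servers': ['web1', 'web2'],
            },
        },
    }

    events = []
    for backend, backend_config in _config['backends'].items():
        threshold = backend_config['threshold']
        for server in backend_config['servers']:
            scur = get_sessions(server, backend)
            if scur and int(scur) > int(threshold):
                events.append({'server': server,
                               'scur': scur,
                               'threshold': threshold})

    print(events)   # [{'server': 'web1', 'scur': 50, 'threshold': 45}]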

View File

@ -23,6 +23,7 @@ import re
# Import salt libs
import salt.ext.six
from salt.ext.six.moves import map
# Import third party libs
try:
@ -79,7 +80,7 @@ def _get_notifier(config):
return __context__['inotify.notifier']
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
@ -105,37 +106,45 @@ def __validate__(config):
]
# Configuration for inotify beacon should be a dict of dicts
log.debug('config {0}'.format(config))
if not isinstance(config, dict):
return False, 'Configuration for inotify beacon must be a dictionary.'
if not isinstance(config, list):
return False, 'Configuration for inotify beacon must be a list.'
else:
for config_item in config:
if not isinstance(config[config_item], dict):
return False, ('Configuration for inotify beacon must '
'be a dictionary of dictionaries.')
else:
if not any(j in ['mask', 'recurse', 'auto_add'] for j in config[config_item]):
_config = {}
list(map(_config.update, config))
if 'files' not in _config:
return False, 'Configuration for inotify beacon must include files.'
else:
for path in _config.get('files'):
if not isinstance(_config['files'][path], dict):
return False, ('Configuration for inotify beacon must '
'contain mask, recurse or auto_add items.')
'be a list of dictionaries.')
else:
if not any(j in ['mask',
'recurse',
'auto_add'] for j in _config['files'][path]):
return False, ('Configuration for inotify beacon must '
'contain mask, recurse or auto_add items.')
if 'auto_add' in config[config_item]:
if not isinstance(config[config_item]['auto_add'], bool):
return False, ('Configuration for inotify beacon '
'auto_add must be boolean.')
if 'auto_add' in _config['files'][path]:
if not isinstance(_config['files'][path]['auto_add'], bool):
return False, ('Configuration for inotify beacon '
'auto_add must be boolean.')
if 'recurse' in config[config_item]:
if not isinstance(config[config_item]['recurse'], bool):
return False, ('Configuration for inotify beacon '
'recurse must be boolean.')
if 'recurse' in _config['files'][path]:
if not isinstance(_config['files'][path]['recurse'], bool):
return False, ('Configuration for inotify beacon '
'recurse must be boolean.')
if 'mask' in config[config_item]:
if not isinstance(config[config_item]['mask'], list):
return False, ('Configuration for inotify beacon '
'mask must be list.')
for mask in config[config_item]['mask']:
if mask not in VALID_MASK:
return False, ('Configuration for inotify beacon '
'invalid mask option {0}.'.format(mask))
if 'mask' in _config['files'][path]:
if not isinstance(_config['files'][path]['mask'], list):
return False, ('Configuration for inotify beacon '
'mask must be list.')
for mask in _config['files'][path]['mask']:
if mask not in VALID_MASK:
return False, ('Configuration for inotify beacon '
'invalid mask option {0}.'.format(mask))
return True, 'Valid beacon configuration'
@ -149,19 +158,20 @@ def beacon(config):
beacons:
inotify:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
coalesce: True
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
The mask list can contain the following events (the default mask is create,
delete, and modify):
@ -203,8 +213,11 @@ def beacon(config):
affects all paths that are being watched. This is due to this option
being at the Notifier level in pyinotify.
'''
_config = {}
list(map(_config.update, config))
ret = []
notifier = _get_notifier(config)
notifier = _get_notifier(_config)
wm = notifier._watch_manager
# Read in existing events
@ -219,11 +232,13 @@ def beacon(config):
# Find the matching path in config
path = event.path
while path != '/':
if path in config:
if path in _config.get('files', {}):
break
path = os.path.dirname(path)
excludes = config[path].get('exclude', '')
for path in _config.get('files', {}):
excludes = _config['files'][path].get('exclude', '')
if excludes and isinstance(excludes, list):
for exclude in excludes:
if isinstance(exclude, dict):
@ -257,9 +272,10 @@ def beacon(config):
# Update existing watches and add new ones
# TODO: make the config handle more options
for path in config:
if isinstance(config[path], dict):
mask = config[path].get('mask', DEFAULT_MASK)
for path in _config.get('files', ()):
if isinstance(_config['files'][path], dict):
mask = _config['files'][path].get('mask', DEFAULT_MASK)
if isinstance(mask, list):
r_mask = 0
for sub in mask:
@ -269,8 +285,8 @@ def beacon(config):
else:
r_mask = mask
mask = r_mask
rec = config[path].get('recurse', False)
auto_add = config[path].get('auto_add', False)
rec = _config['files'][path].get('recurse', False)
auto_add = _config['files'][path].get('auto_add', False)
else:
mask = DEFAULT_MASK
rec = False
@ -287,7 +303,7 @@ def beacon(config):
if update:
wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
elif os.path.exists(path):
excludes = config[path].get('exclude', '')
excludes = _config['files'][path].get('exclude', '')
excl = None
if isinstance(excludes, list):
excl = []

View File

@ -10,6 +10,7 @@ from __future__ import absolute_import
import salt.utils
import salt.utils.locales
import salt.ext.six
from salt.ext.six.moves import map
# Import third party libs
try:
@ -43,18 +44,21 @@ def _get_journal():
return __context__['systemd.journald']
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for journald beacon should be a list of dicts
if not isinstance(config, dict):
return False
if not isinstance(config, list):
return (False, 'Configuration for journald beacon must be a list.')
else:
for item in config:
if not isinstance(config[item], dict):
return False, ('Configuration for journald beacon must '
'be a dictionary of dictionaries.')
_config = {}
list(map(_config.update, config))
for name in _config.get('services', {}):
if not isinstance(_config['services'][name], dict):
return False, ('Services configuration for journald beacon '
'must be a list of dictionaries.')
return True, 'Valid beacon configuration'
@ -69,25 +73,31 @@ def beacon(config):
beacons:
journald:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
- services:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
'''
ret = []
journal = _get_journal()
_config = {}
list(map(_config.update, config))
while True:
cur = journal.get_next()
if not cur:
break
for name in config:
for name in _config.get('services', {}):
n_flag = 0
for key in config[name]:
for key in _config['services'][name]:
if isinstance(key, salt.ext.six.string_types):
key = salt.utils.locales.sdecode(key)
if key in cur:
if config[name][key] == cur[key]:
if _config['services'][name][key] == cur[key]:
n_flag += 1
if n_flag == len(config[name]):
if n_flag == len(_config['services'][name]):
# Match!
sub = salt.utils.simple_types_filter(cur)
sub.update({'tag': name})

View File

@ -9,7 +9,8 @@ import logging
import os
# Import Salt libs
import salt.utils
import salt.utils.platform
from salt.ext.six.moves import map
# Import Py3 compat
from salt.ext.six.moves import zip
@ -22,13 +23,13 @@ LAST_STATUS = {}
def __virtual__():
if salt.utils.is_windows():
if salt.utils.platform.is_windows():
return False
else:
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
@ -37,25 +38,26 @@ def __validate__(config):
if not isinstance(config, list):
return False, ('Configuration for load beacon must be a list.')
else:
for config_item in config:
if not isinstance(config_item, dict):
return False, ('Configuration for load beacon must '
'be a list of dictionaries.')
else:
if not all(j in ['1m', '5m', '15m'] for j in config_item.keys()):
return False, ('Configuration for load beacon must '
'contain 1m, 5m or 15m items.')
_config = {}
list(map(_config.update, config))
if 'averages' not in _config:
return False, ('Averages configuration is required'
' for load beacon.')
else:
if not any(j in ['1m', '5m', '15m'] for j
in _config.get('averages', {})):
return False, ('Averages configuration for load beacon '
'must contain 1m, 5m or 15m items.')
for item in ['1m', '5m', '15m']:
if item not in config_item:
continue
if not isinstance(config_item[item], list):
return False, ('Configuration for load beacon: '
if not isinstance(_config['averages'][item], list):
return False, ('Averages configuration for load beacon: '
'1m, 5m and 15m items must be '
'a list of two items.')
else:
if len(config_item[item]) != 2:
if len(_config['averages'][item]) != 2:
return False, ('Configuration for load beacon: '
'1m, 5m and 15m items must be '
'a list of two items.')
@ -82,33 +84,37 @@ def beacon(config):
beacons:
load:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
emitatstartup: True
onchangeonly: False
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- emitatstartup: True
- onchangeonly: False
'''
log.trace('load beacon starting')
_config = {}
list(map(_config.update, config))
# Default config if not present
if 'emitatstartup' not in config:
config['emitatstartup'] = True
if 'onchangeonly' not in config:
config['onchangeonly'] = False
if 'emitatstartup' not in _config:
_config['emitatstartup'] = True
if 'onchangeonly' not in _config:
_config['onchangeonly'] = False
ret = []
avgs = os.getloadavg()
avg_keys = ['1m', '5m', '15m']
avg_dict = dict(zip(avg_keys, avgs))
if config['onchangeonly']:
if _config['onchangeonly']:
if not LAST_STATUS:
for k in ['1m', '5m', '15m']:
LAST_STATUS[k] = avg_dict[k]
@ -120,27 +126,40 @@ def beacon(config):
# Check each entry for threshold
for k in ['1m', '5m', '15m']:
if k in config:
if config['onchangeonly']:
# Emit if current is more that threshold and old value less that threshold
if float(avg_dict[k]) > float(config[k][1]) and float(LAST_STATUS[k]) < float(config[k][1]):
log.debug('Emit because {0} > {1} and last was {2}'.format(float(avg_dict[k]), float(config[k][1]), float(LAST_STATUS[k])))
if k in _config.get('averages', {}):
if _config['onchangeonly']:
# Emit if current is more than threshold and old value less
# than threshold
if float(avg_dict[k]) > float(_config['averages'][k][1]) and \
float(LAST_STATUS[k]) < float(_config['averages'][k][1]):
log.debug('Emit because {0} > {1} and last was '
'{2}'.format(float(avg_dict[k]),
float(_config['averages'][k][1]),
float(LAST_STATUS[k])))
send_beacon = True
break
# Emit if current is less that threshold and old value more that threshold
if float(avg_dict[k]) < float(config[k][0]) and float(LAST_STATUS[k]) > float(config[k][0]):
log.debug('Emit because {0} < {1} and last was {2}'.format(float(avg_dict[k]), float(config[k][0]), float(LAST_STATUS[k])))
# Emit if current is less than threshold and old value more
# than threshold
if float(avg_dict[k]) < float(_config['averages'][k][0]) and \
float(LAST_STATUS[k]) > float(_config['averages'][k][0]):
log.debug('Emit because {0} < {1} and last was '
'{2}'.format(float(avg_dict[k]),
float(_config['averages'][k][0]),
float(LAST_STATUS[k])))
send_beacon = True
break
else:
# Emit no matter LAST_STATUS
if float(avg_dict[k]) < float(config[k][0]) or \
float(avg_dict[k]) > float(config[k][1]):
log.debug('Emit because {0} < {1} or > {2}'.format(float(avg_dict[k]), float(config[k][0]), float(config[k][1])))
if float(avg_dict[k]) < float(_config['averages'][k][0]) or \
float(avg_dict[k]) > float(_config['averages'][k][1]):
log.debug('Emit because {0} < {1} or > '
'{2}'.format(float(avg_dict[k]),
float(_config['averages'][k][0]),
float(_config['averages'][k][1])))
send_beacon = True
break
if config['onchangeonly']:
if _config['onchangeonly']:
for k in ['1m', '5m', '15m']:
LAST_STATUS[k] = avg_dict[k]
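
The rewritten load beacon keys its thresholds under ``averages``, with a ``[low, high]`` pair per interval. A Unix-only sketch of the basic band check (the ``onchangeonly`` bookkeeping against ``LAST_STATUS`` is omitted):

.. code-block:: python

    import os

    _config = {
        'averages': {
            '1m': [0.0, 2.0],
            '5m': [0.0, 1.5],
            '15m': [0.1, 1.0],
        },
    }

    # os.getloadavg() returns the 1, 5 and 15 minute load averages.
    avg_dict = dict(zip(['1m', '5m', '15m'], os.getloadavg()))

    send_beacon = False
    for k, (low, high) in _config['averages'].items():
        if avg_dict[k] < float(low) or avg_dict[k] > float(high):
            send_beacon = True
            break

    print(send_beacon, avg_dict)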

View File

@ -11,8 +11,9 @@ from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.platform
from salt.ext.six.moves import map
try:
@ -35,7 +36,7 @@ log = logging.getLogger(__name__)
def __virtual__():
if not salt.utils.is_windows() and HAS_REGEX:
if not salt.utils.platform.is_windows() and HAS_REGEX:
return __virtualname__
return False
@ -48,13 +49,20 @@ def _get_loc():
return __context__[LOC_KEY]
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
_config = {}
list(map(_config.update, config))
# Configuration for log beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for log beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for log beacon must be a list.')
if 'file' not in _config:
return False, ('Configuration for log beacon '
'must contain file option.')
return True, 'Valid beacon configuration'
@ -67,20 +75,24 @@ def beacon(config):
beacons:
log:
file: <path>
<tag>:
regex: <pattern>
- file: <path>
- tags:
<tag>:
regex: <pattern>
'''
_config = {}
list(map(_config.update, config))
ret = []
if 'file' not in config:
if 'file' not in _config:
event = SKEL.copy()
event['tag'] = 'global'
event['error'] = 'file not defined in config'
ret.append(event)
return ret
with salt.utils.files.fopen(config['file'], 'r') as fp_:
with salt.utils.files.fopen(_config['file'], 'r') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
@ -92,16 +104,17 @@ def beacon(config):
fp_.seek(loc)
txt = fp_.read()
log.info('txt {}'.format(txt))
d = {}
for tag in config:
if 'regex' not in config[tag]:
for tag in _config.get('tags', {}):
if 'regex' not in _config['tags'][tag]:
continue
if len(config[tag]['regex']) < 1:
if len(_config['tags'][tag]['regex']) < 1:
continue
try:
d[tag] = re.compile(r'{0}'.format(config[tag]['regex']))
except Exception:
d[tag] = re.compile(r'{0}'.format(_config['tags'][tag]['regex']))
except Exception as e:
event = SKEL.copy()
event['tag'] = tag
event['error'] = 'bad regex'
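
A sketch of the new ``tags`` handling: each tag carries a regex that is compiled once and then tried against the text appended to the watched file since the last read (the sample log line and tag name are invented):

.. code-block:: python

    import re

    _config = {
        'file': '/var/log/auth.log',                 # example path
        'tags': {
            'ssh_fail': {'regex': 'Failed password'},
        },
    }

    compiled = {}
    for tag, opts in _config['tags'].items():
        if not opts.get('regex'):
            continue
        try:
            compiled[tag] = re.compile(r'{0}'.format(opts['regex']))
        except re.error:
            compiled[tag] = None      # would become the 'bad regex' event

    new_text = 'sshd[123]: Failed password for invalid user admin\n'
    events = [{'tag': tag, 'raw': line, 'match': 'yes'}
              for line in new_text.splitlines()
              for tag, regex in compiled.items()
              if regex and regex.search(line)]
    print(events)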

View File

@ -11,6 +11,7 @@ Beacon to monitor memory usage.
from __future__ import absolute_import
import logging
import re
from salt.ext.six.moves import map
# Import Third Party Libs
try:
@ -31,14 +32,22 @@ def __virtual__():
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for memusage beacon should be a list of dicts
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for memusage '
'beacon must be a dictionary.')
'beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'percent' not in _config:
return False, ('Configuration for memusage beacon '
'requires percent.')
return True, 'Valid beacon configuration'
@ -46,7 +55,8 @@ def beacon(config):
'''
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon if it is exceeded.
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
@ -55,15 +65,17 @@ def beacon(config):
- percent: 63%
'''
ret = []
for memusage in config:
mount = memusage.keys()[0]
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = memusage[mount]
if '%' in monitor_usage:
monitor_usage = re.sub('%', '', monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({'memusage': current_usage})
_config = {}
list(map(_config.update, config))
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = _config['percent']
if '%' in monitor_usage:
monitor_usage = re.sub('%', '', monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({'memusage': current_usage})
return ret

View File

@ -16,6 +16,9 @@ try:
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
from salt.ext.six.moves import map
# pylint: enable=import-error
log = logging.getLogger(__name__)
@ -45,7 +48,7 @@ def __virtual__():
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
@ -57,15 +60,19 @@ def __validate__(config):
]
# Configuration for network_info beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for load beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for network_info beacon must be a list.')
else:
for item in config:
if not isinstance(config[item], dict):
return False, ('Configuration for load beacon must '
'be a dictionary of dictionaries.')
_config = {}
list(map(_config.update, config))
for item in _config.get('interfaces', {}):
if not isinstance(_config['interfaces'][item], dict):
return False, ('Configuration for network_info beacon must '
'be a list of dictionaries.')
else:
if not any(j in VALID_ITEMS for j in config[item]):
if not any(j in VALID_ITEMS for j in _config['interfaces'][item]):
return False, ('Invalid configuration item in '
'Beacon configuration.')
return True, 'Valid beacon configuration'
@ -86,16 +93,17 @@ def beacon(config):
beacons:
network_info:
- eth0:
type: equal
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
- interfaces:
eth0:
type: equal
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
Emit beacon when any values are greater
than configured values.
@ -104,46 +112,53 @@ def beacon(config):
beacons:
network_info:
- eth0:
type: greater
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
- interfaces:
eth0:
type: greater
bytes_sent: 100000
bytes_recv: 100000
packets_sent: 100000
packets_recv: 100000
errin: 100
errout: 100
dropin: 100
dropout: 100
'''
ret = []
_config = {}
list(map(_config.update, config))
log.debug('psutil.net_io_counters {}'.format(psutil.net_io_counters))
_stats = psutil.net_io_counters(pernic=True)
for interface_config in config:
interface = interface_config.keys()[0]
log.debug('_stats {}'.format(_stats))
for interface in _config.get('interfaces', {}):
if interface in _stats:
interface_config = _config['interfaces'][interface]
_if_stats = _stats[interface]
_diff = False
for attr in __attrs:
if attr in interface_config[interface]:
if 'type' in interface_config[interface] and \
interface_config[interface]['type'] == 'equal':
if attr in interface_config:
if 'type' in interface_config and \
interface_config['type'] == 'equal':
if getattr(_if_stats, attr, None) == \
int(interface_config[interface][attr]):
int(interface_config[attr]):
_diff = True
elif 'type' in interface_config[interface] and \
interface_config[interface]['type'] == 'greater':
elif 'type' in interface_config and \
interface_config['type'] == 'greater':
if getattr(_if_stats, attr, None) > \
int(interface_config[interface][attr]):
int(interface_config[attr]):
_diff = True
else:
log.debug('attr {}'.format(getattr(_if_stats,
attr, None)))
else:
if getattr(_if_stats, attr, None) == \
int(interface_config[interface][attr]):
int(interface_config[attr]):
_diff = True
if _diff:
ret.append({'interface': interface,
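
A reduced sketch of the comparison logic: every configured counter for an interface is read from ``psutil.net_io_counters(pernic=True)`` and checked with either an ``equal`` or ``greater`` test (``psutil`` assumed installed; the limits and interface name are examples):

.. code-block:: python

    import psutil

    _config = {
        'interfaces': {
            'eth0': {
                'type': 'greater',
                'bytes_sent': 100000,
                'bytes_recv': 100000,
            },
        },
    }

    stats = psutil.net_io_counters(pernic=True)
    events = []
    for interface, iface_cfg in _config['interfaces'].items():
        if interface not in stats:
            continue
        if_stats = stats[interface]
        triggered = False
        for attr, limit in iface_cfg.items():
            if attr == 'type':
                continue
            value = getattr(if_stats, attr, None)
            if value is None:
                continue
            if iface_cfg.get('type') == 'greater':
                triggered = triggered or value > int(limit)
            else:                         # 'equal' or unspecified
                triggered = triggered or value == int(limit)
        if triggered:
            events.append({'interface': interface,
                           'bytes_sent': if_stats.bytes_sent})
    print(events)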

View File

@ -18,6 +18,8 @@ import ast
import re
import salt.loader
import logging
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
__virtual_name__ = 'network_settings'
@ -45,22 +47,23 @@ def __virtual__():
return False
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for network_settings '
'beacon must be a dictionary.')
'beacon must be a list.')
else:
for item in config:
if item == 'coalesce':
continue
if not isinstance(config[item], dict):
return False, ('Configuration for network_settings beacon must be a '
'dictionary of dictionaries.')
_config = {}
list(map(_config.update, config))
for item in _config.get('interfaces', {}):
if not isinstance(_config['interfaces'][item], dict):
return False, ('Configuration for network_settings beacon '
' must be a list of dictionaries.')
else:
if not all(j in ATTRS for j in config[item]):
if not all(j in ATTRS for j in _config['interfaces'][item]):
return False, ('Invalid configuration item in Beacon '
'configuration.')
return True, 'Valid beacon configuration'
@ -75,9 +78,10 @@ def _copy_interfaces_info(interfaces):
for interface in interfaces:
_interface_attrs_cpy = set()
for attr in ATTRS:
attr_dict = Hashabledict()
attr_dict[attr] = repr(interfaces[interface][attr])
_interface_attrs_cpy.add(attr_dict)
if attr in interfaces[interface]:
attr_dict = Hashabledict()
attr_dict[attr] = repr(interfaces[interface][attr])
_interface_attrs_cpy.add(attr_dict)
ret[interface] = _interface_attrs_cpy
return ret
@ -89,8 +93,8 @@ def beacon(config):
By default, the beacon will emit when there is a value change on one of the
settings on watch. The config also supports the onvalue parameter for each
setting, which instructs the beacon to only emit if the setting changed to the
value defined.
setting, which instructs the beacon to only emit if the setting changed to
the value defined.
Example Config
@ -98,12 +102,13 @@ def beacon(config):
beacons:
network_settings:
eth0:
ipaddr:
promiscuity:
onvalue: 1
eth1:
linkmode:
- interfaces:
- eth0:
ipaddr:
promiscuity:
onvalue: 1
- eth1:
linkmode:
The config above will check for value changes on eth0 ipaddr and eth1 linkmode. It will also
emit if the promiscuity value changes to 1.
@ -118,12 +123,16 @@ def beacon(config):
beacons:
network_settings:
coalesce: True
eth0:
ipaddr:
promiscuity:
- coalesce: True
- interfaces:
- eth0:
ipaddr:
promiscuity:
'''
_config = {}
list(map(_config.update, config))
ret = []
interfaces = []
expanded_config = {}
@ -137,45 +146,51 @@ def beacon(config):
if not LAST_STATS:
LAST_STATS = _stats
if 'coalesce' in config and config['coalesce']:
if 'coalesce' in _config and _config['coalesce']:
coalesce = True
changes = {}
log.debug('_stats {}'.format(_stats))
# Get list of interfaces included in config that are registered in the
# system, including interfaces defined by wildcards (eth*, wlan*)
for item in config:
if item == 'coalesce':
continue
if item in _stats:
interfaces.append(item)
for interface in _config.get('interfaces', {}):
if interface in _stats:
interfaces.append(interface)
else:
# No direct match, try with * wildcard regexp
interface_regexp = item.replace('*', '[0-9]+')
interface_regexp = interface.replace('*', '[0-9]+')
for interface in _stats:
match = re.search(interface_regexp, interface)
if match:
interfaces.append(match.group())
expanded_config[match.group()] = config[item]
expanded_config[match.group()] = config['interfaces'][interface]
if expanded_config:
config.update(expanded_config)
# config updated so update _config
list(map(_config.update, config))
log.debug('interfaces {}'.format(interfaces))
for interface in interfaces:
_send_event = False
_diff_stats = _stats[interface] - LAST_STATS[interface]
_ret_diff = {}
interface_config = _config['interfaces'][interface]
log.debug('_diff_stats {}'.format(_diff_stats))
if _diff_stats:
_diff_stats_dict = {}
LAST_STATS[interface] = _stats[interface]
for item in _diff_stats:
_diff_stats_dict.update(item)
for attr in config[interface]:
for attr in interface_config:
if attr in _diff_stats_dict:
config_value = None
if config[interface][attr] and 'onvalue' in config[interface][attr]:
config_value = config[interface][attr]['onvalue']
if interface_config[attr] and \
'onvalue' in interface_config[attr]:
config_value = interface_config[attr]['onvalue']
new_value = ast.literal_eval(_diff_stats_dict[attr])
if not config_value or config_value == new_value:
_send_event = True
@ -185,7 +200,9 @@ def beacon(config):
if coalesce:
changes[interface] = _ret_diff
else:
ret.append({'tag': interface, 'interface': interface, 'change': _ret_diff})
ret.append({'tag': interface,
'interface': interface,
'change': _ret_diff})
if coalesce and changes:
grains_info = salt.loader.grains(__opts__, True)
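
The interface loop also accepts wildcard names. A standalone sketch of that expansion step: a name such as ``eth*`` is rewritten as a regex and matched against the interfaces actually present (the interface list below is invented):

.. code-block:: python

    import re

    _config = {'interfaces': {'eth*': {'promiscuity': {'onvalue': 1}}}}
    system_interfaces = ['lo', 'eth0', 'eth1', 'wlan0']      # example data

    interfaces = []
    expanded_config = {}
    for name, settings in _config['interfaces'].items():
        if name in system_interfaces:
            interfaces.append(name)
            continue
        # No direct match, try with * wildcard regexp
        pattern = name.replace('*', '[0-9]+')
        for candidate in system_interfaces:
            if re.search(pattern, candidate):
                interfaces.append(candidate)
                expanded_config[candidate] = settings

    print(sorted(interfaces))          # ['eth0', 'eth1']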

View File

@ -21,15 +21,25 @@ def __virtual__():
return __virtualname__ if 'pkg.upgrade_available' in __salt__ else False
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for pkg beacon should be a list
if not isinstance(config, dict):
return False, ('Configuration for pkg beacon must be a dictionary.')
if 'pkgs' not in config:
return False, ('Configuration for pkg beacon requires list of pkgs.')
if not isinstance(config, list):
return False, ('Configuration for pkg beacon must be a list.')
# Configuration for pkg beacon should contain pkgs
pkgs_found = False
pkgs_is_list = False
for config_item in config:
if 'pkgs' in config_item:
pkgs_found = True
if isinstance(config_item['pkgs'], list):
pkgs_is_list = True
if not pkgs_found or not pkgs_is_list:
return False, 'Configuration for pkg beacon requires list of pkgs.'
return True, 'Valid beacon configuration'
@ -48,14 +58,16 @@ def beacon(config):
- refresh: True
'''
ret = []
_validate = __validate__(config)
if not _validate[0]:
return ret
_refresh = False
if 'refresh' in config and config['refresh']:
_refresh = True
for pkg in config['pkgs']:
pkgs = []
for config_item in config:
if 'pkgs' in config_item:
pkgs += config_item['pkgs']
if 'refresh' in config and config['refresh']:
_refresh = True
for pkg in pkgs:
_installed = __salt__['pkg.version'](pkg)
_latest = __salt__['pkg.latest_version'](pkg, refresh=_refresh)
if _installed and _latest:
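
A sketch of the whole pkg beacon flow with the two execution-module calls replaced by stand-ins (``pkg.version`` and ``pkg.latest_version`` are only simulated here; package names and versions are invented):

.. code-block:: python

    def pkg_version(name):
        # stand-in for __salt__['pkg.version']
        return {'zsh': '5.0.2'}.get(name, '')


    def pkg_latest_version(name, refresh=False):
        # stand-in for __salt__['pkg.latest_version']
        return {'zsh': '5.4.1'}.get(name, '')


    config = [{'pkgs': ['zsh', 'apache2']}, {'refresh': True}]

    pkgs = []
    _refresh = False
    for config_item in config:
        if 'pkgs' in config_item:
            pkgs += config_item['pkgs']
        if config_item.get('refresh'):
            _refresh = True

    events = []
    for pkg in pkgs:
        _installed = pkg_version(pkg)
        _latest = pkg_latest_version(pkg, refresh=_refresh)
        if _installed and _latest:
            events.append({'pkg': pkg, 'version': _latest})
    print(events)        # [{'pkg': 'zsh', 'version': '5.4.1'}]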

View File

@ -15,6 +15,7 @@ import logging
# Import salt libs
import salt.utils.http
from salt.ext.six.moves import map
# Important: If used with salt-proxy
# this is required for the beacon to load!!!
@ -33,12 +34,12 @@ def __virtual__():
return True
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
return False, ('Configuration for rest_example beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for proxy_example beacon must be a list.')
return True, 'Valid beacon configuration'
@ -51,7 +52,7 @@ def beacon(config):
beacons:
proxy_example:
endpoint: beacon
- endpoint: beacon
'''
# Important!!!
# Although this toy example makes an HTTP call
@ -59,8 +60,11 @@ def beacon(config):
# please be advised that doing CPU or IO intensive
# operations in this method will cause the beacon loop
# to block.
_config = {}
list(map(_config.update, config))
beacon_url = '{0}{1}'.format(__opts__['proxy']['url'],
config['endpoint'])
_config['endpoint'])
ret = salt.utils.http.query(beacon_url,
decode_type='json',
decode=True)

View File

@ -14,6 +14,9 @@ try:
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
from salt.ext.six.moves import map
# pylint: enable=import-error
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@ -23,17 +26,27 @@ __virtualname__ = 'ps'
def __virtual__():
if not HAS_PSUTIL:
return (False, 'cannot load network_info beacon: psutil not available')
return (False, 'cannot load ps beacon: psutil not available')
return __virtualname__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for ps beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for ps beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for ps beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'processes' not in _config:
return False, ('Configuration for ps beacon requires processes.')
else:
if not isinstance(_config['processes'], dict):
return False, ('Processes for ps beacon must be a dictionary.')
return True, 'Valid beacon configuration'
@ -47,8 +60,9 @@ def beacon(config):
beacons:
ps:
- salt-master: running
- mysql: stopped
- processes:
salt-master: running
mysql: stopped
The config above sets up beacons to check that
processes are running or stopped.
@ -60,19 +74,21 @@ def beacon(config):
if _name not in procs:
procs.append(_name)
for entry in config:
for process in entry:
ret_dict = {}
if entry[process] == 'running':
if process in procs:
ret_dict[process] = 'Running'
ret.append(ret_dict)
elif entry[process] == 'stopped':
if process not in procs:
ret_dict[process] = 'Stopped'
ret.append(ret_dict)
else:
if process not in procs:
ret_dict[process] = False
ret.append(ret_dict)
_config = {}
list(map(_config.update, config))
for process in _config.get('processes', {}):
ret_dict = {}
if _config['processes'][process] == 'running':
if process in procs:
ret_dict[process] = 'Running'
ret.append(ret_dict)
elif _config['processes'][process] == 'stopped':
if process not in procs:
ret_dict[process] = 'Stopped'
ret.append(ret_dict)
else:
if process not in procs:
ret_dict[process] = False
ret.append(ret_dict)
return ret
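
A sketch of the new ``processes`` mapping in use: the expected state of each named process is compared against the processes currently visible to ``psutil`` (``psutil`` assumed installed; the process names are examples):

.. code-block:: python

    import psutil

    _config = {'processes': {'salt-master': 'running', 'mysql': 'stopped'}}

    procs = set()
    for proc in psutil.process_iter():
        try:
            procs.add(proc.name())
        except psutil.NoSuchProcess:
            continue

    events = []
    for process, expected in _config['processes'].items():
        if expected == 'running' and process in procs:
            events.append({process: 'Running'})
        elif expected == 'stopped' and process not in procs:
            events.append({process: 'Stopped'})
    print(events)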

View File

@ -9,6 +9,7 @@
# Import python libs
from __future__ import absolute_import
import logging
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
@ -20,9 +21,7 @@ def _run_proxy_processes(proxies):
aren't running
'''
ret = []
for prox_ in proxies:
# prox_ is a dict
proxy = prox_.keys()[0]
for proxy in proxies:
result = {}
if not __salt__['salt_proxy.is_running'](proxy)['result']:
__salt__['salt_proxy.configure_proxy'](proxy, start=True)
@ -35,7 +34,29 @@ def _run_proxy_processes(proxies):
return ret
def beacon(proxies):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for salt_proxy beacon should be a list
if not isinstance(config, list):
log.info('Configuration for salt_proxy beacon must be a list.')
return False, ('Configuration for salt_proxy beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'proxies' not in _config:
return False, ('Configuration for salt_proxy'
' beacon requires proxies.')
else:
if not isinstance(_config['proxies'], dict):
return False, ('Proxies for salt_proxy '
'beacon must be a dictionary.')
return True, 'Valid beacon configuration.'
def beacon(config):
'''
Handle configured proxies
@ -43,9 +64,13 @@ def beacon(proxies):
beacons:
salt_proxy:
- p8000: {}
- p8001: {}
- proxies:
p8000: {}
p8001: {}
'''
log.trace('salt proxy beacon called')
return _run_proxy_processes(proxies)
_config = {}
list(map(_config.update, config))
return _run_proxy_processes(_config['proxies'])

View File

@ -11,6 +11,7 @@ of a Raspberry Pi.
from __future__ import absolute_import
import logging
import re
from salt.ext.six.moves import map
log = logging.getLogger(__name__)
@ -19,14 +20,22 @@ def __virtual__():
return 'sensehat.get_pressure' in __salt__
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for sensehat beacon should be a dict
if not isinstance(config, dict):
# Configuration for sensehat beacon should be a list
if not isinstance(config, list):
return False, ('Configuration for sensehat beacon '
'must be a dictionary.')
'must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'sensors' not in _config:
return False, ('Configuration for sensehat'
' beacon requires sensors.')
return True, 'Valid beacon configuration'
@ -48,10 +57,11 @@ def beacon(config):
beacons:
sensehat:
humidity: 70%
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
- sensors:
humidity: 70%
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
'''
ret = []
min_default = {
@ -60,13 +70,16 @@ def beacon(config):
'temperature': '-273.15'
}
for sensor in config:
_config = {}
list(map(_config.update, config))
for sensor in _config.get('sensors', {}):
sensor_function = 'sensehat.get_{0}'.format(sensor)
if sensor_function not in __salt__:
log.error('No sensor for measuring {0}. Skipping.'.format(sensor))
continue
sensor_config = config[sensor]
sensor_config = _config['sensors'][sensor]
if isinstance(sensor_config, list):
sensor_min = str(sensor_config[0])
sensor_max = str(sensor_config[1])

View File

@ -9,19 +9,35 @@ from __future__ import absolute_import
import os
import logging
import time
from salt.ext.six.moves import map
log = logging.getLogger(__name__) # pylint: disable=invalid-name
LAST_STATUS = {}
__virtualname__ = 'service'
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for service beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for service beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for service beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'services' not in _config:
return False, ('Configuration for service beacon'
' requires services.')
else:
for config_item in _config['services']:
if not isinstance(_config['services'][config_item], dict):
return False, ('Configuration for service beacon must '
'be a list of dictionaries.')
return True, 'Valid beacon configuration'
@ -35,8 +51,9 @@ def beacon(config):
beacons:
service:
salt-master:
mysql:
- services:
salt-master:
mysql:
The config above sets up beacons to check for
the salt-master and mysql services.
@ -79,14 +96,21 @@ def beacon(config):
beacons:
service:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
- services:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
'''
ret = []
for service in config:
_config = {}
list(map(_config.update, config))
for service in _config.get('services', {}):
ret_dict = {}
service_config = _config['services'][service]
ret_dict[service] = {'running': __salt__['service.status'](service)}
ret_dict['service_name'] = service
ret_dict['tag'] = service
@ -95,40 +119,43 @@ def beacon(config):
# If no options is given to the service, we fall back to the defaults
# assign a False value to oncleanshutdown and onchangeonly. Those
# key:values are then added to the service dictionary.
if 'oncleanshutdown' not in config[service]:
config[service]['oncleanshutdown'] = False
if 'emitatstartup' not in config[service]:
config[service]['emitatstartup'] = True
if 'onchangeonly' not in config[service]:
config[service]['onchangeonly'] = False
if 'delay' not in config[service]:
config[service]['delay'] = 0
if not service_config:
service_config = {}
if 'oncleanshutdown' not in service_config:
service_config['oncleanshutdown'] = False
if 'emitatstartup' not in service_config:
service_config['emitatstartup'] = True
if 'onchangeonly' not in service_config:
service_config['onchangeonly'] = False
if 'delay' not in service_config:
service_config['delay'] = 0
# We only want to report the nature of the shutdown
# if the current running status is False
# as well as if the config for the beacon asks for it
if 'uncleanshutdown' in config[service] and not ret_dict[service]['running']:
filename = config[service]['uncleanshutdown']
if 'uncleanshutdown' in service_config and not ret_dict[service]['running']:
filename = service_config['uncleanshutdown']
ret_dict[service]['uncleanshutdown'] = True if os.path.exists(filename) else False
if 'onchangeonly' in config[service] and config[service]['onchangeonly'] is True:
if 'onchangeonly' in service_config and service_config['onchangeonly'] is True:
if service not in LAST_STATUS:
LAST_STATUS[service] = ret_dict[service]
if config[service]['delay'] > 0:
if service_config['delay'] > 0:
LAST_STATUS[service]['time'] = currtime
elif not config[service]['emitatstartup']:
elif not service_config['emitatstartup']:
continue
else:
ret.append(ret_dict)
if LAST_STATUS[service]['running'] != ret_dict[service]['running']:
LAST_STATUS[service] = ret_dict[service]
if config[service]['delay'] > 0:
if service_config['delay'] > 0:
LAST_STATUS[service]['time'] = currtime
else:
ret.append(ret_dict)
if 'time' in LAST_STATUS[service]:
elapsedtime = int(round(currtime - LAST_STATUS[service]['time']))
if elapsedtime > config[service]['delay']:
if elapsedtime > service_config['delay']:
del LAST_STATUS[service]['time']
ret.append(ret_dict)
else:

View File

@ -9,6 +9,7 @@ import time
# Import salt libs
import salt.utils
import salt.utils.path
import salt.utils.vt
__virtualname__ = 'sh'
@ -21,7 +22,7 @@ def __virtual__():
'''
Only load if strace is installed
'''
return __virtualname__ if salt.utils.which('strace') else False
return __virtualname__ if salt.utils.path.which('strace') else False
def _get_shells():
@ -40,13 +41,13 @@ def _get_shells():
return __context__['sh.shells']
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for sh beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for sh beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for sh beacon must be a list.')
return True, 'Valid beacon configuration'
@ -57,7 +58,7 @@ def beacon(config):
.. code-block:: yaml
beacons:
sh: {}
sh: []
'''
ret = []
pkey = 'sh.vt'

View File

@ -15,7 +15,7 @@ the minion config:
.. code-block:: yaml
beacons:
status: {}
status: []
By default, all of the information from the following execution module
functions will be returned:
@ -96,19 +96,19 @@ import datetime
import salt.exceptions
# Import salt libs
import salt.utils
import salt.utils.platform
log = logging.getLogger(__name__)
__virtualname__ = 'status'
def __validate__(config):
def validate(config):
'''
Validate that the config is a dict
'''
if not isinstance(config, dict):
return False, ('Configuration for status beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for status beacon must be a list.')
return True, 'Valid beacon configuration'
@ -123,7 +123,7 @@ def beacon(config):
log.debug(config)
ctime = datetime.datetime.utcnow().isoformat()
ret = {}
if salt.utils.is_windows():
if salt.utils.platform.is_windows():
return [{
'tag': ctime,
'data': ret,

View File

@ -1,11 +1,15 @@
# -*- coding: utf-8 -*-
'''
Beacon to emit Telegram messages
Requires the python-telegram-bot library
'''
# Import Python libs
from __future__ import absolute_import
import logging
from salt.ext.six.moves import map
# Import 3rd Party libs
try:
@ -28,20 +32,23 @@ def __virtual__():
return False
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for telegram_bot_msg '
'beacon must be a dictionary.')
'beacon must be a list.')
if not all(config.get(required_config)
_config = {}
list(map(_config.update, config))
if not all(_config.get(required_config)
for required_config in ['token', 'accept_from']):
return False, ('Not all required configuration for '
'telegram_bot_msg are set.')
if not isinstance(config.get('accept_from'), list):
if not isinstance(_config.get('accept_from'), list):
return False, ('Configuration for telegram_bot_msg, '
'accept_from must be a list of usernames.')
@ -57,18 +64,22 @@ def beacon(config):
beacons:
telegram_bot_msg:
token: "<bot access token>"
accept_from:
- token: "<bot access token>"
- accept_from:
- "<valid username>"
interval: 10
- interval: 10
'''
_config = {}
list(map(_config.update, config))
log.debug('telegram_bot_msg beacon starting')
ret = []
output = {}
output['msgs'] = []
bot = telegram.Bot(config['token'])
bot = telegram.Bot(_config['token'])
updates = bot.get_updates(limit=100, timeout=0, network_delay=10)
log.debug('Num updates: {0}'.format(len(updates)))
@ -83,7 +94,7 @@ def beacon(config):
if update.update_id > latest_update_id:
latest_update_id = update.update_id
if message.chat.username in config['accept_from']:
if message.chat.username in _config['accept_from']:
output['msgs'].append(message.to_dict())
# mark in the server that previous messages are processed

View File

@ -6,10 +6,17 @@ Beacon to emit Twilio text messages
# Import Python libs
from __future__ import absolute_import
import logging
from salt.ext.six.moves import map
# Import 3rd Party libs
try:
from twilio.rest import TwilioRestClient
import twilio
# Grab version, ensure elements are ints
twilio_version = tuple([int(x) for x in twilio.__version_info__])
if twilio_version > (5, ):
from twilio.rest import Client as TwilioRestClient
else:
from twilio.rest import TwilioRestClient
HAS_TWILIO = True
except ImportError:
HAS_TWILIO = False
@ -26,14 +33,24 @@ def __virtual__():
return False
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for twilio_txt_msg beacon should be a list of dicts
if not isinstance(config, dict):
if not isinstance(config, list):
return False, ('Configuration for twilio_txt_msg beacon '
'must be a dictionary.')
'must be a list.')
else:
_config = {}
list(map(_config.update, config))
if not all(x in _config for x in ('account_sid',
'auth_token',
'twilio_number')):
return False, ('Configuration for twilio_txt_msg beacon '
'must contain account_sid, auth_token '
'and twilio_number items.')
return True, 'Valid beacon configuration'
@ -46,20 +63,26 @@ def beacon(config):
beacons:
twilio_txt_msg:
account_sid: "<account sid>"
auth_token: "<auth token>"
twilio_number: "+15555555555"
interval: 10
- account_sid: "<account sid>"
- auth_token: "<auth token>"
- twilio_number: "+15555555555"
- interval: 10
'''
log.trace('twilio_txt_msg beacon starting')
_config = {}
list(map(_config.update, config))
ret = []
if not all([config['account_sid'], config['auth_token'], config['twilio_number']]):
if not all([_config['account_sid'],
_config['auth_token'],
_config['twilio_number']]):
return ret
output = {}
output['texts'] = []
client = TwilioRestClient(config['account_sid'], config['auth_token'])
messages = client.messages.list(to=config['twilio_number'])
client = TwilioRestClient(_config['account_sid'], _config['auth_token'])
messages = client.messages.list(to=_config['twilio_number'])
log.trace('Num messages: {0}'.format(len(messages)))
if len(messages) < 1:
log.trace('Twilio beacon has no texts')

View File

@ -5,7 +5,7 @@ Beacon to fire events at login of users as registered in the wtmp file
.. code-block:: yaml
beacons:
wtmp: {}
wtmp: []
'''
# Import Python libs
@ -16,6 +16,9 @@ import struct
# Import salt libs
import salt.utils.files
# Import 3rd-party libs
from salt.ext import six
__virtualname__ = 'wtmp'
WTMP = '/var/log/wtmp'
FMT = 'hi32s4s32s256shhiii4i20x'
@ -52,13 +55,13 @@ def _get_loc():
return __context__[LOC_KEY]
def __validate__(config):
def validate(config):
'''
Validate the beacon configuration
'''
# Configuration for wtmp beacon should be a list of dicts
if not isinstance(config, dict):
return False, ('Configuration for wtmp beacon must be a dictionary.')
if not isinstance(config, list):
return False, ('Configuration for wtmp beacon must be a list.')
return True, 'Valid beacon configuration'
@ -70,7 +73,7 @@ def beacon(config):
.. code-block:: yaml
beacons:
wtmp: {}
wtmp: []
'''
ret = []
with salt.utils.files.fopen(WTMP, 'rb') as fp_:
@ -90,7 +93,7 @@ def beacon(config):
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], str):
if isinstance(event[field], six.string_types):
event[field] = event[field].strip('\x00')
ret.append(event)
return ret
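The six.string_types check above keeps the NUL-stripping of fixed-width wtmp fields working on both Python 2 and 3. A tiny illustration with a fabricated unpacked record (it assumes Salt's bundled six, as imported in the hunk):

    from salt.ext import six

    event = {'user': 'root\x00\x00\x00\x00', 'pid': 1234}   # fabricated record fields
    for field in event:
        if isinstance(event[field], six.string_types):
            event[field] = event[field].strip('\x00')        # drop the struct padding
    print(event)   # {'user': 'root', 'pid': 1234}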


@ -11,14 +11,14 @@ import copy
from datetime import datetime, timedelta
# Import salt libs
import salt.utils # Can be removed once print_cli is moved
import salt.client
import salt.output
import salt.exceptions
from salt.utils import print_cli
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext import six
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import logging
@ -73,7 +73,7 @@ class Batch(object):
m = next(six.iterkeys(ret))
except StopIteration:
if not self.quiet:
print_cli('No minions matched the target.')
salt.utils.print_cli('No minions matched the target.')
break
if m is not None:
fret.add(m)
@ -95,7 +95,7 @@ class Batch(object):
return int(self.opts['batch'])
except ValueError:
if not self.quiet:
print_cli('Invalid batch data sent: {0}\nData must be in the '
salt.utils.print_cli('Invalid batch data sent: {0}\nData must be in the '
'form of %10, 10% or 3'.format(self.opts['batch']))
def __update_wait(self, wait):
@ -146,7 +146,7 @@ class Batch(object):
# We already know some minions didn't respond to the ping, so inform
# the user we won't be attempting to run a job on them
for down_minion in self.down_minions:
print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))
salt.utils.print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))
# Iterate while we still have things to execute
while len(ret) < len(self.minions):
@ -171,7 +171,7 @@ class Batch(object):
if next_:
if not self.quiet:
print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
salt.utils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_)))
# create a new iterator for this batch of minions
new_iter = self.local.cmd_iter_no_block(
*args,
@ -218,14 +218,14 @@ class Batch(object):
if part['data']['id'] in minion_tracker[queue]['minions']:
minion_tracker[queue]['minions'].remove(part['data']['id'])
else:
print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id']))
salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id']))
else:
parts.update(part)
for id in part:
if id in minion_tracker[queue]['minions']:
minion_tracker[queue]['minions'].remove(id)
else:
print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id))
salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id))
except StopIteration:
# if a iterator is done:
# - set it to inactive


@ -1,16 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import absolute_import, print_function
import os
from salt.utils import parsers
import salt.utils.parsers
from salt.utils.verify import verify_log
from salt.config import _expand_glob_path
import salt.cli.caller
import salt.defaults.exitcodes
class SaltCall(parsers.SaltCallOptionParser):
class SaltCall(salt.utils.parsers.SaltCallOptionParser):
'''
Used to locally execute a salt command
'''


@ -20,19 +20,17 @@ import salt.minion
import salt.output
import salt.payload
import salt.transport
import salt.utils # Can be removed once print_cli, activate_profile, and output_profile are moved
import salt.utils.args
import salt.utils.files
import salt.utils.jid
import salt.utils.kinds as kinds
import salt.utils.minion
import salt.defaults.exitcodes
from salt.log import LOG_LEVELS
from salt.utils import is_windows
from salt.utils import print_cli
from salt.utils import kinds
from salt.utils import activate_profile
from salt.utils import output_profile
from salt.utils.process import MultiprocessingProcess
from salt.cli import daemons
from salt.log import LOG_LEVELS
from salt.utils.platform import is_windows
from salt.utils.process import MultiprocessingProcess
try:
from raet import raeting, nacling
@ -47,7 +45,7 @@ except ImportError:
pass
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
# Custom exceptions
from salt.exceptions import (
@ -115,7 +113,7 @@ class BaseCaller(object):
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get('fun', '')):
print_cli('{0}:\n{1}\n'.format(name, docs[name]))
salt.utils.print_cli('{0}:\n{1}\n'.format(name, docs[name]))
def print_grains(self):
'''
@ -130,14 +128,14 @@ class BaseCaller(object):
'''
profiling_enabled = self.opts.get('profiling_enabled', False)
try:
pr = activate_profile(profiling_enabled)
pr = salt.utils.activate_profile(profiling_enabled)
try:
ret = self.call()
finally:
output_profile(pr,
stats_path=self.opts.get('profiling_path',
'/tmp/stats'),
stop=True)
salt.utils.output_profile(
pr,
stats_path=self.opts.get('profiling_path', '/tmp/stats'),
stop=True)
out = ret.get('out', 'nested')
if self.opts['print_metadata']:
print_ret = ret
@ -211,7 +209,7 @@ class BaseCaller(object):
ret['return'] = func(*args, **kwargs)
except TypeError as exc:
sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc))
print_cli(func.__doc__)
salt.utils.print_cli(func.__doc__)
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:


@ -21,7 +21,9 @@ import salt.client
import salt.utils.gzip_util
import salt.utils.itertools
import salt.utils.minions
from salt.utils import parsers, to_bytes
import salt.utils.parsers
import salt.utils.platform
import salt.utils.stringutils
from salt.utils.verify import verify_log
import salt.output
@ -31,7 +33,7 @@ from salt.ext import six
log = logging.getLogger(__name__)
class SaltCPCli(parsers.SaltCPOptionParser):
class SaltCPCli(salt.utils.parsers.SaltCPOptionParser):
'''
Run the salt-cp command line client
'''
@ -56,7 +58,7 @@ class SaltCP(object):
'''
def __init__(self, opts):
self.opts = opts
self.is_windows = salt.utils.is_windows()
self.is_windows = salt.utils.platform.is_windows()
def _mode(self, path):
if self.is_windows:
@ -152,7 +154,7 @@ class SaltCP(object):
index = 1
failed = {}
for chunk in reader(fn_, chunk_size=self.opts['salt_cp_chunk_size']):
chunk = base64.b64encode(to_bytes(chunk))
chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk))
append = index > 1
log.debug(
'Copying %s to %starget \'%s\' as %s%s',


@ -41,10 +41,11 @@ import salt.log.setup
# the try block below bypasses an issue at build time so that modules don't
# cause the build to fail
from salt.utils import migrations
from salt.utils import kinds
import salt.utils.kinds as kinds
try:
from salt.utils import parsers, ip_bracket
from salt.utils import ip_bracket
import salt.utils.parsers
from salt.utils.verify import check_user, verify_env, verify_socket
except ImportError as exc:
if exc.args[0] != 'No module named _msgpack':
@ -109,7 +110,7 @@ class DaemonsMixin(object): # pylint: disable=no-init
self.shutdown(error)
class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-init
class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Creates a master server
'''
@ -220,7 +221,7 @@ class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-in
super(Master, self).shutdown(exitcode, exitmsg)
class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-init
class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a minion server
'''
@ -398,7 +399,7 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-in
# pylint: enable=no-member
class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: disable=no-init
class ProxyMinion(salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a proxy minion server
'''
@ -549,7 +550,7 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: dis
# pylint: enable=no-member
class Syndic(parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-init
class Syndic(salt.utils.parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a syndic server
'''


@ -3,11 +3,11 @@ from __future__ import print_function
from __future__ import absolute_import
from salt.utils import parsers
import salt.utils.parsers
from salt.utils.verify import check_user, verify_log
class SaltKey(parsers.SaltKeyOptionParser):
class SaltKey(salt.utils.parsers.SaltKeyOptionParser):
'''
Initialize the Salt key manager
'''


@ -2,15 +2,14 @@
from __future__ import print_function
from __future__ import absolute_import
from salt.utils import parsers
from salt.utils import activate_profile
from salt.utils import output_profile
import salt.utils # Can be removed once activate_profile and output_profile are moved
import salt.utils.parsers
from salt.utils.verify import check_user, verify_log
from salt.exceptions import SaltClientError
import salt.defaults.exitcodes # pylint: disable=W0611
class SaltRun(parsers.SaltRunOptionParser):
class SaltRun(salt.utils.parsers.SaltRunOptionParser):
'''
Used to execute Salt runners
'''
@ -36,7 +35,7 @@ class SaltRun(parsers.SaltRunOptionParser):
# someone tries to use the runners via the python API
try:
if check_user(self.config['user']):
pr = activate_profile(profiling_enabled)
pr = salt.utils.activate_profile(profiling_enabled)
try:
ret = runner.run()
# In older versions ret['data']['retcode'] was used
@ -50,7 +49,7 @@ class SaltRun(parsers.SaltRunOptionParser):
elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}):
self.exit(ret['data']['retcode'])
finally:
output_profile(
salt.utils.output_profile(
pr,
stats_path=self.options.profiling_path,
stop=True)


@ -7,8 +7,8 @@ sys.modules['pkg_resources'] = None
import os
# Import Salt libs
from salt.ext.six import string_types
from salt.utils import parsers, print_cli
import salt.utils # Can be removed once print_cli is moved
import salt.utils.parsers
from salt.utils.args import yamlify_arg
from salt.utils.verify import verify_log
from salt.exceptions import (
@ -18,10 +18,10 @@ from salt.exceptions import (
)
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
class SaltCMD(parsers.SaltCMDOptionParser):
class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
'''
The execution of a salt command happens here
'''
@ -93,7 +93,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
# potentially switch to batch execution
if self.options.batch_safe_limit > 1:
if len(self._preview_target()) >= self.options.batch_safe_limit:
print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
salt.utils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
self.options.batch = self.options.batch_safe_size
self._run_batch()
return
@ -140,7 +140,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
if self.config['async']:
jid = self.local_client.cmd_async(**kwargs)
print_cli('Executed command with job ID: {0}'.format(jid))
salt.utils.print_cli('Executed command with job ID: {0}'.format(jid))
return
# local will be None when there was an error
@ -279,12 +279,12 @@ class SaltCMD(parsers.SaltCMDOptionParser):
def _print_errors_summary(self, errors):
if errors:
print_cli('\n')
print_cli('---------------------------')
print_cli('Errors')
print_cli('---------------------------')
salt.utils.print_cli('\n')
salt.utils.print_cli('---------------------------')
salt.utils.print_cli('Errors')
salt.utils.print_cli('---------------------------')
for error in errors:
print_cli(self._format_error(error))
salt.utils.print_cli(self._format_error(error))
def _print_returns_summary(self, ret):
'''
@ -301,7 +301,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
if isinstance(minion_ret, dict) and 'ret' in minion_ret:
minion_ret = ret[each_minion].get('ret')
if (
isinstance(minion_ret, string_types)
isinstance(minion_ret, six.string_types)
and minion_ret.startswith("Minion did not return")
):
if "Not connected" in minion_ret:
@ -314,22 +314,22 @@ class SaltCMD(parsers.SaltCMDOptionParser):
return_counter += 1
if self._get_retcode(ret[each_minion]):
failed_minions.append(each_minion)
print_cli('\n')
print_cli('-------------------------------------------')
print_cli('Summary')
print_cli('-------------------------------------------')
print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
print_cli('# of minions returned: {0}'.format(return_counter))
print_cli('# of minions that did not return: {0}'.format(not_return_counter))
print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
salt.utils.print_cli('\n')
salt.utils.print_cli('-------------------------------------------')
salt.utils.print_cli('Summary')
salt.utils.print_cli('-------------------------------------------')
salt.utils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter))
salt.utils.print_cli('# of minions returned: {0}'.format(return_counter))
salt.utils.print_cli('# of minions that did not return: {0}'.format(not_return_counter))
salt.utils.print_cli('# of minions with errors: {0}'.format(len(failed_minions)))
if self.options.verbose:
if not_connected_minions:
print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
salt.utils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions)))
if not_response_minions:
print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
salt.utils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions)))
if failed_minions:
print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
print_cli('-------------------------------------------')
salt.utils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions)))
salt.utils.print_cli('-------------------------------------------')
def _progress_end(self, out):
import salt.output
@ -406,10 +406,10 @@ class SaltCMD(parsers.SaltCMDOptionParser):
docs = {}
if not ret:
self.exit(2, 'No minions found to gather docs from\n')
if isinstance(ret, str):
if isinstance(ret, six.string_types):
self.exit(2, '{0}\n'.format(ret))
for host in ret:
if isinstance(ret[host], string_types) \
if isinstance(ret[host], six.string_types) \
and (ret[host].startswith("Minion did not return")
or ret[host] == 'VALUE TRIMMED'):
continue
@ -421,6 +421,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
salt.output.display_output({fun: docs[fun]}, 'nested', self.config)
else:
for fun in sorted(docs):
print_cli('{0}:'.format(fun))
print_cli(docs[fun])
print_cli('')
salt.utils.print_cli('{0}:'.format(fun))
salt.utils.print_cli(docs[fun])
salt.utils.print_cli('')


@ -2,17 +2,21 @@
from __future__ import print_function
from __future__ import absolute_import
import sys
import salt.client.ssh
from salt.utils import parsers
import salt.utils.parsers
from salt.utils.verify import verify_log
class SaltSSH(parsers.SaltSSHOptionParser):
class SaltSSH(salt.utils.parsers.SaltSSHOptionParser):
'''
Used to Execute the salt ssh routine
'''
def run(self):
if '-H' in sys.argv or '--hosts' in sys.argv:
sys.argv += ['x', 'x'] # Hack: pass a mandatory two options
# that won't be used anyways with -H or --hosts
self.parse_args()
self.setup_logfile_logger()
verify_log(self.config)

File diff suppressed because it is too large


@ -37,7 +37,7 @@ def tokenify(cmd, token=None):
Otherwise return cmd
'''
if token is not None:
cmd['token'] = token
cmd[u'token'] = token
return cmd
@ -50,19 +50,19 @@ class APIClient(object):
if not opts:
opts = salt.config.client_config(
os.environ.get(
'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, 'master')
u'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, u'master')
)
)
self.opts = opts
self.localClient = salt.client.get_local_client(self.opts['conf_file'])
self.localClient = salt.client.get_local_client(self.opts[u'conf_file'])
self.runnerClient = salt.runner.RunnerClient(self.opts)
self.wheelClient = salt.wheel.Wheel(self.opts)
self.resolver = salt.auth.Resolver(self.opts)
self.event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
opts=self.opts,
listen=listen)
@ -118,20 +118,20 @@ class APIClient(object):
'''
cmd = dict(cmd) # make copy
client = 'minion' # default to local minion client
mode = cmd.get('mode', 'async') # default to 'async'
client = u'minion' # default to local minion client
mode = cmd.get(u'mode', u'async') # default to 'async'
# check for wheel or runner prefix to fun name to use wheel or runner client
funparts = cmd.get('fun', '').split('.')
if len(funparts) > 2 and funparts[0] in ['wheel', 'runner']: # master
funparts = cmd.get(u'fun', u'').split(u'.')
if len(funparts) > 2 and funparts[0] in [u'wheel', u'runner']: # master
client = funparts[0]
cmd['fun'] = '.'.join(funparts[1:]) # strip prefix
cmd[u'fun'] = u'.'.join(funparts[1:]) # strip prefix
if not ('token' in cmd or
('eauth' in cmd and 'password' in cmd and 'username' in cmd)):
raise EauthAuthenticationError('No authentication credentials given')
if not (u'token' in cmd or
(u'eauth' in cmd and u'password' in cmd and u'username' in cmd)):
raise EauthAuthenticationError(u'No authentication credentials given')
executor = getattr(self, '{0}_{1}'.format(client, mode))
executor = getattr(self, u'{0}_{1}'.format(client, mode))
result = executor(**cmd)
return result
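For reference, a stripped-down sketch of the routing rule used in run(): a 'wheel.' or 'runner.' prefix on the fun name (with at least two remaining segments) selects that master-side client and is stripped before dispatch, while everything else stays on the minion client. The example commands are invented:

    def pick_client(cmd):
        client = u'minion'
        funparts = cmd.get(u'fun', u'').split(u'.')
        if len(funparts) > 2 and funparts[0] in [u'wheel', u'runner']:
            client = funparts[0]
            cmd[u'fun'] = u'.'.join(funparts[1:])   # strip the prefix
        return client, cmd

    print(pick_client({u'fun': u'runner.jobs.list_jobs'}))   # routed to the runner client, fun becomes 'jobs.list_jobs'
    print(pick_client({u'fun': u'test.ping'}))               # stays on the minion client, fun unchanged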
@ -204,9 +204,9 @@ class APIClient(object):
Adds client per the command.
'''
cmd['client'] = 'minion'
if len(cmd['module'].split('.')) > 2 and cmd['module'].split('.')[0] in ['runner', 'wheel']:
cmd['client'] = 'master'
cmd[u'client'] = u'minion'
if len(cmd[u'module'].split(u'.')) > 2 and cmd[u'module'].split(u'.')[0] in [u'runner', u'wheel']:
cmd[u'client'] = u'master'
return self._signature(cmd)
def _signature(self, cmd):
@ -216,20 +216,20 @@ class APIClient(object):
'''
result = {}
client = cmd.get('client', 'minion')
if client == 'minion':
cmd['fun'] = 'sys.argspec'
cmd['kwarg'] = dict(module=cmd['module'])
client = cmd.get(u'client', u'minion')
if client == u'minion':
cmd[u'fun'] = u'sys.argspec'
cmd[u'kwarg'] = dict(module=cmd[u'module'])
result = self.run(cmd)
elif client == 'master':
parts = cmd['module'].split('.')
elif client == u'master':
parts = cmd[u'module'].split(u'.')
client = parts[0]
module = '.'.join(parts[1:]) # strip prefix
if client == 'wheel':
module = u'.'.join(parts[1:]) # strip prefix
if client == u'wheel':
functions = self.wheelClient.functions
elif client == 'runner':
elif client == u'runner':
functions = self.runnerClient.functions
result = {'master': salt.utils.argspec_report(functions, module)}
result = {u'master': salt.utils.argspec_report(functions, module)}
return result
def create_token(self, creds):
@ -274,20 +274,20 @@ class APIClient(object):
tokenage = self.resolver.mk_token(creds)
except Exception as ex:
raise EauthAuthenticationError(
"Authentication failed with {0}.".format(repr(ex)))
u"Authentication failed with {0}.".format(repr(ex)))
if 'token' not in tokenage:
raise EauthAuthenticationError("Authentication failed with provided credentials.")
if u'token' not in tokenage:
raise EauthAuthenticationError(u"Authentication failed with provided credentials.")
# Grab eauth config for the current backend for the current user
tokenage_eauth = self.opts['external_auth'][tokenage['eauth']]
if tokenage['name'] in tokenage_eauth:
tokenage['perms'] = tokenage_eauth[tokenage['name']]
tokenage_eauth = self.opts[u'external_auth'][tokenage[u'eauth']]
if tokenage[u'name'] in tokenage_eauth:
tokenage[u'perms'] = tokenage_eauth[tokenage[u'name']]
else:
tokenage['perms'] = tokenage_eauth['*']
tokenage[u'perms'] = tokenage_eauth[u'*']
tokenage['user'] = tokenage['name']
tokenage['username'] = tokenage['name']
tokenage[u'user'] = tokenage[u'name']
tokenage[u'username'] = tokenage[u'name']
return tokenage
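The perms lookup above falls back from the per-user entry to the '*' wildcard entry of the matching eauth backend. A sketch with an assumed external_auth layout and a made-up token:

    external_auth = {u'pam': {u'fred': [u'test.*'], u'*': [u'.*']}}   # assumed master config shape
    tokenage = {u'eauth': u'pam', u'name': u'fred', u'token': u'abcdef'}

    tokenage_eauth = external_auth[tokenage[u'eauth']]
    if tokenage[u'name'] in tokenage_eauth:
        tokenage[u'perms'] = tokenage_eauth[tokenage[u'name']]
    else:
        tokenage[u'perms'] = tokenage_eauth[u'*']

    print(tokenage[u'perms'])   # the per-user entry ['test.*'] wins over '*'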
@ -300,11 +300,11 @@ class APIClient(object):
result = self.resolver.get_token(token)
except Exception as ex:
raise EauthAuthenticationError(
"Token validation failed with {0}.".format(repr(ex)))
u"Token validation failed with {0}.".format(repr(ex)))
return result
def get_event(self, wait=0.25, tag='', full=False):
def get_event(self, wait=0.25, tag=u'', full=False):
'''
Get a single salt event.
If no events are available, then block for up to ``wait`` seconds.
@ -322,4 +322,4 @@ class APIClient(object):
Need to convert this to a master call with appropriate authentication
'''
return self.event.fire_event(data, tagify(tag, 'wui'))
return self.event.fire_event(data, tagify(tag, u'wui'))


@ -23,10 +23,12 @@ import salt.utils.event
import salt.utils.jid
import salt.utils.job
import salt.utils.lazy
import salt.utils.platform
import salt.utils.process
import salt.utils.versions
import salt.transport
import salt.log.setup
import salt.ext.six as six
from salt.ext import six
# Import 3rd-party libs
import tornado.stack_context
@ -34,18 +36,18 @@ import tornado.stack_context
log = logging.getLogger(__name__)
CLIENT_INTERNAL_KEYWORDS = frozenset([
'client',
'cmd',
'eauth',
'fun',
'kwarg',
'match',
'token',
'__jid__',
'__tag__',
'__user__',
'username',
'password'
u'client',
u'cmd',
u'eauth',
u'fun',
u'kwarg',
u'match',
u'token',
u'__jid__',
u'__tag__',
u'__user__',
u'username',
u'password'
])
@ -77,9 +79,9 @@ class ClientFuncsDict(collections.MutableMapping):
raise KeyError
def wrapper(*args, **kwargs):
low = {'fun': key,
'args': args,
'kwargs': kwargs,
low = {u'fun': key,
u'args': args,
u'kwargs': kwargs,
}
pub_data = {}
# Copy kwargs keys so we can iterate over and pop the pub data
@ -87,18 +89,18 @@ class ClientFuncsDict(collections.MutableMapping):
# pull out pub_data if you have it
for kwargs_key in kwargs_keys:
if kwargs_key.startswith('__pub_'):
if kwargs_key.startswith(u'__pub_'):
pub_data[kwargs_key] = kwargs.pop(kwargs_key)
async_pub = self.client._gen_async_pub(pub_data.get('__pub_jid'))
async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid'))
user = salt.utils.get_specific_user()
return self.client._proc_function(
key,
low,
user,
async_pub['tag'], # TODO: fix
async_pub['jid'], # TODO: fix
async_pub[u'tag'], # TODO: fix
async_pub[u'jid'], # TODO: fix
False, # Don't daemonize
)
return wrapper
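The wrapper above separates publisher metadata from the real keyword arguments before building the low chunk. A self-contained sketch of that split, with made-up kwargs:

    kwargs = {u'timeout': 5, u'__pub_jid': u'20170816105248000000', u'__pub_user': u'root'}

    pub_data = {}
    for kwargs_key in list(kwargs):            # copy the keys so popping while iterating is safe
        if kwargs_key.startswith(u'__pub_'):
            pub_data[kwargs_key] = kwargs.pop(kwargs_key)

    print(sorted(pub_data))   # ['__pub_jid', '__pub_user']
    print(kwargs)             # only the real keyword arguments remain, e.g. {'timeout': 5}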
@ -129,14 +131,14 @@ class SyncClientMixin(object):
Execute a function through the master network interface.
'''
load = kwargs
load['cmd'] = self.client
load[u'cmd'] = self.client
channel = salt.transport.Channel.factory(self.opts,
crypt='clear',
usage='master_call')
crypt=u'clear',
usage=u'master_call')
ret = channel.send(load)
if isinstance(ret, collections.Mapping):
if 'error' in ret:
salt.utils.error.raise_error(**ret['error'])
if u'error' in ret:
salt.utils.error.raise_error(**ret[u'error'])
return ret
def cmd_sync(self, low, timeout=None, full_return=False):
@ -155,19 +157,19 @@ class SyncClientMixin(object):
'eauth': 'pam',
})
'''
event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True)
event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=True)
job = self.master_call(**low)
ret_tag = salt.utils.event.tagify('ret', base=job['tag'])
ret_tag = salt.utils.event.tagify(u'ret', base=job[u'tag'])
if timeout is None:
timeout = self.opts.get('rest_timeout', 300)
timeout = self.opts.get(u'rest_timeout', 300)
ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True)
if ret is None:
raise salt.exceptions.SaltClientTimeout(
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
u"RunnerClient job '{0}' timed out".format(job[u'jid']),
jid=job[u'jid'])
return ret if full_return else ret['data']['return']
return ret if full_return else ret[u'data'][u'return']
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
@ -202,40 +204,40 @@ class SyncClientMixin(object):
arg = tuple()
if not isinstance(arg, list) and not isinstance(arg, tuple):
raise salt.exceptions.SaltInvocationError(
'arg must be formatted as a list/tuple'
u'arg must be formatted as a list/tuple'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
'pub_data must be formatted as a dictionary'
u'pub_data must be formatted as a dictionary'
)
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
'kwarg must be formatted as a dictionary'
u'kwarg must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(
arg,
no_parse=self.opts.get('no_parse', []))
no_parse=self.opts.get(u'no_parse', []))
# if you were passed kwarg, add it to arglist
if kwarg:
kwarg['__kwarg__'] = True
kwarg[u'__kwarg__'] = True
arglist.append(kwarg)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
low = {'fun': fun,
'arg': args,
'kwarg': kwargs}
low = {u'fun': fun,
u'arg': args,
u'kwarg': kwargs}
return self.low(fun, low, print_event=print_event, full_return=full_return)
@property
def mminion(self):
if not hasattr(self, '_mminion'):
if not hasattr(self, u'_mminion'):
self._mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False)
return self._mminion
@ -244,15 +246,15 @@ class SyncClientMixin(object):
Check for deprecated usage and allow until Salt Oxygen.
'''
msg = []
if 'args' in low:
msg.append('call with arg instead')
low['arg'] = low.pop('args')
if 'kwargs' in low:
msg.append('call with kwarg instead')
low['kwarg'] = low.pop('kwargs')
if u'args' in low:
msg.append(u'call with arg instead')
low[u'arg'] = low.pop(u'args')
if u'kwargs' in low:
msg.append(u'call with kwarg instead')
low[u'kwarg'] = low.pop(u'kwargs')
if msg:
salt.utils.warn_until('Oxygen', ' '.join(msg))
salt.utils.versions.warn_until(u'Oxygen', u' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)
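A minimal illustration of what the shim above does to a legacy low chunk before warn_until fires: 'args'/'kwargs' are renamed to 'arg'/'kwarg' and the deprecation messages are collected. The low dict here is invented:

    low = {u'fun': u'jobs.list_jobs', u'args': [], u'kwargs': {u'ext_source': None}}

    msg = []
    if u'args' in low:
        msg.append(u'call with arg instead')
        low[u'arg'] = low.pop(u'args')
    if u'kwargs' in low:
        msg.append(u'call with kwarg instead')
        low[u'kwarg'] = low.pop(u'kwargs')

    print(sorted(low))      # ['arg', 'fun', 'kwarg'] -- the legacy keys are gone
    print(u' '.join(msg))   # call with arg instead call with kwarg instead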
@ -266,13 +268,13 @@ class SyncClientMixin(object):
class_name = self.__class__.__name__.lower()
except AttributeError:
log.warning(
'Unable to determine class name',
u'Unable to determine class name',
exc_info_on_loglevel=logging.DEBUG
)
return True
try:
return self.opts['{0}_returns'.format(class_name)]
return self.opts[u'{0}_returns'.format(class_name)]
except KeyError:
# No such option, assume this isn't one we care about gating and
# just return True.
@ -295,24 +297,24 @@ class SyncClientMixin(object):
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get('__jid__', salt.utils.jid.gen_jid())
tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
jid = low.get(u'__jid__', salt.utils.jid.gen_jid())
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {'fun': '{0}.{1}'.format(self.client, fun),
'jid': jid,
'user': low.get('__user__', 'UNKNOWN'),
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
u'jid': jid,
u'user': low.get(u'__user__', u'UNKNOWN'),
}
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, 'print_async_event') \
if hasattr(self, u'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
@ -327,12 +329,12 @@ class SyncClientMixin(object):
# TODO: document these, and test that they exist
# TODO: Other things to inject??
func_globals = {'__jid__': jid,
'__user__': data['user'],
'__tag__': tag,
func_globals = {u'__jid__': jid,
u'__user__': data[u'user'],
u'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
'__jid_event__': weakref.proxy(namespaced_event),
u'__jid_event__': weakref.proxy(namespaced_event),
}
try:
@ -344,9 +346,9 @@ class SyncClientMixin(object):
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
if u'.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
mod, _ = mod_name.split(u'.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
@ -362,86 +364,87 @@ class SyncClientMixin(object):
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in
f_call = None
if 'arg' not in low:
if u'arg' not in low:
f_call = salt.utils.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get('args', ())
args = f_call.get(u'args', ())
else:
args = low['arg']
args = low[u'arg']
if 'kwarg' not in low:
if u'kwarg' not in low:
log.critical(
'kwargs must be passed inside the low data within the '
'\'kwarg\' key. See usage of '
'salt.utils.args.parse_input() and '
'salt.minion.load_args_and_kwargs() elsewhere in the '
'codebase.'
u'kwargs must be passed inside the low data within the '
u'\'kwarg\' key. See usage of '
u'salt.utils.args.parse_input() and '
u'salt.minion.load_args_and_kwargs() elsewhere in the '
u'codebase.'
)
kwargs = {}
else:
kwargs = low['kwarg']
kwargs = low[u'kwarg']
# Update the event data with loaded args and kwargs
data['fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals['__jid_event__'].fire_event(data, 'new')
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals[u'__jid_event__'].fire_event(data, u'new')
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
data['return'] = self.functions[fun](*args, **kwargs)
data['success'] = True
if isinstance(data['return'], dict) and 'data' in data['return']:
data[u'return'] = self.functions[fun](*args, **kwargs)
data[u'success'] = True
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
# some functions can return boolean values
data['success'] = salt.utils.check_state_result(data['return']['data'])
data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data['return'] = str(ex)
data[u'return'] = str(ex)
else:
data['return'] = 'Exception occurred in {0} {1}: {2}'.format(
data[u'return'] = u'Exception occurred in {0} {1}: {2}'.format(
self.client,
fun,
traceback.format_exc(),
)
data['success'] = False
namespaced_event.fire_event(data, 'ret')
data[u'success'] = False
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
'id': self.opts['id'],
'tgt': self.opts['id'],
'jid': data['jid'],
'return': data,
u'id': self.opts[u'id'],
u'tgt': self.opts[u'id'],
u'jid': data[u'jid'],
u'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error('Could not store job cache info. '
'Job details for this run may be unavailable.')
log.error(u'Could not store job cache info. '
u'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, u'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info('Runner completed: {0}'.format(data['jid']))
log.info(u'Runner completed: %s', data[u'jid'])
del event
del namespaced_event
return data if full_return else data['return']
return data if full_return else data[u'return']
def get_docs(self, arg=None):
'''
Return a dictionary of functions and the inline documentation for each
'''
if arg:
if '*' in arg:
if u'*' in arg:
target_mod = arg
_use_fnmatch = True
else:
target_mod = arg + '.' if not arg.endswith('.') else arg
target_mod = arg + u'.' if not arg.endswith(u'.') else arg
if _use_fnmatch:
docs = [(fun, self.functions[fun].__doc__)
for fun in fnmatch.filter(self.functions, target_mod)]
@ -468,7 +471,7 @@ class AsyncClientMixin(object):
Run this method in a multiprocess target to execute the function in a
multiprocess and fire the return data on the event bus
'''
if daemonize and not salt.utils.is_windows():
if daemonize and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
@ -478,9 +481,9 @@ class AsyncClientMixin(object):
salt.log.setup.setup_multiprocessing_logging()
# pack a few things into low
low['__jid__'] = jid
low['__user__'] = user
low['__tag__'] = tag
low[u'__jid__'] = jid
low[u'__user__'] = user
low[u'__tag__'] = tag
return self.low(fun, low, full_return=False)
@ -508,9 +511,9 @@ class AsyncClientMixin(object):
if jid is None:
jid = salt.utils.jid.gen_jid()
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
return {'tag': tag, 'jid': jid}
return {u'tag': tag, u'jid': jid}
def async(self, fun, low, user='UNKNOWN', pub=None):
def async(self, fun, low, user=u'UNKNOWN', pub=None):
'''
Execute the function in a multiprocess and return the event tag to use
to watch for the return
@ -519,7 +522,7 @@ class AsyncClientMixin(object):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._proc_function,
args=(fun, low, user, async_pub['tag'], async_pub['jid']))
args=(fun, low, user, async_pub[u'tag'], async_pub[u'jid']))
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
@ -535,29 +538,29 @@ class AsyncClientMixin(object):
return
# if we are "quiet", don't print
if self.opts.get('quiet', False):
if self.opts.get(u'quiet', False):
return
# some suffixes we don't want to print
if suffix in ('new',):
if suffix in (u'new',):
return
try:
outputter = self.opts.get('output', event.get('outputter', None) or event.get('return').get('outputter'))
outputter = self.opts.get(u'output', event.get(u'outputter', None) or event.get(u'return').get(u'outputter'))
except AttributeError:
outputter = None
# if this is a ret, we have our own set of rules
if suffix == 'ret':
if suffix == u'ret':
# Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict two keys: 'data' and 'outputter'
if isinstance(event.get('return'), dict) \
and set(event['return']) == set(('data', 'outputter')):
event_data = event['return']['data']
outputter = event['return']['outputter']
if isinstance(event.get(u'return'), dict) \
and set(event[u'return']) == set((u'data', u'outputter')):
event_data = event[u'return'][u'data']
outputter = event[u'return'][u'outputter']
else:
event_data = event['return']
event_data = event[u'return']
else:
event_data = {'suffix': suffix, 'event': event}
event_data = {u'suffix': suffix, u'event': event}
salt.output.display_output(event_data, outputter, self.opts)
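For the 'ret' suffix branch above: when the return payload is a dict with exactly the keys 'data' and 'outputter', the outputter travels with the event; otherwise the whole return is displayed with whatever outputter is configured. A sketch against a fabricated event:

    event = {u'return': {u'data': {u'web1': True}, u'outputter': u'highstate'}}

    if isinstance(event.get(u'return'), dict) \
            and set(event[u'return']) == set((u'data', u'outputter')):
        event_data = event[u'return'][u'data']
        outputter = event[u'return'][u'outputter']
    else:
        event_data = event[u'return']
        outputter = None

    print(outputter)    # highstate
    print(event_data)   # {'web1': True}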


@ -20,7 +20,7 @@ class NetapiClient(object):
'''
def __init__(self, opts):
self.opts = opts
self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager')
self.process_manager = salt.utils.process.ProcessManager(name=u'NetAPIProcessManager')
self.netapi = salt.loader.netapi(self.opts)
def run(self):
@ -28,11 +28,11 @@ class NetapiClient(object):
Load and start all available api modules
'''
if not len(self.netapi):
log.error("Did not find any netapi configurations, nothing to start")
log.error(u"Did not find any netapi configurations, nothing to start")
for fun in self.netapi:
if fun.endswith('.start'):
log.info('Starting {0} netapi module'.format(fun))
if fun.endswith(u'.start'):
log.info(u'Starting %s netapi module', fun)
self.process_manager.add_process(self.netapi[fun])
# Install the SIGINT/SIGTERM handlers if not done so far


@ -12,9 +12,9 @@ import logging
# Import Salt libs
import salt.config
import salt.client
import salt.utils
import salt.utils.kinds as kinds
import salt.utils.versions
import salt.syspaths as syspaths
from salt.utils import kinds
try:
from raet import raeting, nacling
@ -32,7 +32,7 @@ class LocalClient(salt.client.LocalClient):
The RAET LocalClient
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
mopts=None):
salt.client.LocalClient.__init__(self, c_path, mopts)
@ -41,22 +41,22 @@ class LocalClient(salt.client.LocalClient):
tgt,
fun,
arg=(),
tgt_type='glob',
ret='',
jid='',
tgt_type=u'glob',
ret=u'',
jid=u'',
timeout=5,
**kwargs):
'''
Publish the command!
'''
if 'expr_form' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
if u'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
tgt_type = kwargs.pop(u'expr_form')
payload_kwargs = self._prep_pub(
tgt,
@ -68,21 +68,21 @@ class LocalClient(salt.client.LocalClient):
timeout=timeout,
**kwargs)
kind = self.opts['__role']
kind = self.opts[u'__role']
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + "\n")
emsg = (u"Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u"\n")
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
lanename = 'master'
lanename = u'master'
else:
emsg = ("Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + '\n')
emsg = (u"Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u'\n')
raise ValueError(emsg)
sockdirpath = self.opts['sock_dir']
name = 'client' + nacling.uuid(size=18)
sockdirpath = self.opts[u'sock_dir']
name = u'client' + nacling.uuid(size=18)
stack = LaneStack(
name=name,
lanename=lanename,
@ -91,12 +91,12 @@ class LocalClient(salt.client.LocalClient):
manor_yard = RemoteYard(
stack=stack,
lanename=lanename,
name='manor',
name=u'manor',
dirpath=sockdirpath)
stack.addRemote(manor_yard)
route = {'dst': (None, manor_yard.name, 'local_cmd'),
'src': (None, stack.local.name, None)}
msg = {'route': route, 'load': payload_kwargs}
route = {u'dst': (None, manor_yard.name, u'local_cmd'),
u'src': (None, stack.local.name, None)}
msg = {u'route': route, u'load': payload_kwargs}
stack.transmit(msg)
stack.serviceAll()
while True:
@ -104,9 +104,9 @@ class LocalClient(salt.client.LocalClient):
stack.serviceAll()
while stack.rxMsgs:
msg, sender = stack.rxMsgs.popleft()
ret = msg.get('return', {})
if 'ret' in ret:
ret = msg.get(u'return', {})
if u'ret' in ret:
stack.server.close()
return ret['ret']
return ret[u'ret']
stack.server.close()
return ret
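The transmit path above wraps the publish payload in a route/load envelope addressed to the master's 'manor' yard on the lane stack. The envelope shape, with made-up stack and payload values:

    payload_kwargs = {u'fun': u'test.ping', u'tgt': u'*'}   # illustrative publish load

    route = {u'dst': (None, u'manor', u'local_cmd'),
             u'src': (None, u'client' + u'0' * 18, None)}   # made-up client yard name
    msg = {u'route': route, u'load': payload_kwargs}

    print(msg[u'route'][u'dst'])   # (None, 'manor', 'local_cmd')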

File diff suppressed because it is too large


@ -9,6 +9,7 @@ import random
# Import Salt libs
import salt.config
import salt.utils.versions
import salt.syspaths as syspaths
from salt.exceptions import SaltClientError # Temporary
@ -22,7 +23,7 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
mopts=None,
disable_custom_roster=False):
if mopts:
@ -30,15 +31,14 @@ class SSHClient(object):
else:
if os.path.isdir(c_path):
log.warning(
'{0} expects a file path not a directory path({1}) to '
'it\'s \'c_path\' keyword argument'.format(
self.__class__.__name__, c_path
)
u'%s expects a file path not a directory path(%s) to '
u'its \'c_path\' keyword argument',
self.__class__.__name__, c_path
)
self.opts = salt.config.client_config(c_path)
# Salt API should never offer a custom roster!
self.opts['__disable_custom_roster'] = disable_custom_roster
self.opts[u'__disable_custom_roster'] = disable_custom_roster
def _prep_ssh(
self,
@ -46,30 +46,30 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type='glob',
tgt_type=u'glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
if 'expr_form' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
if u'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
tgt_type = kwargs.pop(u'expr_form')
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts['timeout'] = timeout
opts[u'timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
return salt.client.ssh.SSH(opts)
def cmd_iter(
@ -78,8 +78,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
tgt_type=u'glob',
ret=u'',
kwarg=None,
**kwargs):
'''
@ -88,14 +88,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if 'expr_form' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
if u'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
tgt_type = kwargs.pop(u'expr_form')
ssh = self._prep_ssh(
tgt,
@ -105,7 +105,7 @@ class SSHClient(object):
tgt_type,
kwarg,
**kwargs)
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
yield ret
def cmd(self,
@ -113,7 +113,7 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type='glob',
tgt_type=u'glob',
kwarg=None,
**kwargs):
'''
@ -122,14 +122,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if 'expr_form' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
if u'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
tgt_type = kwargs.pop(u'expr_form')
ssh = self._prep_ssh(
tgt,
@ -140,7 +140,7 @@ class SSHClient(object):
kwarg,
**kwargs)
final = {}
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
final.update(ret)
return final
@ -166,16 +166,16 @@ class SSHClient(object):
kwargs = copy.deepcopy(low)
for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
for ignore in [u'tgt', u'fun', u'arg', u'timeout', u'tgt_type', u'kwarg']:
if ignore in kwargs:
del kwargs[ignore]
return self.cmd(low['tgt'],
low['fun'],
low.get('arg', []),
low.get('timeout'),
low.get('tgt_type'),
low.get('kwarg'),
return self.cmd(low[u'tgt'],
low[u'fun'],
low.get(u'arg', []),
low.get(u'timeout'),
low.get(u'tgt_type'),
low.get(u'kwarg'),
**kwargs)
def cmd_async(self, low, timeout=None):
@ -204,8 +204,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
tgt_type=u'glob',
ret=u'',
kwarg=None,
sub=3,
**kwargs):
@ -226,24 +226,24 @@ class SSHClient(object):
.. versionadded:: 2017.7.0
'''
if 'expr_form' in kwargs:
salt.utils.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
if u'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
tgt_type = kwargs.pop(u'expr_form')
minion_ret = self.cmd(tgt,
'sys.list_functions',
u'sys.list_functions',
tgt_type=tgt_type,
**kwargs)
minions = list(minion_ret)
random.shuffle(minions)
f_tgt = []
for minion in minions:
if fun in minion_ret[minion]['return']:
if fun in minion_ret[minion][u'return']:
f_tgt.append(minion)
if len(f_tgt) >= sub:
break
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type='list', ret=ret, kwarg=kwarg, **kwargs)
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type=u'list', ret=ret, kwarg=kwarg, **kwargs)
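cmd_subset() first asks the whole target for sys.list_functions, then shuffles the responders and keeps at most 'sub' minions that actually expose the requested function before re-running over that list target. A standalone sketch of the selection step, with a fabricated sys.list_functions return:

    import random

    def pick_subset(minion_ret, fun, sub=3):
        minions = list(minion_ret)
        random.shuffle(minions)
        f_tgt = []
        for minion in minions:
            if fun in minion_ret[minion][u'return']:
                f_tgt.append(minion)
            if len(f_tgt) >= sub:
                break
        return f_tgt

    minion_ret = {                                   # fabricated sys.list_functions output
        u'web1': {u'return': [u'test.ping', u'state.apply']},
        u'web2': {u'return': [u'test.ping']},
        u'db1': {u'return': [u'state.apply']},
    }
    print(pick_subset(minion_ret, u'test.ping', sub=1))   # one of ['web1'] or ['web2']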


@ -21,12 +21,12 @@ import salt.utils.vt
log = logging.getLogger(__name__)
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M)
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*')
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M) # future lint: disable=non-unicode-string
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*') # future lint: disable=non-unicode-string
# Keep these in sync with ./__init__.py
RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)')
RSTR = u'_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') # future lint: disable=non-unicode-string
class NoPasswdError(Exception):
@ -41,7 +41,7 @@ def gen_key(path):
'''
Generate a key for use with salt-ssh
'''
cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
cmd = u'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
subprocess.call(cmd, shell=True)
@ -51,12 +51,12 @@ def gen_shell(opts, **kwargs):
'''
Return the correct shell interface for the target system
'''
if kwargs['winrm']:
if kwargs[u'winrm']:
try:
import saltwinshell
shell = saltwinshell.Shell(opts, **kwargs)
except ImportError:
log.error('The saltwinshell library is not available')
log.error(u'The saltwinshell library is not available')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
shell = Shell(opts, **kwargs)
@ -86,7 +86,7 @@ class Shell(object):
ssh_options=None):
self.opts = opts
# ssh <ipv6>, but scp [<ipv6]:/path
self.host = host.strip('[]')
self.host = host.strip(u'[]')
self.user = user
self.port = port
self.passwd = str(passwd) if passwd else passwd
@ -97,18 +97,18 @@ class Shell(object):
self.mods = mods
self.identities_only = identities_only
self.remote_port_forwards = remote_port_forwards
self.ssh_options = '' if ssh_options is None else ssh_options
self.ssh_options = u'' if ssh_options is None else ssh_options
def get_error(self, errstr):
'''
Parse out an error and return a targeted error string
'''
for line in errstr.split('\n'):
if line.startswith('ssh:'):
for line in errstr.split(u'\n'):
if line.startswith(u'ssh:'):
return line
if line.startswith('Pseudo-terminal'):
if line.startswith(u'Pseudo-terminal'):
continue
if 'to the list of known hosts.' in line:
if u'to the list of known hosts.' in line:
continue
return line
return errstr
@ -118,36 +118,36 @@ class Shell(object):
Return options for the ssh command base for Salt to call
'''
options = [
'KbdInteractiveAuthentication=no',
u'KbdInteractiveAuthentication=no',
]
if self.passwd:
options.append('PasswordAuthentication=yes')
options.append(u'PasswordAuthentication=yes')
else:
options.append('PasswordAuthentication=no')
if self.opts.get('_ssh_version', (0,)) > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get('known_hosts_file')
options.append(u'PasswordAuthentication=no')
if self.opts.get(u'_ssh_version', (0,)) > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get(u'known_hosts_file')
if known_hosts and os.path.isfile(known_hosts):
options.append('UserKnownHostsFile={0}'.format(known_hosts))
options.append(u'UserKnownHostsFile={0}'.format(known_hosts))
if self.port:
options.append('Port={0}'.format(self.port))
options.append(u'Port={0}'.format(self.port))
if self.priv:
options.append('IdentityFile={0}'.format(self.priv))
options.append(u'IdentityFile={0}'.format(self.priv))
if self.user:
options.append('User={0}'.format(self.user))
options.append(u'User={0}'.format(self.user))
if self.identities_only:
options.append('IdentitiesOnly=yes')
options.append(u'IdentitiesOnly=yes')
ret = []
for option in options:
ret.append('-o {0} '.format(option))
return ''.join(ret)
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
def _passwd_opts(self):
'''
@ -156,42 +156,42 @@ class Shell(object):
# TODO ControlMaster does not work without ControlPath
# user could take advantage of it if they set ControlPath in their
# ssh config. Also, ControlPersist not widely available.
options = ['ControlMaster=auto',
'StrictHostKeyChecking=no',
options = [u'ControlMaster=auto',
u'StrictHostKeyChecking=no',
]
if self.opts['_ssh_version'] > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
if self.opts[u'_ssh_version'] > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
if self.passwd:
options.extend(['PasswordAuthentication=yes',
'PubkeyAuthentication=yes'])
options.extend([u'PasswordAuthentication=yes',
u'PubkeyAuthentication=yes'])
else:
options.extend(['PasswordAuthentication=no',
'PubkeyAuthentication=yes',
'KbdInteractiveAuthentication=no',
'ChallengeResponseAuthentication=no',
'BatchMode=yes'])
options.extend([u'PasswordAuthentication=no',
u'PubkeyAuthentication=yes',
u'KbdInteractiveAuthentication=no',
u'ChallengeResponseAuthentication=no',
u'BatchMode=yes'])
if self.port:
options.append('Port={0}'.format(self.port))
options.append(u'Port={0}'.format(self.port))
if self.user:
options.append('User={0}'.format(self.user))
options.append(u'User={0}'.format(self.user))
if self.identities_only:
options.append('IdentitiesOnly=yes')
options.append(u'IdentitiesOnly=yes')
ret = []
for option in options:
ret.append('-o {0} '.format(option))
return ''.join(ret)
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
def _ssh_opts(self):
return ' '.join(['-o {0}'.format(opt)
for opt in self.ssh_options])
return u' '.join([u'-o {0}'.format(opt)
for opt in self.ssh_options])
def _copy_id_str_old(self):
'''
@ -200,9 +200,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
return u"{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -218,9 +218,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return "{0} {1} {2} -p {3} {4} {5}@{6}".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
return u"{0} {1} {2} -p {3} {4} {5}@{6}".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -233,11 +233,11 @@ class Shell(object):
Execute ssh-copy-id to plant the id file on the target
'''
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old())
if salt.defaults.exitcodes.EX_OK != retcode and 'Usage' in stderr:
if salt.defaults.exitcodes.EX_OK != retcode and u'Usage' in stderr:
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new())
return stdout, stderr, retcode
def _cmd_str(self, cmd, ssh='ssh'):
def _cmd_str(self, cmd, ssh=u'ssh'):
'''
Return the cmd string to execute
'''
@ -246,21 +246,21 @@ class Shell(object):
# need to deliver the SHIM to the remote host and execute it there
command = [ssh]
if ssh != 'scp':
if ssh != u'scp':
command.append(self.host)
if self.tty and ssh == 'ssh':
command.append('-t -t')
if self.tty and ssh == u'ssh':
command.append(u'-t -t')
if self.passwd or self.priv:
command.append(self.priv and self._key_opts() or self._passwd_opts())
if ssh != 'scp' and self.remote_port_forwards:
command.append(' '.join(['-R {0}'.format(item)
for item in self.remote_port_forwards.split(',')]))
if ssh != u'scp' and self.remote_port_forwards:
command.append(u' '.join([u'-R {0}'.format(item)
for item in self.remote_port_forwards.split(u',')]))
if self.ssh_options:
command.append(self._ssh_opts())
command.append(cmd)
return ' '.join(command)
return u' '.join(command)
def _old_run_cmd(self, cmd):
'''
@ -277,7 +277,7 @@ class Shell(object):
data = proc.communicate()
return data[0], data[1], proc.returncode
except Exception:
return ('local', 'Unknown Error', None)
return (u'local', u'Unknown Error', None)
def _run_nb_cmd(self, cmd):
'''
@ -301,7 +301,7 @@ class Shell(object):
err = self.get_error(err)
yield out, err, rcode
except Exception:
yield ('', 'Unknown Error', None)
yield (u'', u'Unknown Error', None)
def exec_nb_cmd(self, cmd):
'''
@ -312,9 +312,9 @@ class Shell(object):
rcode = None
cmd = self._cmd_str(cmd)
logmsg = 'Executing non-blocking command: {0}'.format(cmd)
logmsg = u'Executing non-blocking command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, ('*' * 6))
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
log.debug(logmsg)
for out, err, rcode in self._run_nb_cmd(cmd):
@ -323,7 +323,7 @@ class Shell(object):
if err is not None:
r_err.append(err)
yield None, None, None
yield ''.join(r_out), ''.join(r_err), rcode
yield u''.join(r_out), u''.join(r_err), rcode
def exec_cmd(self, cmd):
'''
@ -331,11 +331,11 @@ class Shell(object):
'''
cmd = self._cmd_str(cmd)
logmsg = 'Executing command: {0}'.format(cmd)
logmsg = u'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, ('*' * 6))
if 'decode("base64")' in logmsg or 'base64.b64decode(' in logmsg:
log.debug('Executed SHIM command. Command logged to TRACE')
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
if u'decode("base64")' in logmsg or u'base64.b64decode(' in logmsg:
log.debug(u'Executed SHIM command. Command logged to TRACE')
log.trace(logmsg)
else:
log.debug(logmsg)
@ -348,19 +348,19 @@ class Shell(object):
scp a file or files to a remote system
'''
if makedirs:
self.exec_cmd('mkdir -p {0}'.format(os.path.dirname(remote)))
self.exec_cmd(u'mkdir -p {0}'.format(os.path.dirname(remote)))
# scp needs [<ipv6}
host = self.host
if ':' in host:
host = '[{0}]'.format(host)
if u':' in host:
host = u'[{0}]'.format(host)
cmd = '{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh='scp')
cmd = u'{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh=u'scp')
logmsg = 'Executing command: {0}'.format(cmd)
logmsg = u'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, ('*' * 6))
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
log.debug(logmsg)
return self._run_cmd(cmd)
@ -374,16 +374,16 @@ class Shell(object):
cmd,
shell=True,
log_stdout=True,
log_stdout_level='trace',
log_stdout_level=u'trace',
log_stderr=True,
log_stderr_level='trace',
log_stderr_level=u'trace',
stream_stdout=False,
stream_stderr=False)
sent_passwd = 0
send_password = True
ret_stdout = ''
ret_stderr = ''
old_stdout = ''
ret_stdout = u''
ret_stderr = u''
old_stdout = u''
try:
while term.has_unread_data:
@ -400,26 +400,26 @@ class Shell(object):
send_password = False
if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password:
if not self.passwd:
return '', 'Permission denied, no authentication information', 254
return u'', u'Permission denied, no authentication information', 254
if sent_passwd < passwd_retries:
term.sendline(self.passwd)
sent_passwd += 1
continue
else:
# asking for a password, and we can't seem to send it
return '', 'Password authentication failed', 254
return u'', u'Password authentication failed', 254
elif buff and KEY_VALID_RE.search(buff):
if key_accept:
term.sendline('yes')
term.sendline(u'yes')
continue
else:
term.sendline('no')
ret_stdout = ('The host key needs to be accepted, to '
'auto accept run salt-ssh with the -i '
'flag:\n{0}').format(stdout)
return ret_stdout, '', 254
elif buff and buff.endswith('_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(',', ':')) + '|_E|0|'
term.sendline(u'no')
ret_stdout = (u'The host key needs to be accepted, to '
u'auto accept run salt-ssh with the -i '
u'flag:\n{0}').format(stdout)
return ret_stdout, u'', 254
elif buff and buff.endswith(u'_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(u',', u':')) + u'|_E|0|'
term.sendline(mods_raw)
if stdout:
old_stdout = stdout
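The logging changes in this file mask the SSH password with asterisks before the command line is written at debug level, and route shim payloads containing base64 to TRACE. A minimal standalone sketch of the masking pattern; mask_secret and the sshpass example command are illustrative and not part of Salt:

    import logging

    log = logging.getLogger(__name__)


    def mask_secret(message, secret, mask_char=u'*', mask_len=6):
        '''
        Replace every occurrence of ``secret`` in ``message`` with a
        fixed-width mask so credentials never reach the log output.
        The message is returned unchanged when no secret is configured.
        '''
        if not secret:
            return message
        return message.replace(secret, mask_char * mask_len)


    if __name__ == '__main__':
        logging.basicConfig(level=logging.DEBUG)
        passwd = u'hunter2'
        # Illustrative command only; Salt builds its own ssh command line.
        cmd = u'sshpass -p {0} ssh user@example.com true'.format(passwd)
        # Only the masked form goes to the log; the real command runs elsewhere.
        log.debug(u'Executing command: %s', mask_secret(cmd, passwd))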

View File

@ -18,8 +18,8 @@ import os
import stat
import subprocess
THIN_ARCHIVE = 'salt-thin.tgz'
EXT_ARCHIVE = 'salt-ext_mods.tgz'
THIN_ARCHIVE = u'salt-thin.tgz'
EXT_ARCHIVE = u'salt-ext_mods.tgz'
# Keep these in sync with salt/defaults/exitcodes.py
EX_THIN_DEPLOY = 11
@ -30,7 +30,9 @@ EX_CANTCREAT = 73
class OBJ(object):
"""An empty class for holding instance attribute values."""
'''
An empty class for holding instance attribute values.
'''
pass
@ -52,7 +54,7 @@ def get_system_encoding():
# and reset to None
encoding = None
if not sys.platform.startswith('win') and sys.stdin is not None:
if not sys.platform.startswith(u'win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -78,16 +80,16 @@ def get_system_encoding():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith('darwin'):
if sys.platform.startswith(u'darwin'):
# Mac OS X uses UTF-8
encoding = 'utf-8'
elif sys.platform.startswith('win'):
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
# Windows uses a configurable encoding; on Windows, Python uses the name "mbcs"
# to refer to whatever the currently configured encoding is.
encoding = 'mbcs'
encoding = u'mbcs'
else:
# On linux default to ascii as a last resort
encoding = 'ascii'
encoding = u'ascii'
return encoding
@ -95,14 +97,14 @@ def is_windows():
'''
Simple function to return if a host is Windows or not
'''
return sys.platform.startswith('win')
return sys.platform.startswith(u'win')
def need_deployment():
"""
'''
Salt thin needs to be deployed - prep the target directory and emit the
delimiter and exit code that signals a required deployment.
"""
'''
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
old_umask = os.umask(0o077)
@ -119,39 +121,43 @@ def need_deployment():
# Attack detected
need_deployment()
# If SUDOing then also give the super user group write permissions
sudo_gid = os.environ.get('SUDO_GID')
sudo_gid = os.environ.get(u'SUDO_GID')
if sudo_gid:
try:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
stt = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
except OSError:
sys.stdout.write('\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
'and is not root, be certain the user is in the same group\nas the login user')
sys.stdout.write(u'\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
u'and is not root, be certain the user is in the same group\nas the login user')
sys.exit(1)
# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.stdout.write(u"{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.exit(EX_THIN_DEPLOY)
# Adapted from salt.utils.get_hash()
def get_hash(path, form='sha1', chunk_size=4096):
"""Generate a hash digest string for a file."""
def get_hash(path, form=u'sha1', chunk_size=4096):
'''
Generate a hash digest string for a file.
'''
try:
hash_type = getattr(hashlib, form)
except AttributeError:
raise ValueError('Invalid hash type: {0}'.format(form))
with open(path, 'rb') as ifile:
raise ValueError(u'Invalid hash type: {0}'.format(form))
with open(path, u'rb') as ifile:
hash_obj = hash_type()
# read the file in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b''):
for chunk in iter(lambda: ifile.read(chunk_size), b''): # future lint: disable=non-unicode-string
hash_obj.update(chunk)
return hash_obj.hexdigest()
def unpack_thin(thin_path):
"""Unpack the Salt thin archive."""
'''
Unpack the Salt thin archive.
'''
tfile = tarfile.TarFile.gzopen(thin_path)
old_umask = os.umask(0o077)
tfile.extractall(path=OPTIONS.saltdir)
@ -161,34 +167,40 @@ def unpack_thin(thin_path):
def need_ext():
"""Signal that external modules need to be deployed."""
sys.stdout.write("{0}\next_mods\n".format(OPTIONS.delimiter))
'''
Signal that external modules need to be deployed.
'''
sys.stdout.write(u"{0}\next_mods\n".format(OPTIONS.delimiter))
sys.exit(EX_MOD_DEPLOY)
def unpack_ext(ext_path):
"""Unpack the external modules."""
'''
Unpack the external modules.
'''
modcache = os.path.join(
OPTIONS.saltdir,
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
u'running_data',
u'var',
u'cache',
u'salt',
u'minion',
u'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
ver_path = os.path.join(modcache, u'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, u'ext_version')
shutil.move(ver_path, ver_dst)
def main(argv): # pylint: disable=W0613
"""Main program body"""
'''
Main program body
'''
thin_path = os.path.join(OPTIONS.saltdir, THIN_ARCHIVE)
if os.path.isfile(thin_path):
if OPTIONS.checksum != get_hash(thin_path, OPTIONS.hashfunc):
@ -196,8 +208,8 @@ def main(argv): # pylint: disable=W0613
unpack_thin(thin_path)
# Salt thin now is available to use
else:
if not sys.platform.startswith('win'):
scpstat = subprocess.Popen(['/bin/sh', '-c', 'command -v scp']).wait()
if not sys.platform.startswith(u'win'):
scpstat = subprocess.Popen([u'/bin/sh', u'-c', u'command -v scp']).wait()
if scpstat != 0:
sys.exit(EX_SCP_NOT_FOUND)
@ -206,46 +218,46 @@ def main(argv): # pylint: disable=W0613
if not os.path.isdir(OPTIONS.saltdir):
sys.stderr.write(
'ERROR: salt path "{0}" exists but is'
' not a directory\n'.format(OPTIONS.saltdir)
u'ERROR: salt path "{0}" exists but is'
u' not a directory\n'.format(OPTIONS.saltdir)
)
sys.exit(EX_CANTCREAT)
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, 'version'))
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, u'version'))
if not os.path.exists(version_path) or not os.path.isfile(version_path):
sys.stderr.write(
'WARNING: Unable to locate current thin '
' version: {0}.\n'.format(version_path)
u'WARNING: Unable to locate current thin '
u' version: {0}.\n'.format(version_path)
)
need_deployment()
with open(version_path, 'r') as vpo:
with open(version_path, u'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.version:
sys.stderr.write(
'WARNING: current thin version {0}'
' is not up-to-date with {1}.\n'.format(
u'WARNING: current thin version {0}'
u' is not up-to-date with {1}.\n'.format(
cur_version, OPTIONS.version
)
)
need_deployment()
# Salt thin exists and is up-to-date - fall through and use it
salt_call_path = os.path.join(OPTIONS.saltdir, 'salt-call')
salt_call_path = os.path.join(OPTIONS.saltdir, u'salt-call')
if not os.path.isfile(salt_call_path):
sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
sys.stderr.write(u'ERROR: thin is missing "{0}"\n'.format(salt_call_path))
need_deployment()
with open(os.path.join(OPTIONS.saltdir, 'minion'), 'w') as config:
config.write(OPTIONS.config + '\n')
with open(os.path.join(OPTIONS.saltdir, u'minion'), u'w') as config:
config.write(OPTIONS.config + u'\n')
if OPTIONS.ext_mods:
ext_path = os.path.join(OPTIONS.saltdir, EXT_ARCHIVE)
if os.path.exists(ext_path):
unpack_ext(ext_path)
else:
version_path = os.path.join(OPTIONS.saltdir, 'ext_version')
version_path = os.path.join(OPTIONS.saltdir, u'ext_version')
if not os.path.exists(version_path) or not os.path.isfile(version_path):
need_ext()
with open(version_path, 'r') as vpo:
with open(version_path, u'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.ext_mods:
need_ext()
@ -258,38 +270,38 @@ def main(argv): # pylint: disable=W0613
salt_argv = [
sys.executable,
salt_call_path,
'--retcode-passthrough',
'--local',
'--metadata',
'--out', 'json',
'-l', 'quiet',
'-c', OPTIONS.saltdir
u'--retcode-passthrough',
u'--local',
u'--metadata',
u'--out', u'json',
u'-l', u'quiet',
u'-c', OPTIONS.saltdir
]
try:
if argv_prepared[-1].startswith('--no-parse='):
if argv_prepared[-1].startswith(u'--no-parse='):
salt_argv.append(argv_prepared.pop(-1))
except (IndexError, TypeError):
pass
salt_argv.append('--')
salt_argv.append(u'--')
salt_argv.extend(argv_prepared)
sys.stderr.write('SALT_ARGV: {0}\n'.format(salt_argv))
sys.stderr.write(u'SALT_ARGV: {0}\n'.format(salt_argv))
# Only emit the delimiter on *both* stdout and stderr when completely successful.
# Yes, the flush() is necessary.
sys.stdout.write(OPTIONS.delimiter + '\n')
sys.stdout.write(OPTIONS.delimiter + u'\n')
sys.stdout.flush()
if not OPTIONS.tty:
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.write(OPTIONS.delimiter + u'\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors="replace"))
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors=u"replace"))
sys.stdout.flush()
if OPTIONS.wipe:
shutil.rmtree(OPTIONS.saltdir)
@ -301,5 +313,5 @@ def main(argv): # pylint: disable=W0613
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
if __name__ == '__main__':
if __name__ == u'__main__':
sys.exit(main(sys.argv))
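get_hash above reads the thin archive in fixed-size chunks rather than loading it whole before hashing. A self-contained sketch of the same chunked-digest approach; the command-line wrapper at the bottom is illustrative:

    import hashlib
    import sys


    def get_hash(path, form=u'sha1', chunk_size=4096):
        '''
        Generate a hash digest string for a file, reading it in chunks so
        large archives do not have to fit in memory.
        '''
        try:
            hash_obj = getattr(hashlib, form)()
        except AttributeError:
            raise ValueError(u'Invalid hash type: {0}'.format(form))
        with open(path, 'rb') as ifile:
            for chunk in iter(lambda: ifile.read(chunk_size), b''):
                hash_obj.update(chunk)
        return hash_obj.hexdigest()


    if __name__ == '__main__':
        # Usage: python hash_example.py <path> [sha1|sha256|...]
        print(get_hash(sys.argv[1], *(sys.argv[2:3] or [u'sha1'])))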

View File

@ -24,6 +24,9 @@ import salt.state
import salt.loader
import salt.minion
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
@ -82,35 +85,33 @@ class SSHHighState(salt.state.BaseHighState):
'''
Evaluate master_tops locally
'''
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
if u'id' not in self.opts:
log.error(u'Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
if not salt.utils.verify.valid_id(self.opts, self.opts[u'id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if 'grains' in self.opts:
grains = self.opts['grains']
if u'grains' in self.opts:
grains = self.opts[u'grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
if fun not in self.opts.get(u'master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, self.opts['id']
)
u'Top function %s failed with error %s for minion %s',
fun, exc, self.opts[u'id']
)
return ret
def lowstate_file_refs(chunks, extras=''):
def lowstate_file_refs(chunks, extras=u''):
'''
Create a list of file ref objects to reconcile
'''
@ -118,12 +119,12 @@ def lowstate_file_refs(chunks, extras=''):
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
saltenv = u'base'
crefs = []
for state in chunk:
if state == '__env__':
if state == u'__env__':
saltenv = chunk[state]
elif state.startswith('__'):
elif state.startswith(u'__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
@ -131,7 +132,7 @@ def lowstate_file_refs(chunks, extras=''):
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
extra_refs = extras.split(u',')
if extra_refs:
for env in refs:
for x in extra_refs:
@ -143,10 +144,10 @@ def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
proto = u'salt://'
if ret is None:
ret = []
if isinstance(data, str):
if isinstance(data, six.string_types):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
@ -165,38 +166,38 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
lowfn = os.path.join(gendir, u'lowstate.json')
pillarfn = os.path.join(gendir, u'pillar.json')
roster_grainsfn = os.path.join(gendir, u'roster_grains.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
[salt.utils.url.create(u'_modules')],
[salt.utils.url.create(u'_states')],
[salt.utils.url.create(u'_grains')],
[salt.utils.url.create(u'_renderers')],
[salt.utils.url.create(u'_returners')],
[salt.utils.url.create(u'_output')],
[salt.utils.url.create(u'_utils')],
]
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
with salt.utils.files.fopen(lowfn, u'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
with salt.utils.files.fopen(pillarfn, u'w+') as fp_:
fp_.write(json.dumps(pillar))
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
with salt.utils.files.fopen(roster_grainsfn, u'w+') as fp_:
fp_.write(json.dumps(roster_grains))
if id_ is None:
id_ = ''
id_ = u''
try:
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
cachedir = os.path.join(u'salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join('salt-ssh', str(id_)).rstrip(os.sep)
cachedir = os.path.join(u'salt-ssh', str(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
cache_dest_root = os.path.join(cachedir, u'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
@ -208,7 +209,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = ''
path = u''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
@ -219,10 +220,10 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = ''
files = u''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip(u'/')
tgt = os.path.join(
env_root,
short,
@ -239,7 +240,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
with closing(tarfile.open(trans_tar, u'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)
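salt_refs collects every salt:// reference out of arbitrarily nested state data so prep_trans_tar knows which files to cache into the transfer tarball. A standalone sketch of that recursive collection, extended to walk dict values as well; the sample chunk is invented for illustration:

    def salt_refs(data, ret=None):
        '''
        Pull ``salt://`` file references out of arbitrarily nested state data.
        '''
        proto = u'salt://'
        if ret is None:
            ret = []
        if isinstance(data, str):
            if data.startswith(proto) and data not in ret:
                ret.append(data)
        elif isinstance(data, list):
            for item in data:
                salt_refs(item, ret)
        elif isinstance(data, dict):
            for item in data.values():
                salt_refs(item, ret)
        return ret


    if __name__ == '__main__':
        # Invented low-state chunk, only for demonstration.
        chunk = {u'file': [u'managed',
                           {u'source': u'salt://webserver/files/nginx.conf'},
                           {u'template': u'jinja'}],
                 u'require': [u'salt://webserver/init.sls']}
        # -> ['salt://webserver/files/nginx.conf', 'salt://webserver/init.sls']
        print(salt_refs(chunk))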

View File

@ -17,7 +17,7 @@ import salt.utils
import salt.client.ssh
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
class FunctionWrapper(object):
@ -42,8 +42,8 @@ class FunctionWrapper(object):
self.wfuncs = wfuncs if isinstance(wfuncs, dict) else {}
self.opts = opts
self.mods = mods if isinstance(mods, dict) else {}
self.kwargs = {'id_': id_,
'host': host}
self.kwargs = {u'id_': id_,
u'host': host}
self.fsclient = fsclient
self.kwargs.update(kwargs)
self.aliases = aliases
@ -67,14 +67,14 @@ class FunctionWrapper(object):
'''
Return the function call to simulate the salt local lookup system
'''
if '.' not in cmd and not self.cmd_prefix:
if u'.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. Create a new
# FunctionWrapper which contains the prefix 'cmd' (again, for the
# salt.cmd.run example)
kwargs = copy.deepcopy(self.kwargs)
id_ = kwargs.pop('id_')
host = kwargs.pop('host')
id_ = kwargs.pop(u'id_')
host = kwargs.pop(u'host')
return FunctionWrapper(self.opts,
id_,
host,
@ -90,7 +90,7 @@ class FunctionWrapper(object):
# We're in an inner FunctionWrapper as created by the code block
# above. Reconstruct the original cmd in the form 'cmd.run' and
# then evaluate as normal
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
return self.wfuncs[cmd]
@ -104,7 +104,7 @@ class FunctionWrapper(object):
'''
argv = [cmd]
argv.extend([json.dumps(arg) for arg in args])
argv.extend(['{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
argv.extend([u'{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
single = salt.client.ssh.Single(
self.opts,
argv,
@ -115,21 +115,21 @@ class FunctionWrapper(object):
**self.kwargs
)
stdout, stderr, retcode = single.cmd_block()
if stderr.count('Permission Denied'):
return {'_error': 'Permission Denied',
'stdout': stdout,
'stderr': stderr,
'retcode': retcode}
if stderr.count(u'Permission Denied'):
return {u'_error': u'Permission Denied',
u'stdout': stdout,
u'stderr': stderr,
u'retcode': retcode}
try:
ret = json.loads(stdout, object_hook=salt.utils.decode_dict)
if len(ret) < 2 and 'local' in ret:
ret = ret['local']
ret = ret.get('return', {})
if len(ret) < 2 and u'local' in ret:
ret = ret[u'local']
ret = ret.get(u'return', {})
except ValueError:
ret = {'_error': 'Failed to return clean data',
'stderr': stderr,
'stdout': stdout,
'retcode': retcode}
ret = {u'_error': u'Failed to return clean data',
u'stderr': stderr,
u'stdout': stdout,
u'retcode': retcode}
return ret
return caller
@ -137,18 +137,18 @@ class FunctionWrapper(object):
'''
Set aliases for functions
'''
if '.' not in cmd and not self.cmd_prefix:
if u'.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. We don't
# support assigning directly to prefixes in this way
raise KeyError('Cannot assign to module key {0} in the '
'FunctionWrapper'.format(cmd))
raise KeyError(u'Cannot assign to module key {0} in the '
u'FunctionWrapper'.format(cmd))
if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the first code
# block in __getitem__. Reconstruct the original cmd in the form
# 'cmd.run' and then evaluate as normal
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
self.wfuncs[cmd] = value
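The cmd_prefix handling above lets a Jinja expression such as salt.cmd.run(...) resolve through two dictionary lookups: the first returns an inner wrapper that remembers the 'cmd' prefix, the second rebuilds the full 'cmd.run' name. A toy sketch of that two-level lookup; no SSH call is made, the stand-in resolver only reports the reconstructed function name:

    class PrefixWrapper(object):
        '''
        Toy stand-in for FunctionWrapper: a key without a dot means only a
        module name was given, so return another wrapper carrying the prefix.
        '''
        def __init__(self, cmd_prefix=None):
            self.cmd_prefix = cmd_prefix

        def __getitem__(self, cmd):
            if u'.' not in cmd and not self.cmd_prefix:
                # First lookup (e.g. wrapper['cmd']) -- remember the prefix.
                return PrefixWrapper(cmd_prefix=cmd)
            if self.cmd_prefix:
                # Second lookup (e.g. inner['run']) -- rebuild 'cmd.run'.
                cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)

            def caller(*args, **kwargs):
                # The real wrapper executes this over SSH; here we just report.
                return u'would execute {0} with {1} {2}'.format(cmd, args, kwargs)
            return caller


    if __name__ == '__main__':
        wrapper = PrefixWrapper()
        print(wrapper[u'cmd'][u'run'](u'ls -la /tmp'))   # two-step lookup
        print(wrapper[u'cmd.run'](u'ls -la /tmp'))       # direct lookup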

View File

@ -13,47 +13,47 @@ import salt.utils
import salt.syspaths as syspaths
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
# Set up the default values for all systems
DEFAULTS = {'mongo.db': 'salt',
'mongo.host': 'salt',
'mongo.password': '',
'mongo.port': 27017,
'mongo.user': '',
'redis.db': '0',
'redis.host': 'salt',
'redis.port': 6379,
'test.foo': 'unconfigured',
'ca.cert_base_path': '/etc/pki',
'solr.cores': [],
'solr.host': 'localhost',
'solr.port': '8983',
'solr.baseurl': '/solr',
'solr.type': 'master',
'solr.request_timeout': None,
'solr.init_script': '/etc/rc.d/solr',
'solr.dih.import_options': {'clean': False, 'optimize': True,
'commit': True, 'verbose': False},
'solr.backup_path': None,
'solr.num_backups': 1,
'poudriere.config': '/usr/local/etc/poudriere.conf',
'poudriere.config_dir': '/usr/local/etc/poudriere.d',
'ldap.server': 'localhost',
'ldap.port': '389',
'ldap.tls': False,
'ldap.scope': 2,
'ldap.attrs': None,
'ldap.binddn': '',
'ldap.bindpw': '',
'hosts.file': '/etc/hosts',
'aliases.file': '/etc/aliases',
'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, 'salt-images'),
'virt.tunnel': False,
DEFAULTS = {u'mongo.db': u'salt',
u'mongo.host': u'salt',
u'mongo.password': u'',
u'mongo.port': 27017,
u'mongo.user': u'',
u'redis.db': u'0',
u'redis.host': u'salt',
u'redis.port': 6379,
u'test.foo': u'unconfigured',
u'ca.cert_base_path': u'/etc/pki',
u'solr.cores': [],
u'solr.host': u'localhost',
u'solr.port': u'8983',
u'solr.baseurl': u'/solr',
u'solr.type': u'master',
u'solr.request_timeout': None,
u'solr.init_script': u'/etc/rc.d/solr',
u'solr.dih.import_options': {u'clean': False, u'optimize': True,
u'commit': True, u'verbose': False},
u'solr.backup_path': None,
u'solr.num_backups': 1,
u'poudriere.config': u'/usr/local/etc/poudriere.conf',
u'poudriere.config_dir': u'/usr/local/etc/poudriere.d',
u'ldap.server': u'localhost',
u'ldap.port': u'389',
u'ldap.tls': False,
u'ldap.scope': 2,
u'ldap.attrs': None,
u'ldap.binddn': u'',
u'ldap.bindpw': u'',
u'hosts.file': u'/etc/hosts',
u'aliases.file': u'/etc/aliases',
u'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, u'salt-images'),
u'virt.tunnel': False,
}
def backup_mode(backup=''):
def backup_mode(backup=u''):
'''
Return the backup mode
@ -65,7 +65,7 @@ def backup_mode(backup=''):
'''
if backup:
return backup
return option('backup_mode')
return option(u'backup_mode')
def manage_mode(mode):
@ -96,14 +96,14 @@ def valid_fileproto(uri):
salt '*' config.valid_fileproto salt://path/to/file
'''
try:
return bool(re.match('^(?:salt|https?|ftp)://', uri))
return bool(re.match(u'^(?:salt|https?|ftp)://', uri))
except Exception:
return False
def option(
value,
default='',
default=u'',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -120,8 +120,8 @@ def option(
if value in __opts__:
return __opts__[value]
if not omit_master:
if value in __pillar__.get('master', {}):
return __pillar__['master'][value]
if value in __pillar__.get(u'master', {}):
return __pillar__[u'master'][value]
if not omit_pillar:
if value in __pillar__:
return __pillar__[value]
@ -131,7 +131,7 @@ def option(
def merge(value,
default='',
default=u'',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -151,14 +151,14 @@ def merge(value,
if not omit_opts:
if value in __opts__:
ret = __opts__[value]
if isinstance(ret, str):
if isinstance(ret, six.string_types):
return ret
if not omit_master:
if value in __pillar__.get('master', {}):
tmp = __pillar__['master'][value]
if value in __pillar__.get(u'master', {}):
tmp = __pillar__[u'master'][value]
if ret is None:
ret = tmp
if isinstance(ret, str):
if isinstance(ret, six.string_types):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
@ -171,7 +171,7 @@ def merge(value,
tmp = __pillar__[value]
if ret is None:
ret = tmp
if isinstance(ret, str):
if isinstance(ret, six.string_types):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
@ -184,7 +184,7 @@ def merge(value,
return ret or default
def get(key, default=''):
def get(key, default=u''):
'''
.. versionadded: 0.14.0
@ -215,17 +215,17 @@ def get(key, default=''):
salt '*' config.get pkg:apache
'''
ret = salt.utils.traverse_dict_and_list(__opts__, key, '_|-')
if ret != '_|-':
ret = salt.utils.traverse_dict_and_list(__opts__, key, u'_|-')
if ret != u'_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__grains__, key, '_|-')
if ret != '_|-':
ret = salt.utils.traverse_dict_and_list(__grains__, key, u'_|-')
if ret != u'_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__pillar__, key, '_|-')
if ret != '_|-':
ret = salt.utils.traverse_dict_and_list(__pillar__, key, u'_|-')
if ret != u'_|-':
return ret
ret = salt.utils.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-')
if ret != '_|-':
ret = salt.utils.traverse_dict_and_list(__pillar__.get(u'master', {}), key, u'_|-')
if ret != u'_|-':
return ret
return default
@ -242,10 +242,10 @@ def dot_vals(value):
salt '*' config.dot_vals host
'''
ret = {}
for key, val in six.iteritems(__pillar__.get('master', {})):
if key.startswith('{0}.'.format(value)):
for key, val in six.iteritems(__pillar__.get(u'master', {})):
if key.startswith(u'{0}.'.format(value)):
ret[key] = val
for key, val in six.iteritems(__opts__):
if key.startswith('{0}.'.format(value)):
if key.startswith(u'{0}.'.format(value)):
ret[key] = val
return ret
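config.get probes __opts__, __grains__, __pillar__ and the master pillar in that order, using the '_|-' marker as a not-found sentinel so that falsy but valid values (0, '', False) are still returned. A minimal sketch of that ordered, sentinel-based lookup with plain dictionaries standing in for the real dunder data:

    SENTINEL = u'_|-'


    def traverse(data, key, default, delimiter=u':'):
        '''
        Walk a nested dict following a delimited key, returning ``default``
        when any part of the path is missing.
        '''
        for part in key.split(delimiter):
            if isinstance(data, dict) and part in data:
                data = data[part]
            else:
                return default
        return data


    def config_get(key, opts, grains, pillar, default=u''):
        # Check each source in priority order; the sentinel distinguishes
        # "missing" from legitimately falsy values such as 0 or False.
        for source in (opts, grains, pillar, pillar.get(u'master', {})):
            ret = traverse(source, key, SENTINEL)
            if ret != SENTINEL:
                return ret
        return default


    if __name__ == '__main__':
        # Invented data standing in for __opts__, __grains__ and __pillar__.
        opts = {u'log_level': u'warning'}
        grains = {u'os_family': u'Debian'}
        pillar = {u'pkg': {u'apache': u'apache2'}, u'master': {u'timeout': 0}}
        print(config_get(u'pkg:apache', opts, grains, pillar))   # apache2
        print(config_get(u'timeout', opts, grains, pillar))      # 0, not ''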

View File

@ -18,7 +18,7 @@ log = logging.getLogger(__name__)
def get_file(path,
dest,
saltenv='base',
saltenv=u'base',
makedirs=False,
template=None,
gzip=None):
@ -31,83 +31,83 @@ def get_file(path,
cp.get_file. The argument is only accepted for interface compatibility.
'''
if gzip is not None:
log.warning('The gzip argument to cp.get_file in salt-ssh is '
'unsupported')
log.warning(u'The gzip argument to cp.get_file in salt-ssh is '
u'unsupported')
if template is not None:
(path, dest) = _render_filenames(path, dest, saltenv, template)
src = __context__['fileclient'].cache_file(
src = __context__[u'fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
single = salt.client.ssh.Single(
__opts__,
'',
u'',
**__salt__.kwargs)
ret = single.shell.send(src, dest, makedirs)
return not ret[2]
def get_dir(path, dest, saltenv='base'):
def get_dir(path, dest, saltenv=u'base'):
'''
Transfer a directory down
'''
src = __context__['fileclient'].cache_dir(
src = __context__[u'fileclient'].cache_dir(
path,
saltenv,
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
src = ' '.join(src)
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
src = u' '.join(src)
single = salt.client.ssh.Single(
__opts__,
'',
u'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def get_url(path, dest, saltenv='base'):
def get_url(path, dest, saltenv=u'base'):
'''
retrieve a URL
'''
src = __context__['fileclient'].cache_file(
src = __context__[u'fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
single = salt.client.ssh.Single(
__opts__,
'',
u'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def list_states(saltenv='base'):
def list_states(saltenv=u'base'):
'''
List all the available state modules in an environment
'''
return __context__['fileclient'].list_states(saltenv)
return __context__[u'fileclient'].list_states(saltenv)
def list_master(saltenv='base', prefix=''):
def list_master(saltenv=u'base', prefix=u''):
'''
List all of the files stored on the master
'''
return __context__['fileclient'].file_list(saltenv, prefix)
return __context__[u'fileclient'].file_list(saltenv, prefix)
def list_master_dirs(saltenv='base', prefix=''):
def list_master_dirs(saltenv=u'base', prefix=u''):
'''
List all of the directories stored on the master
'''
return __context__['fileclient'].dir_list(saltenv, prefix)
return __context__[u'fileclient'].dir_list(saltenv, prefix)
def list_master_symlinks(saltenv='base', prefix=''):
def list_master_symlinks(saltenv=u'base', prefix=u''):
'''
List all of the symlinks stored on the master
'''
return __context__['fileclient'].symlink_list(saltenv, prefix)
return __context__[u'fileclient'].symlink_list(saltenv, prefix)
def _render_filenames(path, dest, saltenv, template):
@ -122,16 +122,16 @@ def _render_filenames(path, dest, saltenv, template):
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
u'Attempted to render file paths with unavailable engine '
u'{0}'.format(template)
)
kwargs = {}
kwargs['salt'] = __salt__
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
kwargs[u'salt'] = __salt__
kwargs[u'pillar'] = __pillar__
kwargs[u'grains'] = __grains__
kwargs[u'opts'] = __opts__
kwargs[u'saltenv'] = saltenv
def _render(contents):
'''
@ -140,7 +140,7 @@ def _render_filenames(path, dest, saltenv, template):
'''
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
with salt.utils.files.fopen(tmp_path_fn, u'w+') as fp_:
fp_.write(contents)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
@ -148,15 +148,15 @@ def _render_filenames(path, dest, saltenv, template):
**kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
if not data['result']:
if not data[u'result']:
# Failed to render the template
raise CommandExecutionError(
'Failed to render file path with error: {0}'.format(
data['data']
u'Failed to render file path with error: {0}'.format(
data[u'data']
)
)
else:
return data['data']
return data[u'data']
path = _render(path)
dest = _render(dest)
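_render_filenames runs the source and destination paths through the configured template engine before the transfer. A simplified sketch of the same idea using Python's built-in string.Template in place of Salt's template registry; the context keys and paths are invented:

    from string import Template


    def render_filenames(path, dest, context):
        '''
        Render both paths with the same substitution context so templated
        transfers land in predictable locations.
        '''
        return (Template(path).safe_substitute(context),
                Template(dest).safe_substitute(context))


    if __name__ == '__main__':
        context = {u'role': u'web', u'env': u'prod'}
        print(render_filenames(u'salt://$role/files/app.conf',
                               u'/etc/app/$env.conf',
                               context))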

View File

@ -17,38 +17,40 @@ from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext import six
# Seed the grains dict so cython will build
__grains__ = {}
def _serial_sanitizer(instr):
'''Replaces the last 1/4 of a string with X's'''
'''
Replaces the last 1/4 of a string with X's
'''
length = len(instr)
index = int(math.floor(length * .75))
return '{0}{1}'.format(instr[:index], 'X' * (length - index))
return u'{0}{1}'.format(instr[:index], u'X' * (length - index))
_FQDN_SANITIZER = lambda x: 'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: 'MINION'
_DOMAINNAME_SANITIZER = lambda x: 'DOMAINNAME'
_FQDN_SANITIZER = lambda x: u'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: u'MINION'
_DOMAINNAME_SANITIZER = lambda x: u'DOMAINNAME'
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
'serialnumber': _serial_sanitizer,
'domain': _DOMAINNAME_SANITIZER,
'fqdn': _FQDN_SANITIZER,
'id': _FQDN_SANITIZER,
'host': _HOSTNAME_SANITIZER,
'localhost': _HOSTNAME_SANITIZER,
'nodename': _HOSTNAME_SANITIZER,
u'serialnumber': _serial_sanitizer,
u'domain': _DOMAINNAME_SANITIZER,
u'fqdn': _FQDN_SANITIZER,
u'id': _FQDN_SANITIZER,
u'host': _HOSTNAME_SANITIZER,
u'localhost': _HOSTNAME_SANITIZER,
u'nodename': _HOSTNAME_SANITIZER,
}
def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
def get(key, default=u'', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
'''
Attempt to retrieve the named value from grains. If the named value is not
available, return the passed default. The default return is an empty string.
@ -149,7 +151,7 @@ def item(*args, **kwargs):
ret[arg] = __grains__[arg]
except KeyError:
pass
if salt.utils.is_true(kwargs.get('sanitize')):
if salt.utils.is_true(kwargs.get(u'sanitize')):
for arg, func in six.iteritems(_SANITIZERS):
if arg in ret:
ret[arg] = func(ret[arg])
@ -170,9 +172,9 @@ def ls(): # pylint: disable=C0103
def filter_by(lookup_dict,
grain='os_family',
grain=u'os_family',
merge=None,
default='default',
default=u'default',
base=None):
'''
.. versionadded:: 0.17.0
@ -266,12 +268,12 @@ def filter_by(lookup_dict,
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException('filter_by default and look-up values must both be dictionaries.')
raise SaltException(u'filter_by default and look-up values must both be dictionaries.')
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException('filter_by merge argument must be a dictionary.')
raise SaltException(u'filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge
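The sanitizer table maps specific grain names to functions that scrub identifying data when sanitize=True is passed. A small standalone sketch of the same idea; the grain values are invented:

    import math


    def _serial_sanitizer(instr):
        '''
        Replace the last quarter of a string with X's.
        '''
        length = len(instr)
        index = int(math.floor(length * .75))
        return u'{0}{1}'.format(instr[:index], u'X' * (length - index))


    SANITIZERS = {
        u'serialnumber': _serial_sanitizer,
        u'fqdn': lambda x: u'MINION.DOMAINNAME',
        u'host': lambda x: u'MINION',
        u'domain': lambda x: u'DOMAINNAME',
    }


    def sanitize(grains):
        '''
        Return a copy of the grains dict with sensitive values scrubbed.
        '''
        cleaned = dict(grains)
        for name, func in SANITIZERS.items():
            if name in cleaned:
                cleaned[name] = func(cleaned[name])
        return cleaned


    if __name__ == '__main__':
        # Invented grain values for demonstration only.
        print(sanitize({u'serialnumber': u'C02ABCDE1234',
                        u'fqdn': u'web01.example.com',
                        u'os': u'Gentoo'}))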

View File

@ -14,7 +14,7 @@ import copy
import salt.client.ssh
def get(tgt, fun, tgt_type='glob', roster='flat'):
def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
'''
Get data from the mine based on the target, function and tgt_type
@ -36,15 +36,15 @@ def get(tgt, fun, tgt_type='glob', roster='flat'):
salt-ssh '*' mine.get '192.168.5.0' network.ipaddrs roster=scan
'''
# Set up opts for the SSH object
opts = copy.deepcopy(__context__['master_opts'])
opts = copy.deepcopy(__context__[u'master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts['roster'] = roster
opts['argv'] = [fun]
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = []
opts[u'roster'] = roster
opts[u'argv'] = [fun]
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = []
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -56,8 +56,8 @@ def get(tgt, fun, tgt_type='glob', roster='flat'):
cret = {}
for host in rets:
if 'return' in rets[host]:
cret[host] = rets[host]['return']
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
else:
cret[host] = rets[host]
return cret
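mine.get collapses the per-host SSH results by unwrapping the 'return' key when it is present and keeping the raw result otherwise. A tiny sketch of that normalization over an invented result set:

    def clean_returns(rets):
        '''
        Unwrap the 'return' payload for each host, falling back to the raw
        result when no such key exists.
        '''
        cret = {}
        for host, data in rets.items():
            if isinstance(data, dict) and u'return' in data:
                cret[host] = data[u'return']
            else:
                cret[host] = data
        return cret


    if __name__ == '__main__':
        # Invented results; real data comes from ssh.run_iter().
        rets = {u'web01': {u'return': {u'eth0': [u'192.168.5.10']}},
                u'web02': u'Minion did not respond'}
        print(clean_returns(rets))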

View File

@ -13,7 +13,7 @@ import salt.utils
from salt.defaults import DEFAULT_TARGET_DELIM
def get(key, default='', merge=False, delimiter=DEFAULT_TARGET_DELIM):
def get(key, default=u'', merge=False, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 0.14
@ -130,10 +130,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
__pillar__, key, KeyError, delimiter)
if ret is KeyError:
raise KeyError("Pillar key not found: {0}".format(key))
raise KeyError(u"Pillar key not found: {0}".format(key))
if not isinstance(ret, dict):
raise ValueError("Pillar value in key {0} is not a dict".format(key))
raise ValueError(u"Pillar value in key {0} is not a dict".format(key))
return ret.keys()

View File

@ -17,6 +17,8 @@ import logging
# Import salt libs
import salt.client.ssh
import salt.runner
import salt.utils.args
import salt.utils.versions
log = logging.getLogger(__name__)
@ -24,10 +26,10 @@ log = logging.getLogger(__name__)
def _publish(tgt,
fun,
arg=None,
tgt_type='glob',
returner='',
tgt_type=u'glob',
returner=u'',
timeout=None,
form='clean',
form=u'clean',
roster=None):
'''
Publish a command "from the minion out to other minions". In reality, the
@ -53,13 +55,13 @@ def _publish(tgt,
salt-ssh system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
'''
if fun.startswith('publish.'):
log.info('Cannot publish publish calls. Returning {}')
if fun.startswith(u'publish.'):
log.info(u'Cannot publish publish calls. Returning {}')
return {}
# TODO: implement returners? Do they make sense for salt-ssh calls?
if returner:
log.warning('Returners currently not supported in salt-ssh publish')
log.warning(u'Returners currently not supported in salt-ssh publish')
# Make sure args have been processed
if arg is None:
@ -72,17 +74,17 @@ def _publish(tgt,
arg = []
# Set up opts for the SSH object
opts = copy.deepcopy(__context__['master_opts'])
opts = copy.deepcopy(__context__[u'master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts['roster'] = roster
opts[u'roster'] = roster
if timeout:
opts['timeout'] = timeout
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
opts[u'timeout'] = timeout
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -92,11 +94,11 @@ def _publish(tgt,
for ret in ssh.run_iter():
rets.update(ret)
if form == 'clean':
if form == u'clean':
cret = {}
for host in rets:
if 'return' in rets[host]:
cret[host] = rets[host]['return']
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
else:
cret[host] = rets[host]
return cret
@ -107,8 +109,8 @@ def _publish(tgt,
def publish(tgt,
fun,
arg=None,
tgt_type='glob',
returner='',
tgt_type=u'glob',
returner=u'',
timeout=5,
roster=None,
expr_form=None):
@ -173,11 +175,11 @@ def publish(tgt,
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -187,15 +189,15 @@ def publish(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form='clean',
form=u'clean',
roster=roster)
def full_data(tgt,
fun,
arg=None,
tgt_type='glob',
returner='',
tgt_type=u'glob',
returner=u'',
timeout=5,
roster=None,
expr_form=None):
@ -223,11 +225,11 @@ def full_data(tgt,
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -237,7 +239,7 @@ def full_data(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form='full',
form=u'full',
roster=roster)
@ -260,5 +262,5 @@ def runner(fun, arg=None, timeout=5):
arg = []
# Create and run the runner
runner = salt.runner.RunnerClient(__opts__['__master_opts__'])
runner = salt.runner.RunnerClient(__opts__[u'__master_opts__'])
return runner.cmd(fun, arg)
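publish and full_data keep accepting the deprecated expr_form keyword, warn, and forward its value to tgt_type. A generic sketch of that keyword-migration pattern; a plain DeprecationWarning stands in for salt.utils.versions.warn_until and the release names:

    import warnings


    def publish(tgt, fun, tgt_type=u'glob', expr_form=None):
        '''
        Accept the old keyword for backwards compatibility, warn, and
        forward its value to the new one.
        '''
        if expr_form is not None:
            # Stand-in for Salt's warn_until helper.
            warnings.warn(
                u"'expr_form' is deprecated; pass the target type via "
                u"'tgt_type' instead",
                DeprecationWarning,
            )
            tgt_type = expr_form
        return u'publishing {0} to {1} (match: {2})'.format(fun, tgt, tgt_type)


    if __name__ == '__main__':
        warnings.simplefilter(u'always')
        print(publish(u'web*', u'cmd.run', expr_form=u'glob'))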

View File

@ -20,10 +20,12 @@ import salt.state
import salt.loader
import salt.minion
import salt.log
from salt.ext.six import string_types
# Import 3rd-party libs
from salt.ext import six
__func_alias__ = {
'apply_': 'apply'
u'apply_': u'apply'
}
log = logging.getLogger(__name__)
@ -34,37 +36,37 @@ def _merge_extra_filerefs(*args):
'''
ret = []
for arg in args:
if isinstance(arg, string_types):
if isinstance(arg, six.string_types):
if arg:
ret.extend(arg.split(','))
ret.extend(arg.split(u','))
elif isinstance(arg, list):
if arg:
ret.extend(arg)
return ','.join(ret)
return u','.join(ret)
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get(u'pillar', {}))
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
if isinstance(mods, str):
mods = mods.split(',')
__context__[u'fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
if isinstance(exclude, six.string_types):
exclude = exclude.split(u',')
if u'__exclude__' in high_data:
high_data[u'__exclude__'].extend(exclude)
else:
high_data['__exclude__'] = exclude
high_data[u'__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
@ -81,38 +83,38 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -125,7 +127,7 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -144,51 +146,51 @@ def low(data, **kwargs):
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
for chunk in chunks:
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
chunk[u'__id__'] = chunk[u'name'] if not chunk.get(u'__id__') else chunk[u'__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -201,7 +203,7 @@ def low(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -219,49 +221,49 @@ def high(data, **kwargs):
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
__pillar__.update(kwargs.get('pillar', {}))
__pillar__.update(kwargs.get(u'pillar', {}))
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
chunks = st_.state.compile_high_data(data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -274,7 +276,7 @@ def high(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -316,56 +318,56 @@ def highstate(test=None, **kwargs):
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get('pillar', {}))
__pillar__.update(kwargs.get(u'pillar', {}))
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
# Check for errors
for chunk in chunks:
if not isinstance(chunk, dict):
__context__['retcode'] = 1
__context__[u'retcode'] = 1
return chunks
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -378,7 +380,7 @@ def highstate(test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -397,55 +399,55 @@ def top(topfn, test=None, **kwargs):
salt '*' state.top reverse_top.sls exclude=sls_to_exclude
salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get('pillar', {}))
__pillar__.update(kwargs.get(u'pillar', {}))
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
if salt.utils.test_mode(test=test, **kwargs):
__opts__['test'] = True
__opts__[u'test'] = True
else:
__opts__['test'] = __opts__.get('test', None)
__opts__[u'test'] = __opts__.get(u'test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
st_.opts['state_top'] = os.path.join('salt://', topfn)
__context__[u'fileclient'])
st_.opts[u'state_top'] = os.path.join(u'salt://', topfn)
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -458,7 +460,7 @@ def top(topfn, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
@ -475,12 +477,12 @@ def show_highstate():
salt '*' state.show_highstate
'''
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
return st_.compile_highstate()
@ -494,16 +496,16 @@ def show_lowstate():
salt '*' state.show_lowstate
'''
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
return st_.compile_low_chunks()
def show_sls(mods, saltenv='base', test=None, **kwargs):
def show_sls(mods, saltenv=u'base', test=None, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master
@ -514,20 +516,20 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
opts[u'test'] = True
else:
opts['test'] = __opts__.get('test', None)
opts[u'test'] = __opts__.get(u'test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
if isinstance(mods, string_types):
mods = mods.split(',')
__context__[u'fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -543,7 +545,7 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
return high_data
def show_low_sls(mods, saltenv='base', test=None, **kwargs):
def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
'''
Display the low state data from a specific sls or list of sls files on the
master.
@ -556,21 +558,21 @@ def show_low_sls(mods, saltenv='base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
opts[u'test'] = True
else:
opts['test'] = __opts__.get('test', None)
opts[u'test'] = __opts__.get(u'test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
if isinstance(mods, string_types):
mods = mods.split(',')
__context__[u'fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -597,12 +599,12 @@ def show_top():
salt '*' state.show_top
'''
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
__context__[u'fileclient'])
top_data = st_.get_top()
errors = []
errors += st_.verify_tops(top_data)
@ -632,30 +634,30 @@ def single(fun, name, test=None, **kwargs):
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__opts__[u'grains'] = __grains__
# state.fun -> [state, fun]
comps = fun.split('.')
comps = fun.split(u'.')
if len(comps) < 2:
__context__['retcode'] = 1
return 'Invalid function passed'
__context__[u'retcode'] = 1
return u'Invalid function passed'
# Create the low chunk, using kwargs as a base
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
kwargs.update({u'state': comps[0],
u'fun': comps[1],
u'__id__': name,
u'name': name})
opts = copy.deepcopy(__opts__)
# Set test mode
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
opts[u'test'] = True
else:
opts['test'] = __opts__.get('test', None)
opts[u'test'] = __opts__.get(u'test', None)
# Get the override pillar data
__pillar__.update(kwargs.get('pillar', {}))
__pillar__.update(kwargs.get(u'pillar', {}))
# Create the State environment
st_ = salt.client.ssh.state.SSHState(__opts__, __pillar__)
@ -663,7 +665,7 @@ def single(fun, name, test=None, **kwargs):
# Verify the low chunk
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
__context__[u'retcode'] = 1
return err
# Must be a list of low-chunks
@ -674,46 +676,46 @@ def single(fun, name, test=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__['fileclient'],
__context__[u'fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
st_kwargs[u'id_'],
roster_grains)
# Create a hash so we can verify the tar on the target system
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type'])
# We use state.pkg to execute the "state package"
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
__opts__[u'hash_type'])
# Create a salt-ssh Single object to actually do the ssh work
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
fsclient=__context__[u'fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
# Copy the tar down
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
# Run the state.pkg command on the target
stdout, stderr, _ = single.cmd_block()
@ -728,7 +730,7 @@ def single(fun, name, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.decode_dict)
except Exception as e:
log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr))
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
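The hunk above also switches the failure log from str.format() interpolation to lazy %-style arguments, so the message is only built if the error record is actually emitted, and it falls back to returning the raw stdout when the JSON decode fails. A minimal, self-contained sketch of that decode-with-fallback pattern, with a made-up name (decode_output) and the wrapper's object_hook omitted for brevity:

import json
import logging

log = logging.getLogger(__name__)

def decode_output(stdout, stderr):
    # Try to parse the remote command's stdout as JSON; if that fails,
    # log the failure lazily (%-style args) and hand back the raw text,
    # mirroring the behaviour shown in the hunk above.
    try:
        return json.loads(stdout)
    except ValueError as exc:
        log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(exc))
        return stdout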


@ -30,13 +30,12 @@ import salt.config
import salt.client
import salt.loader
import salt.utils
import salt.utils.args
import salt.utils.cloud
import salt.utils.context
import salt.utils.dictupdate
import salt.utils.files
import salt.syspaths
from salt.utils import reinit_crypto
from salt.utils import context
from salt.ext.six import string_types
from salt.template import compile_template
# Import third party libs
@ -48,7 +47,7 @@ except ImportError:
except ImportError:
pass # pycrypto < 2.1
import yaml
import salt.ext.six as six
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
# Get logging started
@ -342,7 +341,7 @@ class CloudClient(object):
vm_overrides = {}
kwargs['profile'] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, str):
if isinstance(names, six.string_types):
names = names.split(',')
return salt.utils.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
@ -367,7 +366,7 @@ class CloudClient(object):
Destroy the named VMs
'''
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
if isinstance(names, str):
if isinstance(names, six.string_types):
names = names.split(',')
return salt.utils.simple_types_filter(
mapper.destroy(names)
@ -391,7 +390,7 @@ class CloudClient(object):
provider += ':{0}'.format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, str):
if isinstance(names, six.string_types):
names = names.split(',')
ret = {}
for name in names:
@ -433,7 +432,7 @@ class CloudClient(object):
provider += ':{0}'.format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, str):
if isinstance(names, six.string_types):
names = names.split(',')
ret = {}
@ -625,7 +624,7 @@ class Cloud(object):
pmap[alias] = {}
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -708,7 +707,7 @@ class Cloud(object):
def get_running_by_names(self, names, query='list_nodes', cached=False,
profile=None):
if isinstance(names, string_types):
if isinstance(names, six.string_types):
names = [names]
matches = {}
@ -730,18 +729,9 @@ class Cloud(object):
continue
for vm_name, details in six.iteritems(vms):
# If VM was created with use_fqdn with either of the softlayer drivers,
# we need to strip the VM name and only search for the short hostname.
if driver == 'softlayer' or driver == 'softlayer_hw':
ret = []
for name in names:
name = name.split('.')[0]
ret.append(name)
if vm_name not in ret:
continue
# XXX: The logic below can be removed once the aws driver
# is removed
elif vm_name not in names:
if vm_name not in names:
continue
elif driver == 'ec2' and 'aws' in handled_drivers and \
@ -827,7 +817,7 @@ class Cloud(object):
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -870,7 +860,7 @@ class Cloud(object):
data[alias] = {}
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -913,7 +903,7 @@ class Cloud(object):
data[alias] = {}
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1035,7 +1025,7 @@ class Cloud(object):
log.info('Destroying in non-parallel mode.')
for alias, driver, name in vms_to_destroy:
fun = '{0}.destroy'.format(driver)
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1280,7 +1270,7 @@ class Cloud(object):
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1366,7 +1356,7 @@ class Cloud(object):
return
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
@ -1514,7 +1504,7 @@ class Cloud(object):
invalid_functions[fun].append(vm_name)
continue
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1526,7 +1516,7 @@ class Cloud(object):
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.clean_kwargs(**kwargs)
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
@ -1598,7 +1588,7 @@ class Cloud(object):
)
)
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1646,7 +1636,7 @@ class Cloud(object):
self.opts['providers'].pop(alias)
continue
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
@ -1815,7 +1805,7 @@ class Map(Cloud):
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, string_types):
if isinstance(mapping, six.string_types):
# Foo:
# - bar1
# - bar2
@ -1855,7 +1845,7 @@ class Map(Cloud):
map_[profile] = entries
continue
if isinstance(mapped, string_types):
if isinstance(mapped, six.string_types):
# If it's a single string entry, let's make iterable because of
# the next step
mapped = [mapped]
@ -2310,7 +2300,7 @@ def create_multiprocessing(parallel_data, queue=None):
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
'''
reinit_crypto()
salt.utils.reinit_crypto()
parallel_data['opts']['output'] = 'json'
cloud = Cloud(parallel_data['opts'])
@ -2342,14 +2332,14 @@ def destroy_multiprocessing(parallel_data, queue=None):
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
'''
reinit_crypto()
salt.utils.reinit_crypto()
parallel_data['opts']['output'] = 'json'
clouds = salt.loader.clouds(parallel_data['opts'])
try:
fun = clouds['{0}.destroy'.format(parallel_data['driver'])]
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
fun,
__active_provider_name__=':'.join([
parallel_data['alias'],
@ -2378,11 +2368,11 @@ def run_parallel_map_providers_query(data, queue=None):
This function will be called from another process when building the
providers map.
'''
reinit_crypto()
salt.utils.reinit_crypto()
cloud = Cloud(data['opts'])
try:
with context.func_globals_inject(
with salt.utils.context.func_globals_inject(
cloud.clouds[data['fun']],
__active_provider_name__=':'.join([
data['alias'],
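Most hunks in this file requalify context.func_globals_inject as salt.utils.context.func_globals_inject so the helper is referenced through its full module path. As a rough illustration of what such a helper does (a simplified sketch, not the actual salt.utils.context implementation), a context manager can temporarily plant a name such as __active_provider_name__ into a driver function's globals and restore the previous state on exit:

import contextlib

@contextlib.contextmanager
def func_globals_inject(func, **overrides):
    # Simplified stand-in: save whatever the function's globals currently
    # hold for each injected name, install the override, and put the old
    # value back (or remove the name) when the block ends.
    missing = object()
    saved = {}
    for name, value in overrides.items():
        saved[name] = func.__globals__.get(name, missing)
        func.__globals__[name] = value
    try:
        yield
    finally:
        for name, previous in saved.items():
            if previous is missing:
                func.__globals__.pop(name, None)
            else:
                func.__globals__[name] = previous

Used as "with func_globals_inject(create, __active_provider_name__='my-provider:ec2'): create(vm_)", the injected name is visible inside create() only for the duration of the block.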


@ -20,23 +20,24 @@ import logging
from salt.ext.six.moves import input
# Import salt libs
import salt.cloud
import salt.utils.cloud
import salt.config
import salt.defaults.exitcodes
import salt.output
import salt.syspaths as syspaths
import salt.utils
from salt.utils import parsers
import salt.utils.parsers
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
from salt.utils.verify import check_user, verify_env, verify_files, verify_log
# Import salt.cloud libs
import salt.cloud
import salt.utils.cloud
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
import salt.ext.six as six
import salt.syspaths as syspaths
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
class SaltCloud(parsers.SaltCloudParser):
class SaltCloud(salt.utils.parsers.SaltCloudParser):
def run(self):
'''


@ -50,7 +50,8 @@ from salt.exceptions import (
SaltCloudExecutionTimeout
)
# Import Third Party Libs
# Import 3rd-party libs
from salt.ext import six
try:
import requests
HAS_REQUESTS = True
@ -745,7 +746,7 @@ def _compute_signature(parameters, access_key_secret):
'''
def percent_encode(line):
if not isinstance(line, str):
if not isinstance(line, six.string_types):
return line
s = line


@ -65,7 +65,7 @@ import salt.config as config
import salt.utils
import salt.utils.cloud
import salt.utils.files
import salt.ext.six as six
from salt.ext import six
import salt.version
from salt.exceptions import (
SaltCloudSystemExit,


@ -30,6 +30,8 @@ import logging
# Import salt cloud libs
import salt.config as config
import salt.utils.cloud
import salt.utils.event
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils import namespaced_function
from salt.exceptions import SaltCloudSystemExit


@ -46,9 +46,8 @@ from salt.exceptions import (
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
import salt.ext.six as six
from salt.ext import six
from salt.ext.six.moves import zip
from salt.ext.six import string_types
# Import Third Party Libs
try:
@ -209,7 +208,7 @@ def get_image(vm_):
vm_image = config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
)
if not isinstance(vm_image, string_types):
if not isinstance(vm_image, six.string_types):
vm_image = str(vm_image)
for image in images:
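The last visible hunk is one of many in this diff that replace a bare isinstance(..., str) check with six.string_types before coercing a value. A small sketch of why that matters on Python 2, where a unicode configuration value is not an instance of str (the function name and sample values below are made up for illustration):

import six  # vendored in Salt as salt.ext.six

def coerce_image_name(vm_image):
    # Accept any text type as-is; only non-string values (e.g. an integer
    # image id) are converted, so a unicode name is no longer re-encoded.
    if not isinstance(vm_image, six.string_types):
        vm_image = str(vm_image)
    return vm_image

print(coerce_image_name(42))         # '42'
print(coerce_image_name(u'ubuntu'))  # left untouched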

Some files were not shown because too many files have changed in this diff