Merge remote-tracking branch 'upstream/develop' into develop

This commit is contained in:
vnitinv 2017-02-01 16:26:56 +05:30
commit 829fdf4e49
91 changed files with 4382 additions and 943 deletions

View File

@ -700,6 +700,11 @@
# ext_pillar.
#ext_pillar_first: False
# The external pillars permitted to be used on-demand using pillar.ext
#on_demand_ext_pillar:
# - libvirt
# - virtkey
# The pillar_gitfs_ssl_verify option specifies whether to ignore ssl certificate
# errors when contacting the pillar gitfs backend. You might want to set this to
# false if you're using a git backend that uses a self-signed certificate but

View File

@ -239,8 +239,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2016.11.1' # latest release
previous_release = '2016.3.4' # latest release from previous branch
latest_release = '2016.11.2' # latest release
previous_release = '2016.3.5' # latest release from previous branch
previous_release_dir = '2016.3' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch

View File

@ -2548,6 +2548,34 @@ configuration is the same as :conf_master:`file_roots`:
prod:
- /srv/pillar/prod
.. conf_master:: on_demand_ext_pillar
``on_demand_ext_pillar``
------------------------
.. versionadded:: 2016.3.6,2016.11.3,Nitrogen
Default: ``['libvirt', 'virtkey']``
The external pillars permitted to be used on-demand using :py:func:`pillar.ext
<salt.modules.pillar.ext>`.
.. code-block:: yaml
on_demand_ext_pillar:
- libvirt
- virtkey
- git
.. warning::
This will allow minions to request specific pillar data via
:py:func:`pillar.ext <salt.modules.pillar.ext>`, and may be considered a
security risk. However, pillar data generated in this way will not affect
the :ref:`in-memory pillar data <pillar-in-memory>`, so this risk is
limited to instances in which states/modules/etc. (built-in or custom) rely
upon pillar data generated by :py:func:`pillar.ext
<salt.modules.pillar.ext>`.
.. conf_master:: pillar_opts
``pillar_opts``

View File

@ -1608,6 +1608,35 @@ the pillar environments.
prod:
- /srv/pillar/prod
.. conf_minion:: on_demand_ext_pillar
``on_demand_ext_pillar``
------------------------
.. versionadded:: 2016.3.6,2016.11.3,Nitrogen
Default: ``['libvirt', 'virtkey']``
When using a local :conf_minion:`file_client`, this option controls which
external pillars are permitted to be used on-demand using :py:func:`pillar.ext
<salt.modules.pillar.ext>`.
.. code-block:: yaml
on_demand_ext_pillar:
- libvirt
- virtkey
- git
.. warning::
This will allow a masterless minion to request specific pillar data via
:py:func:`pillar.ext <salt.modules.pillar.ext>`, and may be considered a
security risk. However, pillar data generated in this way will not affect
the :ref:`in-memory pillar data <pillar-in-memory>`, so this risk is
limited to instances in which states/modules/etc. (built-in or custom) rely
upon pillar data generated by :py:func:`pillar.ext
<salt.modules.pillar.ext>`.
.. conf_minion:: pillarenv
``pillarenv``

View File

@ -70,7 +70,7 @@ To install fresh release of Salt minion on Jessie:
.. code-block:: bash
apt-get update
apt-get install python-zmq python-tornado/jessie-backports salt-common/stretch
apt-get install python-zmq python-systemd/jessie-backports python-tornado/jessie-backports salt-common/stretch
**Raspbian**:

View File

@ -5,6 +5,26 @@ Salt 2015.8.13 Release Notes
Version 2015.8.13 is a bugfix release for :ref:`2015.8.0 <release-2015-8-0>`.
Security Fixes
==============
CVE-2017-5192: local_batch client external authentication not respected
The ``LocalClient.cmd_batch()`` method does not accept ``external_auth``
credentials and so access to it from salt-api has been removed for now. This
vulnerability allows code execution for already-authenticated users and is only
in effect when running salt-api as the ``root`` user.
CVE-2017-5200: Salt-api allows arbitrary command execution on a salt-master via
Salt's ssh_client
Users of Salt-API and salt-ssh could execute a command on the salt master via a
hole when both systems were enabled.
We recommend everyone on the 2015.8 branch upgrade to a patched release as soon
as possible.
Changes for v2015.8.12..v2015.8.13
----------------------------------

View File

@ -0,0 +1,5 @@
============================
Salt 2015.8.14 Release Notes
============================
Version 2015.8.14 is a bugfix release for :ref:`2015.8.0 <release-2015-8-0>`.

View File

@ -4,8 +4,28 @@ Salt 2016.11.2 Release Notes
Version 2016.11.2 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
Security Fixes
==============
CVE-2017-5192: local_batch client external authentication not respected
The ``LocalClient.cmd_batch()`` method does not accept ``external_auth``
credentials and so access to it from salt-api has been removed for now. This
vulnerability allows code execution for already-authenticated users and is only
in effect when running salt-api as the ``root`` user.
CVE-2017-5200: Salt-api allows arbitrary command execution on a salt-master via
Salt's ssh_client
Users of Salt-API and salt-ssh could execute a command on the salt master via a
hole when both systems were enabled.
We recommend everyone upgrade to 2016.11.2 as soon as possible.
Changes for v2016.11.1..v2016.11.2
----------------------------------------
----------------------------------
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):

View File

@ -0,0 +1,5 @@
============================
Salt 2016.11.3 Release Notes
============================
Version 2016.11.3 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.

File diff suppressed because it is too large Load Diff

View File

@ -265,3 +265,7 @@ The ``git`` state had the following changes:
The ``glusterfs`` state had the following function removed:
- ``created``: Please use ``volume_present`` instead.
The ``openvswitch_port`` state had the following change:
- The ``type`` option was removed from the ``present`` function. Please use ``tunnel_type`` instead.

View File

@ -0,0 +1,5 @@
===========================
Salt 2016.3.6 Release Notes
===========================
Version 2016.3.6 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.

View File

@ -105,7 +105,7 @@ the key from the master when the minion has been gone for 60 seconds:
key.timeout:
- delete: 60
- require:
- status: statreg
- status: startreg
There are two stanzas in this formula, whose IDs are ``statreg`` and
``keydel``. The first stanza, ``statreg``, tells Thorium to keep track of
@ -136,7 +136,7 @@ to function properly you will also need to enable the ``status`` beacon in the
beacons:
status:
interval: 10
- interval: 10
This will cause the minion to use the status beacon to check in with the master
every 10 seconds.

View File

@ -96,9 +96,9 @@ variable in a Salt state.
.. code-block:: yaml
Create a file with contents from an environment variable:
file.managed:
- name: /tmp/hello
- contents: {{ salt['environ.get']('MYENVVAR') }}
file.managed:
- name: /tmp/hello
- contents: {{ salt['environ.get']('MYENVVAR') }}
Error checking:
@ -115,8 +115,7 @@ Error checking:
{% else %}
Fail - no environment passed in:
test:
A. fail_without_changes
test.fail_without_changes
{% endif %}

View File

@ -11,7 +11,7 @@ GitPython==1.0.1
idna==2.0
ioflo==1.5.0
ipaddress==1.0.16
Jinja2==2.8
Jinja2==2.9.4
libnacl==1.4.4
linode-python==1.1.1
Mako==1.0.3

70
pkg/smartos/salt-api.xml Normal file
View File

@ -0,0 +1,70 @@
<?xml version="1.0"?>
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<!--
Created by Manifold
-->
<service_bundle type="manifest" name="salt-api">
<service name="network/salt-api" type="service" version="1">
<create_default_instance enabled="false"/>
<single_instance/>
<dependency name="config-file"
grouping="require_all"
restart_on="none"
type="path">
<service_fmri value='file:///opt/local/etc/salt/minion'/>
</dependency>
<dependency name="network"
grouping="require_all"
restart_on="error"
type="service">
<service_fmri value="svc:/milestone/network:default"/>
</dependency>
<dependency name="filesystem"
grouping="require_all"
restart_on="error"
type="service">
<service_fmri value="svc:/system/filesystem/local"/>
</dependency>
<method_context/>
<exec_method type="method"
name="start"
exec="/opt/local/bin/salt-api -c %{config_dir}"
timeout_seconds="60"/>
<exec_method type="method"
name="stop"
exec=":kill"
timeout_seconds="60"/>
<property_group name="startd" type="framework">
<propval name="duration" type="astring" value="child"/>
<propval name="ignore_error" type="astring" value="core,signal"/>
</property_group>
<property_group name="application" type="application">
<propval name="config_file" type="astring" value="/opt/local/etc/salt/master"/>
<propval name="config_dir" type="astring" value="/opt/local/etc/salt"/>
</property_group>
<stability value="Unstable"/>
<template>
<common_name>
<loctext xml:lang="C">Salt API</loctext>
</common_name>
<documentation>
<doc_link name="SaltStack Documentation"
uri="http://docs.saltstack.com"/>
</documentation>
</template>
</service>
</service_bundle>

View File

@ -69,7 +69,7 @@ If NOT Exist "%PreDir%" mkdir "%PreDir%"
:: Check for 64 bit by finding the Program Files (x86) directory
Set Url64="http://repo.saltstack.com/windows/dependencies/64/vcredist_x64_2008_mfc.exe"
Set Url32="http://repo.saltstack.com/windows/dependencies/32/vcredist_x86_2008_mfc.exe"
If Exist "C:\Program Files (x86)" (
If Defined ProgramFiles(x86) (
bitsadmin /transfer "VCRedist 2008 MFC AMD64" "%Url64%" "%PreDir%\vcredist.exe"
) Else (
bitsadmin /transfer "VCRedist 2008 MFC x86" "%Url32%" "%PreDir%\vcredist.exe"

View File

@ -20,6 +20,7 @@
!include "StrFunc.nsh"
!include "x64.nsh"
!include "WinMessages.nsh"
!include "WinVer.nsh"
${StrLoc}
${StrStrAdv}
@ -193,35 +194,40 @@ ShowUnInstDetails show
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
Section -Prerequisites
!define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
!define VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
; VCRedist only needed on Server 2008/Vista and below
${If} ${AtMostWin2008}
Var /GLOBAL VcRedistGuid
Var /GLOBAL NeedVcRedist
${If} ${CPUARCH} == "AMD64"
StrCpy $VcRedistGuid ${VC_REDIST_X64_GUID}
${Else}
StrCpy $VcRedistGuid ${VC_REDIST_X86_GUID}
${EndIf}
!define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
!define VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
Push $VcRedistGuid
Call MsiQueryProductState
${If} $NeedVcRedist == "True"
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
"VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \
/SD IDYES IDNO endVcRedist
Var /GLOBAL VcRedistGuid
Var /GLOBAL NeedVcRedist
${If} ${CPUARCH} == "AMD64"
StrCpy $VcRedistGuid ${VC_REDIST_X64_GUID}
${Else}
StrCpy $VcRedistGuid ${VC_REDIST_X86_GUID}
${EndIf}
ClearErrors
; The Correct version of VCRedist is copied over by "build_pkg.bat"
SetOutPath "$INSTDIR\"
File "..\prereqs\vcredist.exe"
ExecWait "$INSTDIR\vcredist.exe /qb!"
IfErrors 0 endVcRedist
MessageBox MB_OK \
"VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \
/SD IDOK
Push $VcRedistGuid
Call MsiQueryProductState
${If} $NeedVcRedist == "True"
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
"VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \
/SD IDYES IDNO endVcRedist
ClearErrors
; The Correct version of VCRedist is copied over by "build_pkg.bat"
SetOutPath "$INSTDIR\"
File "..\prereqs\vcredist.exe"
ExecWait "$INSTDIR\vcredist.exe /qb!"
IfErrors 0 endVcRedist
MessageBox MB_OK \
"VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \
/SD IDOK
endVcRedist:
${EndIf}
endVcRedist:
${EndIf}
SectionEnd

View File

@ -20,6 +20,7 @@
!include "StrFunc.nsh"
!include "x64.nsh"
!include "WinMessages.nsh"
!include "WinVer.nsh"
${StrLoc}
${StrStrAdv}
@ -323,35 +324,40 @@ ShowUnInstDetails show
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
Section -Prerequisites
!define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
!define VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
; VCRedist only needed on Server 2008/Vista and below
${If} ${AtMostWin2008}
Var /GLOBAL VcRedistGuid
Var /GLOBAL NeedVcRedist
${If} ${CPUARCH} == "AMD64"
StrCpy $VcRedistGuid ${VC_REDIST_X64_GUID}
${Else}
StrCpy $VcRedistGuid ${VC_REDIST_X86_GUID}
${EndIf}
!define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
!define VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
Push $VcRedistGuid
Call MsiQueryProductState
${If} $NeedVcRedist == "True"
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
"VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \
/SD IDYES IDNO endVcRedist
Var /GLOBAL VcRedistGuid
Var /GLOBAL NeedVcRedist
${If} ${CPUARCH} == "AMD64"
StrCpy $VcRedistGuid ${VC_REDIST_X64_GUID}
${Else}
StrCpy $VcRedistGuid ${VC_REDIST_X86_GUID}
${EndIf}
ClearErrors
; The Correct version of VCRedist is copied over by "build_pkg.bat"
SetOutPath "$INSTDIR\"
File "..\prereqs\vcredist.exe"
ExecWait "$INSTDIR\vcredist.exe /qb!"
IfErrors 0 endVcRedist
MessageBox MB_OK \
"VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \
/SD IDOK
Push $VcRedistGuid
Call MsiQueryProductState
${If} $NeedVcRedist == "True"
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
"VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \
/SD IDYES IDNO endVcRedist
ClearErrors
; The Correct version of VCRedist is copied over by "build_pkg.bat"
SetOutPath "$INSTDIR\"
File "..\prereqs\vcredist.exe"
ExecWait "$INSTDIR\vcredist.exe /qb!"
IfErrors 0 endVcRedist
MessageBox MB_OK \
"VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \
/SD IDOK
endVcRedist:
${EndIf}
endVcRedist:
${EndIf}
SectionEnd

View File

@ -25,6 +25,14 @@ authenticated against. This defaults to `login`
The Python interface to PAM does not support authenticating as ``root``.
.. note:: Using PAM groups with SSSD groups on python2.
To use sssd with the PAM eauth module and groups the `pysss` module is
needed. On RedHat/CentOS this is `python-sss`.
This should not be needed with python >= 3.3, because the `os` module has the
`getgrouplist` function.
'''
# Import Python Libs

View File

@ -43,7 +43,7 @@ def __validate__(config):
def beacon(config):
'''
r'''
Monitor the disk usage of the minion
Specify thresholds for each disk and only emit a beacon if any of them are
@ -66,7 +66,24 @@ def beacon(config):
- 'c:\': 90%
- 'd:\': 50%
Regular expressions can be used as mount points.
.. code-block:: yaml
beacons:
diskusage:
- '^\/(?!home).*$': 90%
- '^[a-zA-Z]:\$': 50%
The first one will match all mounted disks beginning with "/", except /home
The second one will match disks from A:\ to Z:\ on a Windows system
Note that regular expressions are evaluated after static mount points,
which means that if a regular expression matches another defined mount point,
it will override the previously defined threshold.
'''
parts = psutil.disk_partitions(all=False)
ret = []
for mounts in config:
mount = mounts.keys()[0]
@ -75,7 +92,12 @@ def beacon(config):
_current_usage = psutil.disk_usage(mount)
except OSError:
# Ensure a valid mount point
log.error('{0} is not a valid mount point, skipping.'.format(mount))
log.warning('{0} is not a valid mount point, try regex.'.format(mount))
for part in parts:
if re.match(mount, part.mountpoint):
row = {}
row[part.mountpoint] = mounts[mount]
config.append(row)
continue
current_usage = _current_usage.percent

View File

@ -140,7 +140,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
try:
if self.options.subset:
cmd_func = self.local_client.cmd_subset
kwargs['sub'] = True
kwargs['sub'] = self.options.subset
kwargs['cli'] = True
else:
cmd_func = self.local_client.cmd_cli

View File

@ -493,7 +493,7 @@ class LocalClient(object):
'sys.list_functions',
tgt_type=tgt_type,
**kwargs)
minions = minion_ret.keys()
minions = list(minion_ret)
random.shuffle(minions)
f_tgt = []
for minion in minions:

View File

@ -446,6 +446,17 @@ def __get_network(conn, vm_):
return conn.ex_get_network(network)
def __get_subnetwork(vm_):
'''
Get configured subnetwork.
'''
ex_subnetwork = config.get_cloud_config_value(
'subnetwork', vm_, __opts__,
default='default', search_global=False)
return ex_subnetwork
def __get_ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
@ -2226,6 +2237,7 @@ def request_instance(vm_):
'image': __get_image(conn, vm_),
'location': __get_location(conn, vm_),
'ex_network': __get_network(conn, vm_),
'ex_subnetwork': __get_subnetwork(vm_),
'ex_tags': __get_tags(vm_),
'ex_metadata': __get_metadata(vm_),
}

View File

@ -298,7 +298,7 @@ def get_conn():
kwargs['project_id'] = vm_['tenant']
kwargs['auth_url'] = vm_['identity_url']
kwargs['region_name'] = vm_['compute_region']
kwargs['use_keystoneauth'] = vm_['use_keystoneauth']
kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)
if 'password' in vm_:
kwargs['password'] = vm_['password']

View File

@ -846,19 +846,18 @@ def _assign_floating_ips(vm_, conn, kwargs):
pool = OpenStack_1_1_FloatingIpPool(
net['floating'], conn.connection
)
for idx in [pool.create_floating_ip()]:
for idx in pool.list_floating_ips():
if idx.node_id is None:
floating.append(idx)
if not floating:
# Note(pabelanger): We have no available floating IPs.
# For now, we raise an exception and exit.
# A future enhancement might be to allow salt-cloud
# to dynamically allocate new address but that might
raise SaltCloudSystemExit(
'Floating pool \'{0}\' does not have any more '
'please create some more or use a different '
'pool.'.format(net['floating'])
)
try:
floating.append(pool.create_floating_ip())
except Exception as e:
raise SaltCloudSystemExit(
'Floating pool \'{0}\' does not have any more '
'please create some more or use a different '
'pool.'.format(net['floating'])
)
# otherwise, attempt to obtain list without specifying pool
# this is the same as 'nova floating-ip-list'
elif ssh_interface(vm_) != 'private_ips':
@ -874,15 +873,13 @@ def _assign_floating_ips(vm_, conn, kwargs):
if idx.node_id is None:
floating.append(idx)
if not floating:
# Note(pabelanger): We have no available floating IPs.
# For now, we raise an exception and exit.
# A future enhancement might be to allow salt-cloud to
# dynamically allocate new address but that might be
# tricky to manage.
raise SaltCloudSystemExit(
'There are no more floating IP addresses '
'available, please create some more'
)
try:
floating.append(pool.create_floating_ip())
except Exception as e:
raise SaltCloudSystemExit(
'There are no more floating IP addresses '
'available, please create some more'
)
except Exception as e:
if str(e).startswith('404'):
pass

View File

@ -48,8 +48,6 @@ try:
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import platform
import salt.grains.core
log = logging.getLogger(__name__)
@ -78,10 +76,13 @@ def _gather_buffer_space():
Result is in bytes.
'''
if HAS_PSUTIL:
if HAS_PSUTIL and psutil.version_info >= (0, 6, 0):
# Oh good, we have psutil. This will be quick.
total_mem = psutil.virtual_memory().total
else:
# Avoid loading core grains unless absolutely required
import platform
import salt.grains.core
# We need to load up ``mem_total`` grain. Let's mimic required OS data.
os_data = {'kernel': platform.system()}
grains = salt.grains.core._memdata(os_data)
@ -258,6 +259,9 @@ VALID_OPTS = {
# A map of saltenvs and fileserver backend locations
'pillar_roots': dict,
# The external pillars permitted to be used on-demand using pillar.ext
'on_demand_ext_pillar': list,
# The type of hashing algorithm to use when doing file comparisons
'hash_type': str,
@ -1040,6 +1044,7 @@ DEFAULT_MINION_OPTS = {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
salt.syspaths.SPM_PILLAR_PATH]
},
'on_demand_ext_pillar': ['libvirt', 'virtkey'],
'git_pillar_base': 'master',
'git_pillar_branch': 'master',
'git_pillar_env': '',
@ -1240,6 +1245,7 @@ DEFAULT_MASTER_OPTS = {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
salt.syspaths.SPM_PILLAR_PATH]
},
'on_demand_ext_pillar': ['libvirt', 'virtkey'],
'thorium_interval': 0.5,
'thorium_roots': {
'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR],
@ -1385,6 +1391,7 @@ DEFAULT_MASTER_OPTS = {
'event_match_type': 'startswith',
'runner_returns': True,
'serial': 'msgpack',
'test': False,
'state_verbose': True,
'state_output': 'full',
'state_output_diff': False,

View File

@ -111,12 +111,12 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
if salt.utils.is_windows():
# Calculate function references since they can't be pickled.
self.utils = salt.loader.utils(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
if self.opts['__role'] == 'master':
self.runners = salt.loader.runner(self.opts, utils=self.utils)
else:
self.runners = []
self.funcs = salt.loader.minion_mods(self.opts, utils=self.utils)
self.funcs = salt.loader.minion_mods(self.opts, utils=self.utils, proxy=self.proxy)
self.engine = salt.loader.engines(self.opts,
self.funcs,

View File

@ -19,7 +19,6 @@ them onto a logstash endpoint.
# Import python libraries
from __future__ import absolute_import
import logging
import json
# Import salt libs
import salt.utils.event
@ -66,4 +65,4 @@ def start(host, port=5959, tag='salt/engine/logstash'):
while True:
event = event_bus.get_event()
if event:
logstash_logger.info(tag, extra=json.dumps(event))
logstash_logger.info(tag, extra=event)

View File

@ -288,7 +288,7 @@ def engines(opts, functions, runners, proxy=None):
)
def proxy(opts, functions=None, returners=None, whitelist=None):
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
@ -296,7 +296,7 @@ def proxy(opts, functions=None, returners=None, whitelist=None):
_module_dirs(opts, 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions, '__ret__': returners},
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
)
ret.pack['__proxy__'] = ret
@ -304,7 +304,7 @@ def proxy(opts, functions=None, returners=None, whitelist=None):
return ret
def returners(opts, functions, whitelist=None, context=None):
def returners(opts, functions, whitelist=None, context=None, proxy=None):
'''
Returns the returner modules
'''
@ -313,11 +313,11 @@ def returners(opts, functions, whitelist=None, context=None):
opts,
tag='returner',
whitelist=whitelist,
pack={'__salt__': functions, '__context__': context},
pack={'__salt__': functions, '__context__': context, '__proxy__': proxy or {}},
)
def utils(opts, whitelist=None, context=None):
def utils(opts, whitelist=None, context=None, proxy=proxy):
'''
Returns the utility modules
'''
@ -326,7 +326,7 @@ def utils(opts, whitelist=None, context=None):
opts,
tag='utils',
whitelist=whitelist,
pack={'__context__': context},
pack={'__context__': context, '__proxy__': proxy or {}},
)
@ -456,7 +456,7 @@ def thorium(opts, functions, runners):
return ret
def states(opts, functions, utils, serializers, whitelist=None):
def states(opts, functions, utils, serializers, whitelist=None, proxy=None):
'''
Returns the state modules
@ -476,7 +476,7 @@ def states(opts, functions, utils, serializers, whitelist=None):
_module_dirs(opts, 'states'),
opts,
tag='states',
pack={'__salt__': functions},
pack={'__salt__': functions, '__proxy__': proxy or {}},
whitelist=whitelist,
)
ret.pack['__states__'] = ret
@ -485,7 +485,7 @@ def states(opts, functions, utils, serializers, whitelist=None):
return ret
def beacons(opts, functions, context=None):
def beacons(opts, functions, context=None, proxy=None):
'''
Load the beacon modules
@ -497,8 +497,8 @@ def beacons(opts, functions, context=None):
_module_dirs(opts, 'beacons'),
opts,
tag='beacons',
pack={'__context__': context, '__salt__': functions},
virtual_funcs=['__validate__'],
pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}},
)
@ -919,7 +919,7 @@ def netapi(opts):
)
def executors(opts, functions=None, context=None):
def executors(opts, functions=None, context=None, proxy=None):
'''
Returns the executor modules
'''
@ -927,7 +927,7 @@ def executors(opts, functions=None, context=None):
_module_dirs(opts, 'executors', 'executor'),
opts,
tag='executor',
pack={'__salt__': functions, '__context__': context or {}},
pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}},
)

View File

@ -1882,9 +1882,9 @@ class ClearFuncs(object):
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}

View File

@ -1232,7 +1232,7 @@ class Minion(MinionBase):
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
@ -1240,7 +1240,7 @@ class Minion(MinionBase):
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
@ -1250,7 +1250,7 @@ class Minion(MinionBase):
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
@ -2149,6 +2149,10 @@ class Minion(MinionBase):
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1]))
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
@ -3218,6 +3222,9 @@ class ProxyMinion(Minion):
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
@ -3228,6 +3235,10 @@ class ProxyMinion(Minion):
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.

View File

@ -73,7 +73,7 @@ Connection module for Amazon Lambda
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
@ -97,11 +97,12 @@ log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
#pylint: disable=unused-import
# pylint: disable=unused-import
import boto
import boto3
#pylint: enable=unused-import
# pylint: enable=unused-import
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
HAS_BOTO = True
@ -117,9 +118,11 @@ def __virtual__():
'''
required_boto_version = '2.8.0'
required_boto3_version = '1.2.5'
required_botocore_version = '1.5.2'
# the boto_lambda execution module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
# botocore version >= 1.5.2 is required due to lambda environment variables
if not HAS_BOTO:
return (False, 'The boto_lambda module could not be loaded: '
'boto libraries not found')
@ -129,6 +132,9 @@ def __virtual__():
elif _LooseVersion(boto3.__version__) < _LooseVersion(required_boto3_version):
return (False, 'The boto_lambda module could not be loaded: '
'boto version {0} or later must be installed.'.format(required_boto3_version))
elif _LooseVersion(found_botocore_version) < _LooseVersion(required_botocore_version):
return (False, 'The boto_apigateway module could not be loaded: '
'botocore version {0} or later must be installed.'.format(required_botocore_version))
else:
return True
@ -140,8 +146,7 @@ def __init__(opts):
def _find_function(name,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Given function name, find and return matching Lambda information.
'''
@ -155,7 +160,7 @@ def _find_function(name,
def function_exists(FunctionName, region=None, key=None,
keyid=None, profile=None):
keyid=None, profile=None):
'''
Given a function name, check to see if the given function name exists.
@ -172,7 +177,7 @@ def function_exists(FunctionName, region=None, key=None,
try:
func = _find_function(FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(func)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
@ -201,10 +206,22 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
S3Bucket=None, S3Key=None, S3ObjectVersion=None,
Description="", Timeout=3, MemorySize=128, Publish=False,
WaitForRole=False, RoleRetries=5,
region=None, key=None, keyid=None, profile=None, VpcConfig=None):
region=None, key=None, keyid=None, profile=None,
VpcConfig=None, Environment=None):
'''
Given a valid config, create a function.
Environment
The parent object that contains your environment's configuration
settings. This is a dictionary of the form:
{
'Variables': {
'VariableName': 'VariableValue'
}
}
.. versionadded:: Nitrogen
Returns {created: true} if the function was created and returns
{created: False} if the function was not created.
@ -216,29 +233,32 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
'''
role_arn = _get_role_arn(Role, region=region, key=key, keyid=keyid, profile=profile)
role_arn = _get_role_arn(Role, region=region, key=key,
keyid=keyid, profile=profile)
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if ZipFile:
if S3Bucket or S3Key or S3ObjectVersion:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
'S3Bucket and S3Key must be provided.')
code = {
'ZipFile': _filedata(ZipFile),
'ZipFile': _filedata(ZipFile),
}
else:
if not S3Bucket or not S3Key:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
'S3Bucket and S3Key must be provided.')
code = {
'S3Bucket': S3Bucket,
'S3Key': S3Key,
'S3Bucket': S3Bucket,
'S3Key': S3Key,
}
if S3ObjectVersion:
code['S3ObjectVersion'] = S3ObjectVersion
kwargs = {}
if VpcConfig is not None:
kwargs['VpcConfig'] = VpcConfig
if Environment is not None:
kwargs['Environment'] = Environment
if WaitForRole:
retrycount = RoleRetries
else:
@ -246,20 +266,23 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
for retry in range(retrycount, 0, -1):
try:
func = conn.create_function(FunctionName=FunctionName, Runtime=Runtime, Role=role_arn, Handler=Handler,
Code=code, Description=Description, Timeout=Timeout, MemorySize=MemorySize,
Publish=Publish, **kwargs)
Code=code, Description=Description, Timeout=Timeout, MemorySize=MemorySize,
Publish=Publish, **kwargs)
except ClientError as e:
if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
log.info('Function not created but IAM role may not have propagated, will retry')
log.info(
'Function not created but IAM role may not have propagated, will retry')
# exponential backoff
time.sleep((2 ** (RoleRetries - retry)) + (random.randint(0, 1000) / 1000))
time.sleep((2 ** (RoleRetries - retry)) +
(random.randint(0, 1000) / 1000))
continue
else:
raise
else:
break
if func:
log.info('The newly created function name is {0}'.format(func['FunctionName']))
log.info('The newly created function name is {0}'.format(
func['FunctionName']))
return {'created': True, 'name': func['FunctionName']}
else:
@ -287,7 +310,8 @@ def delete_function(FunctionName, Qualifier=None, region=None, key=None, keyid=N
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Qualifier:
conn.delete_function(FunctionName=FunctionName, Qualifier=Qualifier)
conn.delete_function(
FunctionName=FunctionName, Qualifier=Qualifier)
else:
conn.delete_function(FunctionName=FunctionName)
return {'deleted': True}
@ -296,7 +320,7 @@ def delete_function(FunctionName, Qualifier=None, region=None, key=None, keyid=N
def describe_function(FunctionName, region=None, key=None,
keyid=None, profile=None):
keyid=None, profile=None):
'''
Given a function name describe its properties.
@ -312,11 +336,11 @@ def describe_function(FunctionName, region=None, key=None,
try:
func = _find_function(FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
region=region, key=key, keyid=keyid, profile=profile)
if func:
keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
'CodeSize', 'Description', 'Timeout', 'MemorySize', 'FunctionArn',
'LastModified', 'VpcConfig')
'CodeSize', 'Description', 'Timeout', 'MemorySize',
'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
return {'function': dict([(k, func.get(k)) for k in keys])}
else:
return {'function': None}
@ -325,12 +349,24 @@ def describe_function(FunctionName, region=None, key=None,
def update_function_config(FunctionName, Role=None, Handler=None,
Description=None, Timeout=None, MemorySize=None,
region=None, key=None, keyid=None, profile=None, VpcConfig=None,
WaitForRole=False, RoleRetries=5):
Description=None, Timeout=None, MemorySize=None,
region=None, key=None, keyid=None, profile=None,
VpcConfig=None, WaitForRole=False, RoleRetries=5,
Environment=None):
'''
Update the named lambda function to the configuration.
Environment
The parent object that contains your environment's configuration
settings. This is a dictionary of the form:
{
'Variables': {
'VariableName': 'VariableValue'
}
}
.. versionadded:: Nitrogen
Returns {updated: true} if the function was updated and returns
{updated: False} if the function was not updated.
@ -347,7 +383,8 @@ def update_function_config(FunctionName, Role=None, Handler=None,
'Description': Description,
'Timeout': Timeout,
'MemorySize': MemorySize,
'VpcConfig': VpcConfig}
'VpcConfig': VpcConfig,
'Environment': Environment}
for val, var in six.iteritems(options):
if var:
@ -366,9 +403,11 @@ def update_function_config(FunctionName, Role=None, Handler=None,
r = conn.update_function_configuration(**args)
except ClientError as e:
if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
log.info('Function not updated but IAM role may not have propagated, will retry')
log.info(
'Function not updated but IAM role may not have propagated, will retry')
# exponential backoff
time.sleep((2 ** (RoleRetries - retry)) + (random.randint(0, 1000) / 1000))
time.sleep((2 ** (RoleRetries - retry)) +
(random.randint(0, 1000) / 1000))
continue
else:
raise
@ -376,8 +415,8 @@ def update_function_config(FunctionName, Role=None, Handler=None,
break
if r:
keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
'CodeSize', 'Description', 'Timeout', 'MemorySize', 'FunctionArn',
'LastModified', 'VpcConfig')
'CodeSize', 'Description', 'Timeout', 'MemorySize',
'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Function was not updated')
@ -387,8 +426,8 @@ def update_function_config(FunctionName, Role=None, Handler=None,
def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
S3ObjectVersion=None, Publish=False,
region=None, key=None, keyid=None, profile=None):
S3ObjectVersion=None, Publish=False,
region=None, key=None, keyid=None, profile=None):
'''
Upload the given code to the named lambda function.
@ -408,14 +447,14 @@ def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
if ZipFile:
if S3Bucket or S3Key or S3ObjectVersion:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
'S3Bucket and S3Key must be provided.')
r = conn.update_function_code(FunctionName=FunctionName,
ZipFile=_filedata(ZipFile),
Publish=Publish)
ZipFile=_filedata(ZipFile),
Publish=Publish)
else:
if not S3Bucket or not S3Key:
raise SaltInvocationError('Either ZipFile must be specified, or '
'S3Bucket and S3Key must be provided.')
'S3Bucket and S3Key must be provided.')
args = {
'S3Bucket': S3Bucket,
'S3Key': S3Key,
@ -423,11 +462,11 @@ def update_function_code(FunctionName, ZipFile=None, S3Bucket=None, S3Key=None,
if S3ObjectVersion:
args['S3ObjectVersion'] = S3ObjectVersion
r = conn.update_function_code(FunctionName=FunctionName,
Publish=Publish, **args)
Publish=Publish, **args)
if r:
keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
'CodeSize', 'Description', 'Timeout', 'MemorySize', 'FunctionArn',
'LastModified', 'VpcConfig')
'CodeSize', 'Description', 'Timeout', 'MemorySize',
'FunctionArn', 'LastModified', 'VpcConfig', 'Environment')
return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Function was not updated')
@ -462,15 +501,15 @@ def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
conn.add_permission(FunctionName=FunctionName, StatementId=StatementId,
Action=Action, Principal=str(Principal),
**kwargs)
Action=Action, Principal=str(Principal),
**kwargs)
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
def remove_permission(FunctionName, StatementId, Qualifier=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Remove a permission from a lambda function.
@ -491,7 +530,7 @@ def remove_permission(FunctionName, StatementId, Qualifier=None,
if Qualifier is not None:
kwargs['Qualifier'] = Qualifier
conn.remove_permission(FunctionName=FunctionName, StatementId=StatementId,
**kwargs)
**kwargs)
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
@ -521,7 +560,7 @@ def get_permissions(FunctionName, Qualifier=None,
# The get_policy call is not symmetric with add/remove_permissions. So
# massage it until it is, for better ease of use.
policy = conn.get_policy(FunctionName=FunctionName,
**kwargs)
**kwargs)
policy = policy.get('Policy', {})
if isinstance(policy, six.string_types):
policy = json.loads(policy)
@ -540,9 +579,11 @@ def get_permissions(FunctionName, Qualifier=None,
'Principal': principal,
}
if 'ArnLike' in condition:
permission['SourceArn'] = condition['ArnLike'].get('AWS:SourceArn')
permission['SourceArn'] = condition[
'ArnLike'].get('AWS:SourceArn')
if 'StringEquals' in condition:
permission['SourceAccount'] = condition['StringEquals'].get('AWS:SourceAccount')
permission['SourceAccount'] = condition[
'StringEquals'].get('AWS:SourceAccount')
permissions[statement.get('Sid')] = permission
return {'permissions': permissions}
except ClientError as e:
@ -553,7 +594,7 @@ def get_permissions(FunctionName, Qualifier=None,
def list_function_versions(FunctionName,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
List the versions available for the given function.
@ -572,7 +613,7 @@ def list_function_versions(FunctionName,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
for ret in salt.utils.boto3.paged_call(conn.list_versions_by_function,
FunctionName=FunctionName):
FunctionName=FunctionName):
vers.extend(ret['Versions'])
if not bool(vers):
log.warning('No versions found')
@ -582,7 +623,7 @@ def list_function_versions(FunctionName,
def create_alias(FunctionName, Name, FunctionVersion, Description="",
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, create an alias to a function.
@ -599,9 +640,10 @@ def create_alias(FunctionName, Name, FunctionVersion, Description="",
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
alias = conn.create_alias(FunctionName=FunctionName, Name=Name,
FunctionVersion=FunctionVersion, Description=Description)
FunctionVersion=FunctionVersion, Description=Description)
if alias:
log.info('The newly created alias name is {0}'.format(alias['Name']))
log.info(
'The newly created alias name is {0}'.format(alias['Name']))
return {'created': True, 'name': alias['Name']}
else:
@ -635,8 +677,7 @@ def delete_alias(FunctionName, Name, region=None, key=None, keyid=None, profile=
def _find_alias(FunctionName, Name, FunctionVersion=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Given function name and alias name, find and return matching alias information.
'''
@ -656,7 +697,7 @@ def _find_alias(FunctionName, Name, FunctionVersion=None,
def alias_exists(FunctionName, Name, region=None, key=None,
keyid=None, profile=None):
keyid=None, profile=None):
'''
Given a function name and alias name, check to see if the given alias exists.
@ -673,14 +714,14 @@ def alias_exists(FunctionName, Name, region=None, key=None,
try:
alias = _find_alias(FunctionName, Name,
region=region, key=key, keyid=keyid, profile=profile)
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(alias)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def describe_alias(FunctionName, Name, region=None, key=None,
keyid=None, profile=None):
keyid=None, profile=None):
'''
Given a function name and alias name describe the properties of the alias.
@ -696,7 +737,7 @@ def describe_alias(FunctionName, Name, region=None, key=None,
try:
alias = _find_alias(FunctionName, Name,
region=region, key=key, keyid=keyid, profile=profile)
region=region, key=key, keyid=keyid, profile=profile)
if alias:
keys = ('AliasArn', 'Name', 'FunctionVersion', 'Description')
return {'alias': dict([(k, alias.get(k)) for k in keys])}
@ -707,7 +748,7 @@ def describe_alias(FunctionName, Name, region=None, key=None,
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Update the named alias to the configuration.
@ -741,8 +782,8 @@ def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
Enabled=True, BatchSize=100,
region=None, key=None, keyid=None, profile=None):
Enabled=True, BatchSize=100,
region=None, key=None, keyid=None, profile=None):
'''
Identifies a stream as an event source for a Lambda function. It can be
either an Amazon Kinesis stream or an Amazon DynamoDB stream. AWS Lambda
@ -766,7 +807,8 @@ def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
BatchSize=BatchSize,
StartingPosition=StartingPosition)
if obj:
log.info('The newly created event source mapping ID is {0}'.format(obj['UUID']))
log.info(
'The newly created event source mapping ID is {0}'.format(obj['UUID']))
return {'created': True, 'id': obj['UUID']}
else:
@ -777,7 +819,7 @@ def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
def get_event_source_mapping_ids(EventSourceArn, FunctionName,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Given an event source and function name, return a list of mapping IDs
@ -793,28 +835,29 @@ def get_event_source_mapping_ids(EventSourceArn, FunctionName,
try:
mappings = []
for maps in salt.utils.boto3.paged_call(conn.list_event_source_mappings,
EventSourceArn=EventSourceArn,
FunctionName=FunctionName):
mappings.extend([mapping['UUID'] for mapping in maps['EventSourceMappings']])
EventSourceArn=EventSourceArn,
FunctionName=FunctionName):
mappings.extend([mapping['UUID']
for mapping in maps['EventSourceMappings']])
return mappings
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
def _get_ids(UUID=None, EventSourceArn=None, FunctionName=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
if UUID:
if EventSourceArn or FunctionName:
raise SaltInvocationError('Either UUID must be specified, or '
'EventSourceArn and FunctionName must be provided.')
'EventSourceArn and FunctionName must be provided.')
return [UUID]
else:
if not EventSourceArn or not FunctionName:
raise SaltInvocationError('Either UUID must be specified, or '
'EventSourceArn and FunctionName must be provided.')
'EventSourceArn and FunctionName must be provided.')
return get_event_source_mapping_ids(EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
region=region, key=key, keyid=keyid, profile=profile)
def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None,
@ -834,7 +877,7 @@ def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=Non
'''
ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
FunctionName=FunctionName)
FunctionName=FunctionName)
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for id in ids:
@ -845,8 +888,8 @@ def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=Non
def event_source_mapping_exists(UUID=None, EventSourceArn=None,
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an event source mapping ID or an event source ARN and FunctionName,
check whether the mapping exists.
@ -873,8 +916,8 @@ def event_source_mapping_exists(UUID=None, EventSourceArn=None,
def describe_event_source_mapping(UUID=None, EventSourceArn=None,
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an event source mapping ID or an event source ARN and FunctionName,
obtain the current settings of that mapping.
@ -890,7 +933,7 @@ def describe_event_source_mapping(UUID=None, EventSourceArn=None,
'''
ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
FunctionName=FunctionName)
FunctionName=FunctionName)
if len(ids) < 1:
return {'event_source_mapping': None}
@ -910,8 +953,8 @@ def describe_event_source_mapping(UUID=None, EventSourceArn=None,
def update_event_source_mapping(UUID,
FunctionName=None, Enabled=None, BatchSize=None,
region=None, key=None, keyid=None, profile=None):
FunctionName=None, Enabled=None, BatchSize=None,
region=None, key=None, keyid=None, profile=None):
'''
Update the event source mapping identified by the UUID.

View File

@ -295,6 +295,11 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
:type params: str
:return: A dictionary from the return values of the query
:rtype: list[dict]
.. code-block:: bash
# CLI Example: Simple CQL query
salt '*' cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
'''
try:
cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass)
@ -363,6 +368,19 @@ def cql_query_with_prepare(query, statement_name, statement_arguments, async=Fal
:type params: str
:return: A dictionary from the return values of the query
:rtype: list[dict]
CLI Example:
.. code-block:: bash
# Insert data asynchronously
salt this-node cassandra_cql.cql_query_with_prepare "name_insert" "INSERT INTO USERS (first_name, last_name) VALUES (?, ?)" \
statement_arguments=['John','Doe'], async=True
# Select data, should not be asynchronous because there is not currently a facility to return data from a future
salt this-node cassandra_cql.cql_query_with_prepare "name_select" "SELECT * FROM USERS WHERE first_name=?" \
statement_arguments=['John']
'''
try:
cluster, session = _connect(contact_points=contact_points, port=port,
@ -663,6 +681,7 @@ def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication
.. code-block:: bash
# CLI Example:
salt 'minion1' cassandra_cql.create_keyspace keyspace=newkeyspace
salt 'minion1' cassandra_cql.create_keyspace keyspace=newkeyspace replication_strategy=NetworkTopologyStrategy \
@ -757,6 +776,8 @@ def list_users(contact_points=None, port=None, cql_user=None, cql_pass=None):
:return: The list of existing users.
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion1' cassandra_cql.list_users
@ -800,6 +821,8 @@ def create_user(username, password, superuser=False, contact_points=None, port=N
:return:
:rtype:
CLI Example:
.. code-block:: bash
salt 'minion1' cassandra_cql.create_user username=joe password=secret
@ -850,6 +873,8 @@ def list_permissions(username=None, resource=None, resource_type='keyspace', per
:return: Dictionary of permissions.
:rtype: dict
CLI Example:
.. code-block:: bash
salt 'minion1' cassandra_cql.list_permissions
@ -857,7 +882,7 @@ def list_permissions(username=None, resource=None, resource_type='keyspace', per
salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_keyspace permission=select
salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \
permission=select contact_points=minion1
permission=select contact_points=minion1
'''
keyspace_cql = "{0} {1}".format(resource_type, resource) if resource else "all keyspaces"
permission_cql = "{0} permission".format(permission) if permission else "all permissions"
@ -906,6 +931,8 @@ def grant_permission(username, resource=None, resource_type='keyspace', permissi
:return:
:rtype:
CLI Example:
.. code-block:: bash
salt 'minion1' cassandra_cql.grant_permission

View File

@ -373,6 +373,9 @@ def cache_file(path, saltenv='base'):
salt '*' cp.cache_file salt://foo/bar.conf saltenv=config
salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config
If the path being cached is a ``salt://`` URI, and the path does not exist,
then ``False`` will be returned.
.. note::
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command.

View File

@ -821,11 +821,12 @@ def _get_client(timeout=None):
except Exception as exc:
raise CommandExecutionError(
'Docker machine {0} failed: {1}'.format(docker_machine, exc))
try:
__context__['docker.client'] = docker.Client(**client_kwargs)
except docker.errors.DockerException:
log.error('Could not initialize Docker client')
return False
except AttributeError:
# docker-py 2.0 renamed this client attribute
__context__['docker.client'] = docker.APIClient(**client_kwargs)
# Set a new timeout if one was passed
if timeout is not None and __context__['docker.client'].timeout != timeout:

View File

@ -54,7 +54,7 @@ import salt.utils.files
import salt.utils.locales
import salt.utils.templates
import salt.utils.url
from salt.exceptions import CommandExecutionError, SaltInvocationError, get_error_message as _get_error_message
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
log = logging.getLogger(__name__)
@ -1999,7 +1999,7 @@ def replace(path,
)
flags_num = _get_flags(flags)
cpattern = re.compile(str(pattern), flags_num)
cpattern = re.compile(salt.utils.to_bytes(pattern), flags_num)
filesize = os.path.getsize(path)
if bufsize == 'file':
bufsize = filesize
@ -2013,15 +2013,18 @@ def replace(path,
pre_group = get_group(path)
pre_mode = salt.utils.normalize_mode(get_mode(path))
# Avoid TypeErrors by forcing repl to be a string
repl = str(repl)
# Avoid TypeErrors by forcing repl to be bytearray related to mmap
# Replacement text may contains integer: 123 for example
repl = salt.utils.to_bytes(str(repl))
if not_found_content:
not_found_content = salt.utils.to_bytes(not_found_content)
found = False
temp_file = None
content = str(not_found_content) if not_found_content and \
content = salt.utils.to_str(not_found_content) if not_found_content and \
(prepend_if_not_found or
append_if_not_found) \
else repl
else salt.utils.to_str(repl)
try:
# First check the whole file, determine whether to make the replacement
@ -2038,7 +2041,7 @@ def replace(path,
access=mmap.ACCESS_READ)
except (ValueError, mmap.error):
# size of file in /proc is 0, but contains data
r_data = "".join(r_file)
r_data = salt.utils.to_bytes("".join(r_file))
if search_only:
# Just search; bail as early as a match is found
if re.search(cpattern, r_data):
@ -2055,7 +2058,7 @@ def replace(path,
if prepend_if_not_found or append_if_not_found:
# Search for content, to avoid pre/appending the
# content if it was pre/appended in a previous run.
if re.search('^{0}$'.format(re.escape(content)),
if re.search(salt.utils.to_bytes('^{0}$'.format(re.escape(content))),
r_data,
flags=flags_num):
# Content was found, so set found.
@ -2066,7 +2069,7 @@ def replace(path,
if show_changes or append_if_not_found or \
prepend_if_not_found:
orig_file = r_data.read(filesize).splitlines(True) \
if hasattr(r_data, 'read') \
if isinstance(r_data, mmap.mmap) \
else r_data.splitlines(True)
new_file = result.splitlines(True)
@ -2105,7 +2108,7 @@ def replace(path,
result, nrepl = re.subn(cpattern, repl,
r_data, count)
try:
w_file.write(result)
w_file.write(salt.utils.to_str(result))
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. Contents may "
@ -2125,14 +2128,14 @@ def replace(path,
if not_found_content is None:
not_found_content = repl
if prepend_if_not_found:
new_file.insert(0, not_found_content + '\n')
new_file.insert(0, not_found_content + b'\n')
else:
# append_if_not_found
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith('\n'):
new_file[-1] += '\n'
new_file.append(not_found_content + '\n')
if not new_file[-1].endswith(b'\n'):
new_file[-1] += b'\n'
new_file.append(not_found_content + b'\n')
has_changes = True
if not dry_run:
try:
@ -2145,7 +2148,7 @@ def replace(path,
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
for line in new_file:
fh_.write(line)
fh_.write(salt.utils.to_str(line))
finally:
fh_.close()
@ -2191,7 +2194,9 @@ def replace(path,
check_perms(path, None, pre_user, pre_group, pre_mode)
if show_changes:
return ''.join(difflib.unified_diff(orig_file, new_file))
orig_file_as_str = ''.join([salt.utils.to_str(x) for x in orig_file])
new_file_as_str = ''.join([salt.utils.to_str(x) for x in new_file])
return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))
return has_changes
@ -3546,9 +3551,14 @@ def source_list(source, source_hash, saltenv):
ret = (single_src, single_hash)
break
elif proto.startswith('http') or proto == 'ftp':
if __salt__['cp.cache_file'](single_src):
ret = (single_src, single_hash)
break
try:
if __salt__['cp.cache_file'](single_src):
ret = (single_src, single_hash)
break
except MinionError as exc:
# Error downloading file. Log the caught exception and
# continue on to the next source.
log.exception(exc)
elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
ret = (single_src, single_hash)
break
@ -3631,7 +3641,9 @@ def apply_template_on_contents(
grains=__opts__['grains'],
pillar=__pillar__,
salt=__salt__,
opts=__opts__)['data'].encode('utf-8')
opts=__opts__)['data']
if six.PY2:
contents = contents.encode('utf-8')
else:
ret = {}
ret['result'] = False

View File

@ -89,7 +89,9 @@ def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
@ -103,6 +105,7 @@ def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
.. code-block:: bash
salt '*' grains.get pkg:apache
salt '*' grains.get abc::def|ghi delimiter='|'
'''
if ordered is True:
grains = __grains__

View File

@ -181,7 +181,7 @@ def build(format='qcow2', path='/tmp/'):
CLI Example:
.. code-block:: bash:
.. code-block:: bash
salt myminion inspector.build
salt myminion inspector.build format=iso path=/opt/builds/
@ -209,7 +209,7 @@ def export(local=False, path="/tmp", format='qcow2'):
CLI Example:
.. code-block:: bash:
.. code-block:: bash
salt myminion inspector.export
salt myminion inspector.export format=iso path=/opt/builds/
@ -232,7 +232,7 @@ def snapshots():
CLI Example:
.. code-block:: bash:
.. code-block:: bash
salt myminion inspector.snapshots
'''
@ -254,7 +254,7 @@ def delete(all=False, *databases):
CLI example:
.. code-block:: bash:
.. code-block:: bash
salt myminion inspector.delete <ID> <ID1> <ID2>..
salt myminion inspector.delete all=True

View File

@ -103,11 +103,17 @@ def _available_services():
plist = plistlib.readPlistFromBytes(
salt.utils.to_bytes(plist_xml))
available_services[plist.Label.lower()] = {
'filename': filename,
'file_path': true_path,
'plist': plist,
}
try:
available_services[plist.Label.lower()] = {
'filename': filename,
'file_path': true_path,
'plist': plist,
}
except AttributeError:
# As of MacOS 10.12 there might be plist files without Label key
# in the searched directories. As these files do not represent
# services, they are not added to the list.
pass
return available_services

View File

@ -205,7 +205,7 @@ def pvcreate(devices, override=True, **kwargs):
Set a physical device to be used as an LVM physical volume
override
Skip devices, if they are already an LVM physical volumes
Skip devices, if they are already LVM physical volumes
CLI Examples:
@ -223,7 +223,6 @@ def pvcreate(devices, override=True, **kwargs):
for device in devices:
if not os.path.exists(device):
raise CommandExecutionError('{0} does not exist'.format(device))
# Verify pvcreate was successful
if not pvdisplay(device):
cmd.append(device)
elif not override:
@ -258,7 +257,7 @@ def pvremove(devices, override=True):
Remove a physical device being used as an LVM physical volume
override
Skip devices, if they are already not used as an LVM physical volumes
Skip devices, if they are already not used as LVM physical volumes
CLI Examples:

View File

@ -381,6 +381,13 @@ def upgrade_available(pkg, refresh=True):
def refresh_db():
    '''
    Update the ports tree and MacPorts itself by running ``port selfupdate``.

    Returns ``True`` on success (via
    ``salt.utils.mac_utils.execute_return_success``), raising on failure.

    CLI Example:

    .. code-block:: bash

        salt mac pkg.refresh_db
    '''
    cmd = ['port', 'selfupdate']
    return salt.utils.mac_utils.execute_return_success(cmd)

View File

@ -37,7 +37,7 @@ def compound(tgt, minion_id=None):
salt '*' match.compound 'L@cheese,foo and *'
'''
opts = {'grains': __grains__}
opts = {'grains': __grains__, 'pillar': __pillar__}
if minion_id is not None:
if not isinstance(minion_id, string_types):
minion_id = str(minion_id)

View File

@ -80,7 +80,7 @@ def _mine_get(load, opts):
return ret
def update(clear=False):
def update(clear=False, mine_functions=None):
'''
Execute the configured functions and send the data back up to the master.
The functions to be executed are merged from the master config, pillar and
@ -93,6 +93,34 @@ def update(clear=False):
- eth0
disk.usage: []
This function accepts the following arguments:
clear: False
Boolean flag specifying whether updating will clear the existing
mines, or will update. Default: `False` (update).
mine_functions
Update the mine data on certain functions only.
This feature can be used when updating the mine for functions
that require refresh at different intervals than the rest of
the functions specified under `mine_functions` in the
minion/master config or pillar.
A potential use would be together with the `scheduler`, for example:
.. code-block:: yaml
schedule:
lldp_mine_update:
function: mine.update
kwargs:
mine_functions:
net.lldp: []
hours: 12
In the example above, the mine for `net.lldp` would be refreshed
every 12 hours, while `network.ip_addrs` would continue to be updated
as specified in `mine_interval`.
The function cache will be populated with information from executing these
functions
@ -102,9 +130,17 @@ def update(clear=False):
salt '*' mine.update
'''
m_data = __salt__['config.merge']('mine_functions', {})
# If we don't have any mine functions configured, then we should just bail out
if not m_data:
m_data = {}
if not mine_functions:
m_data = __salt__['config.merge']('mine_functions', {})
# If we don't have any mine functions configured, then we should just bail out
if not m_data:
return
elif mine_functions and isinstance(mine_functions, list):
m_data = dict((fun, {}) for fun in mine_functions)
elif mine_functions and isinstance(mine_functions, dict):
m_data = mine_functions
else:
return
data = {}

View File

@ -57,7 +57,9 @@ def get(key,
.. versionadded:: 2014.7.0
delimiter
Specify an alternate delimiter to use when traversing a nested dict
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
@ -88,6 +90,7 @@ def get(key,
.. code-block:: bash
salt '*' pillar.get pkg:apache
salt '*' pillar.get abc::def|ghi delimiter='|'
'''
if not __opts__.get('pillar_raise_on_missing'):
if default is KeyError:
@ -252,15 +255,8 @@ def item(*args, **kwargs):
'''
.. versionadded:: 0.16.2
Return one or more pillar entries
pillar
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. these pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
Return one or more pillar entries from the :ref:`in-memory pillar data
<pillar-in-memory>`.
delimiter
Delimiter used to traverse nested dictionaries.
@ -288,14 +284,14 @@ def item(*args, **kwargs):
'''
ret = {}
default = kwargs.get('default', '')
delimiter = kwargs.get('delimiter', ':')
delimiter = kwargs.get('delimiter', DEFAULT_TARGET_DELIM)
try:
for arg in args:
ret[arg] = salt.utils.traverse_dict_and_list(__pillar__,
arg,
default,
delimiter)
arg,
default,
delimiter)
except KeyError:
pass
@ -330,9 +326,39 @@ def raw(key=None):
def ext(external, pillar=None):
'''
.. versionchanged:: 2016.3.6,2016.11.3,Nitrogen
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
CLI Example:
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out the correct value to pass:
.. code-block:: python
>>> import yaml
>>> ext_pillar = yaml.safe_load("""
... ext_pillar:
... - git:
... - mybranch https://github.com/myuser/myrepo:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
@ -342,9 +368,13 @@ def ext(external, pillar=None):
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}"
'''
if isinstance(external, six.string_types):
external = yaml.safe_load(external)

View File

@ -496,6 +496,9 @@ def total_physical_memory():
salt '*' ps.total_physical_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
try:
return psutil.virtual_memory().total
except AttributeError:

View File

@ -19,3 +19,17 @@ def fix_outage():
'''
return __proxy__['rest_sample.fix_outage']()
def get_test_string():
    '''
    Helper function to test cross-calling to the __proxy__ dunder.

    CLI Example:

    .. code-block:: bash

        salt 'rest-sample-proxy' rest_sample.get_test_string
    '''
    # Resolve the proxymodule function through the __proxy__ dunder first,
    # then invoke it, to exercise the cross-call path explicitly.
    proxy_func = __proxy__['rest_sample.test_from_state']
    return proxy_func()

View File

@ -575,7 +575,7 @@ def info(*packages, **attr):
# Convert Unix ticks into ISO time format
if key in ['build_date', 'install_date']:
try:
pkg_data[key] = datetime.datetime.fromtimestamp(int(value)).isoformat() + "Z"
pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z"
except ValueError:
log.warning('Could not convert "{0}" into Unix time'.format(value))
continue

View File

@ -38,13 +38,6 @@ __func_alias__ = {
}
def __virtual__():
if salt.utils.is_windows():
return False, 'Windows platform is not supported by this module'
return __virtualname__
def _number(text):
'''
Convert a string to a number.
@ -71,6 +64,8 @@ def procs():
salt '*' status.procs
'''
# Get the user, pid and cmd
if salt.utils.is_windows():
raise CommandExecutionError('This platform is not supported')
ret = {}
uind = 0
pind = 0
@ -119,6 +114,8 @@ def custom():
salt '*' status.custom
'''
if salt.utils.is_windows():
raise CommandExecutionError('This platform is not supported')
ret = {}
conf = __salt__['config.dot_vals']('status')
for key, val in six.iteritems(conf):
@ -587,6 +584,10 @@ def diskusage(*args):
salt '*' status.diskusage ext? # usage for ext[234] filesystems
salt '*' status.diskusage / ext? # usage for / and all ext filesystems
'''
if salt.utils.is_windows():
raise CommandExecutionError('This platform is not supported')
selected = set()
fstypes = set()
if not args:
@ -925,6 +926,8 @@ def w(): # pylint: disable=C0103
salt '*' status.w
'''
if salt.utils.is_windows():
raise CommandExecutionError('This platform is not supported')
user_list = []
users = __salt__['cmd.run']('w -h').splitlines()
for row in users:

View File

@ -512,6 +512,7 @@ def set_computer_desc(desc):
lines.append(new_line)
# time to write our changes to the file
mach_info.seek(0, 0)
mach_info.truncate()
mach_info.write(''.join(lines))
mach_info.write('\n')
return True

View File

@ -1915,6 +1915,7 @@ def list_datacenters(host, username, password, protocol=None, port=None):
.. code-block:: bash
salt '*' vsphere.list_datacenters 1.2.3.4 root bad-password
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
@ -1951,6 +1952,7 @@ def list_clusters(host, username, password, protocol=None, port=None):
.. code-block:: bash
salt '*' vsphere.list_clusters 1.2.3.4 root bad-password
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
@ -2274,6 +2276,7 @@ def list_vapps(host, username, password, protocol=None, port=None):
.. code-block:: bash
# List vapps from all minions
salt '*' vsphere.list_vapps 1.2.3.4 root bad-password
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,

View File

@ -3500,7 +3500,8 @@ def _checkAllAdmxPolicies(policy_class,
log.debug('returning non configured policies')
not_configured_policies = ALL_CLASS_POLICY_XPATH(admx_policy_definitions, registry_class=policy_class)
for policy_item in admx_policies:
not_configured_policies.remove(policy_item)
if policy_item in not_configured_policies:
not_configured_policies.remove(policy_item)
for not_configured_policy in not_configured_policies:
policy_vals[not_configured_policy.attrib['name']] = 'Not Configured'
@ -3510,6 +3511,11 @@ def _checkAllAdmxPolicies(policy_class,
not_configured_policy.attrib['name'],
return_full_policy_names,
adml_policy_resources)
log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name']))
hierarchy[not_configured_policy.attrib['name']] = _build_parent_list(not_configured_policy,
admx_policy_definitions,
return_full_policy_names,
adml_policy_resources)
for admx_policy in admx_policies:
this_key = None
this_valuename = None
@ -3822,7 +3828,7 @@ def _checkAllAdmxPolicies(policy_class,
policy_vals[full_names[policy_item]] = policy_vals.pop(policy_item)
unpathed_dict[full_names[policy_item]] = policy_item
# go back and remove any "unpathed" policies that need a full path
for path_needed in pathed_dict.keys():
for path_needed in unpathed_dict.keys():
# remove the item with the same full name and re-add it w/a path'd version
full_path_list = hierarchy[unpathed_dict[path_needed]]
full_path_list.reverse()
@ -4939,9 +4945,9 @@ def set_computer_policy(name,
pol = {}
pol[name] = setting
ret = set_(computer_policy=pol,
user_policy=None,
cumulative_rights_assignments=cumulative_rights_assignments,
adml_language=adml_language)
user_policy=None,
cumulative_rights_assignments=cumulative_rights_assignments,
adml_language=adml_language)
return ret
@ -4976,9 +4982,9 @@ def set_user_policy(name,
pol = {}
pol[name] = setting
ret = set_(user_policy=pol,
computer_policy=None,
cumulative_rights_assignments=True,
adml_language=adml_language)
computer_policy=None,
cumulative_rights_assignments=True,
adml_language=adml_language)
return ret

View File

@ -1419,6 +1419,9 @@ def create_certificate(
# If neither public_key or csr are included, this cert is self-signed
if 'public_key' not in kwargs and 'csr' not in kwargs:
kwargs['public_key'] = kwargs['signing_private_key']
if 'signing_private_key_passphrase' in kwargs:
kwargs['public_key_passphrase'] = kwargs[
'signing_private_key_passphrase']
csrexts = {}
if 'csr' in kwargs:

View File

@ -270,13 +270,14 @@ class Pillar(object):
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
pillar=None, pillarenv=None, rend=None):
self.minion_id = minion_id
self.ext = ext
if pillarenv is None:
if opts.get('pillarenv_from_saltenv', False):
opts['pillarenv'] = saltenv
# Store the file_roots path so we can restore later. Issue 5449
self.actual_file_roots = opts['file_roots']
# use the local file client
self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, ext=ext, pillarenv=pillarenv)
self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv)
self.saltenv = saltenv
self.client = salt.fileclient.get_file_client(self.opts, True)
@ -318,16 +319,39 @@ class Pillar(object):
else:
log.error('Pillar data must be a dictionary')
def __valid_ext(self, ext):
def __valid_on_demand_ext_pillar(self, opts):
'''
Check to see if the on demand external pillar is allowed
'''
if not isinstance(ext, dict):
return {}
valid = set(('libvirt', 'virtkey'))
if any(key not in valid for key in ext):
return {}
return ext
if not isinstance(self.ext, dict):
log.error(
'On-demand pillar %s is not formatted as a dictionary',
self.ext
)
return False
on_demand = opts.get('on_demand_ext_pillar', [])
try:
invalid_on_demand = set([x for x in self.ext if x not in on_demand])
except TypeError:
# Prevent traceback when on_demand_ext_pillar option is malformed
log.error(
'The \'on_demand_ext_pillar\' configuration option is '
'malformed, it should be a list of ext_pillar module names'
)
return False
if invalid_on_demand:
log.error(
'The following ext_pillar modules are not allowed for '
'on-demand pillar data: %s. Valid on-demand ext_pillar '
'modules are: %s. The valid modules can be adjusted by '
'setting the \'on_demand_ext_pillar\' config option.',
', '.join(sorted(invalid_on_demand)),
', '.join(on_demand),
)
return False
return True
def __gen_opts(self, opts_in, grains, saltenv=None, ext=None, pillarenv=None):
'''
@ -351,11 +375,11 @@ class Pillar(object):
opts['state_top'] = salt.utils.url.create(opts['state_top'][1:])
else:
opts['state_top'] = salt.utils.url.create(opts['state_top'])
if self.__valid_ext(ext):
if self.ext and self.__valid_on_demand_ext_pillar(opts):
if 'ext_pillar' in opts:
opts['ext_pillar'].append(ext)
opts['ext_pillar'].append(self.ext)
else:
opts['ext_pillar'] = [ext]
opts['ext_pillar'] = [self.ext]
return opts
def _get_envs(self):
@ -742,6 +766,21 @@ class Pillar(object):
'''
if errors is None:
errors = []
try:
# Make sure that on-demand git_pillar is fetched before we try to
# compile the pillar data. git_pillar will fetch a remote when
# the git ext_pillar() func is run, but only for masterless.
if self.ext and 'git' in self.ext \
and self.opts.get('__role') != 'minion':
# Avoid circular import
import salt.utils.gitfs
from salt.pillar.git_pillar import PER_REMOTE_OVERRIDES
git_pillar = salt.utils.gitfs.GitPillar(self.opts)
git_pillar.init_remotes(self.ext['git'], PER_REMOTE_OVERRIDES)
git_pillar.fetch_remotes()
except TypeError:
# Handle malformed ext_pillar
pass
if 'ext_pillar' not in self.opts:
return pillar, errors
if not isinstance(self.opts['ext_pillar'], list):

View File

@ -30,21 +30,38 @@ statically, as above, or as an environment variable:
$ export VAULT_TOKEN=0123456789abcdef
After the profile is created, configure the external pillar system to use it.
A path must also be specified so that vault knows where to look.
After the profile is created, edit the salt master config file and configure
the external pillar system to use it. A path pointing to the needed vault key
must also be specified so that vault knows where to look. Vault does not apply
a recursive list, so each required key needs to be individually mapped.
.. code-block:: yaml
ext_pillar:
- vault: my_vault_config path=secret/salt
- vault: myvault path=secret/salt
- vault: myvault path=secret/another_key
Each key needs to have all the key-value pairs with the names you
require. Avoid naming every key 'password', as they will collide:
.. code-block:: bash
$ vault write secret/salt auth=my_password master=127.0.0.1
You can then use normal pillar requests to get each key pair directly from
pillar root. Example:
.. code-block:: bash
$ salt-ssh '*' pillar.get auth
Using these configuration profiles, multiple vault sources may also be used:
.. code-block:: yaml
ext_pillar:
- vault: my_vault_config
- vault: my_other_vault_config
- vault: myvault path=secret/salt
- vault: my_other_vault path=secret/root
'''
# import python libs

View File

@ -200,3 +200,12 @@ def shutdown(opts):
For this proxy shutdown is a no-op
'''
log.debug('rest_sample proxy shutdown() called...')
def test_from_state():
    '''
    Test function so we have something to call from a state

    :return: a fixed marker string (``'testvalue'``)
    '''
    # Leave a trace in the debug log so the cross-call is visible.
    log.debug('test_from_state called')
    marker = 'testvalue'
    return marker

View File

@ -132,9 +132,7 @@ __virtualname__ = 'smtp'
def __virtual__():
if HAS_GNUPG:
return __virtualname__
return False, 'Could not import smtp returner; gnupg is not installed.'
return __virtualname__
def _get_options(ret=None):
@ -219,18 +217,20 @@ def returner(ret):
input_data=template,
**ret)
if HAS_GNUPG and gpgowner:
gpg = gnupg.GPG(gnupghome=os.path.expanduser('~{0}/.gnupg'.format(gpgowner)),
options=['--trust-model always'])
encrypted_data = gpg.encrypt(content, to_addrs)
if encrypted_data.ok:
log.debug('smtp_return: Encryption successful')
content = str(encrypted_data)
if gpgowner:
if HAS_GNUPG:
gpg = gnupg.GPG(gnupghome=os.path.expanduser('~{0}/.gnupg'.format(gpgowner)),
options=['--trust-model always'])
encrypted_data = gpg.encrypt(content, to_addrs)
if encrypted_data.ok:
log.debug('smtp_return: Encryption successful')
content = str(encrypted_data)
else:
log.error('smtp_return: Encryption failed, only an error message will be sent')
content = 'Encryption failed, the return data was not sent.\r\n\r\n{0}\r\n{1}'.format(
encrypted_data.status, encrypted_data.stderr)
else:
log.error('smtp_return: Encryption failed, only an error message will be sent')
content = 'Encryption failed, the return data was not sent.\r\n\r\n{0}\r\n{1}'.format(
encrypted_data.status, encrypted_data.stderr)
log.error("gnupg python module is required in order to user gpgowner in smtp returner ; ignoring gpgowner configuration for now")
if isinstance(content, six.moves.StringIO):
content = content.read()

View File

@ -27,3 +27,42 @@ def get(tgt, fun, tgt_type='glob'):
'''
ret = salt.utils.minions.mine_get(tgt, fun, tgt_type, __opts__)
return ret
def update(tgt,
           tgt_type='glob',
           clear=False,
           mine_functions=None):
    '''
    Update the mine data on a certain group of minions.

    tgt
        Which minions to target for the execution.

    tgt_type: ``glob``
        The type of ``tgt``.

    clear: ``False``
        Boolean flag specifying whether updating will clear the existing
        mines, or will update. Default: ``False`` (update).

    mine_functions
        Update the mine data on certain functions only.
        This feature can be used when updating the mine for functions
        that require refresh at different intervals than the rest of
        the functions specified under ``mine_functions`` in the
        minion/master config or pillar.

    CLI Example:

    .. code-block:: bash

        salt-run mine.update '*'
        salt-run mine.update 'juniper-edges' tgt_type='nodegroup'
    '''
    # Fan the mine.update execution function out to the targeted minions
    # through the generic salt.execute runner helper.
    return __salt__['salt.execute'](tgt,
                                    'mine.update',
                                    tgt_type=tgt_type,
                                    clear=clear,
                                    mine_functions=mine_functions)

View File

@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
'''
NET Finder
==========
.. versionadded:: Nitrogen
A runner to find network details easily and fast.
It's smart enough to know what you are looking for.
@ -66,8 +67,6 @@ Configuration
- jsrv
- fxp0
outputter: yaml
.. versionadded:: Nitrogen
'''
from __future__ import print_function
@ -78,20 +77,19 @@ from __future__ import unicode_literals
import salt.output
from salt.ext import six
from salt.ext.six.moves import map
from salt.exceptions import SaltSystemExit
# Import third party libs
try:
from netaddr import IPNetwork # netaddr is already required by napalm-base
from netaddr.core import AddrFormatError
from napalm_base import helpers as napalm_helpers
HAS_NAPALM_BASE = True
except ImportError:
# sorry
raise SaltSystemExit('Please install napalm-base')
HAS_NAPALM_BASE = False
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
_DEFAULT_TARGET = '*'
_DEFAULT_EXPR_FORM = 'glob'
@ -100,16 +98,26 @@ _DEFAULT_IGNORE_INTF = []
_DEFAULT_DISPLAY = True
_DEFAULT_OUTPUTTER = 'table'
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# will cache several details to avoid loading them several times from the mines.
_CACHE = {}
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Define the module's virtual name
__virtualname__ = 'net'
def __virtual__():
    '''
    Only expose this runner when napalm-base was importable.
    '''
    # Guard clause: bail out with the load-failure reason if the optional
    # dependency is missing; otherwise expose the module's virtual name.
    if not HAS_NAPALM_BASE:
        return (False, 'The napalm-base module could not be imported')
    return __virtualname__
def _get_net_runner_opts():
@ -229,12 +237,18 @@ def _find_interfaces_mac(ip): # pylint: disable=invalid-name
return ('', '', '')
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def interfaces(device=None, interface=None, title=None, pattern=None, ipnet=None, best=True, display=_DEFAULT_DISPLAY):
def interfaces(device=None,
interface=None,
title=None,
pattern=None,
ipnet=None,
best=True,
display=_DEFAULT_DISPLAY):
'''
Search for interfaces details in the following mine functions:
@ -416,7 +430,11 @@ def interfaces(device=None, interface=None, title=None, pattern=None, ipnet=None
return _display_runner(rows, labels, title, display=display)
def findarp(device=None, interface=None, mac=None, ip=None, display=_DEFAULT_DISPLAY): # pylint: disable=invalid-name
def findarp(device=None,
interface=None,
mac=None,
ip=None,
display=_DEFAULT_DISPLAY): # pylint: disable=invalid-name
'''
Search for entries in the ARP tables using the following mine functions:
@ -593,7 +611,12 @@ def findmac(device=None, mac=None, interface=None, vlan=None, display=_DEFAULT_D
return _display_runner(rows, labels, title, display=display)
def lldp(device=None, interface=None, title=None, pattern=None, chassis=None, display=_DEFAULT_DISPLAY):
def lldp(device=None,
interface=None,
title=None,
pattern=None,
chassis=None,
display=_DEFAULT_DISPLAY):
'''
Search in the LLDP neighbors, using the following mine functions:

View File

@ -35,7 +35,9 @@ from __future__ import print_function
import logging
# import salt libs
import salt.client
from salt.loader import minion_mods, utils
from salt.exceptions import SaltClientError
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@ -65,3 +67,48 @@ def cmd(fun, *args, **kwargs):
return minion_mods(
__opts__,
utils=utils(__opts__)).get(fun)(*args, **kws)
def execute(tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            jid='',
            kwarg=None,
            **kwargs):
    '''
    Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``.
    Parameter ``fun`` is the name of execution module function to call.

    This function should mainly be used as a helper for runner modules,
    in order to avoid redundant code.
    For example, when a runner needs to execute a certain function on
    arbitrary groups of minions, it only has to:

    .. code-block:: python

        ret1 = __salt__['salt.execute']('*', 'mod.fun')
        ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup')

    Returns the per-minion result dictionary from ``LocalClient.cmd``, or
    an empty dict if the client raised a ``SaltClientError``.

    .. versionadded:: Nitrogen
    '''
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        # Return the cmd() result directly instead of reassigning it to a
        # local named ``ret``, which shadowed the returner parameter above.
        return client.cmd(tgt,
                          fun,
                          arg=arg,
                          timeout=timeout or __opts__['timeout'],
                          tgt_type=tgt_type,  # no warn_until, as this is introduced only in Nitrogen
                          ret=ret,
                          jid=jid,
                          kwarg=kwarg,
                          **kwargs)
    except SaltClientError as client_error:
        # Lazy %-style arguments: formatting only happens if the record is
        # actually emitted (str.format here was evaluated unconditionally).
        log.error('Error while executing %s on %s (%s)', fun, tgt, tgt_type)
        log.error(client_error)
        return {}

View File

@ -663,7 +663,7 @@ class State(object):
self._pillar_enc = pillar_enc
self.opts['pillar'] = self._gather_pillar()
self.state_con = context or {}
self.load_modules(proxy=proxy)
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
@ -863,7 +863,8 @@ class State(object):
if self.states_loader == 'thorium':
self.states = salt.loader.thorium(self.opts, self.functions, {}) # TODO: Add runners
else:
self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
self.states = salt.loader.states(self.opts, self.functions, self.utils,
self.serializers, proxy=self.proxy)
def load_modules(self, data=None, proxy=None):
'''
@ -873,7 +874,7 @@ class State(object):
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, self.state_con,
utils=self.utils,
proxy=proxy)
proxy=self.proxy)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
@ -913,7 +914,7 @@ class State(object):
log.error('Error encountered during module reload. Modules were not reloaded.')
except TypeError:
log.error('Error encountered during module reload. Modules were not reloaded.')
self.load_modules(proxy=self.proxy)
self.load_modules()
if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
self.functions['saltutil.refresh_modules']()
@ -3666,6 +3667,7 @@ class HighState(BaseHighState):
mocked=mocked,
loader=loader)
self.matcher = salt.minion.Matcher(self.opts)
self.proxy = proxy
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}

View File

@ -80,11 +80,12 @@ def __virtual__():
return 'boto_lambda' if 'boto_lambda.function_exists' in __salt__ else False
def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None, S3Bucket=None,
S3Key=None, S3ObjectVersion=None,
Description='', Timeout=3, MemorySize=128,
Permissions=None, RoleRetries=5,
region=None, key=None, keyid=None, profile=None, VpcConfig=None):
def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None,
S3Bucket=None, S3Key=None, S3ObjectVersion=None,
Description='', Timeout=3, MemorySize=128,
Permissions=None, RoleRetries=5, region=None, key=None,
keyid=None, profile=None, VpcConfig=None,
Environment=None):
'''
Ensure function exists.
@ -157,6 +158,17 @@ def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None, S
During that time function creation may fail; this state will
atuomatically retry this number of times. The default is 5.
Environment
The parent object that contains your environment's configuration
settings. This is a dictionary of the form:
{
'Variables': {
'VariableName': 'VariableValue'
}
}
.. versionadded:: Nitrogen
region
Region to connect to.
@ -185,67 +197,72 @@ def function_present(name, FunctionName, Runtime, Role, Handler, ZipFile=None, S
keyset = set(permission.keys())
if not keyset.issuperset(required_keys):
raise SaltInvocationError('{0} are required for each permission '
'specification'.format(', '.join(required_keys)))
'specification'.format(', '.join(required_keys)))
keyset = keyset - required_keys
keyset = keyset - optional_keys
if bool(keyset):
raise SaltInvocationError('Invalid permission value {0}'.format(', '.join(keyset)))
raise SaltInvocationError(
'Invalid permission value {0}'.format(', '.join(keyset)))
r = __salt__['boto_lambda.function_exists'](FunctionName=FunctionName, region=region,
key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.function_exists'](
FunctionName=FunctionName, region=region,
key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create function: '
'{0}.'.format(r['error']['message']))
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Function {0} is set to be created.'.format(FunctionName)
ret['comment'] = 'Function {0} is set to be created.'.format(
FunctionName)
ret['result'] = None
return ret
r = __salt__['boto_lambda.create_function'](FunctionName=FunctionName, Runtime=Runtime,
Role=Role, Handler=Handler,
ZipFile=ZipFile, S3Bucket=S3Bucket,
S3Key=S3Key,
S3ObjectVersion=S3ObjectVersion,
Description=Description,
Timeout=Timeout, MemorySize=MemorySize,
VpcConfig=VpcConfig,
WaitForRole=True,
RoleRetries=RoleRetries,
region=region, key=key,
keyid=keyid, profile=profile)
r = __salt__['boto_lambda.create_function'](
FunctionName=FunctionName, Runtime=Runtime, Role=Role,
Handler=Handler, ZipFile=ZipFile, S3Bucket=S3Bucket, S3Key=S3Key,
S3ObjectVersion=S3ObjectVersion, Description=Description,
Timeout=Timeout, MemorySize=MemorySize, VpcConfig=VpcConfig,
Environment=Environment, WaitForRole=True, RoleRetries=RoleRetries,
region=region, key=key, keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create function: '
'{0}.'.format(r['error']['message']))
return ret
if Permissions:
for sid, permission in six.iteritems(Permissions):
r = __salt__['boto_lambda.add_permission'](FunctionName=FunctionName,
StatementId=sid,
region=region, key=key,
keyid=keyid, profile=profile,
**permission)
r = __salt__['boto_lambda.add_permission'](
FunctionName=FunctionName, StatementId=sid,
region=region, key=key, keyid=keyid, profile=profile,
**permission)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to create function: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create function: '
'{0}.'.format(r['error']['message']))
_describe = __salt__['boto_lambda.describe_function'](FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
_describe['function']['Permissions'] = __salt__['boto_lambda.get_permissions'](FunctionName,
region=region, key=key, keyid=keyid, profile=profile)['permissions']
_describe = __salt__['boto_lambda.describe_function'](
FunctionName, region=region, key=key, keyid=keyid, profile=profile)
_describe['function']['Permissions'] = (
__salt__['boto_lambda.get_permissions'](
FunctionName, region=region, key=key, keyid=keyid,
profile=profile)['permissions'])
ret['changes']['old'] = {'function': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Function {0} created.'.format(FunctionName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Function {0} is present.'.format(FunctionName)])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function {0} is present.'.format(FunctionName)])
ret['changes'] = {}
# function exists, ensure config matches
_ret = _function_config_present(FunctionName, Role, Handler, Description, Timeout,
MemorySize, VpcConfig, region, key, keyid, profile, RoleRetries)
_ret = _function_config_present(FunctionName, Role, Handler, Description,
Timeout, MemorySize, VpcConfig,
Environment, region, key, keyid,
profile, RoleRetries)
if not _ret.get('result'):
ret['result'] = False
ret['comment'] = _ret['comment']
@ -285,10 +302,12 @@ def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
def _function_config_present(FunctionName, Role, Handler, Description, Timeout,
MemorySize, VpcConfig, region, key, keyid, profile, RoleRetries):
MemorySize, VpcConfig, Environment, region,
key, keyid, profile, RoleRetries):
ret = {'result': True, 'comment': '', 'changes': {}}
func = __salt__['boto_lambda.describe_function'](FunctionName,
region=region, key=key, keyid=keyid, profile=profile)['function']
func = __salt__['boto_lambda.describe_function'](
FunctionName, region=region,
key=key, keyid=keyid, profile=profile)['function']
role_arn = _get_role_arn(Role, region, key, keyid, profile)
need_update = False
options = {'Role': 'role_arn',
@ -309,33 +328,44 @@ def _function_config_present(FunctionName, Role, Handler, Description, Timeout,
if oldval != VpcConfig:
need_update = True
ret['changes'].setdefault('new', {})['VpcConfig'] = VpcConfig
ret['changes'].setdefault('old', {})['VpcConfig'] = func.get('VpcConfig')
ret['changes'].setdefault(
'old', {})['VpcConfig'] = func.get('VpcConfig')
if Environment is not None:
if func.get('Environment') != Environment:
need_update = True
ret['changes'].setdefault('new', {})['Environment'] = Environment
ret['changes'].setdefault('old', {})['Environment'] = func.get(
'Environment')
if need_update:
ret['comment'] = os.linesep.join([ret['comment'], 'Function config to be modified'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function config to be modified'])
if __opts__['test']:
msg = 'Function {0} set to be modified.'.format(FunctionName)
ret['comment'] = msg
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_function_config'](FunctionName=FunctionName,
Role=Role, Handler=Handler, Description=Description,
Timeout=Timeout, MemorySize=MemorySize,
VpcConfig=VpcConfig,
region=region, key=key,
keyid=keyid, profile=profile,
WaitForRole=True, RoleRetries=RoleRetries)
_r = __salt__['boto_lambda.update_function_config'](
FunctionName=FunctionName, Role=Role, Handler=Handler,
Description=Description, Timeout=Timeout, MemorySize=MemorySize,
VpcConfig=VpcConfig, Environment=Environment, region=region,
key=key, keyid=keyid, profile=profile, WaitForRole=True,
RoleRetries=RoleRetries)
if not _r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update function: {0}.'.format(_r['error']['message'])
ret['comment'] = ('Failed to update function: '
'{0}.'.format(_r['error']['message']))
ret['changes'] = {}
return ret
def _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key, S3ObjectVersion,
region, key, keyid, profile):
def _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key,
S3ObjectVersion, region, key, keyid, profile):
ret = {'result': True, 'comment': '', 'changes': {}}
func = __salt__['boto_lambda.describe_function'](FunctionName,
region=region, key=key, keyid=keyid, profile=profile)['function']
func = __salt__['boto_lambda.describe_function'](
FunctionName, region=region,
key=key, keyid=keyid, profile=profile)['function']
update = False
if ZipFile:
size = os.path.getsize(ZipFile)
@ -363,18 +393,21 @@ def _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key, S3ObjectVersi
'CodeSha256': func['CodeSha256'],
'CodeSize': func['CodeSize'],
}
func = __salt__['boto_lambda.update_function_code'](FunctionName, ZipFile, S3Bucket,
func = __salt__['boto_lambda.update_function_code'](
FunctionName, ZipFile, S3Bucket,
S3Key, S3ObjectVersion,
region=region, key=key, keyid=keyid, profile=profile)
if not func.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update function: {0}.'.format(func['error']['message'])
ret['comment'] = ('Failed to update function: '
'{0}.'.format(func['error']['message']))
ret['changes'] = {}
return ret
func = func['function']
if func['CodeSha256'] != ret['changes']['old']['CodeSha256'] or \
func['CodeSize'] != ret['changes']['old']['CodeSize']:
ret['comment'] = os.linesep.join([ret['comment'], 'Function code to be modified'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function code to be modified'])
ret['changes']['new'] = {
'CodeSha256': func['CodeSha256'],
'CodeSize': func['CodeSize'],
@ -387,14 +420,16 @@ def _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key, S3ObjectVersi
def _function_permissions_present(FunctionName, Permissions,
region, key, keyid, profile):
ret = {'result': True, 'comment': '', 'changes': {}}
curr_permissions = __salt__['boto_lambda.get_permissions'](FunctionName,
region=region, key=key, keyid=keyid, profile=profile).get('permissions')
curr_permissions = __salt__['boto_lambda.get_permissions'](
FunctionName, region=region,
key=key, keyid=keyid, profile=profile).get('permissions')
if curr_permissions is None:
curr_permissions = {}
need_update = False
diffs = salt.utils.compare_dicts(curr_permissions, Permissions or {})
if bool(diffs):
ret['comment'] = os.linesep.join([ret['comment'], 'Function permissions to be modified'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function permissions to be modified'])
if __opts__['test']:
msg = 'Function {0} set to be modified.'.format(FunctionName)
ret['comment'] = msg
@ -403,30 +438,29 @@ def _function_permissions_present(FunctionName, Permissions,
for sid, diff in six.iteritems(diffs):
if diff.get('old', '') != '':
# There's a permssion that needs to be removed
_r = __salt__['boto_lambda.remove_permission'](FunctionName=FunctionName,
StatementId=sid,
region=region, key=key,
keyid=keyid, profile=profile)
ret['changes'].setdefault('new', {}).setdefault('Permissions',
{})[sid] = {}
ret['changes'].setdefault('old', {}).setdefault('Permissions',
{})[sid] = diff['old']
_r = __salt__['boto_lambda.remove_permission'](
FunctionName=FunctionName, StatementId=sid,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes'].setdefault(
'new', {}).setdefault('Permissions', {})[sid] = {}
ret['changes'].setdefault(
'old', {}).setdefault('Permissions', {})[sid] = diff['old']
if diff.get('new', '') != '':
# New permission information needs to be added
_r = __salt__['boto_lambda.add_permission'](FunctionName=FunctionName,
StatementId=sid,
region=region, key=key,
keyid=keyid, profile=profile,
**diff['new'])
ret['changes'].setdefault('new', {}).setdefault('Permissions',
{})[sid] = diff['new']
oldperms = ret['changes'].setdefault('old', {}).setdefault('Permissions',
{})
_r = __salt__['boto_lambda.add_permission'](
FunctionName=FunctionName, StatementId=sid,
region=region, key=key, keyid=keyid, profile=profile,
**diff['new'])
ret['changes'].setdefault(
'new', {}).setdefault('Permissions', {})[sid] = diff['new']
oldperms = ret['changes'].setdefault(
'old', {}).setdefault('Permissions', {})
if sid not in oldperms:
oldperms[sid] = {}
if not _r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update function: {0}.'.format(_r['error']['message'])
ret['comment'] = ('Failed to update function: '
'{0}.'.format(_r['error']['message']))
ret['changes'] = {}
return ret
@ -461,11 +495,12 @@ def function_absent(name, FunctionName, region=None, key=None, keyid=None, profi
'changes': {}
}
r = __salt__['boto_lambda.function_exists'](FunctionName, region=region,
key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.function_exists'](
FunctionName, region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete function: {0}.'.format(r['error']['message'])
ret['comment'] = 'Failed to delete function: {0}.'.format(r['error'][
'message'])
return ret
if r and not r['exists']:
@ -473,15 +508,17 @@ def function_absent(name, FunctionName, region=None, key=None, keyid=None, profi
return ret
if __opts__['test']:
ret['comment'] = 'Function {0} is set to be removed.'.format(FunctionName)
ret['comment'] = 'Function {0} is set to be removed.'.format(
FunctionName)
ret['result'] = None
return ret
r = __salt__['boto_lambda.delete_function'](FunctionName,
region=region, key=key,
keyid=keyid, profile=profile)
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete function: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to delete function: '
'{0}.'.format(r['error']['message']))
return ret
ret['changes']['old'] = {'function': FunctionName}
ret['changes']['new'] = {'function': None}
@ -490,7 +527,7 @@ def function_absent(name, FunctionName, region=None, key=None, keyid=None, profi
def alias_present(name, FunctionName, Name, FunctionVersion, Description='',
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Ensure alias exists.
@ -529,12 +566,14 @@ def alias_present(name, FunctionName, Name, FunctionVersion, Description='',
'changes': {}
}
r = __salt__['boto_lambda.alias_exists'](FunctionName=FunctionName, Name=Name, region=region,
key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.alias_exists'](
FunctionName=FunctionName, Name=Name, region=region,
key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create alias: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create alias: '
'{0}.'.format(r['error']['message']))
return ret
if not r.get('exists'):
@ -543,24 +582,27 @@ def alias_present(name, FunctionName, Name, FunctionVersion, Description='',
ret['result'] = None
return ret
r = __salt__['boto_lambda.create_alias'](FunctionName, Name,
FunctionVersion, Description,
region, key, keyid, profile)
FunctionVersion, Description,
region, key, keyid, profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create alias: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create alias: '
'{0}.'.format(r['error']['message']))
return ret
_describe = __salt__['boto_lambda.describe_alias'](FunctionName, Name, region=region, key=key,
keyid=keyid, profile=profile)
_describe = __salt__['boto_lambda.describe_alias'](
FunctionName, Name, region=region, key=key,
keyid=keyid, profile=profile)
ret['changes']['old'] = {'alias': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Alias {0} created.'.format(Name)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Alias {0} is present.'.format(Name)])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Alias {0} is present.'.format(Name)])
ret['changes'] = {}
_describe = __salt__['boto_lambda.describe_alias'](FunctionName, Name,
region=region, key=key, keyid=keyid,
profile=profile)['alias']
_describe = __salt__['boto_lambda.describe_alias'](
FunctionName, Name, region=region, key=key, keyid=keyid,
profile=profile)['alias']
need_update = False
options = {'FunctionVersion': 'FunctionVersion',
@ -572,19 +614,21 @@ def alias_present(name, FunctionName, Name, FunctionVersion, Description='',
ret['changes'].setdefault('new', {})[var] = locals()[var]
ret['changes'].setdefault('old', {})[var] = _describe[val]
if need_update:
ret['comment'] = os.linesep.join([ret['comment'], 'Alias config to be modified'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Alias config to be modified'])
if __opts__['test']:
msg = 'Alias {0} set to be modified.'.format(Name)
ret['comment'] = msg
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_alias'](FunctionName=FunctionName, Name=Name,
FunctionVersion=FunctionVersion, Description=Description,
region=region, key=key,
keyid=keyid, profile=profile)
_r = __salt__['boto_lambda.update_alias'](
FunctionName=FunctionName, Name=Name,
FunctionVersion=FunctionVersion, Description=Description,
region=region, key=key, keyid=keyid, profile=profile)
if not _r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update alias: {0}.'.format(_r['error']['message'])
ret['comment'] = ('Failed to update alias: '
'{0}.'.format(_r['error']['message']))
ret['changes'] = {}
return ret
@ -622,11 +666,13 @@ def alias_absent(name, FunctionName, Name, region=None, key=None, keyid=None, pr
'changes': {}
}
r = __salt__['boto_lambda.alias_exists'](FunctionName, Name, region=region,
key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.alias_exists'](
FunctionName, Name, region=region, key=key, keyid=keyid,
profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete alias: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to delete alias: '
'{0}.'.format(r['error']['message']))
return ret
if r and not r['exists']:
@ -637,12 +683,13 @@ def alias_absent(name, FunctionName, Name, region=None, key=None, keyid=None, pr
ret['comment'] = 'Alias {0} is set to be removed.'.format(Name)
ret['result'] = None
return ret
r = __salt__['boto_lambda.delete_alias'](FunctionName, Name,
region=region, key=key,
keyid=keyid, profile=profile)
r = __salt__['boto_lambda.delete_alias'](
FunctionName, Name, region=region, key=key, keyid=keyid,
profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete alias: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to delete alias: '
'{0}.'.format(r['error']['message']))
return ret
ret['changes']['old'] = {'alias': Name}
ret['changes']['new'] = {'alias': None}
@ -664,9 +711,10 @@ def _get_function_arn(name, region=None, key=None, keyid=None, profile=None):
return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account_id, name)
def event_source_mapping_present(name, EventSourceArn, FunctionName, StartingPosition,
Enabled=True, BatchSize=100,
region=None, key=None, keyid=None, profile=None):
def event_source_mapping_present(name, EventSourceArn, FunctionName,
StartingPosition, Enabled=True, BatchSize=100,
region=None, key=None, keyid=None,
profile=None):
'''
Ensure event source mapping exists.
@ -722,44 +770,49 @@ def event_source_mapping_present(name, EventSourceArn, FunctionName, StartingPos
'changes': {}
}
r = __salt__['boto_lambda.event_source_mapping_exists'](EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.event_source_mapping_exists'](
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create event source mapping: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create event source mapping: '
'{0}.'.format(r['error']['message']))
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Event source mapping {0} is set to be created.'.format(FunctionName)
ret['comment'] = ('Event source mapping {0} is set '
'to be created.'.format(FunctionName))
ret['result'] = None
return ret
r = __salt__['boto_lambda.create_event_source_mapping'](EventSourceArn=EventSourceArn,
FunctionName=FunctionName, StartingPosition=StartingPosition,
Enabled=Enabled, BatchSize=BatchSize,
region=region, key=key, keyid=keyid, profile=profile)
r = __salt__['boto_lambda.create_event_source_mapping'](
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
StartingPosition=StartingPosition, Enabled=Enabled,
BatchSize=BatchSize, region=region, key=key, keyid=keyid,
profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create event source mapping: {0}.'.format(r['error']['message'])
ret['comment'] = ('Failed to create event source mapping: '
'{0}.'.format(r['error']['message']))
return ret
_describe = __salt__['boto_lambda.describe_event_source_mapping'](
EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
ret['name'] = _describe['event_source_mapping']['UUID']
ret['changes']['old'] = {'event_source_mapping': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Event source mapping {0} created.'.format(ret['name'])
ret['comment'] = ('Event source mapping {0} '
'created.'.format(ret['name']))
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Event source mapping is present.'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Event source mapping is present.'])
ret['changes'] = {}
_describe = __salt__['boto_lambda.describe_event_source_mapping'](
EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)['event_source_mapping']
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
region=region, key=key, keyid=keyid,
profile=profile)['event_source_mapping']
need_update = False
options = {'BatchSize': 'BatchSize'}
@ -770,35 +823,38 @@ def event_source_mapping_present(name, EventSourceArn, FunctionName, StartingPos
ret['changes'].setdefault('new', {})[var] = locals()[var]
ret['changes'].setdefault('old', {})[var] = _describe[val]
# verify FunctionName against FunctionArn
function_arn = _get_function_arn(FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
function_arn = _get_function_arn(FunctionName, region=region,
key=key, keyid=keyid, profile=profile)
if _describe['FunctionArn'] != function_arn:
need_update = True
ret['changes'].setdefault('new', {})['FunctionArn'] = function_arn
ret['changes'].setdefault('old', {})['FunctionArn'] = _describe['FunctionArn']
# TODO check for 'Enabled', since it doesn't directly map to a specific state
ret['changes'].setdefault('old', {})['FunctionArn'] = _describe[
'FunctionArn']
# TODO check for 'Enabled', since it doesn't directly map to a specific
# state
if need_update:
ret['comment'] = os.linesep.join([ret['comment'], 'Event source mapping to be modified'])
ret['comment'] = os.linesep.join(
[ret['comment'], 'Event source mapping to be modified'])
if __opts__['test']:
msg = 'Event source mapping {0} set to be modified.'.format(_describe['UUID'])
msg = ('Event source mapping {0} set to be '
'modified.'.format(_describe['UUID']))
ret['comment'] = msg
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_event_source_mapping'](UUID=_describe['UUID'],
FunctionName=FunctionName,
Enabled=Enabled,
BatchSize=BatchSize,
region=region, key=key,
keyid=keyid, profile=profile)
_r = __salt__['boto_lambda.update_event_source_mapping'](
UUID=_describe['UUID'], FunctionName=FunctionName,
Enabled=Enabled, BatchSize=BatchSize,
region=region, key=key, keyid=keyid, profile=profile)
if not _r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update mapping: {0}.'.format(_r['error']['message'])
ret['comment'] = ('Failed to update mapping: '
'{0}.'.format(_r['error']['message']))
ret['changes'] = {}
return ret
def event_source_mapping_absent(name, EventSourceArn, FunctionName,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None):
'''
Ensure event source mapping with passed properties is absent.
@ -831,12 +887,13 @@ def event_source_mapping_absent(name, EventSourceArn, FunctionName,
'changes': {}
}
desc = __salt__['boto_lambda.describe_event_source_mapping'](EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
desc = __salt__['boto_lambda.describe_event_source_mapping'](
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in desc:
ret['result'] = False
ret['comment'] = 'Failed to delete event source mapping: {0}.'.format(desc['error']['message'])
ret['comment'] = ('Failed to delete event source mapping: '
'{0}.'.format(desc['error']['message']))
return ret
if not desc.get('event_source_mapping'):
@ -848,13 +905,13 @@ def event_source_mapping_absent(name, EventSourceArn, FunctionName,
ret['comment'] = 'Event source mapping is set to be removed.'
ret['result'] = None
return ret
r = __salt__['boto_lambda.delete_event_source_mapping'](EventSourceArn=EventSourceArn,
FunctionName=FunctionName,
region=region, key=key,
keyid=keyid, profile=profile)
r = __salt__['boto_lambda.delete_event_source_mapping'](
EventSourceArn=EventSourceArn, FunctionName=FunctionName,
region=region, key=key, keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete event source mapping: {0}.'.format(r['error']['message'])
ret['comment'] = 'Failed to delete event source mapping: {0}.'.format(r['error'][
'message'])
return ret
ret['changes']['old'] = desc
ret['changes']['new'] = {'event_source_mapping': None}

View File

@ -2225,10 +2225,10 @@ def managed(name,
follow_symlinks,
skip_verify,
keep_mode,
win_owner,
win_perms,
win_deny_perms,
win_inheritance,
win_owner=win_owner,
win_perms=win_perms,
win_deny_perms=win_deny_perms,
win_inheritance=win_inheritance,
**kwargs)
except Exception as exc:
ret['changes'] = {}
@ -5334,7 +5334,8 @@ def serialize(name,
mode=None,
backup='',
makedirs=False,
show_diff=True,
show_diff=None,
show_changes=True,
create=True,
merge_if_exists=False,
**kwargs):
@ -5386,7 +5387,14 @@ def serialize(name,
.. versionadded:: 2014.1.3
show_diff
If set to False, the diff will not be shown.
DEPRECATED: Please use show_changes.
If set to ``False``, the diff will not be shown in the return data if
changes are made.
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made.
create
Default is True, if create is set to False then the file will only be
@ -5518,6 +5526,14 @@ def serialize(name,
# Make sure that any leading zeros stripped by YAML loader are added back
mode = salt.utils.normalize_mode(mode)
if show_diff is not None:
show_changes = show_diff
msg = (
'The \'show_diff\' argument to the file.serialized state has been '
'deprecated, please use \'show_changes\' instead.'
)
salt.utils.warn_until('Oxygen', msg)
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name=name,
@ -5540,10 +5556,12 @@ def serialize(name,
ret['result'] = None
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
name)
if not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
return __salt__['file.manage_file'](name=name,
@ -5558,7 +5576,7 @@ def serialize(name,
backup=backup,
makedirs=makedirs,
template=None,
show_changes=show_diff,
show_changes=show_changes,
contents=contents)

View File

@ -11,14 +11,14 @@ def __virtual__():
return 'openvswitch.port_add' in __salt__
def present(name, bridge, type=None, id=None, remote=None, dst_port=None, internal=False):
def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False):
'''
Ensures that the named port exists on bridge, eventually creates it.
Args:
name: The name of the port.
bridge: The name of the bridge.
type: Optional type of interface to create, currently supports: vlan, vxlan and gre.
tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre.
id: Optional tunnel's key.
remote: Remote endpoint's IP address.
dst_port: Port to use when creating tunnelport in the switch.
@ -26,68 +26,74 @@ def present(name, bridge, type=None, id=None, remote=None, dst_port=None, intern
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
types = ('vlan', 'vxlan', 'gre')
tunnel_types = ('vlan', 'vxlan', 'gre')
if type and type not in types:
raise TypeError('The optional type argument must be one of these values: {0}.'.format(str(types)))
if tunnel_type and tunnel_type not in tunnel_types:
raise TypeError('The optional type argument must be one of these values: {0}.'.format(str(tunnel_types)))
bridge_exists = __salt__['openvswitch.bridge_exists'](bridge)
port_list = []
if bridge_exists:
port_list = __salt__['openvswitch.port_list'](bridge)
# Comment and change messages
comment_bridge_notexists = 'Bridge {0} does not exist.'.format(bridge)
comment_port_exists = 'Port {0} already exists.'.format(name)
comment_port_created = 'Port {0} created on bridge {1}.'.format(name, bridge)
comment_port_notcreated = 'Unable to create port {0} on bridge {1}.'.format(name, bridge)
changes_port_created = {name: {'old': 'No port named {0} present.'.format(name),
comments = {}
comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge)
comments['comment_port_exists'] = 'Port {0} already exists.'.format(name)
comments['comment_port_created'] = 'Port {0} created on bridge {1}.'.format(name, bridge)
comments['comment_port_notcreated'] = 'Unable to create port {0} on bridge {1}.'.format(name, bridge)
comments['changes_port_created'] = {name: {'old': 'No port named {0} present.'.format(name),
'new': 'Created port {1} on bridge {0}.'.format(bridge, name),
}
}
comment_vlan_invalid_id = 'VLANs id must be between 0 and 4095.'
comment_vlan_invalid_name = 'Could not find network interface {0}.'.format(name)
comment_vlan_port_exists = 'Port {0} with access to VLAN {1} already exists on bridge {2}.'.format(name, id, bridge)
comment_vlan_created = 'Created port {0} with access to VLAN {1} on bridge {2}.'.format(name, id, bridge)
comment_vlan_notcreated = 'Unable to create port {0} with access to VLAN {1} on ' \
'bridge {2}.'.format(name, id, bridge)
changes_vlan_created = {name: {'old': 'No port named {0} with access to VLAN {1} present on '
'bridge {2} present.'.format(name, id, bridge),
'new': 'Created port {1} with access to VLAN {2} on '
'bridge {0}.'.format(bridge, name, id),
}
}
if tunnel_type:
comments['comment_invalid_ip'] = 'Remote is not valid ip address.'
if tunnel_type == "vlan":
comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.'
comments['comment_vlan_invalid_name'] = 'Could not find network interface {0}.'.format(name)
comments['comment_vlan_port_exists'] = 'Port {0} with access to VLAN {1} already exists on bridge {2}.'.format(name, id, bridge)
comments['comment_vlan_created'] = 'Created port {0} with access to VLAN {1} on bridge {2}.'.format(name, id, bridge)
comments['comment_vlan_notcreated'] = 'Unable to create port {0} with access to VLAN {1} on ' \
'bridge {2}.'.format(name, id, bridge)
comments['changes_vlan_created'] = {name: {'old': 'No port named {0} with access to VLAN {1} present on '
'bridge {2} present.'.format(name, id, bridge),
'new': 'Created port {1} with access to VLAN {2} on '
'bridge {0}.'.format(bridge, name, id),
}
}
comment_gre_invalid_id = 'Id of GRE tunnel must be an unsigned 32-bit integer.'
comment_gre_interface_exists = 'GRE tunnel interface {0} with rempte ip {1} and key {2} ' \
'already exists on bridge {3}.'.format(name, remote, id, bridge)
comment_gre_created = 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}.'.format(name, remote, id, bridge)
comment_gre_notcreated = 'Unable to create GRE tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}.'.format(name, remote, id, bridge)
changes_gre_created = {name: {'old': 'No GRE tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3} present.'.format(name, remote, id, bridge),
'new': 'Created GRE tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}.'.format(name, remote, id, bridge),
}
}
comment_dstport = ' (dst_port' + str(dst_port) + ')' if 0 < dst_port <= 65535 else ''
comment_vxlan_invalid_id = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.'
comment_vxlan_interface_exists = 'VXLAN tunnel interface {0} with rempte ip {1} and key {2} ' \
'already exists on bridge {3}{4}.'.format(name, remote, id, bridge, comment_dstport)
comment_vxlan_created = 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}{4}.'.format(name, remote, id, bridge, comment_dstport)
comment_vxlan_notcreated = 'Unable to create VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}{4}.'.format(name, remote, id, bridge, comment_dstport)
changes_vxlan_created = {name: {'old': 'No VXLAN tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}{4} present.'.format(name, remote, id, bridge, comment_dstport),
'new': 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}{4}.'.format(name, remote, id, bridge, comment_dstport),
}
}
comment_invalid_ip = 'Remote is not valid ip address.'
elif tunnel_type == "gre":
comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.'
comments['comment_gre_interface_exists'] = 'GRE tunnel interface {0} with rempte ip {1} and key {2} ' \
'already exists on bridge {3}.'.format(name, remote, id, bridge)
comments['comment_gre_created'] = 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}.'.format(name, remote, id, bridge)
comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}.'.format(name, remote, id, bridge)
comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3} present.'.format(name, remote, id, bridge),
'new': 'Created GRE tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}.'.format(name, remote, id, bridge),
}
}
elif tunnel_type == "vxlan":
comments['comment_dstport'] = ' (dst_port' + str(dst_port) + ')' if 0 < dst_port <= 65535 else ''
comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.'
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {0} with rempte ip {1} and key {2} ' \
'already exists on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport'])
comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport'])
comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \
'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport'])
comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}{4} present.'.format(name, remote, id, bridge, comments['comment_dstport']),
'new': 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} '
'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']),
}
}
# Check VLANs attributes
def _check_vlan():
@ -95,15 +101,15 @@ def present(name, bridge, type=None, id=None, remote=None, dst_port=None, intern
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
ret['result'] = False
ret['comment'] = comment_vlan_invalid_id
ret['comment'] = comments['comment_vlan_invalid_id']
elif not internal and name not in interfaces:
ret['result'] = False
ret['comment'] = comment_vlan_invalid_name
ret['comment'] = comments['comment_vlan_invalid_name']
elif tag and name in port_list:
try:
if int(tag[0]) == id:
ret['result'] = True
ret['comment'] = comment_vlan_port_exists
ret['comment'] = comments['comment_vlan_port_exists']
except (ValueError, KeyError):
pass
@ -113,16 +119,16 @@ def present(name, bridge, type=None, id=None, remote=None, dst_port=None, intern
interface_type = __salt__['openvswitch.interface_get_type'](name)
if not 0 <= id <= 2**32:
ret['result'] = False
ret['comment'] = comment_gre_invalid_id
ret['comment'] = comments['comment_gre_invalid_id']
elif not __salt__['dig.check_ip'](remote):
ret['result'] = False
ret['comment'] = comment_invalid_ip
ret['comment'] = comments['comment_invalid_ip']
elif interface_options and interface_type and name in port_list:
interface_attroptions = '{key=\"' + str(id) + '\", remote_ip=\"' + str(remote) + '\"}'
try:
if interface_type[0] == 'gre' and interface_options[0] == interface_attroptions:
ret['result'] = True
ret['comment'] = comment_gre_interface_exists
ret['comment'] = comments['comment_gre_interface_exists']
except KeyError:
pass
@ -132,101 +138,101 @@ def present(name, bridge, type=None, id=None, remote=None, dst_port=None, intern
interface_type = __salt__['openvswitch.interface_get_type'](name)
if not 0 <= id <= 2**64:
ret['result'] = False
ret['comment'] = comment_vxlan_invalid_id
ret['comment'] = comments['comment_vxlan_invalid_id']
elif not __salt__['dig.check_ip'](remote):
ret['result'] = False
ret['comment'] = comment_invalid_ip
ret['comment'] = comments['comment_invalid_ip']
elif interface_options and interface_type and name in port_list:
opt_port = 'dst_port=\"' + str(dst_port) + '\", ' if 0 < dst_port <= 65535 else ''
interface_attroptions = '{{{0}key=\"'.format(opt_port) + str(id) + '\", remote_ip=\"' + str(remote) + '\"}'
try:
if interface_type[0] == 'vxlan' and interface_options[0] == interface_attroptions:
ret['result'] = True
ret['comment'] = comment_vxlan_interface_exists
ret['comment'] = comments['comment_vxlan_interface_exists']
except KeyError:
pass
# Dry run, test=true mode
if __opts__['test']:
if bridge_exists:
if type == 'vlan':
if tunnel_type == 'vlan':
_check_vlan()
if not ret['comment']:
ret['result'] = None
ret['comment'] = comment_vlan_created
elif type == 'vxlan':
ret['comment'] = comments['comment_vlan_created']
elif tunnel_type == 'vxlan':
_check_vxlan()
if not ret['comment']:
ret['result'] = None
ret['comment'] = comment_vxlan_created
elif type == 'gre':
ret['comment'] = comments['comment_vxlan_created']
elif tunnel_type == 'gre':
_check_gre()
if not ret['comment']:
ret['result'] = None
ret['comment'] = comment_gre_created
ret['comment'] = comments['comment_gre_created']
else:
if name in port_list:
ret['result'] = True
ret['comment'] = comment_port_exists
ret['comment'] = comments['comment_port_exists']
else:
ret['result'] = None
ret['comment'] = comment_port_created
ret['comment'] = comments['comment_port_created']
else:
ret['result'] = None
ret['comment'] = comment_bridge_notexists
ret['comment'] = comments['comment_bridge_notexists']
return ret
if bridge_exists:
if type == 'vlan':
if tunnel_type == 'vlan':
_check_vlan()
if not ret['comment']:
port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal)
if port_create_vlan:
ret['result'] = True
ret['comment'] = comment_vlan_created
ret['changes'] = changes_vlan_created
ret['comment'] = comments['comment_vlan_created']
ret['changes'] = comments['changes_vlan_created']
else:
ret['result'] = False
ret['comment'] = comment_vlan_notcreated
elif type == 'vxlan':
ret['comment'] = comments['comment_vlan_notcreated']
elif tunnel_type == 'vxlan':
_check_vxlan()
if not ret['comment']:
port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port)
if port_create_vxlan:
ret['result'] = True
ret['comment'] = comment_vxlan_created
ret['changes'] = changes_vxlan_created
ret['comment'] = comments['comment_vxlan_created']
ret['changes'] = comments['changes_vxlan_created']
else:
ret['result'] = False
ret['comment'] = comment_vxlan_notcreated
elif type == 'gre':
ret['comment'] = comments['comment_vxlan_notcreated']
elif tunnel_type == 'gre':
_check_gre()
if not ret['comment']:
port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote)
if port_create_gre:
ret['result'] = True
ret['comment'] = comment_gre_created
ret['changes'] = changes_gre_created
ret['comment'] = comments['comment_gre_created']
ret['changes'] = comments['changes_gre_created']
else:
ret['result'] = False
ret['comment'] = comment_gre_notcreated
ret['comment'] = comments['comment_gre_notcreated']
else:
if name in port_list:
ret['result'] = True
ret['comment'] = comment_port_exists
ret['comment'] = comments['comment_port_exists']
else:
port_add = __salt__['openvswitch.port_add'](bridge, name)
if port_add:
ret['result'] = True
ret['comment'] = comment_port_created
ret['changes'] = changes_port_created
ret['comment'] = comments['comment_port_created']
ret['changes'] = comments['changes_port_created']
else:
ret['result'] = False
ret['comment'] = comment_port_notcreated
ret['comment'] = comments['comment_port_notcreated']
else:
ret['result'] = False
ret['comment'] = comment_bridge_notexists
ret['comment'] = comments['comment_bridge_notexists']
return ret
@ -242,7 +248,7 @@ def absent(name, bridge=None):
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
bridge_exists = False
if bridge:
bridge_exists = __salt__['openvswitch.bridge_exists'](bridge)
if bridge_exists:
@ -253,11 +259,12 @@ def absent(name, bridge=None):
port_list = [name]
# Comment and change messages
comment_bridge_notexists = 'Bridge {0} does not exist.'.format(bridge)
comment_port_notexists = 'Port {0} does not exist on bridge {1}.'.format(name, bridge)
comment_port_deleted = 'Port {0} deleted.'.format(name)
comment_port_notdeleted = 'Unable to delete port {0}.'.format(name)
changes_port_deleted = {name: {'old': 'Port named {0} may exist.'.format(name),
comments = {}
comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge)
comments['comment_port_notexists'] = 'Port {0} does not exist on bridge {1}.'.format(name, bridge)
comments['comment_port_deleted'] = 'Port {0} deleted.'.format(name)
comments['comment_port_notdeleted'] = 'Unable to delete port {0}.'.format(name)
comments['changes_port_deleted'] = {name: {'old': 'Port named {0} may exist.'.format(name),
'new': 'Deleted port {0}.'.format(name),
}
}
@ -266,21 +273,21 @@ def absent(name, bridge=None):
if __opts__['test']:
if bridge and not bridge_exists:
ret['result'] = None
ret['comment'] = comment_bridge_notexists
ret['comment'] = comments['comment_bridge_notexists']
elif name not in port_list:
ret['result'] = True
ret['comment'] = comment_port_notexists
ret['comment'] = comments['comment_port_notexists']
else:
ret['result'] = None
ret['comment'] = comment_port_deleted
ret['comment'] = comments['comment_port_deleted']
return ret
if bridge and not bridge_exists:
ret['result'] = False
ret['comment'] = comment_bridge_notexists
ret['comment'] = comments['comment_bridge_notexists']
elif name not in port_list:
ret['result'] = True
ret['comment'] = comment_port_notexists
ret['comment'] = comments['comment_port_notexists']
else:
if bridge:
port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name)
@ -289,10 +296,10 @@ def absent(name, bridge=None):
if port_remove:
ret['result'] = True
ret['comment'] = comment_port_deleted
ret['changes'] = changes_port_deleted
ret['comment'] = comments['comment_port_deleted']
ret['changes'] = comments['changes_port_deleted']
else:
ret['result'] = False
ret['comment'] = comment_port_notdeleted
ret['comment'] = comments['comment_port_notdeleted']
return ret

View File

@ -1915,7 +1915,7 @@ def latest(
for pkg in desired_pkgs:
if not avail.get(pkg):
# Package either a) is up-to-date, or b) does not exist
if not cur[pkg]:
if not cur.get(pkg):
# Package does not exist
msg = 'No information found for \'{0}\'.'.format(pkg)
log.error(msg)

View File

@ -662,7 +662,9 @@ def runner(name, **kwargs):
**kwargs)
runner_return = out.get('return')
if 'success' in out and not out['success']:
if isinstance(runner_return, dict) and 'Error' in runner_return:
out['success'] = False
if not out.get('success', True):
ret = {
'name': name,
'result': False,

View File

@ -71,14 +71,10 @@ def timeout(name, delete=0, reject=0):
reject_set.add(id_)
for id_ in remove:
keyapi.delete_key(id_)
if id_ in __context__[ktr]:
del __context__[ktr][id_]
if id_ in __reg__['status']['val']:
del __reg__['status']['val'][id_]
__reg__['status']['val'].pop(id_, None)
__context__[ktr].pop(id_, None)
for id_ in reject_set:
keyapi.reject(id_)
if id_ in __context__[ktr]:
del __context__[ktr][id_]
if id_ in __reg__['status']['val']:
del __reg__['status']['val'][id_]
__reg__['status']['val'].pop(id_, None)
__context__[ktr].pop(id_, None)
return ret

View File

@ -1094,10 +1094,10 @@ def format_call(fun,
continue
extra[key] = copy.deepcopy(value)
# We'll be showing errors to the users until Salt Nitrogen comes out, after
# We'll be showing errors to the users until Salt Oxygen comes out, after
# which, errors will be raised instead.
warn_until(
'Nitrogen',
'Oxygen',
'It\'s time to start raising `SaltInvocationError` instead of '
'returning warnings',
# Let's not show the deprecation warning on the console, there's no
@ -1134,7 +1134,7 @@ def format_call(fun,
'{0}. If you were trying to pass additional data to be used '
'in a template context, please populate \'context\' with '
'\'key: value\' pairs. Your approach will work until Salt '
'Nitrogen is out.{1}'.format(
'Oxygen is out.{1}'.format(
msg,
'' if 'full' not in ret else ' Please update your state files.'
)

View File

@ -31,7 +31,7 @@ class Depends(object):
cause the function to be unloaded (or replaced)
'''
# kind -> Dependency -> list of things that depend on it
dependency_dict = defaultdict(lambda: defaultdict(set))
dependency_dict = defaultdict(lambda: defaultdict(dict))
def __init__(self, *dependencies, **kwargs):
'''
@ -72,11 +72,11 @@ class Depends(object):
frame = inspect.stack()[1][0]
# due to missing *.py files under esky we cannot use inspect.getmodule
# module name is something like salt.loaded.int.modules.test
kind = frame.f_globals['__name__'].rsplit('.', 2)[1]
_, kind, mod_name = frame.f_globals['__name__'].rsplit('.', 2)
fun_name = function.__name__
for dep in self.dependencies:
self.dependency_dict[kind][dep].add(
(frame, function, self.fallback_function)
)
self.dependency_dict[kind][dep][(mod_name, fun_name)] = \
(frame, self.fallback_function)
except Exception as exc:
log.error('Exception encountered when attempting to inspect frame in '
'dependency decorator: {0}'.format(exc))
@ -90,46 +90,44 @@ class Depends(object):
It will modify the "functions" dict and remove/replace modules that
are missing dependencies.
'''
for dependency, dependent_set in six.iteritems(cls.dependency_dict[kind]):
# check if dependency is loaded
for frame, func, fallback_function in dependent_set:
# check if you have the dependency
for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]):
for (mod_name, func_name), (frame, fallback_function) in six.iteritems(dependent_dict):
# check if dependency is loaded
if dependency is True:
log.trace(
'Dependency for {0}.{1} exists, not unloading'.format(
frame.f_globals['__name__'].split('.')[-1],
func.__name__,
mod_name,
func_name
)
)
continue
# check if you have the dependency
if dependency in frame.f_globals \
or dependency in frame.f_locals:
log.trace(
'Dependency ({0}) already loaded inside {1}, '
'skipping'.format(
dependency,
frame.f_globals['__name__'].split('.')[-1]
mod_name
)
)
continue
log.trace(
'Unloading {0}.{1} because dependency ({2}) is not '
'imported'.format(
frame.f_globals['__name__'],
func,
mod_name,
func_name,
dependency
)
)
# if not, unload dependent_set
# if not, unload the function
if frame:
try:
func_name = frame.f_globals['__func_alias__'][func.__name__]
func_name = frame.f_globals['__func_alias__'][func_name]
except (AttributeError, KeyError):
func_name = func.__name__
pass
mod_key = '{0}.{1}'.format(frame.f_globals['__name__'].split('.')[-1],
func_name)
mod_key = '{0}.{1}'.format(mod_name, func_name)
# if we don't have this module loaded, skip it!
if mod_key not in functions:

View File

@ -11,6 +11,7 @@
from __future__ import absolute_import
# Import salt libs
import salt.ext.six as six
import salt.utils
from salt.exceptions import SaltException
@ -77,13 +78,21 @@ class BufferedReader(object):
to be read.
'''
if self.__buffered is None:
multiplier = self.__max_in_mem / self.__chunk_size
# Use floor division to force multiplier to an integer
multiplier = self.__max_in_mem // self.__chunk_size
self.__buffered = ""
else:
multiplier = 1
self.__buffered = self.__buffered[self.__chunk_size:]
data = self.__file.read(self.__chunk_size * multiplier)
if six.PY3:
# Data is a byte object in Python 3
# Decode it in order to append to self.__buffered str later
data = self.__file.read(self.__chunk_size * multiplier).decode(
__salt_system_encoding__
)
else:
data = self.__file.read(self.__chunk_size * multiplier)
if not data:
self.__file.close()
@ -92,6 +101,9 @@ class BufferedReader(object):
self.__buffered += data
return self.__buffered
# Alias next to __next__ for Py3 compatibility
__next__ = next
# Support with statements
def __enter__(self):
return self

View File

@ -490,7 +490,8 @@ class PrintOption(Option):
_FILE_TYPES.get(stat.S_IFMT(fstat[stat.ST_MODE]), '?')
)
elif arg == 'mode':
result.append(int(oct(fstat[stat.ST_MODE])[-3:]))
# PY3 compatibility: Use radix value 8 on int type-cast explicitly
result.append(int(oct(fstat[stat.ST_MODE])[-3:], 8))
elif arg == 'mtime':
result.append(fstat[stat.ST_MTIME])
elif arg == 'user':

View File

@ -198,7 +198,7 @@ def query(url,
log_url = sanitize_url(url_full, hide_fields)
log.debug('Requesting URL {0} using {1} method'.format(log_url, method))
if method == 'POST':
if method == 'POST' and log.isEnabledFor(logging.TRACE):
# Make sure no secret fields show up in logs
if isinstance(data, dict):
log_data = data.copy()
@ -537,7 +537,7 @@ def query(url,
log.trace(('Cannot Trace Log Response Text: {0}. This may be due to '
'incompatibilities between requests and logging.').format(exc))
if text_out is not None and os.path.exists(text_out):
if text_out is not None:
with salt.utils.fopen(text_out, 'w') as tof:
tof.write(result_text)

View File

@ -702,8 +702,17 @@ class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def default_signals(*signals):
old_signals = {}
for signum in signals:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
try:
signal.signal(signum, signal.SIG_DFL)
old_signals[signum] = signal.getsignal(signum)
except ValueError as exc:
# This happens when a netapi module attempts to run a function
# using wheel_async, because the process trying to register signals
# will not be the main PID.
log.trace(
'Failed to register signal for signum %d: %s',
signum, exc
)
# Do whatever is needed with the reset signals
yield

View File

@ -58,7 +58,7 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
react = {}
if glob_ref.startswith('salt://'):
glob_ref = self.minion.functions['cp.cache_file'](glob_ref)
glob_ref = self.minion.functions['cp.cache_file'](glob_ref) or ''
globbed_ref = glob.glob(glob_ref)
if not globbed_ref:
log.error('Can not render SLS {0} for tag {1}. File missing or not found.'.format(glob_ref, tag))

View File

@ -147,7 +147,6 @@ def query(key, keyid, method='GET', params=None, headers=None,
if not data:
data = None
response = None
if method == 'PUT':
if local_file:
data = salt.utils.fopen(local_file, 'r')
@ -165,6 +164,7 @@ def query(key, keyid, method='GET', params=None, headers=None,
data=data,
verify=verify_ssl,
stream=True)
response = result.content
else:
result = requests.request(method,
requesturl,

View File

@ -56,7 +56,7 @@ class InputSanitizer(object):
:param value:
:return:
'''
return re.sub(r'[^a-zA-Z0-9.-]', '', InputSanitizer.trim(value))
return re.sub(r'[^a-zA-Z0-9.-]', '', InputSanitizer.trim(value)).strip('.')
id = hostname

View File

@ -37,6 +37,8 @@ STATE_FUNCTION_RUNNING_RE = re.compile(
INTEGRATION_TEST_DIR = os.path.dirname(
os.path.normpath(os.path.abspath(__file__))
)
if os.name == 'nt':
INTEGRATION_TEST_DIR = INTEGRATION_TEST_DIR.replace('\\', '\\\\')
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
# Import Salt Testing libs

View File

@ -52,6 +52,7 @@ class SysModuleTest(integration.ModuleCase):
'vsphere.get_service_instance_via_proxy',
'vsphere.gets_service_instance_via_proxy',
'vsphere.supports_proxies',
'vsphere.test_vcenter_connection',
'vsphere.wraps',
'yumpkg.expand_repo_def',
'yumpkg5.expand_repo_def',

View File

@ -579,6 +579,28 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
for typ in managed_files:
os.remove(managed_files[typ])
@destructiveTest
@skipIf(salt.utils.is_windows(), 'Windows does not support "mode" kwarg. Skipping.')
def test_managed_check_cmd(self):
'''
Test file.managed passing a basic check_cmd kwarg. See Issue #38111.
'''
ret = self.run_state(
'file.managed',
name='/tmp/sudoers',
user='root',
group='root',
mode=440,
check_cmd='visudo -c -s -f'
)
self.assertSaltTrueReturn(ret)
self.assertInSaltComment('Empty file', ret)
self.assertEqual(ret['file_|-/tmp/sudoers_|-/tmp/sudoers_|-managed']['changes'],
{'new': 'file /tmp/sudoers created', 'mode': '0440'})
# Clean Up File
os.remove('/tmp/sudoers')
def test_directory(self):
'''
file.directory

View File

@ -18,6 +18,7 @@ import random
import tempfile
import shutil
import sys
import hashlib
# Import salt libs
import salt
@ -89,6 +90,12 @@ def parse():
default=False,
action='store_true',
help='Each Minion claims a different version grain')
parser.add_option(
'--rand-machine-id',
dest='rand_machine_id',
default=False,
action='store_true',
help='Each Minion claims a different machine id grain')
parser.add_option(
'-k',
'--keep-modules',
@ -103,6 +110,11 @@ def parse():
action='store_true',
help=('Run the minions with debug output of the swarm going to '
'the terminal'))
parser.add_option(
'--temp-dir',
dest='temp_dir',
default=None,
help='Place temporary files/directories here')
parser.add_option(
'--no-clean',
action='store_true',
@ -142,15 +154,18 @@ class Swarm(object):
self.opts = opts
self.raet_port = 4550
# If given a root_dir, keep the tmp files there as well
if opts['root_dir']:
tmpdir = os.path.join(opts['root_dir'], 'tmp')
# If given a temp_dir, use it for temporary files
if opts['temp_dir']:
self.swarm_root = opts['temp_dir']
else:
tmpdir = opts['root_dir']
self.swarm_root = tempfile.mkdtemp(
prefix='mswarm-root', suffix='.d',
dir=tmpdir)
# If given a root_dir, keep the tmp files there as well
if opts['root_dir']:
tmpdir = os.path.join(opts['root_dir'], 'tmp')
else:
tmpdir = opts['root_dir']
self.swarm_root = tempfile.mkdtemp(
prefix='mswarm-root', suffix='.d',
dir=tmpdir)
if self.opts['transport'] == 'zeromq':
self.pki = self._pki_dir()
@ -158,21 +173,24 @@ class Swarm(object):
self.confs = set()
random.seed(0)
def _pki_dir(self):
'''
Create the shared pki directory
'''
path = os.path.join(self.swarm_root, 'pki')
os.makedirs(path)
if not os.path.exists(path):
os.makedirs(path)
print('Creating shared pki keys for the swarm on: {0}'.format(path))
subprocess.call(
'salt-key -c {0} --gen-keys minion --gen-keys-dir {0} '
'--log-file {1} --user {2}'.format(
path, os.path.join(path, 'keys.log'), self.opts['user'],
), shell=True
)
print('Keys generated')
print('Creating shared pki keys for the swarm on: {0}'.format(path))
subprocess.call(
'salt-key -c {0} --gen-keys minion --gen-keys-dir {0} '
'--log-file {1} --user {2}'.format(
path, os.path.join(path, 'keys.log'), self.opts['user'],
), shell=True
)
print('Keys generated')
return path
def start(self):
@ -274,7 +292,8 @@ class MinionSwarm(Swarm):
)
dpath = os.path.join(self.swarm_root, minion_id)
os.makedirs(dpath)
if not os.path.exists(dpath):
os.makedirs(dpath)
data.update({
'id': minion_id,
@ -287,11 +306,12 @@ class MinionSwarm(Swarm):
if self.opts['transport'] == 'zeromq':
minion_pkidir = os.path.join(dpath, 'pki')
os.makedirs(minion_pkidir)
minion_pem = os.path.join(self.pki, 'minion.pem')
minion_pub = os.path.join(self.pki, 'minion.pub')
shutil.copy(minion_pem, minion_pkidir)
shutil.copy(minion_pub, minion_pkidir)
if not os.path.exists(minion_pkidir):
os.makedirs(minion_pkidir)
minion_pem = os.path.join(self.pki, 'minion.pem')
minion_pub = os.path.join(self.pki, 'minion.pub')
shutil.copy(minion_pem, minion_pkidir)
shutil.copy(minion_pub, minion_pkidir)
data['pki_dir'] = minion_pkidir
elif self.opts['transport'] == 'raet':
data['transport'] = 'raet'
@ -318,6 +338,8 @@ class MinionSwarm(Swarm):
data['grains']['os'] = random.choice(OSES)
if self.opts['rand_ver']:
data['grains']['saltversion'] = random.choice(VERS)
if self.opts['rand_machine_id']:
data['grains']['machine_id'] = hashlib.md5(minion_id).hexdigest()
with open(path, 'w+') as fp_:
yaml.dump(data, fp_)

View File

@ -352,9 +352,20 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
# Turn on expensive tests execution
os.environ['EXPENSIVE_TESTS'] = 'True'
import salt.utils
if salt.utils.is_windows():
import salt.utils.win_functions
current_user = salt.utils.win_functions.get_current_user()
if current_user == 'SYSTEM':
is_admin = True
else:
is_admin = salt.utils.win_functions.is_admin(current_user)
else:
is_admin = os.geteuid() == 0
if self.options.coverage and any((
self.options.name,
os.geteuid() != 0,
is_admin,
not self.options.run_destructive)) \
and self._check_enabled_suites(include_unit=True):
self.error(

View File

@ -54,8 +54,9 @@ class DocTestCase(TestCase):
if 'man' in key \
or key.endswith('doc_test.py') \
or key.endswith('doc/conf.py') \
or key.endswith('/conventions/documentation.rst') \
or key.endswith('doc/topics/releases/2016.11.2.rst') \
or key.endswith('/conventions/documentation.rst'):
or key.endswith('doc/topics/releases/2016.3.5.rst'):
continue
# Set up test return dict

View File

@ -37,6 +37,7 @@ import os
try:
import boto3
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -51,17 +52,19 @@ if 'SuSE' in platform.dist():
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'
required_botocore_version = '1.5.2'
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
conn_parameters = {'region': region, 'key': access_key,
'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
function_ret = dict(FunctionName='testfunction',
Runtime='python2.7',
@ -74,7 +77,8 @@ function_ret = dict(FunctionName='testfunction',
CodeSize=199,
FunctionArn='arn:lambda:us-east-1:1234:Something',
LastModified='yes',
VpcConfig=None)
VpcConfig=None,
Environment=None)
alias_ret = dict(AliasArn='arn:lambda:us-east-1:1234:Something',
Name='testalias',
FunctionVersion='3',
@ -108,14 +112,17 @@ def _has_required_boto():
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
elif LooseVersion(found_botocore_version) < LooseVersion(required_botocore_version):
return False
else:
return True
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(_has_required_boto() is False,
('The boto3 module must be greater than or equal to version {0}, '
'and botocore must be greater than or equal to {1}'.format(
required_boto3_version, required_botocore_version)))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaTestCaseBase(TestCase):
conn = None
@ -127,7 +134,8 @@ class BotoLambdaTestCaseBase(TestCase):
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
conn_parameters['key'] = ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(50))
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
@ -139,8 +147,10 @@ class BotoLambdaTestCaseBase(TestCase):
class TempZipFile(object):
def __enter__(self):
with NamedTemporaryFile(suffix='.zip', prefix='salt_test_', delete=False) as tmp:
with NamedTemporaryFile(
suffix='.zip', prefix='salt_test_', delete=False) as tmp:
to_write = '###\n'
if six.PY3:
to_write = salt.utils.to_bytes(to_write)
@ -166,7 +176,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
Tests checking lambda function existence when the lambda function already exists
'''
self.conn.list_functions.return_value = {'Functions': [function_ret]}
func_exists_result = boto_lambda.function_exists(FunctionName=function_ret['FunctionName'], **conn_parameters)
func_exists_result = boto_lambda.function_exists(
FunctionName=function_ret['FunctionName'], **conn_parameters)
self.assertTrue(func_exists_result['exists'])
@ -175,7 +186,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
Tests checking lambda function existence when the lambda function does not exist
'''
self.conn.list_functions.return_value = {'Functions': [function_ret]}
func_exists_result = boto_lambda.function_exists(FunctionName='myfunc', **conn_parameters)
func_exists_result = boto_lambda.function_exists(
FunctionName='myfunc', **conn_parameters)
self.assertFalse(func_exists_result['exists'])
@ -183,10 +195,13 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
Tests checking lambda function existence when boto returns an error
'''
self.conn.list_functions.side_effect = ClientError(error_content, 'list_functions')
func_exists_result = boto_lambda.function_exists(FunctionName='myfunc', **conn_parameters)
self.conn.list_functions.side_effect = ClientError(
error_content, 'list_functions')
func_exists_result = boto_lambda.function_exists(
FunctionName='myfunc', **conn_parameters)
self.assertEqual(func_exists_result.get('error', {}).get('message'), error_message.format('list_functions'))
self.assertEqual(func_exists_result.get('error', {}).get(
'message'), error_message.format('list_functions'))
def test_that_when_creating_a_function_from_zipfile_succeeds_the_create_function_method_returns_true(self):
'''
@ -195,12 +210,13 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with TempZipFile() as zipfile:
self.conn.create_function.return_value = function_ret
lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
**conn_parameters)
lambda_creation_result = boto_lambda.create_function(
FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
**conn_parameters)
self.assertTrue(lambda_creation_result['created'])
@ -210,13 +226,14 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.create_function.return_value = function_ret
lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
lambda_creation_result = boto_lambda.create_function(
FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
self.assertTrue(lambda_creation_result['created'])
@ -226,12 +243,13 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with self.assertRaisesRegexp(SaltInvocationError,
'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
**conn_parameters)
'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
lambda_creation_result = boto_lambda.create_function(
FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
**conn_parameters)
def test_that_when_creating_a_function_with_zipfile_and_s3_raises_a_salt_invocation_error(self):
'''
@ -239,31 +257,35 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with self.assertRaisesRegexp(SaltInvocationError,
'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
with TempZipFile() as zipfile:
lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
lambda_creation_result = boto_lambda.create_function(
FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
def test_that_when_creating_a_function_fails_the_create_function_method_returns_error(self):
'''
tests False function not created.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.create_function.side_effect = ClientError(error_content, 'create_function')
self.conn.create_function.side_effect = ClientError(
error_content, 'create_function')
with TempZipFile() as zipfile:
lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
**conn_parameters)
self.assertEqual(lambda_creation_result.get('error', {}).get('message'), error_message.format('create_function'))
lambda_creation_result = boto_lambda.create_function(
FunctionName='testfunction',
Runtime='python2.7',
Role='myrole',
Handler='file.method',
ZipFile=zipfile,
**conn_parameters)
self.assertEqual(lambda_creation_result.get('error', {}).get(
'message'), error_message.format('create_function'))
def test_that_when_deleting_a_function_succeeds_the_delete_function_method_returns_true(self):
'''
@ -271,8 +293,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = boto_lambda.delete_function(FunctionName='testfunction',
Qualifier=1,
**conn_parameters)
Qualifier=1,
**conn_parameters)
self.assertTrue(result['deleted'])
@ -281,9 +303,10 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
tests False function not deleted.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.delete_function.side_effect = ClientError(error_content, 'delete_function')
self.conn.delete_function.side_effect = ClientError(
error_content, 'delete_function')
result = boto_lambda.delete_function(FunctionName='testfunction',
**conn_parameters)
**conn_parameters)
self.assertFalse(result['deleted'])
def test_that_when_describing_function_it_returns_the_dict_of_properties_returns_true(self):
@ -293,7 +316,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
self.conn.list_functions.return_value = {'Functions': [function_ret]}
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = boto_lambda.describe_function(FunctionName=function_ret['FunctionName'], **conn_parameters)
result = boto_lambda.describe_function(
FunctionName=function_ret['FunctionName'], **conn_parameters)
self.assertEqual(result, {'function': function_ret})
@ -303,7 +327,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
self.conn.list_functions.return_value = {'Functions': []}
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = boto_lambda.describe_function(FunctionName='testfunction', **conn_parameters)
result = boto_lambda.describe_function(
FunctionName='testfunction', **conn_parameters)
self.assertFalse(result['function'])
@ -311,8 +336,10 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
Tests describing parameters failure
'''
self.conn.list_functions.side_effect = ClientError(error_content, 'list_functions')
result = boto_lambda.describe_function(FunctionName='testfunction', **conn_parameters)
self.conn.list_functions.side_effect = ClientError(
error_content, 'list_functions')
result = boto_lambda.describe_function(
FunctionName='testfunction', **conn_parameters)
self.assertTrue('error' in result)
def test_that_when_updating_a_function_succeeds_the_update_function_method_returns_true(self):
@ -321,7 +348,8 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.update_function_config.return_value = function_ret
result = boto_lambda.update_function_config(FunctionName=function_ret['FunctionName'], Role='myrole', **conn_parameters)
result = boto_lambda.update_function_config(
FunctionName=function_ret['FunctionName'], Role='myrole', **conn_parameters)
self.assertTrue(result['updated'])
@ -330,11 +358,13 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
tests False function not updated.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.update_function_configuration.side_effect = ClientError(error_content, 'update_function')
self.conn.update_function_configuration.side_effect = ClientError(
error_content, 'update_function')
result = boto_lambda.update_function_config(FunctionName='testfunction',
Role='myrole',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('update_function'))
Role='myrole',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('update_function'))
def test_that_when_updating_function_code_from_zipfile_succeeds_the_update_function_method_returns_true(self):
'''
@ -343,7 +373,9 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with TempZipFile() as zipfile:
self.conn.update_function_code.return_value = function_ret
result = boto_lambda.update_function_code(FunctionName=function_ret['FunctionName'], ZipFile=zipfile, **conn_parameters)
result = boto_lambda.update_function_code(
FunctionName=function_ret['FunctionName'],
ZipFile=zipfile, **conn_parameters)
self.assertTrue(result['updated'])
@ -353,10 +385,11 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.update_function_code.return_value = function_ret
result = boto_lambda.update_function_code(FunctionName='testfunction',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
result = boto_lambda.update_function_code(
FunctionName='testfunction',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
self.assertTrue(result['updated'])
@ -365,31 +398,39 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
tests Creating a function without code
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with self.assertRaisesRegexp(SaltInvocationError,
'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
result = boto_lambda.update_function_code(FunctionName='testfunction',
**conn_parameters)
with self.assertRaisesRegexp(
SaltInvocationError,
('Either ZipFile must be specified, or S3Bucket '
'and S3Key must be provided.')):
result = boto_lambda.update_function_code(
FunctionName='testfunction',
**conn_parameters)
def test_that_when_updating_function_code_fails_the_update_function_method_returns_error(self):
'''
tests False function not updated.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.update_function_code.side_effect = ClientError(error_content, 'update_function_code')
result = boto_lambda.update_function_code(FunctionName='testfunction',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('update_function_code'))
self.conn.update_function_code.side_effect = ClientError(
error_content, 'update_function_code')
result = boto_lambda.update_function_code(
FunctionName='testfunction',
S3Bucket='bucket',
S3Key='key',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('update_function_code'))
def test_that_when_listing_function_versions_succeeds_the_list_function_versions_method_returns_true(self):
'''
tests True function versions listed.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.list_versions_by_function.return_value = {'Versions': [function_ret]}
result = boto_lambda.list_function_versions(FunctionName='testfunction',
**conn_parameters)
self.conn.list_versions_by_function.return_value = {
'Versions': [function_ret]}
result = boto_lambda.list_function_versions(
FunctionName='testfunction',
**conn_parameters)
self.assertTrue(result['Versions'])
@ -399,8 +440,9 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.list_versions_by_function.return_value = {'Versions': []}
result = boto_lambda.list_function_versions(FunctionName='testfunction',
**conn_parameters)
result = boto_lambda.list_function_versions(
FunctionName='testfunction',
**conn_parameters)
self.assertFalse(result['Versions'])
def test_that_when_listing_function_versions_fails_the_list_function_versions_method_returns_error(self):
@ -408,10 +450,13 @@ class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin
tests False function versions error.
'''
with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
self.conn.list_versions_by_function.side_effect = ClientError(error_content, 'list_versions_by_function')
result = boto_lambda.list_function_versions(FunctionName='testfunction',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_versions_by_function'))
self.conn.list_versions_by_function.side_effect = ClientError(
error_content, 'list_versions_by_function')
result = boto_lambda.list_function_versions(
FunctionName='testfunction',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('list_versions_by_function'))
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@ -423,15 +468,17 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
TestCase for salt.modules.boto_lambda module aliases
'''
def test_that_when_creating_an_alias_succeeds_the_create_alias_method_returns_true(self):
'''
tests True alias created.
'''
self.conn.create_alias.return_value = alias_ret
result = boto_lambda.create_alias(FunctionName='testfunction',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'],
**conn_parameters)
Name=alias_ret['Name'],
FunctionVersion=alias_ret[
'FunctionVersion'],
**conn_parameters)
self.assertTrue(result['created'])
@ -439,12 +486,15 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
tests False alias not created.
'''
self.conn.create_alias.side_effect = ClientError(error_content, 'create_alias')
self.conn.create_alias.side_effect = ClientError(
error_content, 'create_alias')
result = boto_lambda.create_alias(FunctionName='testfunction',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('create_alias'))
Name=alias_ret['Name'],
FunctionVersion=alias_ret[
'FunctionVersion'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get(
'message'), error_message.format('create_alias'))
def test_that_when_deleting_an_alias_succeeds_the_delete_alias_method_returns_true(self):
'''
@ -460,7 +510,8 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
tests False alias not deleted.
'''
self.conn.delete_alias.side_effect = ClientError(error_content, 'delete_alias')
self.conn.delete_alias.side_effect = ClientError(
error_content, 'delete_alias')
result = boto_lambda.delete_alias(FunctionName='testfunction',
Name=alias_ret['Name'],
**conn_parameters)
@ -491,12 +542,14 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
Tests checking lambda alias existence when boto returns an error
'''
self.conn.list_aliases.side_effect = ClientError(error_content, 'list_aliases')
self.conn.list_aliases.side_effect = ClientError(
error_content, 'list_aliases')
result = boto_lambda.alias_exists(FunctionName='testfunction',
Name=alias_ret['Name'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_aliases'))
self.assertEqual(result.get('error', {}).get(
'message'), error_message.format('list_aliases'))
def test_that_when_describing_alias_it_returns_the_dict_of_properties_returns_true(self):
'''
@ -525,7 +578,8 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
Tests describing parameters failure
'''
self.conn.list_aliases.side_effect = ClientError(error_content, 'list_aliases')
self.conn.list_aliases.side_effect = ClientError(
error_content, 'list_aliases')
result = boto_lambda.describe_alias(FunctionName='testfunction',
Name=alias_ret['Name'],
**conn_parameters)
@ -547,11 +601,13 @@ class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
'''
tests False alias not updated.
'''
self.conn.update_alias.side_effect = ClientError(error_content, 'update_alias')
self.conn.update_alias.side_effect = ClientError(
error_content, 'update_alias')
result = boto_lambda.update_alias(FunctionName='testfunction',
Name=alias_ret['Name'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('update_alias'))
self.assertEqual(result.get('error', {}).get(
'message'), error_message.format('update_alias'))
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@ -563,16 +619,17 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
TestCase for salt.modules.boto_lambda module mappings
'''
def test_that_when_creating_a_mapping_succeeds_the_create_event_source_mapping_method_returns_true(self):
'''
tests True mapping created.
'''
self.conn.create_event_source_mapping.return_value = event_source_mapping_ret
result = boto_lambda.create_event_source_mapping(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
**conn_parameters)
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
**conn_parameters)
self.assertTrue(result['created'])
@ -580,12 +637,13 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
tests False mapping not created.
'''
self.conn.create_event_source_mapping.side_effect = ClientError(error_content, 'create_event_source_mapping')
self.conn.create_event_source_mapping.side_effect = ClientError(
error_content, 'create_event_source_mapping')
result = boto_lambda.create_event_source_mapping(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
**conn_parameters)
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('create_event_source_mapping'))
@ -593,11 +651,12 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
tests True mapping ids listed.
'''
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': [event_source_mapping_ret]}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': [event_source_mapping_ret]}
result = boto_lambda.get_event_source_mapping_ids(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertTrue(result)
@ -605,31 +664,34 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
tests False no mapping ids listed.
'''
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': []}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': []}
result = boto_lambda.get_event_source_mapping_ids(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertFalse(result)
def test_that_when_listing_event_source_mapping_ids_fails_the_get_event_source_mapping_ids_method_returns_error(self):
'''
tests False mapping ids error.
'''
self.conn.list_event_source_mappings.side_effect = ClientError(error_content, 'list_event_source_mappings')
self.conn.list_event_source_mappings.side_effect = ClientError(
error_content, 'list_event_source_mappings')
result = boto_lambda.get_event_source_mapping_ids(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_event_source_mappings'))
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('list_event_source_mappings'))
def test_that_when_deleting_an_event_source_mapping_by_UUID_succeeds_the_delete_event_source_mapping_method_returns_true(self):
'''
tests True mapping deleted.
'''
result = boto_lambda.delete_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertTrue(result['deleted'])
@skipIf(True, 'This appears to leak memory and crash the unit test suite')
@ -637,28 +699,32 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
tests True mapping deleted.
'''
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': [event_source_mapping_ret]}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': [event_source_mapping_ret]}
result = boto_lambda.delete_event_source_mapping(
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_an_event_source_mapping_without_identifier_the_delete_event_source_mapping_method_raises_saltinvocationexception(self):
'''
tests Deleting a mapping without identifier
'''
with self.assertRaisesRegexp(SaltInvocationError,
'Either UUID must be specified, or EventSourceArn and FunctionName must be provided.'):
with self.assertRaisesRegexp(
SaltInvocationError,
('Either UUID must be specified, or EventSourceArn '
'and FunctionName must be provided.')):
result = boto_lambda.delete_event_source_mapping(**conn_parameters)
def test_that_when_deleting_an_event_source_mapping_fails_the_delete_event_source_mapping_method_returns_false(self):
'''
tests False mapping not deleted.
'''
self.conn.delete_event_source_mapping.side_effect = ClientError(error_content, 'delete_event_source_mapping')
self.conn.delete_event_source_mapping.side_effect = ClientError(
error_content, 'delete_event_source_mapping')
result = boto_lambda.delete_event_source_mapping(UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
**conn_parameters)
self.assertFalse(result['deleted'])
def test_that_when_checking_if_an_event_source_mapping_exists_and_the_event_source_mapping_exists_the_event_source_mapping_exists_method_returns_true(self):
@ -668,8 +734,8 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
result = boto_lambda.event_source_mapping_exists(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertTrue(result['exists'])
def test_that_when_checking_if_an_event_source_mapping_exists_and_the_event_source_mapping_does_not_exist_the_event_source_mapping_exists_method_returns_false(self):
@ -679,19 +745,21 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
self.conn.get_event_source_mapping.return_value = None
result = boto_lambda.event_source_mapping_exists(
UUID='other_UUID',
**conn_parameters)
UUID='other_UUID',
**conn_parameters)
self.assertFalse(result['exists'])
def test_that_when_checking_if_an_event_source_mapping_exists_and_boto3_returns_an_error_the_event_source_mapping_exists_method_returns_error(self):
'''
Tests checking lambda event_source_mapping existence when boto returns an error
'''
self.conn.get_event_source_mapping.side_effect = ClientError(error_content, 'list_event_source_mappings')
self.conn.get_event_source_mapping.side_effect = ClientError(
error_content, 'list_event_source_mappings')
result = boto_lambda.event_source_mapping_exists(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_event_source_mappings'))
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('list_event_source_mappings'))
def test_that_when_describing_event_source_mapping_it_returns_the_dict_of_properties_returns_true(self):
'''
@ -699,9 +767,10 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
result = boto_lambda.describe_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertEqual(result, {'event_source_mapping': event_source_mapping_ret})
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertEqual(
result, {'event_source_mapping': event_source_mapping_ret})
def test_that_when_describing_event_source_mapping_it_returns_the_dict_of_properties_returns_false(self):
'''
@ -709,18 +778,19 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
self.conn.get_event_source_mapping.return_value = None
result = boto_lambda.describe_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertFalse(result['event_source_mapping'])
def test_that_when_describing_event_source_mapping_on_client_error_it_returns_error(self):
'''
Tests describing parameters failure
'''
self.conn.get_event_source_mapping.side_effect = ClientError(error_content, 'get_event_source_mapping')
self.conn.get_event_source_mapping.side_effect = ClientError(
error_content, 'get_event_source_mapping')
result = boto_lambda.describe_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
UUID=event_source_mapping_ret['UUID'],
**conn_parameters)
self.assertTrue('error' in result)
def test_that_when_updating_an_event_source_mapping_succeeds_the_update_event_source_mapping_method_returns_true(self):
@ -729,9 +799,9 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
self.conn.update_event_source_mapping.return_value = event_source_mapping_ret
result = boto_lambda.update_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
UUID=event_source_mapping_ret['UUID'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertTrue(result['updated'])
@ -739,12 +809,14 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTes
'''
tests False event_source_mapping not updated.
'''
self.conn.update_event_source_mapping.side_effect = ClientError(error_content, 'update_event_source_mapping')
self.conn.update_event_source_mapping.side_effect = ClientError(
error_content, 'update_event_source_mapping')
result = boto_lambda.update_event_source_mapping(
UUID=event_source_mapping_ret['UUID'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('update_event_source_mapping'))
UUID=event_source_mapping_ret['UUID'],
FunctionName=event_source_mapping_ret['FunctionArn'],
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('update_event_source_mapping'))
if __name__ == '__main__':

View File

@ -517,8 +517,9 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin):
'''
Tests describing parameters via vpc id if vpc exist
'''
# With moto 0.4.25 is_default is set to True. 0.4.24 and older, is_default is False
if _get_moto_version() >= LooseVersion('0.4.25'):
# With moto 0.4.25 through 0.4.30, is_default is set to True.
# 0.4.24 and older and 0.4.31 and newer, is_default is False
if LooseVersion('0.4.25') <= _get_moto_version() < LooseVersion('0.4.31'):
is_default = True
else:
is_default = False

View File

@ -117,7 +117,7 @@ class LinuxLVMTestCase(TestCase):
'Volume Group Size (kB)': 'L',
'Volume Group Status': 'C'}})
def test__lvdisplay(self):
def test_lvdisplay(self):
'''
Return information about the logical volume(s)
'''

View File

@ -19,6 +19,8 @@ ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import timezone
import salt.ext.six as six
import salt.utils
# Globals
timezone.__salt__ = {}
@ -76,7 +78,10 @@ class TimezoneTestCase(TestCase):
def create_tempfile_with_contents(self, contents):
temp = NamedTemporaryFile(delete=False)
temp.write(contents)
if six.PY3:
temp.write(salt.utils.to_bytes(contents))
else:
temp.write(contents)
temp.close()
self.tempfiles.append(temp)
return temp

View File

@ -32,6 +32,7 @@ from unit.modules.boto_lambda_test import BotoLambdaTestCaseMixin, TempZipFile
try:
import boto3
from botocore.exceptions import ClientError
from botocore import __version__ as found_botocore_version
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -42,17 +43,19 @@ except ImportError:
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'
required_botocore_version = '1.5.2'
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
conn_parameters = {'region': region, 'key': access_key,
'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
function_ret = dict(FunctionName='testfunction',
Runtime='python2.7',
@ -84,8 +87,10 @@ opts = salt.config.DEFAULT_MINION_OPTS
context = {}
utils = salt.loader.utils(opts, whitelist=['boto3'], context=context)
serializers = salt.loader.serializers(opts)
funcs = salt.loader.minion_mods(opts, context=context, utils=utils, whitelist=['boto_lambda'])
salt_states = salt.loader.states(opts=opts, functions=funcs, utils=utils, whitelist=['boto_lambda'], serializers=serializers)
funcs = salt.loader.minion_mods(
opts, context=context, utils=utils, whitelist=['boto_lambda'])
salt_states = salt.loader.states(opts=opts, functions=funcs, utils=utils, whitelist=[
'boto_lambda'], serializers=serializers)
def _has_required_boto():
@ -97,14 +102,17 @@ def _has_required_boto():
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
elif LooseVersion(found_botocore_version) < LooseVersion(required_botocore_version):
return False
else:
return True
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(_has_required_boto() is False,
('The boto3 module must be greater than or equal to version {0}, '
'and botocore must be greater than or equal to {1}'.format(
required_boto3_version, required_botocore_version)))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaStateTestCaseBase(TestCase):
conn = None
@ -115,7 +123,8 @@ class BotoLambdaStateTestCaseBase(TestCase):
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
conn_parameters['key'] = ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(50))
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
@ -135,17 +144,18 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
'''
Tests present on a function that does not exist.
'''
self.conn.list_functions.side_effect = [{'Functions': []}, {'Functions': [function_ret]}]
self.conn.list_functions.side_effect = [
{'Functions': []}, {'Functions': [function_ret]}]
self.conn.create_function.return_value = function_ret
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with TempZipFile() as zipfile:
result = salt_states['boto_lambda.function_present'](
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile)
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile)
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['function']['FunctionName'],
@ -164,20 +174,22 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
encoded = sha.encode()
encoded.strip.return_value = function_ret['CodeSha256']
result = salt_states['boto_lambda.function_present'](
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_present_with_failure(self):
self.conn.list_functions.side_effect = [{'Functions': []}, {'Functions': [function_ret]}]
self.conn.create_function.side_effect = ClientError(error_content, 'create_function')
self.conn.list_functions.side_effect = [
{'Functions': []}, {'Functions': [function_ret]}]
self.conn.create_function.side_effect = ClientError(
error_content, 'create_function')
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
with TempZipFile() as zipfile:
with patch('hashlib.sha256') as sha256:
@ -187,14 +199,14 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
encoded = sha.encode()
encoded.strip.return_value = function_ret['CodeSha256']
result = salt_states['boto_lambda.function_present'](
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
@ -209,14 +221,17 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
def test_absent_when_function_exists(self):
self.conn.list_functions.return_value = {'Functions': [function_ret]}
result = salt_states['boto_lambda.function_absent']('test', function_ret['FunctionName'])
result = salt_states['boto_lambda.function_absent'](
'test', function_ret['FunctionName'])
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['function'], None)
def test_absent_with_failure(self):
self.conn.list_functions.return_value = {'Functions': [function_ret]}
self.conn.delete_function.side_effect = ClientError(error_content, 'delete_function')
result = salt_states['boto_lambda.function_absent']('test', function_ret['FunctionName'])
self.conn.delete_function.side_effect = ClientError(
error_content, 'delete_function')
result = salt_states['boto_lambda.function_absent'](
'test', function_ret['FunctionName'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
@ -224,17 +239,18 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
self.conn.list_functions.return_value = {'Functions': [function_ret]}
self.conn.update_function_code.return_value = function_ret
self.conn.get_policy.return_value = {
"Policy": json.dumps(
{"Version": "2012-10-17",
"Statement": [
{"Condition":
{"ArnLike": {"AWS:SourceArn": "arn:aws:events:us-east-1:9999999999:rule/fooo"}},
"Action": "lambda:InvokeFunction",
"Resource": "arn:aws:lambda:us-east-1:999999999999:function:testfunction",
"Effect": "Allow",
"Principal": {"Service": "events.amazonaws.com"},
"Sid": "AWSEvents_foo-bar999999999999"}],
"Id": "default"})
"Policy": json.dumps(
{"Version": "2012-10-17",
"Statement": [
{"Condition":
{"ArnLike": {
"AWS:SourceArn": "arn:aws:events:us-east-1:9999999999:rule/fooo"}},
"Action": "lambda:InvokeFunction",
"Resource": "arn:aws:lambda:us-east-1:999999999999:function:testfunction",
"Effect": "Allow",
"Principal": {"Service": "events.amazonaws.com"},
"Sid": "AWSEvents_foo-bar999999999999"}],
"Id": "default"})
}
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
@ -246,31 +262,32 @@ class BotoLambdaFunctionTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCase
encoded = sha.encode()
encoded.strip.return_value = function_ret['CodeSha256']
result = salt_states['boto_lambda.function_present'](
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
'function present',
FunctionName=function_ret['FunctionName'],
Runtime=function_ret['Runtime'],
Role=function_ret['Role'],
Handler=function_ret['Handler'],
ZipFile=zipfile,
Description=function_ret['Description'],
Timeout=function_ret['Timeout'])
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {
'old': {
'Permissions': {
'AWSEvents_foo-bar999999999999':
{'Action': 'lambda:InvokeFunction',
'Principal': 'events.amazonaws.com',
'SourceArn': 'arn:aws:events:us-east-1:9999999999:rule/fooo'}}},
'new': {
'Permissions': {
'AWSEvents_foo-bar999999999999': {}}}})
'old': {
'Permissions': {
'AWSEvents_foo-bar999999999999':
{'Action': 'lambda:InvokeFunction',
'Principal': 'events.amazonaws.com',
'SourceArn': 'arn:aws:events:us-east-1:9999999999:rule/fooo'}}},
'new': {
'Permissions': {
'AWSEvents_foo-bar999999999999': {}}}})
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(_has_required_boto() is False,
('The boto3 module must be greater than or equal to version {0}, '
'and botocore must be greater than or equal to {1}'.format(
required_boto3_version, required_botocore_version)))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaAliasTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCaseMixin):
'''
@ -281,13 +298,14 @@ class BotoLambdaAliasTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCaseMix
'''
Tests present on a alias that does not exist.
'''
self.conn.list_aliases.side_effect = [{'Aliases': []}, {'Aliases': [alias_ret]}]
self.conn.list_aliases.side_effect = [
{'Aliases': []}, {'Aliases': [alias_ret]}]
self.conn.create_alias.return_value = alias_ret
result = salt_states['boto_lambda.alias_present'](
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'])
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'])
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['alias']['Name'],
@ -297,22 +315,24 @@ class BotoLambdaAliasTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCaseMix
self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
self.conn.create_alias.return_value = alias_ret
result = salt_states['boto_lambda.alias_present'](
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'],
Description=alias_ret['Description'])
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'],
Description=alias_ret['Description'])
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_present_with_failure(self):
self.conn.list_aliases.side_effect = [{'Aliases': []}, {'Aliases': [alias_ret]}]
self.conn.create_alias.side_effect = ClientError(error_content, 'create_alias')
self.conn.list_aliases.side_effect = [
{'Aliases': []}, {'Aliases': [alias_ret]}]
self.conn.create_alias.side_effect = ClientError(
error_content, 'create_alias')
result = salt_states['boto_lambda.alias_present'](
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'])
'alias present',
FunctionName='testfunc',
Name=alias_ret['Name'],
FunctionVersion=alias_ret['FunctionVersion'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
@ -322,36 +342,38 @@ class BotoLambdaAliasTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCaseMix
'''
self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
result = salt_states['boto_lambda.alias_absent'](
'alias absent',
FunctionName='testfunc',
Name='myalias')
'alias absent',
FunctionName='testfunc',
Name='myalias')
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_absent_when_alias_exists(self):
self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
result = salt_states['boto_lambda.alias_absent'](
'alias absent',
FunctionName='testfunc',
Name=alias_ret['Name'])
'alias absent',
FunctionName='testfunc',
Name=alias_ret['Name'])
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['alias'], None)
def test_absent_with_failure(self):
self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
self.conn.delete_alias.side_effect = ClientError(error_content, 'delete_alias')
self.conn.delete_alias.side_effect = ClientError(
error_content, 'delete_alias')
result = salt_states['boto_lambda.alias_absent'](
'alias absent',
FunctionName='testfunc',
Name=alias_ret['Name'])
'alias absent',
FunctionName='testfunc',
Name=alias_ret['Name'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(_has_required_boto() is False,
('The boto3 module must be greater than or equal to version {0}, '
'and botocore must be greater than or equal to {1}'.format(
required_boto3_version, required_botocore_version)))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaEventSourceMappingTestCase(BotoLambdaStateTestCaseBase, BotoLambdaTestCaseMixin):
'''
@ -362,42 +384,49 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaStateTestCaseBase, BotoLamb
'''
Tests present on an event_source_mapping that does not exist.
'''
self.conn.list_event_source_mappings.side_effect = [{'EventSourceMappings': []}, {'EventSourceMappings': [event_source_mapping_ret]}]
self.conn.list_event_source_mappings.side_effect = [
{'EventSourceMappings': []}, {'EventSourceMappings': [event_source_mapping_ret]}]
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
self.conn.create_event_source_mapping.return_value = event_source_mapping_ret
result = salt_states['boto_lambda.event_source_mapping_present'](
'event source mapping present',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName='myfunc',
StartingPosition='LATEST')
'event source mapping present',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName='myfunc',
StartingPosition='LATEST')
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['event_source_mapping']['UUID'],
event_source_mapping_ret['UUID'])
def test_present_when_event_source_mapping_exists(self):
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': [event_source_mapping_ret]}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': [event_source_mapping_ret]}
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
self.conn.create_event_source_mapping.return_value = event_source_mapping_ret
result = salt_states['boto_lambda.event_source_mapping_present'](
'event source mapping present',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
BatchSize=event_source_mapping_ret['BatchSize'])
'event source mapping present',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
BatchSize=event_source_mapping_ret['BatchSize'])
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_present_with_failure(self):
self.conn.list_event_source_mappings.side_effect = [{'EventSourceMappings': []}, {'EventSourceMappings': [event_source_mapping_ret]}]
self.conn.list_event_source_mappings.side_effect = [
{'EventSourceMappings': []}, {'EventSourceMappings': [event_source_mapping_ret]}]
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
self.conn.create_event_source_mapping.side_effect = ClientError(error_content, 'create_event_source_mapping')
self.conn.create_event_source_mapping.side_effect = ClientError(
error_content, 'create_event_source_mapping')
result = salt_states['boto_lambda.event_source_mapping_present'](
'event source mapping present',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
BatchSize=event_source_mapping_ret['BatchSize'])
'event source mapping present',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName=event_source_mapping_ret['FunctionArn'],
StartingPosition='LATEST',
BatchSize=event_source_mapping_ret['BatchSize'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
@ -405,31 +434,39 @@ class BotoLambdaEventSourceMappingTestCase(BotoLambdaStateTestCaseBase, BotoLamb
'''
Tests absent on an event_source_mapping that does not exist.
'''
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': []}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': []}
result = salt_states['boto_lambda.event_source_mapping_absent'](
'event source mapping absent',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName='myfunc')
'event source mapping absent',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName='myfunc')
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_absent_when_event_source_mapping_exists(self):
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': [event_source_mapping_ret]}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': [event_source_mapping_ret]}
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
result = salt_states['boto_lambda.event_source_mapping_absent'](
'event source mapping absent',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName='myfunc')
'event source mapping absent',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName='myfunc')
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['event_source_mapping'], None)
self.assertEqual(result['changes']['new'][
'event_source_mapping'], None)
def test_absent_with_failure(self):
self.conn.list_event_source_mappings.return_value = {'EventSourceMappings': [event_source_mapping_ret]}
self.conn.list_event_source_mappings.return_value = {
'EventSourceMappings': [event_source_mapping_ret]}
self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
self.conn.delete_event_source_mapping.side_effect = ClientError(error_content, 'delete_event_source_mapping')
self.conn.delete_event_source_mapping.side_effect = ClientError(
error_content, 'delete_event_source_mapping')
result = salt_states['boto_lambda.event_source_mapping_absent'](
'event source mapping absent',
EventSourceArn=event_source_mapping_ret['EventSourceArn'],
FunctionName='myfunc')
'event source mapping absent',
EventSourceArn=event_source_mapping_ret[
'EventSourceArn'],
FunctionName='myfunc')
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])

View File

@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import openvswitch_port
openvswitch_port.__salt__ = {}
openvswitch_port.__opts__ = {'test': False}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OpenvswitchPortTestCase(TestCase):
    '''
    Unit tests for salt.states.openvswitch_port
    '''
    # 'present' function tests: 1

    def test_present(self):
        '''
        Verify that ``present`` reports an existing port unchanged, creates
        a missing plain port, and creates a GRE tunnel interface when a
        tunnel type is requested.
        '''
        name = 'salt'
        bridge = 'br-salt'
        truthy_mock = MagicMock(return_value=True)
        ports_with_salt = MagicMock(return_value=['salt'])
        no_ports = MagicMock(return_value=[])

        # Case 1: the port is already on the bridge -> no changes reported.
        funcs = {'openvswitch.bridge_exists': truthy_mock,
                 'openvswitch.port_list': ports_with_salt}
        with patch.dict(openvswitch_port.__salt__, funcs):
            expected = {'name': name,
                        'result': True,
                        'comment': 'Port salt already exists.',
                        'changes': {}}
            self.assertDictEqual(
                openvswitch_port.present(name, bridge), expected)

        # Case 2: the port is missing -> it is created and recorded.
        funcs = {'openvswitch.bridge_exists': truthy_mock,
                 'openvswitch.port_list': no_ports,
                 'openvswitch.port_add': truthy_mock}
        with patch.dict(openvswitch_port.__salt__, funcs):
            expected = {'name': name,
                        'result': True,
                        'comment': 'Port salt created on bridge br-salt.',
                        'changes': {
                            'salt': {
                                'new': 'Created port salt on bridge br-salt.',
                                'old': 'No port named salt present.',
                            },
                        }}
            self.assertDictEqual(
                openvswitch_port.present(name, bridge), expected)

        # Case 3: a GRE tunnel is requested -> tunnel interface is created.
        funcs = {'openvswitch.bridge_exists': truthy_mock,
                 'openvswitch.port_list': no_ports,
                 'openvswitch.port_add': truthy_mock,
                 'openvswitch.interface_get_options': no_ports,
                 'openvswitch.interface_get_type': MagicMock(return_value=''),
                 'openvswitch.port_create_gre': truthy_mock,
                 'dig.check_ip': truthy_mock}
        with patch.dict(openvswitch_port.__salt__, funcs):
            self.maxDiff = None
            gre_msg = ('Created GRE tunnel interface salt with remote ip '
                       '10.0.0.1 and key 1 on bridge br-salt.')
            expected = {'name': name,
                        'result': True,
                        'comment': gre_msg,
                        'changes': {
                            'salt': {
                                'new': gre_msg,
                                'old': ('No GRE tunnel interface salt with '
                                        'remote ip 10.0.0.1 and key 1 on '
                                        'bridge br-salt present.'),
                            },
                        }}
            self.assertDictEqual(
                openvswitch_port.present(name, bridge, tunnel_type="gre",
                                         id=1, remote="10.0.0.1"),
                expected)
# Allow running this test module directly: import the project's test runner
# and execute the case standalone (no running salt daemon is required).
if __name__ == '__main__':
    from integration import run_tests
    run_tests(OpenvswitchPortTestCase, needs_daemon=False)

View File

@ -117,6 +117,7 @@ class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(True, 'Skipping flaky test until Jenkins is moved to C7.')
@skipIf(ON_SUSE, 'Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed')
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
def setUp(self):
@ -142,6 +143,9 @@ class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
'''
Test a variety of bad requests, make sure that we get some sort of error
'''
# TODO: This test should be re-enabled when Jenkins moves to C7.
# Once the version of salt-testing is increased to something newer than the September
# release of salt-testing, the @flaky decorator should be applied to this test.
msgs = ['', [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):

View File

@ -15,6 +15,7 @@ ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.ext.six as six
import salt.utils
import salt.utils.find
@ -122,11 +123,19 @@ class TestFind(TestCase):
min_size, max_size = salt.utils.find._parse_size('+1m')
self.assertEqual(min_size, 1048576)
self.assertEqual(max_size, sys.maxint)
# sys.maxint has been removed in Python3. Use maxsize instead.
if six.PY3:
self.assertEqual(max_size, sys.maxsize)
else:
self.assertEqual(max_size, sys.maxint)
min_size, max_size = salt.utils.find._parse_size('+1M')
self.assertEqual(min_size, 1048576)
self.assertEqual(max_size, sys.maxint)
# sys.maxint has been removed in Python3. Use maxsize instead.
if six.PY3:
self.assertEqual(max_size, sys.maxsize)
else:
self.assertEqual(max_size, sys.maxint)
def test_option_requires(self):
option = salt.utils.find.Option()

View File

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
from salt.ext.six import text_type as text
# Import Salt Libs
from salt.utils.sanitizers import clean
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SanitizersTestCase(TestCase):
    '''
    Unit tests for the input sanitizers in salt.utils.sanitizers.
    '''
    def test_sanitized_trim(self):
        '''
        clean.trim strips surrounding whitespace and returns unicode text.
        '''
        cleaned = clean.trim(u' sample ')
        assert cleaned == 'sample'
        assert type(cleaned) is text

    def test_sanitized_filename(self):
        '''
        clean.filename reduces absolute and relative paths to the basename.
        '''
        for dirty_path in ('/absolute/path/to/the/file.txt',
                           '../relative/path/to/the/file.txt'):
            assert clean.filename(dirty_path) == 'file.txt'

    def test_sanitized_hostname(self):
        '''
        clean.hostname drops whitespace and path-traversal characters.
        '''
        dirty = ' ../ ../some/dubious/hostname '
        assert clean.hostname(dirty) == 'somedubioushostname'

    # Minion IDs are sanitized with the same rules as hostnames.
    test_sanitized_id = test_sanitized_hostname
# Allow running this test module directly: import the project's test runner
# and execute the case standalone (no running salt daemon is required).
if __name__ == '__main__':
    from integration import run_tests
    run_tests(SanitizersTestCase, needs_daemon=False)