Merge branch 'ini_manage-dry-run' of https://github.com/slivik/salt into ini_manage-dry-run

This commit is contained in:
slivik 2018-06-08 10:45:42 +02:00
commit d837cba3a3
216 changed files with 12615 additions and 2362 deletions

4
.github/stale.yml vendored
View File

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 710 is approximately 1 year and 11 months
daysUntilStale: 710
# 690 is approximately 1 year and 11 months
daysUntilStale: 690
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7

2
.gitignore vendored
View File

@ -11,6 +11,8 @@ MANIFEST
*.wpr
*.wpu
*.DS_Store
.pytest_cache
Pipfile.lock
# virtualenv
# - ignores directories of a virtualenv when you create it right on

View File

@ -170,6 +170,7 @@ additional-builtins=__opts__,
__proxy__,
__serializers__,
__reg__,
__executors__,
__events__
# List of strings which can identify a callback function by name. A callback

View File

@ -267,6 +267,7 @@ additional-builtins=__opts__,
__proxy__,
__serializers__,
__reg__,
__executors__,
__events__

View File

@ -2,8 +2,8 @@
source 'https://rubygems.org'
gem 'test-kitchen', :git => 'https://github.com/gtmanfred/test-kitchen.git'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'test-kitchen', '~>1.21'
gem 'kitchen-salt', '~>0.2'
gem 'kitchen-sync'
gem 'git'
@ -20,7 +20,7 @@ group :windows do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
gem 'winrm', '~>2.0'
gem 'winrm-fs', :git => 'https://github.com/gtmanfred/winrm-fs.git'
gem 'winrm-fs', :git => 'https://github.com/WinRb/winrm-fs.git'
end
group :ec2 do

40
Pipfile Normal file
View File

@ -0,0 +1,40 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
Jinja2 = "*"
msgpack-python = ">0.3,!=0.5.5"
PyYAML = "*"
MarkupSafe = "*"
requests = ">=1.0.0"
tornado = ">=4.2.1,<5.0"
pycrypto = ">=2.6.1"
pyzmq = ">=2.2.0"
[dev-packages]
mock = ">=2.0.0"
apache-libcloud = ">=0.14.0"
boto = ">=2.32.1"
boto3 = ">=1.2.1"
moto = ">=0.3.6"
SaltPyLint = ">=v2017.3.6"
pytest = ">=3.5.0"
[packages.futures]
# Required by Tornado to handle threads stuff.
version = ">=2.0"
markers = "python_version < '3.0'"
[dev-packages.pytest-salt]
git = "git://github.com/saltstack/pytest-salt.git"
ref = "master"
[dev-packages.httpretty]
# httpretty Needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
# satisfy other requirements, and httpretty 0.8.10 has bugs in setup.py that
# prevent it from being successfully installed (at least on Python 3.4).
version = "*"
markers = "python_version >= '3.4'"

View File

@ -546,6 +546,10 @@
# targeted with the normal -N argument to salt-ssh.
#ssh_list_nodegroups: {}
# salt-ssh has the ability to update the flat roster file if a minion is not
# found in the roster. Set this to True to enable it.
#ssh_update_roster: False
##### Master Module Management #####
##########################################
# Manage how master side modules are loaded.

View File

@ -341757,7 +341757,6 @@ netacl_example:
Or:
.INDENT 7.0
.INDENT 3.5
.sp
.nf
.ft C
netacl_example:

View File

@ -112,6 +112,10 @@ Authentication Options
Specify the SSH private key file to be used for authentication.
.. option:: --priv-passwd=SSH_PRIV_PASSWD
Specify the SSH private key file's passphrase if need be.
.. option:: -i, --ignore-host-keys
By default ssh host keys are honored and connections will ask for

View File

@ -1141,6 +1141,19 @@ The ssh password to log in with.
ssh_passwd: ''
.. conf_master:: ssh_priv_passwd
``ssh_priv_passwd``
-------------------
Default: ``''``
Passphrase for ssh private key file.
.. code-block:: yaml
ssh_priv_passwd: ''
.. conf_master:: ssh_port
``ssh_port``

View File

@ -2795,7 +2795,7 @@ The level of messages to send to the console. See also :conf_log:`log_level`.
``log_level_logfile``
---------------------
Default: ``info``
Default: ``warning``
The level of messages to send to the log file. See also
:conf_log:`log_level_logfile`. When it is not set explicitly
@ -3275,3 +3275,31 @@ URL of the repository:
Replace ``<commit_id>`` with the SHA1 hash of a commit ID. Specifying a commit
ID is useful in that it allows one to revert back to a previous version in the
event that an error is introduced in the latest revision of the repo.
``ssh_merge_pillar``
--------------------
.. versionadded:: 2018.3.2
Default: ``True``
Merges the compiled pillar data with the pillar data already available globally.
This is useful when using ``salt-ssh`` or ``salt-call --local`` and overriding the pillar
data in a state file:
.. code-block:: yaml
apply_showpillar:
module.run:
- name: state.apply
- mods:
- showpillar
- kwargs:
pillar:
test: "foo bar"
If set to ``True`` the ``showpillar`` state will have access to the
global pillar data.
If set to ``False`` only the overriding pillar data will be available
to the ``showpillar`` state.

View File

@ -11,5 +11,6 @@ executors modules
:template: autosummary.rst.tmpl
direct_call
docker
splay
sudo

View File

@ -0,0 +1,6 @@
salt.executors.docker module
============================
.. automodule:: salt.executors.docker
:members:

View File

@ -83,3 +83,7 @@ option set by commandline or API ``data.get('executor_opts',
{}).get('splaytime')`` should be used. So if an option is safe and must be
accessible by user executor should check it in both places, but if an option is
unsafe it should be read from the only config ignoring the passed request data.
There is also a function named ``all_missing_func`` to which the name of the
``func`` is passed; it can be used to verify whether the command should still be
run, even if it is not loaded in minion_mods.

View File

@ -1,6 +1,6 @@
=====================
==================
salt.modules.swarm
=====================
==================
.. automodule:: salt.modules.swarm
:memebers:
:members:

View File

@ -861,6 +861,17 @@ Reload
after a state finishes. ``reload_pillar`` and ``reload_grains`` can also be set.
See :ref:`Reloading Modules <reloading-modules>`.
.. code-block:: yaml
grains_refresh:
module.run:
- name: saltutil.refresh_grains
- reload_grains: true
grains_read:
module.run:
- name: grains.items
.. _unless-requisite:
Unless

View File

@ -40,8 +40,9 @@ Beacons are typically enabled by placing a ``beacons:`` top level block in
beacons:
inotify:
/etc/important_file: {}
/opt: {}
- files:
/etc/important_file: {}
/opt: {}
The beacon system, like many others in Salt, can also be configured via the
minion pillar, grains, or local config file.
@ -50,6 +51,8 @@ minion pillar, grains, or local config file.
The `inotify` beacon only works on OSes that have `inotify` kernel support.
Currently this excludes FreeBSD, macOS, and Windows.
All beacon configuration is done using list-based configuration.
Beacon Monitoring Interval
--------------------------
@ -61,21 +64,23 @@ and 10-second intervals:
beacons:
inotify:
/etc/important_file: {}
/opt: {}
interval: 5
disable_during_state_run: True
- files:
/etc/important_file: {}
/opt: {}
- interval: 5
- disable_during_state_run: True
load:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
interval: 10
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- interval: 10
.. _avoid-beacon-event-loops:
@ -96,8 +101,9 @@ which point the normal beacon interval will resume.
beacons:
inotify:
/etc/important_file: {}
disable_during_state_run: True
- files:
/etc/important_file: {}
- disable_during_state_run: True
.. _beacon-example:
@ -137,10 +143,11 @@ On the Salt minion, add the following configuration to
beacons:
inotify:
/etc/important_file:
mask:
- modify
disable_during_state_run: True
- files:
/etc/important_file:
mask:
- modify
- disable_during_state_run: True
Save the configuration file and restart the minion service. The beacon is now
set up to notify salt upon modifications made to the file.

View File

@ -235,9 +235,10 @@ etc) will be created in.
network_resource_group
----------------------
Optional. If specified, then the VM will be connected to the network resources
in this group, rather than the group that it was created in. The VM interfaces
and IPs will remain in the configured ``resource_group`` with the VM.
Optional. If specified, then the VM will be connected to the virtual network
in this resource group, rather than the parent resource group of the instance.
The VM interfaces and IPs will remain in the configured ``resource_group`` with
the VM.
network
-------
@ -286,6 +287,24 @@ availability_set
----------------
Optional. If set, the VM will be added to the specified availability set.
volumes
-------
Optional. A list of dictionaries describing data disks to attach to the instance can
be specified using this setting. The data disk dictionaries are passed entirely to the
`Azure DataDisk object
<https://docs.microsoft.com/en-us/python/api/azure.mgmt.compute.v2017_12_01.models.datadisk?view=azure-python>`_
, so ad-hoc options can be handled as long as they are valid properties of the object.
.. code-block:: yaml
volumes:
- disk_size_gb: 50
caching: ReadWrite
- disk_size_gb: 100
caching: ReadWrite
managed_disk:
storage_account_type: Standard_LRS
cleanup_disks
-------------
Optional. Default is ``False``. If set to ``True``, disks will be cleaned up

View File

@ -173,8 +173,8 @@ Pillar Configuration
====================
It is possible to configure cloud providers using pillars. This is only used when inside the cloud
module. You can setup a variable called ``cloud`` that contains your profile and provider to pass
that information to the cloud servers instead of having to copy the full configuration to every
module. You can setup a variable called ``cloud`` that contains your profile, provider, and map to
pass that information to the cloud servers instead of having to copy the full configuration to every
minion. In your pillar file, you would use something like this:
.. code-block:: yaml
@ -198,6 +198,26 @@ minion. In your pillar file, you would use something like this:
image: CentOS 7
script_args: git develop
maps:
my-dev-map:
ubuntu-openstack:
- dev-test01
- dev-test02
- dev-test03
- dev-test04
my-prd-map:
ubuntu-openstack:
- prd-web01
- prd-web02
minion:
id: custom-minion-id-app1-stack1-frontend
grains:
roles:
- webserver
deployment: datacenter4-openstack
- prod-db01
- prod-db02
Cloud Configurations
====================
@ -345,7 +365,7 @@ OpenStack
---------
Using Salt for OpenStack uses the `shade <https://docs.openstack.org/shade/latest/>` driver managed by the
openstack-infra team.
openstack-infra team.
This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`

View File

@ -44,7 +44,7 @@ at ``/etc/salt/cloud.profiles`` or ``/etc/salt/cloud.profiles.d/*.conf``:
linode_1024:
provider: my-linode-config
size: Linode 2048
size: Linode 2GB
image: CentOS 7
location: London, England, UK
@ -77,12 +77,14 @@ command:
----------
linode:
----------
Linode 1024:
Linode 2GB:
----------
AVAIL:
----------
10:
500
11:
500
2:
500
3:
@ -100,11 +102,19 @@ command:
CORES:
1
DISK:
24
50
HOURLY:
0.015
LABEL:
Linode 1024
Linode 2GB
PLANID:
2
PRICE:
10.0
RAM:
2048
XFER:
2000
...SNIP...

View File

@ -88,9 +88,9 @@ Any top level data element from your profile may be overridden in the map file:
fedora_small:
- web1:
size: t2.micro
size: t2.micro
- web2:
size: t2.nano
size: t2.nano
As of Salt 2017.7.0, nested elements are merged, and can be specified
individually without having to repeat the complete definition for each top

View File

@ -135,17 +135,6 @@ provider, profile, or map blocks use ssh_port option.
ssh_port: 2222
SSH Port
========
By default ssh port is set to port 22. If you want to use a custom port in
provider, profile, or map blocks use ssh_port option.
.. code-block:: yaml
ssh_port: 2222
Delete SSH Keys
===============
When Salt Cloud deploys an instance, the SSH pub key for the instance is added

View File

@ -269,6 +269,11 @@ grain will override that core grain. Similarly, grains from
``/etc/salt/minion`` override both core grains and custom grain modules, and
grains in ``_grains`` will override *any* grains of the same name.
For custom grains, if the function takes an argument ``grains``, then the
previously rendered grains will be passed in. Because the rest of the grains
could be rendered in any order, the only grains that can be relied upon to be
passed in are ``core`` grains. This was added in the Fluorine release.
Examples of Grains
==================

View File

@ -6,7 +6,7 @@ Debian GNU/Linux / Raspbian
Debian GNU/Linux distribution and some derivatives such as Raspbian already
have included Salt packages to their repositories. However, current stable
release codenamed "Jessie" contains old outdated Salt release. It is
Debian release contains old outdated Salt releases. It is
recommended to use SaltStack repository for Debian as described
:ref:`below <installation-debian-repo>`.
@ -33,11 +33,13 @@ Instructions are at https://repo.saltstack.com/#debian.
Installation from the Debian / Raspbian Official Repository
===========================================================
Stretch (Testing) and Sid (Unstable) distributions are already contain mostly
up-to-date Salt packages built by Debian Salt Team. You can install Salt
components directly from Debian.
The Debian distributions contain mostly old Salt packages
built by the Debian Salt Team. You can install Salt
components directly from Debian but it is recommended to
use the instructions above for the packages from the official
Salt repository.
On Jessie (Stable) there is an option to install Salt minion from Stretch with
On Jessie there is an option to install Salt minion from Stretch with
`python-tornado` dependency from `jessie-backports` repositories.
To install fresh release of Salt minion on Jessie:
@ -79,7 +81,7 @@ To install fresh release of Salt minion on Jessie:
apt-get update
apt-get install python-zmq python-tornado/stretch salt-common/stretch
#. Install Salt minion package from Stretch:
#. Install Salt minion package from Latest Debian Release:
.. code-block:: bash

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,17 @@
========================================
In Progress: Salt 2017.7.7 Release Notes
========================================
Version 2017.7.7 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.
New win_snmp behavior
---------------------
`win_snmp.get_community_names` now returns the SNMP settings actually in effect
on the box. If settings are managed via GroupPolicy, those settings will be
returned. Otherwise, normal settings are returned.
`win_snmp.set_community_names` now raises a CommandExecutionError when SNMP
settings are being managed by GroupPolicy

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,16 @@
========================================
In Progress: Salt 2018.3.2 Release Notes
========================================
Version 2018.3.2 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
Changes to win_timezone
=======================
Improves timezone detection by using the pytz module.
``timezone.get_offset`` and ``timezone.get_zonecode`` now work properly.
Adds ``timezone.list`` to list supported timezones in either Windows or Unix
format.

View File

@ -4,6 +4,34 @@
Salt Release Notes - Codename Fluorine
======================================
New Docker Proxy Minion
-----------------------
Docker containers can now be treated as actual minions without installing salt
in the container, using the new :py:mod:`docker proxy minion <salt.proxy.docker>`.
This proxy minion uses the :py:mod:`docker executor <salt.executors.docker>` to
pass commands to the docker container using :py:func:`docker.call
<salt.modules.dockermod.call>`. Any state module calls are passed through the
corresponding function from the :py:mod:`docker <salt.modules.dockermod>`
module.
.. code-block:: yaml
proxy:
proxytype: docker
name: keen_proskuriakova
Grains Dictionary Passed into Custom Grains
-------------------------------------------
Starting in this release, if a custom grains function accepts a variable named
``grains``, the Grains dictionary of the already compiled grains will be passed
in. Because of the non-deterministic order that grains are rendered in, the
only grains that can be relied upon to be passed in are ``core.py`` grains,
since those are compiled first.
"Virtual Package" Support Dropped for APT
-----------------------------------------
@ -471,6 +499,12 @@ a minimal tarball using runners and include that. But this is only possible, whe
Salt version is also available on the Master machine, although does not need to be directly
installed together with the older Python interpreter.
SaltSSH now supports a private key's passphrase. You can configure it by:
* `--priv-passwd` for salt-ssh cli
* `salt_priv_passwd` for the salt master configuration file
* `priv_passwd` for salt roster file
========================
Salt-Cloud major updates
@ -508,6 +542,12 @@ it is not possible to pass ``test=False`` on the command-line to override a
minion in permanent test mode and so the ``test:False`` option must still be set
in the orchestration file.
states.event
--------------
The :ref:`event.send <salt.states.event.send>` state does not know the results of
the sent event, so returns changed every state run. It can now be set to
return changed or unchanged.
============================
LDAP External Authentication
============================

View File

@ -53,6 +53,7 @@ The information which can be stored in a roster ``target`` is the following:
priv: # File path to ssh private key, defaults to salt-ssh.rsa
# The priv can also be set to agent-forwarding to not specify
# a key, but use ssh agent forwarding
priv_passwd: # Passphrase for ssh private key
timeout: # Number of seconds to wait for response when establishing
# an SSH connection
minion_opts: # Dictionary of minion opts

View File

@ -13,17 +13,17 @@
_salt_get_grains(){
if [ "$1" = 'local' ] ; then
salt-call --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
salt-call --log-level=error --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
else
salt '*' --timeout 2 --hide-timeout --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
salt '*' --timeout 2 --hide-timeout --log-level=error --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
fi
}
_salt_get_grain_values(){
if [ "$1" = 'local' ] ; then
salt-call --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
salt-call --log-level=error --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
else
salt '*' --timeout 2 --hide-timeout --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
salt '*' --timeout 2 --hide-timeout --log-level=error --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
fi
}
@ -34,8 +34,24 @@ _salt_get_keys(){
done
}
_salt(){
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
_salt_list_functions(){
# salt-call: get all functions on this minion
# salt: get all functions on all minions
# sed: remove all array overhead and convert to newline separated list
# sort: chop out doubled entries, so overhead is minimal later during actual completion
if [ "$1" = 'local' ] ; then
salt-call --log-level=quiet --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u
else
salt '*' --timeout 2 --hide-timeout --log-level=quiet --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u
fi
}
_salt_get_coms() {
CACHE_DIR="$HOME/.cache/salt-${1}-comp-cache_functions"
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
@ -43,6 +59,17 @@ _salt(){
mkdir -p "$(dirname ${_salt_cache_functions})"
fi
# Regenerate cache if timed out
if [[ "$(stat --format=%Z ${_salt_cache_functions} 2>/dev/null)" -lt "$(date --date="${_salt_cache_timeout}" +%s)" ]]; then
_salt_list_functions $1 > "${_salt_cache_functions}"
fi
# filter results, to only print the part to next dot (or end of function)
sed 's/^\('${cur}'\(\.\|[^.]*\)\)\?.*/\1/' "${_salt_cache_functions}" | sort -u
}
_salt(){
local cur prev opts _salt_grains _salt_coms pprev ppprev
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
@ -129,22 +156,10 @@ _salt(){
;;
esac
# Regenerate cache if timed out
if [[ "$(stat --format=%Z ${_salt_cache_functions} 2>/dev/null)" -lt "$(date --date="${_salt_cache_timeout}" +%s)" ]]; then
# salt: get all functions on all minions
# sed: remove all array overhead and convert to newline separated list
# sort: chop out doubled entries, so overhead is minimal later during actual completion
salt '*' --timeout 2 --hide-timeout --out=txt -- sys.list_functions \
| sed "s/^.*\[//;s/[],']//g;s/ /\n/g" \
| sort -u \
> "${_salt_cache_functions}"
fi
# filter results, to only print the part to next dot (or end of function)
_salt_coms="$(sed 's/^\('${cur}'\(\.\|[^.]*\)\)\?.*/\1/' "${_salt_cache_functions}" | sort -u)"
_salt_coms=$(_salt_get_coms remote)
# If there are still dots in the suggestion, do not append space
grep "^${cur}.*\." "${_salt_cache_functions}" &>/dev/null && compopt -o nospace
grep "^${cur}.*\." "${_salt_coms}" &>/dev/null && compopt -o nospace
all="${opts} ${_salt_coms}"
COMPREPLY=( $(compgen -W "${all}" -- ${cur}) )
@ -276,7 +291,11 @@ _saltcall(){
;;
esac
_salt_coms="$(salt-call --out=txt -- sys.list_functions|sed 's/^.*\[//' | tr -d ",']" )"
_salt_coms=$(_salt_get_coms local)
# If there are still dots in the suggestion, do not append space
grep "^${cur}.*\." "${_salt_coms}" &>/dev/null && compopt -o nospace
COMPREPLY=( $(compgen -W "${opts} ${_salt_coms}" -- ${cur} ))
return 0
}

View File

@ -4,7 +4,7 @@ Documentation=man:salt-master(1) file:///usr/share/doc/salt/html/contents.html h
After=network.target
[Service]
LimitNOFILE=16384
LimitNOFILE=100000
Type=simple
ExecStart=/usr/bin/salt-master
TasksMax=infinity

165
pkg/windows/sign.bat Normal file
View File

@ -0,0 +1,165 @@
:: ############################################################################
::
:: FILE: sign.bat
::
:: DESCRIPTION: Signing and Hashing script for Salt builds on Windows.
:: Requires an official Code Signing Certificate and drivers
:: installed to sign the files. Generates hashes in MD5 and
:: SHA256 in a file of the same name with a `.md5` or
:: `.sha256` extension.
::
:: NOTE: This script is used internally by SaltStack to sign and
:: hash Windows Installer builds and uses resources not
:: available to the community, such as SaltStack's Code
:: Signing Certificate. It is placed here for version
:: control.
::
:: COPYRIGHT: (c) 2012-2018 by the SaltStack Team
::
:: LICENSE: Apache 2.0
:: ORGANIZATION: SaltStack, Inc (saltstack.com)
:: CREATED: 2017
::
:: ############################################################################
::
:: USAGE: The script must be located in a directory that has the installer
:: files in a subfolder named with the major version, ie: `2018.3`.
:: Insert the key fob that contains the code signing certificate. Run
:: the script passing the full version: `.\sign.bat 2018.3.1`.
::
:: The script will sign the installers and generate the corresponding
:: hash files. These can then be uploaded to the salt repo.
::
:: The files must be in the following format:
:: <Series>\Salt-Minion-<Version>-<Python Version>-<System Architecture>-Setup.exe
:: So, for a Salt Minion installer for 2018.3.1 on AMD64 for Python 3
:: file would be placed in a subdirectory named `2018.3` and the file
:: would be named: `Salt-Minion-2018.3.1-Py3-AMD64-Setup.exe`. This
:: is how the file is created by the NSI Script anyway.
::
:: ############################################################################
@ echo off
if [%1]==[] (
echo You must pass a version
goto quit
) else (
set "Version=%~1"
)
for /F "tokens=1,2 delims=." %%a in ("%Version%") do (set Series=%%a.%%b)
:: Sign Installer Files
echo ===========================================================================
echo Signing...
echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
signtool.exe sign /t http://timestamp.digicert.com ^
"%Series%\Salt-Minion-%Version%-AMD64-Setup.exe" ^
"%Series%\Salt-Minion-%Version%-x86-Setup.exe" ^
"%Series%\Salt-%Version%-AMD64-Setup.exe" ^
"%Series%\Salt-%Version%-x86-Setup.exe" ^
"%Series%\Salt-%Version%-Py2-AMD64-Setup.exe" ^
"%Series%\Salt-%Version%-Py2-x86-Setup.exe" ^
"%Series%\Salt-%Version%-Py3-AMD64-Setup.exe" ^
"%Series%\Salt-%Version%-Py3-x86-Setup.exe" ^
"%Series%\Salt-Minion-%Version%-Py2-AMD64-Setup.exe" ^
"%Series%\Salt-Minion-%Version%-Py2-x86-Setup.exe" ^
"%Series%\Salt-Minion-%Version%-Py3-AMD64-Setup.exe" ^
"%Series%\Salt-Minion-%Version%-Py3-x86-Setup.exe"
echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
echo Signing Complete
echo ===========================================================================
:: Create Hash files
echo ===========================================================================
echo Creating Hashes...
echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
set "file_name=Salt-Minion-%Version%-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-Minion-%Version%-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-Py2-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-Py2-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-Py3-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-%Version%-Py3-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-Minion-%Version%-Py2-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-Minion-%Version%-Py2-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-Minion-%Version%-Py3-AMD64-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
set "file_name=Salt-Minion-%Version%-Py3-x86-Setup.exe"
set "file=.\%Series%\%file_name%"
if exist "%file%" (
echo - %file_name%
powershell -c "$hash = (Get-FileHash -Algorithm MD5 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.md5\""
powershell -c "$hash = (Get-FileHash -Algorithm SHA256 \"%file%\").Hash; Out-File -InputObject $hash\" %file_name%\" -FilePath \"%file%.sha256\"")
echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
echo Hashing Complete
echo ===========================================================================
:quit

View File

@ -25,7 +25,7 @@ _modules(){
fi
if _cache_invalid salt/modules || ! _retrieve_cache salt/modules; then
_funcs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --out txt sys.list_functions)"}%%[],]##}#\[}:#local:} )
_funcs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --log-level error --out txt sys.list_functions)"}%%[],]##}#\[}:#local:} )
_store_cache salt/modules _funcs
fi
@ -40,7 +40,7 @@ _runners(){
fi
if _cache_invalid salt/runners || ! _retrieve_cache salt/runners; then
_runs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --out txt sys.list_runner_functions)"}%%[],]##}#\[}:#local:} )
_runs=( ${${(Q)${${(s. .)"$(_call_program salt-call-cmd salt-call --local --log-level error --out txt sys.list_runner_functions)"}%%[],]##}#\[}:#local:} )
_store_cache salt/runners _runs
fi

4
pytest.ini Normal file
View File

@ -0,0 +1,4 @@
[pytest]
addopts = --ssh-tests -ra -sv
testpaths = tests
norecursedirs = tests/kitchen

View File

@ -1,4 +0,0 @@
-r base.txt
# Required by Tornado to handle threads stuff.
futures>=2.0

View File

@ -1 +0,0 @@
-r base.txt

View File

@ -1,8 +1,12 @@
Jinja2
msgpack-python>0.3,!=0.5.5
# This should be changed to msgpack-python for Packages
# msgpack-python>0.3,!=0.5.5
msgpack>=0.5,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0
tornado>=4.2.1,<5.0
tornado>=4.2.1,<6.0; python_version < '3'
tornado>=4.2.1,<5.0; python_version >= '3.4'
# Required by Tornado to handle threads stuff.
futures>=2.0
futures>=2.0; python_version < '3.0'

17
requirements/dev.txt Normal file
View File

@ -0,0 +1,17 @@
-r base.txt
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
testinfra>=1.7.0
# httpretty Needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
# satisfy other requirements, and httpretty 0.8.10 has bugs in setup.py that
# prevent it from being successfully installed (at least on Python 3.4).
httpretty; python_version >= '3.4'

View File

@ -1,12 +1,2 @@
-r base-py2.txt
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest>=3.5.0
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
testinfra>=1.7.0
# This is a legacy file, use dev.txt
-r dev.txt

View File

@ -1,17 +1,2 @@
-r base-py3.txt
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
# httpretty Needs to be here for now even though it's a dependency of boto.
# A pip install on a fresh system will decide to target httpretty 0.8.10 to
# satisfy other requirements, and httpretty 0.8.10 has bugs in setup.py that
# prevent it from being successfully installed (at least on Python 3.4).
httpretty
SaltPyLint>=v2017.2.29
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt
git+https://github.com/eisensheng/pytest-catchlog.git@develop#egg=Pytest-catchlog
testinfra>=1.7.0
# This is a legacy file, use dev.txt
-r dev.txt

View File

@ -182,7 +182,7 @@ def beacon(config):
for item in _config['txt']:
changes_key = 'txt.' + salt.utils.stringutils.to_unicode(item)
if _config['txt'][item].startswith('grains.'):
grain = _config['txt'][item][6:]
grain = _config['txt'][item][7:]
grain_index = None
square_bracket = grain.find('[')
if square_bracket != -1 and grain[-1] == ']':

View File

@ -10,10 +10,10 @@ Beacon to fire events at failed login of users
# Import python libs
from __future__ import absolute_import, unicode_literals
import datetime
import logging
import os
import struct
import time
# Import Salt Libs
import salt.utils.stringutils
@ -59,13 +59,51 @@ def __virtual__():
return False
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
return status, msg
def _gather_group_members(group, groups, users):
'''
Gather group members
'''
_group = __salt__['group.info'](group)
if not _group:
log.warning('Group %s does not exist, ignoring.', group)
return
for member in _group['members']:
if member not in users:
users[member] = groups[group]
def _check_time_range(time_range, now):
'''
Check time range
'''
if _TIME_SUPPORTED:
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
_start = dateutil_parser.parse(time_range['start'])
_end = dateutil_parser.parse(time_range['end'])
return bool(_start <= now <= _end)
else:
@ -85,53 +123,63 @@ def validate(config):
'''
Validate the beacon configuration
'''
vstatus = True
vmsg = 'Valid beacon configuration'
# Configuration for load beacon should be a list of dicts
if not isinstance(config, list):
return False, ('Configuration for btmp beacon must '
'be a list.')
vstatus = False
vmsg = ('Configuration for btmp beacon must '
'be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'users' in _config:
if not isinstance(_config['users'], dict):
return False, ('User configuration for btmp beacon must '
'be a dictionary.')
vstatus = False
vmsg = ('User configuration for btmp beacon must '
'be a dictionary.')
else:
for user in _config['users']:
if _config['users'][user] and \
'time_range' in _config['users'][user]:
_time_range = _config['users'][user]['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
_time_range = _config['users'][user].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
if 'groups' in _config:
if not isinstance(_config['groups'], dict):
vstatus = False
vmsg = ('Group configuration for btmp beacon must '
'be a dictionary.')
else:
for group in _config['groups']:
_time_range = _config['groups'][group].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
if 'defaults' in _config:
if not isinstance(_config['defaults'], dict):
return False, ('Defaults configuration for btmp beacon must '
'be a dictionary.')
vstatus = False
vmsg = ('Defaults configuration for btmp beacon must '
'be a dictionary.')
else:
if 'time_range' in _config['defaults']:
_time_range = _config['defaults']['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
_time_range = _config['defaults'].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
return True, 'Valid beacon configuration'
return vstatus, vmsg
# TODO: add support for only firing events for specific users and login times
def beacon(config):
'''
Read the last btmp file and return information on the failed logins
@ -161,16 +209,34 @@ def beacon(config):
time_range:
start: '8am'
end: '4pm'
beacons:
btmp:
- groups:
users:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
.. versionadded:: Fluorine
'''
ret = []
users = None
users = {}
groups = {}
defaults = None
for config_item in config:
if 'users' in config_item:
users = config_item['users']
if 'groups' in config_item:
groups = config_item['groups']
if 'defaults' in config_item:
defaults = config_item['defaults']
@ -183,7 +249,7 @@ def beacon(config):
else:
fp_.seek(loc)
while True:
now = int(time.time())
now = datetime.datetime.now()
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret
@ -197,6 +263,9 @@ def beacon(config):
event[field] = salt.utils.stringutils.to_unicode(event[field])
event[field] = event[field].strip('\x00')
for group in groups:
_gather_group_members(group, groups, users)
if users:
if event['user'] in users:
_user = users[event['user']]

View File

@ -58,14 +58,18 @@ def validate(config):
_config = {}
list(map(_config.update, config))
for item in _config.get('interfaces', {}):
interfaces = _config.get('interfaces', {})
if isinstance(interfaces, list):
#Old syntax
return False, ('interfaces section for network_settings beacon'
' must be a dictionary.')
for item in interfaces:
if not isinstance(_config['interfaces'][item], dict):
return False, ('Configuration for network_settings beacon '
' must be a list of dictionaries.')
else:
if not all(j in ATTRS for j in _config['interfaces'][item]):
return False, ('Invalid configuration item in Beacon '
'configuration.')
return False, ('Interface attributes for network_settings beacon'
' must be a dictionary.')
if not all(j in ATTRS for j in _config['interfaces'][item]):
return False, ('Invalid attributes in beacon configuration.')
return True, 'Valid beacon configuration'
@ -103,12 +107,12 @@ def beacon(config):
beacons:
network_settings:
- interfaces:
- eth0:
ipaddr:
promiscuity:
onvalue: 1
- eth1:
linkmode:
eth0:
ipaddr:
promiscuity:
onvalue: 1
eth1:
linkmode:
The config above will check for value changes on eth0 ipaddr and eth1 linkmode. It will also
emit if the promiscuity value changes to 1.
@ -125,9 +129,9 @@ def beacon(config):
network_settings:
- coalesce: True
- interfaces:
- eth0:
ipaddr:
promiscuity:
eth0:
ipaddr:
promiscuity:
'''
_config = {}

View File

@ -10,10 +10,10 @@ Beacon to fire events at login of users as registered in the wtmp file
# Import Python libs
from __future__ import absolute_import, unicode_literals
import datetime
import logging
import os
import struct
import time
# Import salt libs
import salt.utils.stringutils
@ -59,13 +59,51 @@ def __virtual__():
return False
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return status, msg
def _gather_group_members(group, groups, users):
'''
Gather group members
'''
_group = __salt__['group.info'](group)
if not _group:
log.warning('Group %s does not exist, ignoring.', group)
return
for member in _group['members']:
if member not in users:
users[member] = groups[group]
def _check_time_range(time_range, now):
'''
Check time range
'''
if _TIME_SUPPORTED:
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
_start = dateutil_parser.parse(time_range['start'])
_end = dateutil_parser.parse(time_range['end'])
return bool(_start <= now <= _end)
else:
@ -85,51 +123,62 @@ def validate(config):
'''
Validate the beacon configuration
'''
vstatus = True
vmsg = 'Valid beacon configuration'
# Configuration for wtmp beacon should be a list of dicts
if not isinstance(config, list):
return False, ('Configuration for wtmp beacon must be a list.')
vstatus = False
vmsg = ('Configuration for wtmp beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'users' in _config:
if not isinstance(_config['users'], dict):
return False, ('User configuration for btmp beacon must '
'be a dictionary.')
vstatus = False
vmsg = ('User configuration for wtmp beacon must '
'be a dictionary.')
else:
for user in _config['users']:
if _config['users'][user] and \
'time_range' in _config['users'][user]:
_time_range = _config['users'][user]['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
_time_range = _config['users'][user].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
if 'groups' in _config:
if not isinstance(_config['groups'], dict):
vstatus = False
vmsg = ('Group configuration for wtmp beacon must '
'be a dictionary.')
else:
for group in _config['groups']:
_time_range = _config['groups'][group].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
if 'defaults' in _config:
if not isinstance(_config['defaults'], dict):
return False, ('Defaults configuration for btmp beacon must '
'be a dictionary.')
vstatus = False
vmsg = ('Defaults configuration for wtmp beacon must '
'be a dictionary.')
else:
if 'time_range' in _config['defaults']:
_time_range = _config['defaults']['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
return True, 'Valid beacon configuration'
_time_range = _config['defaults'].get('time_range', {})
vstatus, vmsg = _validate_time_range(_time_range,
vstatus,
vmsg)
if not vstatus:
return vstatus, vmsg
return vstatus, vmsg
# TODO: add support for only firing events for specific users and login times
def beacon(config):
'''
Read the last wtmp file and return information on the logins
@ -159,16 +208,34 @@ def beacon(config):
time_range:
start: '8am'
end: '4pm'
beacons:
wtmp:
- groups:
users:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
.. versionadded:: Fluorine
'''
ret = []
users = None
users = {}
groups = {}
defaults = None
for config_item in config:
if 'users' in config_item:
users = config_item['users']
if 'groups' in config_item:
groups = config_item['groups']
if 'defaults' in config_item:
defaults = config_item['defaults']
@ -181,7 +248,7 @@ def beacon(config):
else:
fp_.seek(loc)
while True:
now = int(time.time())
now = datetime.datetime.now()
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret
@ -195,6 +262,9 @@ def beacon(config):
event[field] = salt.utils.stringutils.to_unicode(event[field])
event[field] = event[field].strip('\x00')
for group in groups:
_gather_group_members(group, groups, users)
if users:
if event['user'] in users:
_user = users[event['user']]

View File

@ -401,6 +401,8 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
retcode = 0
# if there is a dict with retcode, use that
if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
if isinstance(ret.get('retcode', 0), dict):
return max(six.itervalues(ret.get('retcode', {0: 0})))
return ret['retcode']
# if its a boolean, False means 1
elif isinstance(ret, bool) and not ret:

View File

@ -281,6 +281,10 @@ class SSH(object):
salt.config.DEFAULT_MASTER_OPTS['ssh_passwd']
),
'priv': priv,
'priv_passwd': self.opts.get(
'ssh_priv_passwd',
salt.config.DEFAULT_MASTER_OPTS['ssh_priv_passwd']
),
'timeout': self.opts.get(
'ssh_timeout',
salt.config.DEFAULT_MASTER_OPTS['ssh_timeout']
@ -407,7 +411,7 @@ class SSH(object):
'host': hostname,
'user': user,
}
if not self.opts.get('ssh_skip_roster'):
if self.opts.get('ssh_update_roster'):
self._update_roster()
def get_pubkey(self):
@ -824,6 +828,7 @@ class Single(object):
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=30,
sudo=False,
tty=False,
@ -885,6 +890,7 @@ class Single(object):
'port': port,
'passwd': passwd,
'priv': priv,
'priv_passwd': priv_passwd,
'timeout': timeout,
'sudo': sudo,
'tty': tty,

View File

@ -24,6 +24,7 @@ log = logging.getLogger(__name__)
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M)
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*')
SSH_PRIVATE_KEY_PASSWORD_PROMPT_RE = re.compile(r'Enter passphrase for key', re.M)
# Keep these in sync with ./__init__.py
RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
@ -76,6 +77,7 @@ class Shell(object):
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=None,
sudo=False,
tty=False,
@ -92,6 +94,7 @@ class Shell(object):
self.port = port
self.passwd = six.text_type(passwd) if passwd else passwd
self.priv = priv
self.priv_passwd = priv_passwd
self.timeout = timeout
self.sudo = sudo
self.tty = tty
@ -399,6 +402,11 @@ class Shell(object):
if buff and RSTR_RE.search(buff):
# We're getting results back, don't try to send passwords
send_password = False
if buff and SSH_PRIVATE_KEY_PASSWORD_PROMPT_RE.search(buff):
if not self.priv_passwd:
return '', 'Private key file need passphrase', 254
term.sendline(self.priv_passwd)
continue
if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password:
if not self.passwd:
return '', 'Permission denied, no authentication information', 254

View File

@ -203,6 +203,10 @@ class CloudClient(object):
profile['profile'] = name
self.opts['profiles'].update({name: profile})
self.opts['providers'][provider][driver]['profiles'].update({name: profile})
for name, map_dct in six.iteritems(pillars.pop('maps', {})):
if 'maps' not in self.opts:
self.opts['maps'] = {}
self.opts['maps'][name] = map_dct
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
@ -356,7 +360,7 @@ class CloudClient(object):
def map_run(self, path=None, **kwargs):
'''
Pass in a location for a map to execute
To execute a map
'''
kwarg = {}
if path:
@ -1760,16 +1764,27 @@ class Map(Cloud):
def read(self):
'''
Read in the specified map file and return the map structure
Read in the specified map and return the map structure
'''
map_ = None
if self.opts.get('map', None) is None:
if self.opts.get('map_data', None) is None:
return {}
if self.opts.get('map_pillar', None) is None:
pass
elif self.opts.get('map_pillar') not in self.opts.get('maps'):
log.error(
'The specified map not found in pillar at \'cloud:maps:{0}\''.format(
self.opts['map_pillar'])
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts['maps'][self.opts.get('map_pillar')]
else:
# 'map_data' is provided, try to use it
map_ = self.opts['map_data']
if not map_:
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts)
@ -1802,10 +1817,13 @@ class Map(Cloud):
)
return {}
if 'include' in map_:
map_ = salt.config.include_config(
map_, self.opts['map'], verbose=False
)
if 'include' in map_:
map_ = salt.config.include_config(
map_, self.opts['map'], verbose=False
)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in six.iteritems(map_.copy()):

View File

@ -329,13 +329,17 @@ def get_conn(client_type):
return client
def get_location(call=None): # pylint: disable=unused-argument
def get_location(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Return the location that is configured for this provider
'''
if not kwargs:
kwargs = {}
vm_dict = get_configured_provider()
vm_dict.update(kwargs)
return config.get_cloud_config_value(
'location',
get_configured_provider(), __opts__, search_global=False
vm_dict, __opts__, search_global=False
)
@ -766,20 +770,28 @@ def create_network_interface(call=None, kwargs=None):
)
if kwargs.get('network_resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
kwargs['network_resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=False
)
else:
kwargs['resource_group'] = kwargs['network_resource_group']
if kwargs.get('iface_name') is None:
kwargs['iface_name'] = '{0}-iface0'.format(vm_['name'])
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
try:
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['network_resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
except CloudError as exc:
raise SaltCloudSystemExit(
'{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format(
exc.message,
kwargs['network_resource_group'],
kwargs['network'],
kwargs['subnet']
)
)
ip_kwargs = {}
ip_configurations = None
@ -1158,11 +1170,11 @@ def request_instance(vm_):
volume['vhd'] = VirtualHardDisk(volume['vhd'])
if 'image' in volume:
volume['create_option'] = DiskCreateOptionTypes.from_image
volume['create_option'] = 'from_image'
elif 'attach' in volume:
volume['create_option'] = DiskCreateOptionTypes.attach
volume['create_option'] = 'attach'
else:
volume['create_option'] = DiskCreateOptionTypes.empty
volume['create_option'] = 'empty'
data_disks.append(DataDisk(**volume))
if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':
@ -1377,10 +1389,10 @@ def create(vm_):
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
)
location = get_location(vm_)
vm_['location'] = location
if not vm_.get('location'):
vm_['location'] = get_location(kwargs=vm_)
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])
vm_request = request_instance(vm_=vm_)

View File

@ -2913,6 +2913,15 @@ def stop(name, call=None):
instance_id = _get_node(name)['instanceId']
__utils__['cloud.fire_event'](
'event',
'stopping instance',
'salt/cloud/{0}/stopping'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {'Action': 'StopInstances',
'InstanceId.1': instance_id}
result = aws.query(params,
@ -2937,6 +2946,15 @@ def start(name, call=None):
instance_id = _get_node(name)['instanceId']
__utils__['cloud.fire_event'](
'event',
'starting instance',
'salt/cloud/{0}/starting'.format(name),
args={'name': name, 'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
params = {'Action': 'StartInstances',
'InstanceId.1': instance_id}
result = aws.query(params,

View File

@ -993,6 +993,59 @@ def get_password(vm_):
)
def _decode_linode_plan_label(label):
'''
Attempts to decode a user-supplied Linode plan label
into the format in Linode API output
label
The label, or name, of the plan to decode.
Example:
`Linode 2048` will decode to `Linode 2GB`
'''
sizes = avail_sizes()
if label not in sizes:
if 'GB' in label:
raise SaltCloudException(
'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
)
else:
plan = label.split()
if len(plan) != 2:
raise SaltCloudException(
'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
)
plan_type = plan[0]
try:
plan_size = int(plan[1])
except TypeError:
plan_size = 0
log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
if plan_type == 'Linode' and plan_size == 1024:
plan_type = 'Nanode'
plan_size = plan_size/1024
new_label = "{} {}GB".format(plan_type, plan_size)
if new_label not in sizes:
raise SaltCloudException(
'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
)
log.warning('An outdated Linode plan label was detected in your Cloud Profile ({}).'
' Please update the profile to use'
' the new label format ({}) for the requested Linode plan size.'.format(label, new_label))
label = new_label
return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
'''
Returns the Linode Plan ID.
@ -1020,7 +1073,9 @@ def get_plan_id(kwargs=None, call=None):
'The get_plan_id function requires a \'label\'.'
)
return avail_sizes()[label]['PLANID']
label = _decode_linode_plan_label(label)
return label
def get_private_ip(vm_):

View File

@ -991,6 +991,7 @@ VALID_OPTS = {
'ssh_identities_only': bool,
'ssh_log_file': six.string_types,
'ssh_config_file': six.string_types,
'ssh_merge_pillar': bool,
# Enable ioflo verbose logging. Warning! Very verbose!
'ioflo_verbose': int,
@ -1501,6 +1502,7 @@ DEFAULT_MINION_OPTS = {
},
'discovery': False,
'schedule': {},
'ssh_merge_pillar': True
}
DEFAULT_MASTER_OPTS = {
@ -1773,6 +1775,7 @@ DEFAULT_MASTER_OPTS = {
'syndic_jid_forward_cache_hwm': 100,
'regen_thin': False,
'ssh_passwd': '',
'ssh_priv_passwd': '',
'ssh_port': '22',
'ssh_sudo': False,
'ssh_sudo_user': '',
@ -2108,7 +2111,7 @@ def _validate_ssh_minion_opts(opts):
for opt_name in list(ssh_minion_opts):
if re.match('^[a-z0-9]+fs_', opt_name, flags=re.IGNORECASE) \
or 'pillar' in opt_name \
or ('pillar' in opt_name and not 'ssh_merge_pillar' == opt_name) \
or opt_name in ('fileserver_backend',):
log.warning(
'\'%s\' is not a valid ssh_minion_opts parameter, ignoring',
@ -3335,7 +3338,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
if isinstance(vm_[name], types.GeneratorType):
value = next(vm_[name], '')
else:
if isinstance(value, dict):
if isinstance(value, dict) and isinstance(vm_[name], dict):
value.update(vm_[name].copy())
else:
value = deepcopy(vm_[name])

View File

@ -54,6 +54,9 @@ class RosterEntryConfig(Schema):
priv = StringItem(title='Private Key',
description='File path to ssh private key, defaults to salt-ssh.rsa',
min_length=1)
priv_passwd = SecretItem(title='Private Key passphrase',
description='Passphrase for private key file',
min_length=1)
passwd_or_priv_requirement = AnyOfItem(items=(RequirementsItem(requirements=['passwd']),
RequirementsItem(requirements=['priv'])))(flatten=True)
sudo = BooleanItem(title='Sudo',

46
salt/executors/docker.py Normal file
View File

@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
'''
Docker executor module
.. versionadded: Fluorine
Used with the docker proxy minion.
'''
from __future__ import absolute_import, unicode_literals
__virtualname__ = 'docker'
DOCKER_MOD_MAP = {
'state.sls': 'docker.sls',
'state.apply': 'docker.apply',
'state.highstate': 'docker.highstate',
}
def __virtual__():
if 'proxy' not in __opts__:
return False, 'Docker executor is only meant to be used with Docker Proxy Minions'
if __opts__.get('proxy', {}).get('proxytype') != __virtualname__:
return False, 'Proxytype does not match: {0}'.format(__virtualname__)
return True
def execute(opts, data, func, args, kwargs):
'''
Directly calls the given function with arguments
'''
if data['fun'] == 'saltutil.find_job':
return __executors__['direct_call.execute'](opts, data, func, args, kwargs)
if data['fun'] in DOCKER_MOD_MAP:
return __executors__['direct_call.execute'](opts, data, __salt__[DOCKER_MOD_MAP[data['fun']]], [opts['proxy']['name']] + args, kwargs)
return __salt__['docker.call'](opts['proxy']['name'], data['fun'], *args, **kwargs)
def allow_missing_func(function): # pylint: disable=unused-argument
'''
Allow all calls to be passed through to docker container.
The docker call will use direct_call, which will return back if the module
was unable to be run.
'''
return True

View File

@ -1389,14 +1389,12 @@ class RemoteClient(Client):
'''
Return the metadata derived from the master_tops system
'''
salt.utils.versions.warn_until(
'Magnesium',
'The _ext_nodes master function has '
'been renamed to _master_tops. To ensure '
'compatibility when using older Salt masters '
'we continue to pass the function as _ext_nodes.'
log.debug(
'The _ext_nodes master function has been renamed to _master_tops. '
'To ensure compatibility when using older Salt masters we will '
'continue to invoke the function as _ext_nodes until the '
'Magnesium release.'
)
# TODO: Change back to _master_tops
# for Magnesium release
load = {'cmd': '_ext_nodes',

View File

@ -850,7 +850,8 @@ class FSChan(object):
self.opts['__fs_update'] = True
else:
self.fs.update()
self.cmd_stub = {'master_tops': {}}
self.cmd_stub = {'master_tops': {},
'ext_nodes': {}}
def send(self, load, tries=None, timeout=None, raw=False): # pylint: disable=unused-argument
'''

View File

@ -253,7 +253,7 @@ def file_hash(load, fnd):
except OSError:
pass
return file_hash(load, fnd)
if os.path.getmtime(path) == mtime:
if str(os.path.getmtime(path)) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret

View File

@ -399,6 +399,32 @@ def _sunos_cpudata():
return grains
def _aix_cpudata():
'''
Return CPU information for AIX systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains
def _linux_memdata():
'''
Return the memory information for Linux-like systems
@ -494,6 +520,34 @@ def _sunos_memdata():
return grains
def _aix_memdata():
'''
Return the memory information for AIX systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
prtconf = salt.utils.path.which('prtconf')
if prtconf:
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = [x for x in line.strip().split(' ') if x]
if len(comps) > 2 and 'Memory' in comps[0] and 'Size' in comps[1]:
grains['mem_total'] = int(comps[2])
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
swap_cmd = salt.utils.path.which('swap')
if swap_cmd:
swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
grains['swap_total'] = swap_total
else:
log.error('The \'swap\' binary was not found in $PATH.')
return grains
def _windows_memdata():
'''
Return the memory information for Windows systems
@ -522,11 +576,32 @@ def _memdata(osdata):
grains.update(_osx_memdata())
elif osdata['kernel'] == 'SunOS':
grains.update(_sunos_memdata())
elif osdata['kernel'] == 'AIX':
grains.update(_aix_memdata())
elif osdata['kernel'] == 'Windows' and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
'''
Parse the output of lsattr -El sys0 for os_uuid
'''
grains = {}
cmd = salt.utils.path.which('lsattr')
if cmd:
data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['machine_id'] = res.group(1).strip()
break
else:
log.error('The \'lsattr\' binary was not found in $PATH.')
return grains
def _windows_virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
@ -605,11 +680,7 @@ def _virtual(osdata):
if not salt.utils.platform.is_windows() and osdata['kernel'] not in skip_cmds:
if salt.utils.path.which('virt-what'):
_cmds = ['virt-what']
else:
log.debug(
'Please install \'virt-what\' to improve results of the '
'\'virtual\' grain.'
)
# Check if enable_lspci is True or False
if __opts__.get('enable_lspci', True) is True:
# /proc/bus/pci does not exists, lspci will fail
@ -789,14 +860,6 @@ def _virtual(osdata):
elif command == 'virtinfo':
grains['virtual'] = 'LDOM'
break
else:
if osdata['kernel'] not in skip_cmds:
log.debug(
'All tools for virtual hardware identification failed to '
'execute because they do not exist on the system running this '
'instance or the user does not have the necessary permissions '
'to execute them. Grains output might not be accurate.'
)
choices = ('Linux', 'HP-UX')
isdir = os.path.isdir
@ -984,6 +1047,63 @@ def _virtual(osdata):
return grains
def _virtual_hv(osdata):
'''
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
'''
grains = {}
# Bail early if we're not running on Xen
try:
if 'xen' not in osdata['virtual']:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ('major', 'minor', 'extra'):
with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {0: 'writable_page_tables',
1: 'writable_descriptor_tables',
2: 'auto_translated_physmap',
3: 'supervisor_mode_kernel',
4: 'pae_pgdir_above_4gb',
5: 'mmu_pt_update_preserve_ad',
7: 'gnttab_map_avail_bits',
8: 'hvm_callback_vector',
9: 'hvm_safe_pvclock',
10: 'hvm_pirqs',
11: 'dom0',
12: 'grant_map_identity',
13: 'memory_op_vnode_supported',
14: 'ARM_SMCCC_supported'}
try:
with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains['virtual_hv_features'] = features
grains['virtual_hv_features_list'] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains
def _ps(osdata):
'''
Return the ps grain
@ -1231,6 +1351,7 @@ _OS_NAME_MAP = {
'synology': 'Synology',
'nilrt': 'NILinuxRT',
'nilrt-xfce': 'NILinuxRT-XFCE',
'poky': 'Poky',
'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'antergos': 'Antergos',
@ -1306,6 +1427,7 @@ _OS_FAMILY_MAP = {
'KDE neon': 'Debian',
'Void': 'Void',
'IDMS': 'Debian',
'AIX': 'AIX',
}
@ -1759,6 +1881,15 @@ def os_data():
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
elif grains['kernel'] == 'AIX':
osrelease = __salt__['cmd.run']('oslevel')
osrelease_techlevel = __salt__['cmd.run']('oslevel -r')
osname = __salt__['cmd.run']('uname')
grains['os'] = 'AIX'
grains['osfullname'] = osname
grains['osrelease'] = osrelease
grains['osrelease_techlevel'] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] == 'FreeBSD':
@ -1790,7 +1921,7 @@ def os_data():
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') == 'RedHat':
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
elif grains.get('os_family') == 'NILinuxRT':
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
if line.startswith('arch'):
@ -1810,6 +1941,7 @@ def os_data():
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
if grains.get('osrelease', ''):
@ -2098,6 +2230,10 @@ def get_machine_id():
'''
# Provides:
# machine-id
if platform.system() == 'AIX':
return _aix_get_machine_id()
locations = ['/etc/machine-id', '/var/lib/dbus/machine-id']
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
@ -2261,6 +2397,7 @@ def _hw_data(osdata):
hwdata = {
'manufacturer': 'manufacturer',
'serialnumber': 'serial#',
'productname': 'DeviceDesc',
}
for grain_name, cmd_key in six.iteritems(hwdata):
result = __salt__['cmd.run_all']('fw_printenv {0}'.format(cmd_key))
@ -2362,9 +2499,9 @@ def _hw_data(osdata):
product_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+\s(.*)', # prtdiag
r'(?im)^\s*banner-name:\s*(.*)', # prtconf
r'(?im)^\s*product-name:\s*(.*)', # prtconf
r'(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)', # prtdiag
r'(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)', # prtconf
r'(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)', # prtconf
]
]
@ -2431,8 +2568,31 @@ def _hw_data(osdata):
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['product'] = res.group(1).strip().replace("'", "")
break
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
grains['product'] = t_productname
grains['productname'] = t_productname
break
elif osdata['kernel'] == 'AIX':
cmd = salt.utils.path.which('prtconf')
if data:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
product_regexes = [re.compile(r'(?im)^\s*System\s+Model:\s+(\S+)')]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['manufacturer'], grains['productname'] = res.group(1).strip().replace("'", "").split(",")
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains

View File

@ -22,6 +22,7 @@ from zipimport import zipimporter
# Import salt libs
import salt.config
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
@ -751,10 +752,13 @@ def grains(opts, force_refresh=False, proxy=None):
# proxymodule for retrieving information from the connected
# device.
log.trace('Loading %s grain', key)
if funcs[key].__code__.co_argcount == 1:
ret = funcs[key](proxy)
else:
ret = funcs[key]()
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if 'proxy' in parameters:
kwargs['proxy'] = proxy
if 'grains' in parameters:
kwargs['grains'] = grains_data
ret = funcs[key](**kwargs)
except Exception:
if salt.utils.platform.is_proxy():
log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.')
@ -967,12 +971,14 @@ def executors(opts, functions=None, context=None, proxy=None):
'''
Returns the executor modules
'''
return LazyLoader(
executors = LazyLoader(
_module_dirs(opts, 'executors', 'executor'),
opts,
tag='executor',
pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}},
)
executors.pack['__executors__'] = executors
return executors
def cache(opts, serial):
@ -1629,7 +1635,11 @@ class LazyLoader(salt.utils.lazy.LazyDict):
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
raise KeyError
log.error(
'Failed to load function %s because its module (%s) is '
'not in the whitelist: %s', key, mod_name, self.whitelist
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):

View File

@ -103,6 +103,20 @@ class SysLogHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.SysLogHandler
'''
Syslog handler which properly handles exc_info on a per handler basis
'''
def handleError(self, record):
    '''
    Override the default error handling mechanism for py3

    Deal with syslog os errors when the log file does not exist
    '''
    handled = False
    if sys.stderr and sys.version_info >= (3, 5, 4):
        exc_type, exc_value, exc_tb = sys.exc_info()
        # Compare the exception class name for equality. The previous
        # check used substring membership (`t.__name__ in 'FileNotFoundError'`),
        # which would also silently swallow any exception class whose name is
        # a substring of 'FileNotFoundError' (e.g. 'Error'). Comparing by name
        # rather than issubclass keeps this importable on py2, where
        # FileNotFoundError does not exist. Guard against exc_type being None
        # in case handleError is invoked outside an active exception.
        if exc_type is not None and exc_type.__name__ == 'FileNotFoundError':
            sys.stderr.write('[WARNING ] The log_file does not exist. Logging not setup correctly or syslog service not started.\n')
            handled = True
    if not handled:
        # Fall back to the stock logging error handling for anything else.
        super(SysLogHandler, self).handleError(record)
class RotatingFileHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.RotatingFileHandler, NewStyleClassMixIn):

View File

@ -1565,7 +1565,15 @@ class Minion(MinionBase):
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func' in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
@ -1583,14 +1591,17 @@ class Minion(MinionBase):
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
@ -3597,6 +3608,7 @@ class ProxyMinion(Minion):
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
@ -3747,6 +3759,9 @@ class ProxyMinion(Minion):
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):

View File

@ -410,9 +410,9 @@ def list_(name,
item.sort()
if verbose:
ret = {'dirs': sorted(dirs),
'files': sorted(files),
'links': sorted(links)}
ret = {'dirs': sorted(salt.utils.data.decode_list(dirs)),
'files': sorted(salt.utils.data.decode_list(files)),
'links': sorted(salt.utils.data.decode_list(links))}
ret['top_level_dirs'] = [x for x in ret['dirs']
if x.count('/') == 1]
ret['top_level_files'] = [x for x in ret['files']

View File

@ -53,7 +53,6 @@ import time
# Import Salt libs
from salt.exceptions import SaltInvocationError, CommandExecutionError
import salt.utils.boto3
import salt.utils.compat
import salt.utils.versions

View File

@ -54,7 +54,6 @@ import logging
import time
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils.versions
from salt.exceptions import SaltInvocationError
@ -646,6 +645,10 @@ def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
r = conn.disassociate_vpc_from_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'VPCAssociationNotFound':
log.debug('No VPC Association exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)

View File

@ -85,7 +85,6 @@ import datetime
# Import Salt libs
from salt.ext import six
import salt.utils.boto3
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
@ -184,7 +183,7 @@ def _find_apis_by_name(name, description=None,
apis = _filter_apis_desc(description, apis)
return {'restapi': [_convert_datetime_str(api) for api in apis]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_apis(name=None, description=None, region=None, key=None, keyid=None, profile=None):
@ -252,7 +251,7 @@ def create_api(name, description, cloneFrom=None,
api = _convert_datetime_str(api)
return {'created': True, 'restapi': api} if api else {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api(name, description=None, region=None, key=None, keyid=None, profile=None):
@ -283,7 +282,7 @@ def delete_api(name, description=None, region=None, key=None, keyid=None, profil
else:
return {'deleted': False}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_resources(restApiId, region=None, key=None, keyid=None, profile=None):
@ -304,7 +303,7 @@ def describe_api_resources(restApiId, region=None, key=None, keyid=None, profile
return {'resources': resources}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_resource(restApiId, path,
@ -365,7 +364,7 @@ def create_api_resources(restApiId, path,
else:
return {'created': False, 'error': 'unexpected error.'}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_resources(restApiId, path,
@ -394,7 +393,7 @@ def delete_api_resources(restApiId, path,
else:
return {'deleted': False, 'error': 'no resource found by {0}'.format(path)}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_resource_method(restApiId, resourcePath, httpMethod,
@ -422,7 +421,7 @@ def describe_api_resource_method(restApiId, resourcePath, httpMethod,
method = conn.get_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod)
return {'method': method}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
@ -441,7 +440,7 @@ def describe_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
response = conn.get_api_key(apiKey=apiKey)
return {'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_keys(region=None, key=None, keyid=None, profile=None):
@ -461,7 +460,7 @@ def describe_api_keys(region=None, key=None, keyid=None, profile=None):
return {'apiKeys': [_convert_datetime_str(apikey) for apikey in apikeys]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_api_key(name, description, enabled=True, stageKeys=None,
@ -499,7 +498,7 @@ def create_api_key(name, description, enabled=True, stageKeys=None,
return {'created': True, 'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
@ -518,7 +517,7 @@ def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
conn.delete_api_key(apiKey=apiKey)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def _api_key_patch_replace(conn, apiKey, path, value):
@ -571,7 +570,7 @@ def update_api_key_description(apiKey, description, region=None, key=None, keyid
response = _api_key_patch_replace(conn, apiKey, '/description', description)
return {'updated': True, 'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
@ -590,7 +589,7 @@ def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
response = _api_key_patch_replace(conn, apiKey, '/enabled', 'True')
return {'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def disable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
@ -609,7 +608,7 @@ def disable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
response = _api_key_patch_replace(conn, apiKey, '/enabled', 'False')
return {'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def associate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None):
@ -630,7 +629,7 @@ def associate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, ke
response = _api_key_patch_add(conn, apiKey, pvlist)
return {'associated': True, 'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'associated': False, 'error': salt.utils.boto3.get_error(e)}
return {'associated': False, 'error': __utils__['boto3.get_error'](e)}
def disassociate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None):
@ -651,7 +650,7 @@ def disassociate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None,
response = _api_key_patch_remove(conn, apiKey, pvlist)
return {'disassociated': True}
except ClientError as e:
return {'disassociated': False, 'error': salt.utils.boto3.get_error(e)}
return {'disassociated': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
@ -679,7 +678,7 @@ def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profi
return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
@ -698,7 +697,7 @@ def describe_api_deployment(restApiId, deploymentId, region=None, key=None, keyi
deployment = conn.get_deployment(restApiId=restApiId, deploymentId=deploymentId)
return {'deployment': _convert_datetime_str(deployment)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def activate_api_deployment(restApiId, stageName, deploymentId,
@ -721,7 +720,7 @@ def activate_api_deployment(restApiId, stageName, deploymentId,
'value': deploymentId}])
return {'set': True, 'response': _convert_datetime_str(response)}
except ClientError as e:
return {'set': False, 'error': salt.utils.boto3.get_error(e)}
return {'set': False, 'error': __utils__['boto3.get_error'](e)}
def create_api_deployment(restApiId, stageName, stageDescription='', description='', cacheClusterEnabled=False,
@ -748,7 +747,7 @@ def create_api_deployment(restApiId, stageName, stageDescription='', description
variables=variables)
return {'created': True, 'deployment': _convert_datetime_str(deployment)}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
@ -767,7 +766,7 @@ def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=
conn.delete_deployment(restApiId=restApiId, deploymentId=deploymentId)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def overwrite_api_stage_variables(restApiId, stageName, variables, region=None, key=None, keyid=None, profile=None):
@ -813,7 +812,7 @@ def overwrite_api_stage_variables(restApiId, stageName, variables, region=None,
return {'overwrite': True, 'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'overwrite': False, 'error': salt.utils.boto3.get_error(e)}
return {'overwrite': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
@ -832,7 +831,7 @@ def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None,
stage = conn.get_stage(restApiId=restApiId, stageName=stageName)
return {'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_stages(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
@ -851,7 +850,7 @@ def describe_api_stages(restApiId, deploymentId, region=None, key=None, keyid=No
stages = conn.get_stages(restApiId=restApiId, deploymentId=deploymentId)
return {'stages': [_convert_datetime_str(stage) for stage in stages['item']]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_api_stage(restApiId, stageName, deploymentId, description='',
@ -877,7 +876,7 @@ def create_api_stage(restApiId, stageName, deploymentId, description='',
cacheClusterSize=cacheClusterSize, variables=variables)
return {'created': True, 'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
@ -896,7 +895,7 @@ def delete_api_stage(restApiId, stageName, region=None, key=None, keyid=None, pr
conn.delete_stage(restApiId=restApiId, stageName=stageName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def flush_api_stage_cache(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
@ -915,7 +914,7 @@ def flush_api_stage_cache(restApiId, stageName, region=None, key=None, keyid=Non
conn.flush_stage_cache(restApiId=restApiId, stageName=stageName)
return {'flushed': True}
except ClientError as e:
return {'flushed': False, 'error': salt.utils.boto3.get_error(e)}
return {'flushed': False, 'error': __utils__['boto3.get_error'](e)}
def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
@ -947,7 +946,7 @@ def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
return {'created': False, 'error': 'Failed to create method'}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_method(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
@ -970,7 +969,7 @@ def describe_api_method(restApiId, resourcePath, httpMethod, region=None, key=No
return {'method': _convert_datetime_str(method)}
return {'error': 'get API method failed: no such resource'}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def delete_api_method(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
@ -993,7 +992,7 @@ def delete_api_method(restApiId, resourcePath, httpMethod, region=None, key=None
return {'deleted': True}
return {'deleted': False, 'error': 'get API method failed: no such resource'}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode, responseParameters=None,
@ -1023,7 +1022,7 @@ def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
return {'created': True, 'response': response}
return {'created': False, 'error': 'no such resource'}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
@ -1048,7 +1047,7 @@ def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
@ -1073,7 +1072,7 @@ def describe_api_method_response(restApiId, resourcePath, httpMethod, statusCode
return {'response': _convert_datetime_str(response)}
return {'error': 'no such resource'}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_models(restApiId, region=None, key=None, keyid=None, profile=None):
@ -1092,7 +1091,7 @@ def describe_api_models(restApiId, region=None, key=None, keyid=None, profile=No
models = _multi_call(conn.get_models, 'items', restApiId=restApiId)
return {'models': [_convert_datetime_str(model) for model in models]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_model(restApiId, modelName, flatten=True, region=None, key=None, keyid=None, profile=None):
@ -1111,7 +1110,7 @@ def describe_api_model(restApiId, modelName, flatten=True, region=None, key=None
model = conn.get_model(restApiId=restApiId, modelName=modelName, flatten=flatten)
return {'model': _convert_datetime_str(model)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def api_model_exists(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
@ -1155,7 +1154,7 @@ def update_api_model_schema(restApiId, modelName, schema, region=None, key=None,
response = _api_model_patch_replace(conn, restApiId, modelName, '/schema', schema_json)
return {'updated': True, 'model': _convert_datetime_str(response)}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
@ -1174,7 +1173,7 @@ def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, pr
conn.delete_model(restApiId=restApiId, modelName=modelName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def create_api_model(restApiId, modelName, modelDescription, schema, contentType='application/json',
@ -1197,7 +1196,7 @@ def create_api_model(restApiId, modelName, modelDescription, schema, contentType
schema=schema_json, contentType=contentType)
return {'created': True, 'model': _convert_datetime_str(model)}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def describe_api_integration(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
@ -1220,7 +1219,7 @@ def describe_api_integration(restApiId, resourcePath, httpMethod, region=None, k
return {'integration': _convert_datetime_str(integration)}
return {'error': 'no such resource'}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
@ -1245,7 +1244,7 @@ def describe_api_integration_response(restApiId, resourcePath, httpMethod, statu
return {'response': _convert_datetime_str(response)}
return {'error': 'no such resource'}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def delete_api_integration(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
@ -1268,7 +1267,7 @@ def delete_api_integration(restApiId, resourcePath, httpMethod, region=None, key
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
@ -1293,7 +1292,7 @@ def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusC
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def _get_role_arn(name, region=None, key=None, keyid=None, profile=None):
@ -1350,7 +1349,7 @@ def create_api_integration(restApiId, resourcePath, httpMethod, integrationType,
return {'created': True, 'integration': integration}
return {'created': False, 'error': 'no such resource'}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def create_api_integration_response(restApiId, resourcePath, httpMethod, statusCode, selectionPattern,
@ -1383,7 +1382,7 @@ def create_api_integration_response(restApiId, resourcePath, httpMethod, statusC
return {'created': True, 'response': response}
return {'created': False, 'error': 'no such resource'}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def _filter_plans(attr, name, plans):
@ -1419,7 +1418,7 @@ def describe_usage_plans(name=None, plan_id=None, region=None, key=None, keyid=N
return {'plans': [_convert_datetime_str(plan) for plan in plans]}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def _validate_throttle(throttle):
@ -1498,7 +1497,7 @@ def create_usage_plan(name, description=None, throttle=None, quota=None, region=
res = conn.create_usage_plan(**values)
return {'created': True, 'result': res}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
except (TypeError, ValueError) as e:
return {'error': six.text_type(e)}
@ -1572,7 +1571,7 @@ def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None,
return {'updated': False}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
except (TypeError, ValueError) as e:
return {'error': six.text_type(e)}
@ -1601,7 +1600,7 @@ def delete_usage_plan(plan_id, region=None, key=None, keyid=None, profile=None):
res = conn.delete_usage_plan(usagePlanId=plan_id)
return {'deleted': True, 'usagePlanId': plan_id}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None, profile=None):
@ -1635,7 +1634,7 @@ def _update_usage_plan_apis(plan_id, apis, op, region=None, key=None, keyid=None
patchOperations=patchOperations)
return {'success': True, 'result': res}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
except Exception as e:
return {'error': e}

View File

@ -74,7 +74,6 @@ except ImportError:
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils.json
import salt.utils.odict as odict
@ -886,7 +885,7 @@ def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,
AutoScalingGroupName=name,
ShouldDecrementDesiredCapacity=should_decrement_desired_capacity)
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}
@ -912,7 +911,7 @@ def exit_standby(name, instance_ids, should_decrement_desired_capacity=False,
InstanceIds=instance_ids,
AutoScalingGroupName=name)
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}

View File

@ -55,7 +55,6 @@ import logging
# Import Salt libs
from salt.ext import six
import salt.utils.boto3
import salt.utils.compat
import salt.utils.versions
@ -117,7 +116,7 @@ def exists(Name,
conn.get_trail_status(Name=Name)
return {'exists': True}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
return {'exists': False}
return {'error': err}
@ -167,7 +166,7 @@ def create(Name,
log.warning('Trail was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete(Name,
@ -191,7 +190,7 @@ def delete(Name,
conn.delete_trail(Name=Name)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe(Name,
@ -224,10 +223,10 @@ def describe(Name,
else:
return {'trail': None}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
return {'trail': None}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def status(Name,
@ -265,10 +264,10 @@ def status(Name,
else:
return {'trail': None}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'TrailNotFoundException':
return {'trail': None}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def list(region=None, key=None, keyid=None, profile=None):
@ -293,7 +292,7 @@ def list(region=None, key=None, keyid=None, profile=None):
log.warning('No trails found')
return {'trails': trails.get('trailList', [])}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def update(Name,
@ -340,7 +339,7 @@ def update(Name,
log.warning('Trail was not created')
return {'updated': False}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def start_logging(Name,
@ -364,7 +363,7 @@ def start_logging(Name,
conn.start_logging(Name=Name)
return {'started': True}
except ClientError as e:
return {'started': False, 'error': salt.utils.boto3.get_error(e)}
return {'started': False, 'error': __utils__['boto3.get_error'](e)}
def stop_logging(Name,
@ -388,7 +387,7 @@ def stop_logging(Name,
conn.stop_logging(Name=Name)
return {'stopped': True}
except ClientError as e:
return {'stopped': False, 'error': salt.utils.boto3.get_error(e)}
return {'stopped': False, 'error': __utils__['boto3.get_error'](e)}
def _get_trail_arn(name, region=None, key=None, keyid=None, profile=None):
@ -433,7 +432,7 @@ def add_tags(Name,
profile=profile), TagsList=tagslist)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
def remove_tags(Name,
@ -464,7 +463,7 @@ def remove_tags(Name,
profile=profile), TagsList=tagslist)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
def list_tags(Name,
@ -497,4 +496,4 @@ def list_tags(Name,
tagdict[tag.get('Key')] = tag.get('Value')
return {'tags': tagdict}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}

View File

@ -81,7 +81,6 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils.versions
@ -131,7 +130,7 @@ def _find_identity_pool_ids(name, pool_id, conn):
'''
ids = []
if pool_id is None:
for pools in salt.utils.boto3.paged_call(conn.list_identity_pools,
for pools in __utils__['boto3.paged_call'](conn.list_identity_pools,
marker_flag='NextToken', marker_arg='NextToken', MaxResults=25):
for pool in pools['IdentityPools']:
if pool['IdentityPoolName'] == name:
@ -174,7 +173,7 @@ def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
else:
return {'identity_pools': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_identity_pool(IdentityPoolName,
@ -216,7 +215,7 @@ def create_identity_pool(IdentityPoolName,
return {'created': True, 'identity_pool': response}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
@ -250,7 +249,7 @@ def delete_identity_pools(IdentityPoolName, IdentityPoolId=None,
else:
return {'deleted': False, 'count': count}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
@ -284,7 +283,7 @@ def get_identity_pool_roles(IdentityPoolName, IdentityPoolId=None,
else:
return {'identity_pool_roles': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def _get_role_arn(name, **conn_params):
@ -349,7 +348,7 @@ def set_identity_pool_roles(IdentityPoolId, AuthenticatedRole=None, Unauthentica
return {'set': True, 'roles': Roles}
except ClientError as e:
return {'set': False, 'error': salt.utils.boto3.get_error(e)}
return {'set': False, 'error': __utils__['boto3.get_error'](e)}
def update_identity_pool(IdentityPoolId,
@ -420,4 +419,4 @@ def update_identity_pool(IdentityPoolId,
return {'updated': True, 'identity_pool': response}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}

View File

@ -56,6 +56,7 @@ import salt.utils.data
import salt.utils.json
import salt.utils.versions
from salt.ext import six
from salt.ext.six.moves import map
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import third party libs
@ -65,6 +66,7 @@ try:
import boto.ec2
# pylint: enable=unused-import
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -1003,14 +1005,19 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
return False
security_group_ids += [r]
if all((network_interface_id, network_interface_name)):
raise SaltInvocationError('Only one of network_interface_id or '
'network_interface_name may be provided.')
network_interface_args = list(map(int, [network_interface_id is not None,
network_interface_name is not None,
network_interfaces is not None]))
if sum(network_interface_args) > 1:
raise SaltInvocationError('Only one of network_interface_id, '
'network_interface_name or '
'network_interfaces may be provided.')
if network_interface_name:
result = get_network_interface_id(network_interface_name,
region=region, key=key,
keyid=keyid,
profile=profile)
region=region, key=key,
keyid=keyid,
profile=profile)
network_interface_id = result['result']
if not network_interface_id:
log.warning(
@ -1019,17 +1026,20 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
)
if network_interface_id:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
interface = NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=0
)
device_index=0)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
interface = NetworkInterfaceSpecification(
subnet_id=subnet_id,
groups=security_group_ids,
device_index=0
)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
device_index=0)
if network_interfaces:
interfaces_specs = [NetworkInterfaceSpecification(**x) for x in network_interfaces]
interfaces = NetworkInterfaceCollection(*interfaces_specs)
else:
interfaces = NetworkInterfaceCollection(interface)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

View File

@ -80,7 +80,6 @@ import logging
# Import Salt libs
from salt.ext import six
import salt.utils.boto3
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
@ -148,7 +147,7 @@ def exists(DomainName,
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def status(DomainName,
@ -179,7 +178,7 @@ def status(DomainName,
else:
return {'domain': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe(DomainName,
@ -208,7 +207,7 @@ def describe(DomainName,
else:
return {'domain': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
@ -262,7 +261,7 @@ def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
log.warning('Domain was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete(DomainName, region=None, key=None, keyid=None, profile=None):
@ -285,7 +284,7 @@ def delete(DomainName, region=None, key=None, keyid=None, profile=None):
conn.delete_elasticsearch_domain(DomainName=DomainName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def update(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
@ -335,7 +334,7 @@ def update(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
return {'updated': False}
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def add_tags(DomainName=None, ARN=None,
@ -378,7 +377,7 @@ def add_tags(DomainName=None, ARN=None,
conn.add_tags(ARN=ARN, TagList=tagslist)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
def remove_tags(TagKeys, DomainName=None, ARN=None,
@ -417,7 +416,7 @@ def remove_tags(TagKeys, DomainName=None, ARN=None,
TagKeys=TagKeys)
return {'tagged': True}
except ClientError as e:
return {'tagged': False, 'error': salt.utils.boto3.get_error(e)}
return {'tagged': False, 'error': __utils__['boto3.get_error'](e)}
def list_tags(DomainName=None, ARN=None,
@ -462,4 +461,4 @@ def list_tags(DomainName=None, ARN=None,
tagdict[tag.get('Key')] = tag.get('Value')
return {'tags': tagdict}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}

View File

@ -54,7 +54,6 @@ import salt.utils.versions
# Import third-party libs
try:
# pylint: disable=unused-import
import salt.utils.boto3
import boto3
import botocore
# pylint: enable=unused-import

View File

@ -55,7 +55,6 @@ import logging
import datetime
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils.json
import salt.utils.versions
@ -125,7 +124,7 @@ def thing_type_exists(thingTypeName,
else:
return {'exists': False}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}
@ -162,7 +161,7 @@ def describe_thing_type(thingTypeName,
else:
return {'thing_type': None}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'thing_type': None}
return {'error': err}
@ -207,7 +206,7 @@ def create_thing_type(thingTypeName, thingTypeDescription,
log.warning('thing type was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def deprecate_thing_type(thingTypeName, undoDeprecate=False,
@ -238,7 +237,7 @@ def deprecate_thing_type(thingTypeName, undoDeprecate=False,
deprecated = True if undoDeprecate is False else False
return {'deprecated': deprecated}
except ClientError as e:
return {'deprecated': False, 'error': salt.utils.boto3.get_error(e)}
return {'deprecated': False, 'error': __utils__['boto3.get_error'](e)}
def delete_thing_type(thingTypeName,
@ -264,7 +263,7 @@ def delete_thing_type(thingTypeName,
conn.delete_thing_type(thingTypeName=thingTypeName)
return {'deleted': True}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'deleted': True}
return {'deleted': False, 'error': err}
@ -291,7 +290,7 @@ def policy_exists(policyName,
conn.get_policy(policyName=policyName)
return {'exists': True}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}
@ -331,7 +330,7 @@ def create_policy(policyName, policyDocument,
log.warning('Policy was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_policy(policyName,
@ -355,7 +354,7 @@ def delete_policy(policyName,
conn.delete_policy(policyName=policyName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe_policy(policyName,
@ -383,10 +382,10 @@ def describe_policy(policyName,
else:
return {'policy': None}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'policy': None}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def policy_version_exists(policyName, policyVersionId,
@ -411,10 +410,10 @@ def policy_version_exists(policyName, policyVersionId,
policyversionId=policyVersionId)
return {'exists': bool(policy)}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_policy_version(policyName, policyDocument, setAsDefault=False,
@ -449,7 +448,7 @@ def create_policy_version(policyName, policyDocument, setAsDefault=False,
log.warning('Policy version was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def delete_policy_version(policyName, policyVersionId,
@ -474,7 +473,7 @@ def delete_policy_version(policyName, policyVersionId,
policyVersionId=policyVersionId)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe_policy_version(policyName, policyVersionId,
@ -503,10 +502,10 @@ def describe_policy_version(policyName, policyVersionId,
else:
return {'policy': None}
except ClientError as e:
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'policy': None}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def list_policies(region=None, key=None, keyid=None, profile=None):
@ -533,7 +532,7 @@ def list_policies(region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policies = []
for ret in salt.utils.boto3.paged_call(conn.list_policies,
for ret in __utils__['boto3.paged_call'](conn.list_policies,
marker_flag='nextMarker',
marker_arg='marker'):
policies.extend(ret['policies'])
@ -541,7 +540,7 @@ def list_policies(region=None, key=None, keyid=None, profile=None):
log.warning('No policies found')
return {'policies': policies}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def list_policy_versions(policyName,
@ -567,7 +566,7 @@ def list_policy_versions(policyName,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
for ret in salt.utils.boto3.paged_call(conn.list_policy_versions,
for ret in __utils__['boto3.paged_call'](conn.list_policy_versions,
marker_flag='nextMarker',
marker_arg='marker',
policyName=policyName):
@ -576,7 +575,7 @@ def list_policy_versions(policyName,
log.warning('No versions found')
return {'policyVersions': vers}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def set_default_policy_version(policyName, policyVersionId,
@ -603,7 +602,7 @@ def set_default_policy_version(policyName, policyVersionId,
policyVersionId=str(policyVersionId)) # future lint: disable=blacklisted-function
return {'changed': True}
except ClientError as e:
return {'changed': False, 'error': salt.utils.boto3.get_error(e)}
return {'changed': False, 'error': __utils__['boto3.get_error'](e)}
def list_principal_policies(principal,
@ -629,7 +628,7 @@ def list_principal_policies(principal,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vers = []
for ret in salt.utils.boto3.paged_call(conn.list_principal_policies,
for ret in __utils__['boto3.paged_call'](conn.list_principal_policies,
principal=principal,
marker_flag='nextMarker',
marker_arg='marker'):
@ -638,7 +637,7 @@ def list_principal_policies(principal,
log.warning('No policies found')
return {'policies': vers}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def attach_principal_policy(policyName, principal,
@ -664,7 +663,7 @@ def attach_principal_policy(policyName, principal,
principal=principal)
return {'attached': True}
except ClientError as e:
return {'attached': False, 'error': salt.utils.boto3.get_error(e)}
return {'attached': False, 'error': __utils__['boto3.get_error'](e)}
def detach_principal_policy(policyName, principal,
@ -689,7 +688,7 @@ def detach_principal_policy(policyName, principal,
principal=principal)
return {'detached': True}
except ClientError as e:
return {'detached': False, 'error': salt.utils.boto3.get_error(e)}
return {'detached': False, 'error': __utils__['boto3.get_error'](e)}
def topic_rule_exists(ruleName,
@ -718,10 +717,10 @@ def topic_rule_exists(ruleName,
# use, it's more useful to assume lack of existence than to assume a
# genuine authorization problem; authorization problems should not be
# the common case.
err = salt.utils.boto3.get_error(e)
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'UnauthorizedException':
return {'exists': False}
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_topic_rule(ruleName, sql, actions, description,
@ -754,7 +753,7 @@ def create_topic_rule(ruleName, sql, actions, description,
})
return {'created': True}
except ClientError as e:
return {'created': False, 'error': salt.utils.boto3.get_error(e)}
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def replace_topic_rule(ruleName, sql, actions, description,
@ -787,7 +786,7 @@ def replace_topic_rule(ruleName, sql, actions, description,
})
return {'replaced': True}
except ClientError as e:
return {'replaced': False, 'error': salt.utils.boto3.get_error(e)}
return {'replaced': False, 'error': __utils__['boto3.get_error'](e)}
def delete_topic_rule(ruleName,
@ -811,7 +810,7 @@ def delete_topic_rule(ruleName,
conn.delete_topic_rule(ruleName=ruleName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def describe_topic_rule(ruleName,
@ -840,7 +839,7 @@ def describe_topic_rule(ruleName,
else:
return {'rule': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def list_topic_rules(topic=None, ruleDisabled=None,
@ -873,7 +872,7 @@ def list_topic_rules(topic=None, ruleDisabled=None,
if ruleDisabled is not None:
kwargs['ruleDisabled'] = ruleDisabled
rules = []
for ret in salt.utils.boto3.paged_call(conn.list_topic_rules,
for ret in __utils__['boto3.paged_call'](conn.list_topic_rules,
marker_flag='nextToken',
marker_arg='nextToken',
**kwargs):
@ -882,4 +881,4 @@ def list_topic_rules(topic=None, ruleDisabled=None,
log.warning('No rules found')
return {'rules': rules}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}

View File

@ -53,7 +53,6 @@ import logging
import time
# Import Salt libs
import salt.utils.boto3
import salt.utils.compat
import salt.utils.odict as odict
import salt.utils.versions
@ -153,7 +152,7 @@ def exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
return {'exists': bool(rds)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
@ -171,7 +170,7 @@ def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
rds = conn.describe_option_groups(OptionGroupName=name)
return {'exists': bool(rds)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def parameter_group_exists(name, tags=None, region=None, key=None, keyid=None,
@ -193,7 +192,7 @@ def parameter_group_exists(name, tags=None, region=None, key=None, keyid=None,
resp = {}
if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
resp['exists'] = False
resp['error'] = salt.utils.boto3.get_error(e)
resp['error'] = __utils__['boto3.get_error'](e)
return resp
@ -218,7 +217,7 @@ def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None,
if "DBSubnetGroupNotFoundFault" in e.message:
return {'exists': False}
else:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create(name, allocated_storage, db_instance_class, engine,
@ -317,7 +316,7 @@ def create(name, allocated_storage, db_instance_class, engine,
log.info('Instance status after 10 seconds is: %s', stat)
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_read_replica(name, source_name, db_instance_class=None,
@ -375,7 +374,7 @@ def create_read_replica(name, source_name, db_instance_class=None,
return {'exists': bool(rds_replica)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_option_group(name, engine_name, major_engine_version,
@ -408,7 +407,7 @@ def create_option_group(name, engine_name, major_engine_version,
return {'exists': bool(rds)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_parameter_group(name, db_parameter_group_family, description,
@ -444,7 +443,7 @@ def create_parameter_group(name, db_parameter_group_family, description,
return {'exists': bool(rds), 'message':
'Created RDS parameter group {0}'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def create_subnet_group(name, description, subnet_ids, tags=None,
@ -475,7 +474,7 @@ def create_subnet_group(name, description, subnet_ids, tags=None,
return {'created': bool(rds)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def update_parameter_group(name, parameters, apply_method="pending-reboot",
@ -520,7 +519,7 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot",
Parameters=param_list)
return {'results': bool(res)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe(name, tags=None, region=None, key=None, keyid=None,
@ -568,7 +567,7 @@ def describe(name, tags=None, region=None, key=None, keyid=None,
else:
return {'rds': None}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
except IndexError:
return {'rds': None}
@ -597,7 +596,7 @@ def describe_db_instances(name=None, filters=None, jmespath='DBInstances',
except ClientError as e:
code = getattr(e, 'response', {}).get('Error', {}).get('Code')
if code != 'DBInstanceNotFound':
log.error(salt.utils.boto3.get_error(e))
log.error(__utils__['boto3.get_error'](e))
return []
@ -647,7 +646,7 @@ def get_endpoint(name, tags=None, region=None, key=None, keyid=None,
return endpoint
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
return endpoint
@ -706,7 +705,7 @@ def delete(name, skip_final_snapshot=None, final_db_snapshot_identifier=None,
'deleted.', timeout, name)
time.sleep(10)
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def delete_option_group(name, region=None, key=None, keyid=None, profile=None):
@ -731,7 +730,7 @@ def delete_option_group(name, region=None, key=None, keyid=None, profile=None):
return {'deleted': bool(res), 'message':
'Deleted RDS option group {0}.'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def delete_parameter_group(name, region=None, key=None, keyid=None,
@ -753,7 +752,7 @@ def delete_parameter_group(name, region=None, key=None, keyid=None,
return {'deleted': bool(r), 'message':
'Deleted RDS parameter group {0}.'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def delete_subnet_group(name, region=None, key=None, keyid=None,
@ -775,7 +774,7 @@ def delete_subnet_group(name, region=None, key=None, keyid=None,
return {'deleted': bool(r), 'message':
'Deleted RDS subnet group {0}.'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
@ -817,7 +816,7 @@ def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
return {'results': bool(info), 'message':
'Got RDS descrition for group {0}.'.format(name)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
@ -873,7 +872,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
ret['parameters'] = parameters
return ret
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def modify_db_instance(name,
@ -950,7 +949,7 @@ def modify_db_instance(name,
'Modified RDS db instance {0}.'.format(name),
'results': dict(info)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
return {'error': __utils__['boto3.get_error'](e)}
def _tag_doc(tags):

View File

@ -473,6 +473,9 @@ def authorize(name=None, source_group_name=None,
log.error(msg)
return False
except boto.exception.EC2ResponseError as e:
# if we are trying to add the same rule then we are already in the desired state, return true
if e.error_code == 'InvalidPermission.Duplicate':
return True
msg = ('Failed to add rule to security group {0} with id {1}.'
.format(group.name, group.id))
log.error(msg)

View File

@ -134,8 +134,6 @@ import time
import random
# Import Salt libs
import salt.utils.boto
import salt.utils.boto3
import salt.utils.compat
import salt.utils.versions
from salt.exceptions import SaltInvocationError, CommandExecutionError
@ -279,7 +277,7 @@ def _create_resource(resource, name=None, tags=None, region=None, key=None,
log.warning(e)
return {'created': False, 'error': {'message': e}}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def _delete_resource(resource, name=None, resource_id=None, region=None,
@ -323,7 +321,7 @@ def _delete_resource(resource, name=None, resource_id=None, region=None,
e = '{0} was not deleted.'.format(resource)
return {'deleted': False, 'error': {'message': e}}
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
def _get_resource(resource, name=None, resource_id=None, region=None,
@ -451,7 +449,7 @@ def get_resource_id(resource, name=None, resource_id=None, region=None,
return {'id': _get_resource_id(resource, name, region=region, key=key,
keyid=keyid, profile=profile)}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def resource_exists(resource, name=None, resource_id=None, tags=None,
@ -478,7 +476,7 @@ def resource_exists(resource, name=None, resource_id=None, tags=None,
key=key, keyid=keyid,
profile=profile))}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
@ -570,7 +568,7 @@ def get_id(name=None, cidr=None, tags=None, region=None, key=None, keyid=None,
return {'id': _get_id(vpc_name=name, cidr=cidr, tags=tags, region=region,
key=key, keyid=keyid, profile=profile)}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
@ -593,7 +591,7 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
boto_err = __utils__['boto.get_error'](err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return False.
return {'exists': False}
@ -643,7 +641,7 @@ def create(cidr_block, instance_tenancy=None, vpc_name=None,
log.warning('VPC was not created')
return {'created': False}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
@ -693,7 +691,7 @@ def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
log.warning('VPC %s was not deleted.', vpc_id)
return {'deleted': False}
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
def describe(vpc_id=None, vpc_name=None, region=None, key=None,
@ -722,7 +720,7 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
boto_err = __utils__['boto.get_error'](err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return None.
return {'vpc': None}
@ -736,7 +734,7 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
try:
vpcs = conn.get_all_vpcs(**filter_parameters)
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
return {'error': __utils__['boto.get_error'](err)}
if vpcs:
vpc = vpcs[0] # Found!
@ -806,7 +804,7 @@ def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
return {'vpcs': []}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None):
@ -871,7 +869,7 @@ def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,
if not vpc_id:
return {'created': False, 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
subnet_object_dict = _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,
availability_zone=availability_zone,
@ -939,7 +937,7 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
return {'error': __utils__['boto.get_error'](err)}
filter_parameters = {'filters': {}}
if subnet_id:
@ -957,7 +955,7 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
try:
subnets = conn.get_all_subnets(**filter_parameters)
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
boto_err = __utils__['boto.get_error'](err)
if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
# Subnet was not found: handle the error and return False.
return {'exists': False}
@ -1000,7 +998,7 @@ def get_subnet_association(subnets, region=None, key=None, keyid=None,
# subnet_ids=subnets can accept either a string or a list
subnets = conn.get_all_subnets(subnet_ids=subnets)
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
# using a set to store vpc_ids - the use of set prevents duplicate
# vpc_id values
@ -1040,7 +1038,7 @@ def describe_subnet(subnet_id=None, subnet_name=None, region=None,
subnet = _get_resource('subnet', name=subnet_name, resource_id=subnet_id,
region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
if not subnet:
return {'subnet': None}
@ -1121,7 +1119,7 @@ def describe_subnets(subnet_ids=None, subnet_names=None, vpc_id=None, cidr=None,
return {'subnets': subnets_list}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def create_internet_gateway(internet_gateway_name=None, vpc_id=None,
@ -1163,7 +1161,7 @@ def create_internet_gateway(internet_gateway_name=None, vpc_id=None,
)
return r
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def delete_internet_gateway(internet_gateway_id=None,
@ -1217,7 +1215,7 @@ def delete_internet_gateway(internet_gateway_id=None,
region=region, key=key, keyid=keyid,
profile=profile)
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
def _find_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None, vpc_id=None, vpc_name=None,
@ -1258,7 +1256,7 @@ def _find_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None, vp
conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)
nat_gateways = []
for ret in salt.utils.boto3.paged_call(conn3.describe_nat_gateways,
for ret in __utils__['boto3.paged_call'](conn3.describe_nat_gateways,
marker_flag='NextToken', marker_arg='NextToken',
**filter_parameters):
for gw in ret.get('NatGateways', []):
@ -1381,7 +1379,7 @@ def create_nat_gateway(subnet_id=None,
r = conn3.create_nat_gateway(SubnetId=subnet_id, AllocationId=allocation_id)
return {'created': True, 'id': r.get('NatGateway', {}).get('NatGatewayId')}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def delete_nat_gateway(nat_gateway_id,
@ -1457,7 +1455,7 @@ def delete_nat_gateway(nat_gateway_id,
conn3.release_address(AllocationId=addr.get('AllocationId'))
return {'deleted': True}
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
def create_customer_gateway(vpn_connection_type, ip_address, bgp_asn,
@ -1578,7 +1576,7 @@ def create_dhcp_options(domain_name=None, domain_name_servers=None, ntp_servers=
)
return r
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None,
@ -1609,7 +1607,7 @@ def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = conn.get_all_dhcp_options(dhcp_options_ids=[dhcp_options_id])
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
if not r:
return {'dhcp_options': None}
@ -1672,7 +1670,7 @@ def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None,
dhcp_options_id, vpc_id)
return {'associated': False, 'error': {'message': 'DHCP options could not be associated.'}}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
return {'associated': False, 'error': __utils__['boto.get_error'](e)}
def dhcp_options_exists(dhcp_options_id=None, name=None, dhcp_options_name=None,
@ -1725,7 +1723,7 @@ def create_network_acl(vpc_id=None, vpc_name=None, network_acl_name=None,
try:
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
if not vpc_id:
return {'created': False,
@ -1756,7 +1754,7 @@ def create_network_acl(vpc_id=None, vpc_name=None, network_acl_name=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.associate_network_acl(r['id'], subnet_id)
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
r['association_id'] = association_id
return r
@ -1871,7 +1869,7 @@ def associate_network_acl_to_subnet(network_acl_id=None, subnet_id=None,
network_acl_id, subnet_id)
return {'associated': False, 'error': {'message': 'ACL could not be assocaited.'}}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
return {'associated': False, 'error': __utils__['boto.get_error'](e)}
def disassociate_network_acl(subnet_id=None, vpc_id=None, subnet_name=None, vpc_name=None,
@ -1910,7 +1908,7 @@ def disassociate_network_acl(subnet_id=None, vpc_id=None, subnet_name=None, vpc_
association_id = conn.disassociate_network_acl(subnet_id, vpc_id=vpc_id)
return {'disassociated': True, 'association_id': association_id}
except BotoServerError as e:
return {'disassociated': False, 'error': salt.utils.boto.get_error(e)}
return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
def _create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=None,
@ -1963,7 +1961,7 @@ def _create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=No
log.warning('Network ACL entry was not %s', rkey)
return {rkey: created}
except BotoServerError as e:
return {rkey: False, 'error': salt.utils.boto.get_error(e)}
return {rkey: False, 'error': __utils__['boto.get_error'](e)}
def create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=None,
@ -2046,7 +2044,7 @@ def delete_network_acl_entry(network_acl_id=None, rule_number=None, egress=None,
log.warning('Network ACL was not deleted')
return {'deleted': deleted}
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
return {'deleted': False, 'error': __utils__['boto.get_error'](e)}
def create_route_table(vpc_id=None, vpc_name=None, route_table_name=None,
@ -2182,7 +2180,7 @@ def route_exists(destination_cidr_block, route_table_name=None, route_table_id=N
log.warning('Route %s does not exist.', destination_cidr_block)
return {'exists': False}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def associate_route_table(route_table_id=None, subnet_id=None,
@ -2234,7 +2232,7 @@ def associate_route_table(route_table_id=None, subnet_id=None,
route_table_id, subnet_id)
return {'association_id': association_id}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
return {'associated': False, 'error': __utils__['boto.get_error'](e)}
def disassociate_route_table(association_id, region=None, key=None, keyid=None, profile=None):
@ -2261,7 +2259,7 @@ def disassociate_route_table(association_id, region=None, key=None, keyid=None,
log.warning('Route table with association id %s has not been disassociated.', association_id)
return {'disassociated': False}
except BotoServerError as e:
return {'disassociated': False, 'error': salt.utils.boto.get_error(e)}
return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
def replace_route_table_association(association_id, route_table_id, region=None, key=None, keyid=None, profile=None):
@ -2283,7 +2281,7 @@ def replace_route_table_association(association_id, route_table_id, region=None,
route_table_id, association_id)
return {'replaced': True, 'association_id': association_id}
except BotoServerError as e:
return {'replaced': False, 'error': salt.utils.boto.get_error(e)}
return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
def create_route(route_table_id=None, destination_cidr_block=None,
@ -2364,7 +2362,7 @@ def create_route(route_table_id=None, destination_cidr_block=None,
nat_gateway_id = gws[0]['NatGatewayId']
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
if not nat_gateway_id:
return _create_resource('route', route_table_id=route_table_id,
@ -2380,7 +2378,7 @@ def create_route(route_table_id=None, destination_cidr_block=None,
NatGatewayId=nat_gateway_id)
return {'created': True, 'id': ret.get('NatGatewayId')}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
def delete_route(route_table_id=None, destination_cidr_block=None,
@ -2413,7 +2411,7 @@ def delete_route(route_table_id=None, destination_cidr_block=None,
return {'created': False,
'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
return {'created': False, 'error': __utils__['boto.get_error'](e)}
return _delete_resource(resource='route', resource_id=route_table_id,
destination_cidr_block=destination_cidr_block,
@ -2469,7 +2467,7 @@ def replace_route(route_table_id=None, destination_cidr_block=None,
)
return {'replaced': False}
except BotoServerError as e:
return {'replaced': False, 'error': salt.utils.boto.get_error(e)}
return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
def describe_route_table(route_table_id=None, route_table_name=None,
@ -2530,7 +2528,7 @@ def describe_route_table(route_table_id=None, route_table_name=None,
return route_table
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def describe_route_tables(route_table_id=None, route_table_name=None,
@ -2616,7 +2614,7 @@ def describe_route_tables(route_table_id=None, route_table_name=None,
return tables
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': __utils__['boto.get_error'](e)}
def _create_dhcp_options(conn, domain_name=None, domain_name_servers=None, ntp_servers=None, netbios_name_servers=None,
@ -2826,7 +2824,7 @@ def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=Non
return {'msg': msg}
except botocore.exceptions.ClientError as err:
log.error('Got an error while trying to request vpc peering')
return {'error': salt.utils.boto.get_error(err)}
return {'error': __utils__['boto.get_error'](err)}
def _get_peering_connection_ids(name, conn):
@ -2953,7 +2951,7 @@ def accept_vpc_peering_connection( # pylint: disable=too-many-arguments
return {'msg': 'VPC peering connection accepted.'}
except botocore.exceptions.ClientError as err:
log.error('Got an error while trying to accept vpc peering')
return {'error': salt.utils.boto.get_error(err)}
return {'error': __utils__['boto.get_error'](err)}
def _vpc_peering_conn_id_for_name(name, conn):
@ -3031,7 +3029,7 @@ def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
conn.delete_vpc_peering_connection(DryRun=dry_run, VpcPeeringConnectionId=conn_id)
return {'msg': 'VPC peering connection deleted.'}
except botocore.exceptions.ClientError as err:
e = salt.utils.boto.get_error(err)
e = __utils__['boto.get_error'](err)
log.error('Failed to delete VPC peering %s: %s', conn_name or conn_id, e)
return {'error': e}

View File

@ -204,11 +204,21 @@ def map_run(path=None, **kwargs):
'''
Execute a salt cloud map file
Cloud Map data can be retrieved from several sources:
- a local file (provide the path to the file to the 'path' argument)
- a JSON-formatted map directly (provide the appropriately formatted map data to the 'map_data' argument)
- the Salt Pillar (provide the name of the map under 'pillar:cloud:maps' to the 'map_pillar' argument)
Note: Only one of these sources can be read at a time. The options are listed in their order of precedence.
CLI Examples:
.. code-block:: bash
salt minionname cloud.map_run /path/to/cloud.map
salt minionname cloud.map_run path=/path/to/cloud.map
salt minionname cloud.map_run map_pillar='<map_pillar>'
.. versionchanged:: 2018.3.1
salt minionname cloud.map_run map_data='<actual map data>'
'''
client = _get_client()

View File

@ -43,7 +43,7 @@ from salt.ext import six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
SaltInvocationError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range, zip
from salt.ext.six.moves import range, zip, map
# Only available on POSIX systems, nonfatal on windows
try:
@ -410,6 +410,19 @@ def _run(cmd,
return win_runas(cmd, runas, password, cwd)
if runas and salt.utils.platform.is_darwin():
# we need to insert the user simulation into the command itself and not
# just run it from the environment on macOS as that
# method doesn't work properly when run as root for certain commands.
if isinstance(cmd, (list, tuple)):
cmd = ' '.join(map(_cmd_quote, cmd))
cmd = 'su -l {0} -c "{1}"'.format(runas, cmd)
# set runas to None, because if you try to run `su -l` as well as
# simulate the environment macOS will prompt for the password of the
# user and will cause salt to hang.
runas = None
if runas:
# Save the original command before munging it
try:

View File

@ -201,6 +201,7 @@ import pipes
import re
import shutil
import string
import sys
import time
import uuid
import subprocess
@ -257,6 +258,7 @@ __func_alias__ = {
'signal_': 'signal',
'start_': 'start',
'tag_': 'tag',
'apply_': 'apply'
}
# Minimum supported versions
@ -271,6 +273,13 @@ NOTSET = object()
__virtualname__ = 'docker'
__virtual_aliases__ = ('dockerng', 'moby')
__proxyenabled__ = ['docker']
__outputter__ = {
'sls': 'highstate',
'apply_': 'highstate',
'highstate': 'highstate',
}
def __virtual__():
'''
@ -6586,6 +6595,9 @@ def _compile_state(sls_opts, mods=None):
'''
st_ = HighState(sls_opts)
if not mods:
return st_.compile_low_chunks()
high_data, errors = st_.render_highstate({sls_opts['saltenv']: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -6658,7 +6670,7 @@ def call(name, function, *args, **kwargs):
try:
salt_argv = [
'python',
'python{0}'.format(sys.version_info[0]),
os.path.join(thin_dest_path, 'salt-call'),
'--metadata',
'--local',
@ -6692,6 +6704,27 @@ def call(name, function, *args, **kwargs):
run_all(name, subprocess.list2cmdline(rm_thin_argv))
def apply_(name, mods=None, **kwargs):
    '''
    .. versionadded:: Fluorine

    Apply states! This function will call ``docker.highstate`` or
    ``docker.sls`` based on the arguments passed in. ``apply`` is intended to
    be the main gateway for all state executions against a container.

    name
        Container name or ID

    mods
        A string containing comma-separated list of SLS with defined states to
        apply to the container. If not passed, a highstate is applied instead.

    kwargs
        Additional keyword arguments are passed through to ``docker.sls`` /
        ``docker.highstate`` (e.g. ``saltenv``, ``pillar``).

    CLI Example:

    .. code-block:: bash

        salt 'docker' docker.apply web01
        salt 'docker' docker.apply web01 test
        salt 'docker' docker.apply web01 test,pkgs
    '''
    # Any explicit mods means a targeted state.sls run; otherwise fall back
    # to a full highstate.
    if mods:
        return sls(name, mods, **kwargs)
    return highstate(name, **kwargs)
def sls(name, mods=None, **kwargs):
'''
Apply the states defined by the specified SLS modules to the running
@ -6809,6 +6842,31 @@ def sls(name, mods=None, **kwargs):
return ret
def highstate(name, saltenv='base', **kwargs):
    '''
    Apply a highstate to the running container

    .. versionadded:: Fluorine

    The container does not need to have Salt installed, but Python is required.

    name
        Container name or ID

    saltenv : base
        Specify the environment from which to retrieve the state data

    kwargs
        Additional keyword arguments are passed through to ``docker.sls``.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.highstate compassionate_mirzakhani
    '''
    # Forward the caller-supplied saltenv instead of hard-coding 'base';
    # previously the saltenv argument was silently ignored.
    return sls(name, saltenv=saltenv, **kwargs)
def sls_build(repository,
tag='latest',
base='opensuse/python',

View File

@ -14,8 +14,8 @@ from __future__ import absolute_import, print_function, unicode_literals
import datetime
import difflib
import errno
import fileinput
import fnmatch
import io
import itertools
import logging
import operator
@ -61,6 +61,7 @@ import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
import salt.utils.data
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
@ -1722,18 +1723,19 @@ def _regex_to_static(src, regex):
return None
try:
src = re.search(regex, src, re.M)
compiled = re.compile(regex, re.DOTALL)
src = [line for line in src if compiled.search(line) or line.count(regex)]
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group().rstrip('\r') or regex
return src and src or []
def _assert_occurrence(src, probe, target, amount=1):
def _assert_occurrence(probe, target, amount=1):
'''
Raise an exception, if there are different amount of specified occurrences in src.
'''
occ = src.count(probe)
occ = len(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
@ -1749,7 +1751,7 @@ def _assert_occurrence(src, probe, target, amount=1):
return occ
def _get_line_indent(src, line, indent):
def _set_line_indent(src, line, indent):
'''
Indent the line with the source line.
'''
@ -1762,7 +1764,36 @@ def _get_line_indent(src, line, indent):
break
idt.append(c)
return ''.join(idt) + line.strip()
return ''.join(idt) + line.lstrip()
def _get_eol(line):
match = re.search('((?<!\r)\n|\r(?!\n)|\r\n)$', line)
return match and match.group() or ''
def _set_line_eol(src, line):
    '''
    Return ``line`` terminated with the same line ending that ``src`` uses,
    falling back to ``os.linesep`` when ``src`` carries no terminator.
    '''
    eol = _get_eol(src)
    if not eol:
        eol = os.linesep
    return line.rstrip() + eol
def _insert_line_before(idx, body, content, indent):
    '''
    Insert ``content`` into ``body`` just before index ``idx``, indented to
    match the line currently at ``idx``.  The insert is skipped when the
    preceding line already starts with the same content (that situation is a
    job for replace, not insert).  Returns the mutated ``body``.
    '''
    duplicate_above = idx and _starts_till(body[idx - 1], content) >= 0
    if not duplicate_above:
        body.insert(idx, _set_line_indent(body[idx], content, indent))
    return body
def _insert_line_after(idx, body, content, indent):
    '''
    Insert ``content`` into ``body`` just after index ``idx``, indented to
    match the line currently at ``idx``.  The insert is skipped when the
    following line already starts with the same content (no duplicates);
    appending after the last line is allowed.  Returns the mutated ``body``.
    '''
    nxt = body[idx + 1] if idx + 1 < len(body) else None
    # An absent (or empty) next line means we are free to append.
    if not nxt or _starts_till(nxt, content) < 0:
        body.insert(idx + 1, _set_line_indent(body[idx], content, indent))
    return body
def line(path, content=None, match=None, mode=None, location=None,
@ -1893,132 +1924,110 @@ def line(path, content=None, match=None, mode=None, location=None,
match = content
with salt.utils.files.fopen(path, mode='r') as fp_:
body = salt.utils.stringutils.to_unicode(fp_.read())
body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
body = salt.utils.data.decode_list(fp_.readlines())
body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(''.join(body))).hexdigest()
# Add empty line at the end if last line ends with eol.
# Allows simpler code
if body and _get_eol(body[-1]):
body.append('')
after = _regex_to_static(body, after)
before = _regex_to_static(body, before)
match = _regex_to_static(body, match)
if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
body = ''
elif mode == 'delete':
body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
elif mode == 'replace':
body = os.linesep.join([(_get_line_indent(file_line, content, indent)
if (file_line.find(match) > -1 and not file_line == content) else file_line)
for file_line in body.split(os.linesep)])
body = []
elif mode == 'delete' and match:
body = [line for line in body if line != match[0]]
elif mode == 'replace' and match:
idx = body.index(match[0])
file_line = body.pop(idx)
body.insert(idx, _set_line_indent(file_line, content, indent))
elif mode == 'insert':
if not location and not before and not after:
raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')
if not location:
if before and after:
_assert_occurrence(body, before, 'before')
_assert_occurrence(body, after, 'after')
_assert_occurrence(before, 'before')
_assert_occurrence(after, 'after')
out = []
lines = body.split(os.linesep)
in_range = False
for line in lines:
if line.find(after) > -1:
for line in body:
if line == after[0]:
in_range = True
elif line.find(before) > -1 and in_range:
out.append(_get_line_indent(line, content, indent))
elif line == before[0] and in_range:
cnd = _set_line_indent(line, content, indent)
out.append(cnd)
out.append(line)
body = os.linesep.join(out)
body = out
if before and not after:
_assert_occurrence(body, before, 'before')
out = []
lines = body.split(os.linesep)
for idx in range(len(lines)):
_line = lines[idx]
if _line.find(before) > -1:
cnd = _get_line_indent(_line, content, indent)
if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0): # Job for replace instead
out.append(cnd)
out.append(_line)
body = os.linesep.join(out)
_assert_occurrence(before, 'before')
idx = body.index(before[0])
body = _insert_line_before(idx, body, content, indent)
elif after and not before:
_assert_occurrence(body, after, 'after')
out = []
lines = body.split(os.linesep)
for idx, _line in enumerate(lines):
out.append(_line)
cnd = _get_line_indent(_line, content, indent)
# No duplicates or append, if "after" is the last line
if (_line.find(after) > -1 and
(lines[((idx + 1) < len(lines)) and idx + 1 or idx].strip() != cnd or
idx + 1 == len(lines))):
out.append(cnd)
body = os.linesep.join(out)
_assert_occurrence(after, 'after')
idx = body.index(after[0])
body = _insert_line_after(idx, body, content, indent)
else:
if location == 'start':
body = os.linesep.join((content, body))
if body:
body.insert(0, _set_line_eol(body[0], content))
else:
body.append(content + os.linesep)
elif location == 'end':
body = os.linesep.join((body, _get_line_indent(body[-1], content, indent) if body else content))
body.append(_set_line_indent(body[-1], content, indent) if body else content)
elif mode == 'ensure':
after = after and after.strip()
before = before and before.strip()
if before and after:
_assert_occurrence(body, before, 'before')
_assert_occurrence(body, after, 'after')
_assert_occurrence(before, 'before')
_assert_occurrence(after, 'after')
is_there = bool(body.count(content))
is_there = bool([l for l in body if l.count(content)])
if not is_there:
out = []
body = body.split(os.linesep)
for idx, line in enumerate(body):
out.append(line)
if line.find(content) > -1:
is_there = True
if not is_there:
if idx < (len(body) - 1) and line.find(after) > -1 and body[idx + 1].find(before) > -1:
out.append(content)
elif line.find(after) > -1:
raise CommandExecutionError('Found more than one line between '
'boundaries "before" and "after".')
body = os.linesep.join(out)
idx = body.index(after[0])
if idx < (len(body) - 1) and body[idx + 1] == before[0]:
cnd = _set_line_indent(body[idx], content, indent)
body.insert(idx + 1, cnd)
else:
raise CommandExecutionError('Found more than one line between '
'boundaries "before" and "after".')
elif before and not after:
_assert_occurrence(body, before, 'before')
body = body.split(os.linesep)
out = []
for idx in range(len(body)):
if body[idx].find(before) > -1:
prev = (idx > 0 and idx or 1) - 1
out.append(_get_line_indent(body[idx], content, indent))
if _starts_till(out[prev], content) > -1:
del out[prev]
out.append(body[idx])
body = os.linesep.join(out)
_assert_occurrence(before, 'before')
idx = body.index(before[0])
body = _insert_line_before(idx, body, content, indent)
elif not before and after:
_assert_occurrence(body, after, 'after')
body = body.split(os.linesep)
skip = None
out = []
for idx in range(len(body)):
if skip != body[idx]:
out.append(body[idx])
_assert_occurrence(after, 'after')
if body[idx].find(after) > -1:
next_line = idx + 1 < len(body) and body[idx + 1] or None
if next_line is not None and _starts_till(next_line, content) > -1:
skip = next_line
out.append(_get_line_indent(body[idx], content, indent))
body = os.linesep.join(out)
idx = body.index(after[0])
body = _insert_line_after(idx, body, content, indent)
else:
raise CommandExecutionError("Wrong conditions? "
"Unable to ensure line without knowing "
"where to put it before and/or after.")
changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
if body:
for idx, line in enumerate(body):
if not _get_eol(line) and idx+1 < len(body):
prev = idx and idx-1 or 1
body[idx] = _set_line_eol(body[prev], line)
# We do not need empty line at the end anymore
if '' == body[-1]:
body.pop()
changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(''.join(body))).hexdigest()
if backup and changed and __opts__['test'] is False:
try:
@ -2032,12 +2041,9 @@ def line(path, content=None, match=None, mode=None, location=None,
if changed:
if show_changes:
with salt.utils.files.fopen(path, 'r') as fp_:
path_content = [salt.utils.stringutils.to_unicode(x)
for x in fp_.read().splitlines(True)]
path_content = salt.utils.data.decode_list(fp_.read().splitlines(True))
changes_diff = ''.join(difflib.unified_diff(
path_content,
[salt.utils.stringutils.to_unicode(x)
for x in body.splitlines(True)]
path_content, body
))
if __opts__['test'] is False:
fh_ = None
@ -2045,7 +2051,7 @@ def line(path, content=None, match=None, mode=None, location=None,
# Make sure we match the file mode from salt.utils.files.fopen
mode = 'wb' if six.PY2 and salt.utils.platform.is_windows() else 'w'
fh_ = salt.utils.atomicfile.atomic_open(path, mode)
fh_.write(body)
fh_.write(''.join(body))
finally:
if fh_:
fh_.close()
@ -2535,10 +2541,16 @@ def blockreplace(path,
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
if not __utils__['files.is_text'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
try:
file_encoding = __utils__['files.get_encoding'](path)
except CommandExecutionError:
file_encoding = None
if __utils__['files.is_binary'](path):
if not file_encoding:
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if append_newline is None and not content.endswith((os.linesep, '\n')):
@ -2593,18 +2605,9 @@ def blockreplace(path,
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
#
# We could also use salt.utils.filebuffer.BufferedReader
try:
fi_file = fileinput.input(
path,
inplace=False,
backup=False,
bufsize=1,
mode='rb')
fi_file = io.open(path, mode='r', encoding=file_encoding, newline='')
for line in fi_file:
line = salt.utils.stringutils.to_unicode(line)
write_line_to_new_file = True
if linesep is None:
@ -2709,7 +2712,7 @@ def blockreplace(path,
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line))
fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
finally:
fh_.close()

View File

@ -3813,15 +3813,23 @@ def remote_refs(url,
https_pass=None,
ignore_retcode=False,
output_encoding=None,
saltenv='base'):
saltenv='base',
**kwargs):
'''
.. versionadded:: 2015.8.0
Return the remote refs for the specified URL
Return the remote refs for the specified URL by running ``git ls-remote``.
url
URL of the remote repository
filter
Optionally provide a ref name to ``git ls-remote``. This can be useful
to make this function run faster on repositories with many
branches/tags.
.. versionadded:: Fluorine
heads : False
Restrict output to heads. Can be combined with ``tags``.
@ -3893,7 +3901,13 @@ def remote_refs(url,
.. code-block:: bash
salt myminion git.remote_refs https://github.com/saltstack/salt.git
salt myminion git.remote_refs https://github.com/saltstack/salt.git filter=develop
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
filter_ = kwargs.pop('filter', None)
if kwargs:
salt.utils.invalid_kwargs(kwargs)
command = ['git', 'ls-remote']
if heads:
command.append('--heads')
@ -3906,6 +3920,8 @@ def remote_refs(url,
https_only=True))
except ValueError as exc:
raise SaltInvocationError(exc.__str__())
if filter_:
command.append(filter_)
output = _git_run(command,
user=user,
password=password,

View File

@ -50,12 +50,12 @@ from salt.ext.six.moves import zip
# Import 3rd-party libs
try:
import pylxd
PYLXD_AVAILABLE = True
HAS_PYLXD = True
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
PYLXD_AVAILABLE = False
HAS_PYLXD = False
# Set up logging
import logging
@ -87,7 +87,7 @@ _connection_pool = {}
def __virtual__():
if PYLXD_AVAILABLE:
if HAS_PYLXD:
if (LooseVersion(pylxd_version()) <
LooseVersion(_pylxd_minimal_version)):
return (
@ -3579,40 +3579,39 @@ def _pylxd_model_to_dict(obj):
# Monkey patching for missing functionality in pylxd
#
import pylxd.exceptions # NOQA
if HAS_PYLXD:
import pylxd.exceptions # NOQA
if not hasattr(pylxd.exceptions, 'NotFound'):
# Old version of pylxd
if not hasattr(pylxd.exceptions, 'NotFound'):
# Old version of pylxd
class NotFound(pylxd.exceptions.LXDAPIException):
"""An exception raised when an object is not found."""
class NotFound(pylxd.exceptions.LXDAPIException):
"""An exception raised when an object is not found."""
pylxd.exceptions.NotFound = NotFound
pylxd.exceptions.NotFound = NotFound
try:
from pylxd.container import Container
except ImportError:
from pylxd.models.container import Container
try:
from pylxd.container import Container
except ImportError:
from pylxd.models.container import Container
class FilesManager(Container.FilesManager):
class FilesManager(Container.FilesManager):
def put(self, filepath, data, mode=None, uid=None, gid=None):
headers = {}
if mode is not None:
if isinstance(mode, int):
mode = oct(mode)
elif not mode.startswith('0'):
mode = '0{0}'.format(mode)
headers['X-LXD-mode'] = mode
if uid is not None:
headers['X-LXD-uid'] = six.text_type(uid)
if gid is not None:
headers['X-LXD-gid'] = six.text_type(gid)
response = self._client.api.containers[
self._container.name].files.post(
params={'path': filepath}, data=data, headers=headers)
return response.status_code == 200
def put(self, filepath, data, mode=None, uid=None, gid=None):
headers = {}
if mode is not None:
if isinstance(mode, int):
mode = oct(mode)
elif not mode.startswith('0'):
mode = '0{0}'.format(mode)
headers['X-LXD-mode'] = mode
if uid is not None:
headers['X-LXD-uid'] = six.text_type(uid)
if gid is not None:
headers['X-LXD-gid'] = six.text_type(gid)
response = self._client.api.containers[
self._container.name].files.post(
params={'path': filepath}, data=data, headers=headers)
return response.status_code == 200
Container.FilesManager = FilesManager
Container.FilesManager = FilesManager

View File

@ -32,7 +32,6 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
@ -274,13 +273,11 @@ def list_(name=None, runas=None):
return launchctl('list',
label,
return_stdout=True,
output_loglevel='trace',
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
output_loglevel='trace',
runas=runas)
@ -535,7 +532,7 @@ def enabled(name, runas=None):
return False
def disabled(name, runas=None):
def disabled(name, runas=None, domain='system'):
'''
Check if the specified service is not enabled. This is the opposite of
``service.enabled``
@ -544,6 +541,8 @@ def disabled(name, runas=None):
:param str runas: User to run launchctl commands
:param str domain: domain to check for disabled services. Default is system.
:return: True if the specified service is NOT enabled, otherwise False
:rtype: bool
@ -553,8 +552,22 @@ def disabled(name, runas=None):
salt '*' service.disabled org.cups.cupsd
'''
# A service is disabled if it is not enabled
return not enabled(name, runas=runas)
ret = False
disabled = launchctl('print-disabled',
domain,
return_stdout=True,
output_loglevel='trace',
runas=runas)
for service in disabled.split("\n"):
if name in service:
srv_name = service.split("=>")[0].split("\"")[1]
status = service.split("=>")[1]
if name != srv_name:
pass
else:
return True if 'true' in status.lower() else False
return False
def get_all(runas=None):

View File

@ -74,8 +74,7 @@ def _atrun_enabled():
# Collect information on service: will raise an error if it fails
salt.utils.mac_utils.launchctl('list',
label,
return_stdout=True,
output_loglevel='quiet')
return_stdout=True)
return True
except CommandExecutionError:
return False
@ -111,9 +110,8 @@ def _enable_atrun():
return False
salt.utils.mac_utils.launchctl('enable',
'system/{0}'.format(label),
output_loglevel='quiet')
salt.utils.mac_utils.launchctl('load', path, output_loglevel='quiet')
'system/{0}'.format(label))
salt.utils.mac_utils.launchctl('load', path)
return _atrun_enabled()

View File

@ -152,85 +152,15 @@ Optional small program to encrypt data without needing salt modules.
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import os
# Import Salt libs
from salt.ext import six
import salt.syspaths
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.win_functions
import salt.utils.win_dacl
REQ_ERROR = None
try:
import libnacl.secret
import libnacl.sealed
except (ImportError, OSError) as e:
REQ_ERROR = 'libnacl import error, perhaps missing python libnacl package or should update.'
import salt.utils.nacl
__virtualname__ = 'nacl'
def __virtual__():
return (REQ_ERROR is None, REQ_ERROR)
def _get_config(**kwargs):
'''
Return configuration
'''
config = {
'box_type': 'sealedbox',
'sk': None,
'sk_file': os.path.join(__opts__['pki_dir'], 'master', 'nacl'),
'pk': None,
'pk_file': os.path.join(__opts__['pki_dir'], 'master', 'nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
config.update(__salt__['config.get'](config_key, {}))
except (NameError, KeyError) as e:
# likly using salt-run so fallback to __opts__
config.update(__opts__.get(config_key, {}))
# pylint: disable=C0201
for k in set(config.keys()) & set(kwargs.keys()):
config[k] = kwargs[k]
return config
def _get_sk(**kwargs):
'''
Return sk
'''
config = _get_config(**kwargs)
key = config['sk']
sk_file = config['sk_file']
if not key and sk_file:
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
key = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
if key is None:
raise Exception('no key or sk_file found')
return base64.b64decode(key)
def _get_pk(**kwargs):
'''
Return pk
'''
config = _get_config(**kwargs)
pubkey = config['pk']
pk_file = config['pk_file']
if not pubkey and pk_file:
with salt.utils.files.fopen(pk_file, 'rb') as keyf:
pubkey = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
if pubkey is None:
raise Exception('no pubkey or pk_file found')
pubkey = six.text_type(pubkey)
return base64.b64decode(pubkey)
return salt.utils.nacl.check_requirements()
def keygen(sk_file=None, pk_file=None, **kwargs):
@ -253,66 +183,8 @@ def keygen(sk_file=None, pk_file=None, **kwargs):
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
if pk_file is None:
pk_file = '{0}.pub'.format(sk_file)
if sk_file and pk_file is None:
if not os.path.isfile(sk_file):
kp = libnacl.public.SecretKey()
with salt.utils.files.fopen(sk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.sk))
if salt.utils.platform.is_windows():
cur_user = salt.utils.win_functions.get_current_user()
salt.utils.win_dacl.set_owner(sk_file, cur_user)
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
else:
# chmod 0600 file
os.chmod(sk_file, 1536)
return 'saved sk_file: {0}'.format(sk_file)
else:
raise Exception('sk_file:{0} already exist.'.format(sk_file))
if sk_file is None and pk_file:
raise Exception('sk_file: Must be set inorder to generate a public key.')
if os.path.isfile(sk_file) and os.path.isfile(pk_file):
raise Exception('sk_file:{0} and pk_file:{1} already exist.'.format(sk_file, pk_file))
if os.path.isfile(sk_file) and not os.path.isfile(pk_file):
# generate pk using the sk
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
sk = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
sk = base64.b64decode(sk)
kp = libnacl.public.SecretKey(sk)
with salt.utils.files.fopen(pk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.pk))
return 'saved pk_file: {0}'.format(pk_file)
kp = libnacl.public.SecretKey()
with salt.utils.files.fopen(sk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.sk))
if salt.utils.platform.is_windows():
cur_user = salt.utils.win_functions.get_current_user()
salt.utils.win_dacl.set_owner(sk_file, cur_user)
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
else:
# chmod 0600 file
os.chmod(sk_file, 1536)
with salt.utils.files.fopen(pk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.pk))
return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file)
kwargs['opts'] = __opts__
return salt.utils.nacl.keygen(sk_file, pk_file, **kwargs)
def enc(data, **kwargs):
@ -321,31 +193,8 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
if box_type == 'secretbox':
return secretbox_encrypt(data, **kwargs)
return sealedbox_encrypt(data, **kwargs)
kwargs['opts'] = __opts__
return salt.utils.nacl.enc(data, **kwargs)
def enc_file(name, out=None, **kwargs):
@ -365,20 +214,8 @@ def enc_file(name, out=None, **kwargs):
salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
try:
data = __salt__['cp.get_file_str'](name)
except Exception as e:
# likly using salt-run so fallback to local filesystem
with salt.utils.files.fopen(name, 'rb') as f:
data = salt.utils.stringutils.to_unicode(f.read())
d = enc(data, **kwargs)
if out:
if os.path.isfile(out):
raise Exception('file:{0} already exist.'.format(out))
with salt.utils.files.fopen(out, 'wb') as f:
f.write(salt.utils.stringutils.to_bytes(d))
return 'Wrote: {0}'.format(out)
return d
kwargs['opts'] = __opts__
return salt.utils.nacl.enc_file(name, out, **kwargs)
def dec(data, **kwargs):
@ -387,37 +224,8 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
if box_type == 'secretbox':
return secretbox_decrypt(data, **kwargs)
return sealedbox_decrypt(data, **kwargs)
kwargs['opts'] = __opts__
return salt.utils.nacl.dec(data, **kwargs)
def dec_file(name, out=None, **kwargs):
@ -437,20 +245,8 @@ def dec_file(name, out=None, **kwargs):
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
try:
data = __salt__['cp.get_file_str'](name)
except Exception as e:
# likly using salt-run so fallback to local filesystem
with salt.utils.files.fopen(name, 'rb') as f:
data = salt.utils.stringutils.to_unicode(f.read())
d = dec(data, **kwargs)
if out:
if os.path.isfile(out):
raise Exception('file:{0} already exist.'.format(out))
with salt.utils.files.fopen(out, 'wb') as f:
f.write(salt.utils.stringutils.to_bytes(d))
return 'Wrote: {0}'.format(out)
return d
kwargs['opts'] = __opts__
return salt.utils.nacl.dec_file(name, out, **kwargs)
def sealedbox_encrypt(data, **kwargs):
@ -466,12 +262,8 @@ def sealedbox_encrypt(data, **kwargs):
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
kwargs['opts'] = __opts__
return salt.utils.nacl.sealedbox_encrypt(data, **kwargs)
def sealedbox_decrypt(data, **kwargs):
@ -486,16 +278,8 @@ def sealedbox_decrypt(data, **kwargs):
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
return b.decrypt(base64.b64decode(data))
kwargs['opts'] = __opts__
return salt.utils.nacl.sealedbox_decrypt(data, **kwargs)
def secretbox_encrypt(data, **kwargs):
@ -511,12 +295,8 @@ def secretbox_encrypt(data, **kwargs):
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
kwargs['opts'] = __opts__
return salt.utils.nacl.secretbox_encrypt(data, **kwargs)
def secretbox_decrypt(data, **kwargs):
@ -532,12 +312,5 @@ def secretbox_decrypt(data, **kwargs):
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))
kwargs['opts'] = __opts__
return salt.utils.nacl.secretbox_decrypt(data, **kwargs)

View File

@ -119,7 +119,10 @@ def __virtual__():
os.path.exists(os.path.join(NILRT_MODULE_STATE_PATH, 'modules.dep.md5sum'))):
_update_nilrt_module_dep_info()
return __virtualname__
return (False, "Module opkg only works on nilrt based systems")
if os.path.isdir(OPKG_CONFDIR):
return __virtualname__
return False, "Module opkg only works on OpenEmbedded based systems"
def latest_version(*names, **kwargs):

View File

@ -627,7 +627,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
'''
if 'no_chown' in kwargs:
salt.utils.versions.warn_until(
'Flourine',
'Fluorine',
'The no_chown argument has been deprecated and is no longer used. '
'Its functionality was removed in Boron.')
kwargs.pop('no_chown')

View File

@ -181,7 +181,9 @@ def latest_version(*names, **kwargs):
out = __salt__['cmd.run'](cmd, output_loglevel='trace')
for line in out.splitlines():
p = line.split(',' if _supports_parsing() else None)
if line.startswith('No results found for'):
return pkglist
p = line.split(';' if _supports_parsing() else None)
if p and p[0] in ('=:', '<:', '>:', ''):
# These are explanation comments
@ -190,7 +192,7 @@ def latest_version(*names, **kwargs):
s = _splitpkg(p[0])
if s:
if not s[0] in pkglist:
if len(p) > 1 and p[1] == '<':
if len(p) > 1 and p[1] in ('<', '', '='):
pkglist[s[0]] = s[1]
else:
pkglist[s[0]] = ''
@ -669,7 +671,6 @@ def file_dict(*packages):
for package in packages:
cmd = ['pkg_info', '-qL', package]
ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
files[package] = []
for line in ret['stderr'].splitlines():
errors.append(line)
@ -681,7 +682,7 @@ def file_dict(*packages):
continue # unexpected string
ret = {'errors': errors, 'files': files}
for field in ret:
for field in list(ret):
if not ret[field] or ret[field] == '':
del ret[field]
return ret

View File

@ -65,21 +65,22 @@ def __virtual__():
# The module will be exposed as `rpmbuild` on non-RPM based systems
return 'rpmbuild'
else:
return False, 'The rpmbuild module could not be loaded: requires python-gnupg, gpg, rpm, rpmbuild, mock and createrepo utilities to be installed'
return False, 'The rpmbuild module could not be loaded: requires python-gnupg, ' \
'gpg, rpm, rpmbuild, mock and createrepo utilities to be installed'
def _create_rpmmacros():
def _create_rpmmacros(runas='root'):
'''
Create the .rpmmacros file in user's home directory
'''
home = os.path.expanduser('~')
rpmbuilddir = os.path.join(home, 'rpmbuild')
if not os.path.isdir(rpmbuilddir):
os.makedirs(rpmbuilddir)
__salt__['file.makedirs_perms'](name=rpmbuilddir, user=runas, group='mock')
mockdir = os.path.join(home, 'mock')
if not os.path.isdir(mockdir):
os.makedirs(mockdir)
__salt__['file.makedirs_perms'](name=mockdir, user=runas, group='mock')
rpmmacros = os.path.join(home, '.rpmmacros')
with salt.utils.files.fopen(rpmmacros, 'w') as afile:
@ -92,7 +93,7 @@ def _create_rpmmacros():
afile.write('%_gpg_name packaging@saltstack.com\n')
def _mk_tree():
def _mk_tree(runas='root'):
'''
Create the rpm build tree
'''
@ -100,7 +101,7 @@ def _mk_tree():
paths = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
for path in paths:
full = os.path.join(basedir, path)
os.makedirs(full)
__salt__['file.makedirs_perms'](name=full, user=runas, group='mock')
return basedir
@ -116,7 +117,7 @@ def _get_spec(tree_base, spec, template, saltenv='base'):
saltenv=saltenv)
def _get_src(tree_base, source, saltenv='base'):
def _get_src(tree_base, source, saltenv='base', runas='root'):
'''
Get the named sources and place them into the tree_base
'''
@ -127,6 +128,7 @@ def _get_src(tree_base, source, saltenv='base'):
lsrc = __salt__['cp.get_url'](source, dest, saltenv=saltenv)
else:
shutil.copy(source, dest)
__salt__['file.chown'](path=dest, user=runas, group='mock')
def _get_distset(tgt):
@ -171,7 +173,7 @@ def _get_deps(deps, tree_base, saltenv='base'):
return deps_list
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base', runas='root'):
'''
Create a source rpm from the given spec file and sources
@ -179,33 +181,74 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
Optional arguement, allows for no templating engine used to be
if none is desired.
saltenv
The saltenv to use for files downloaded from the salt filesever
runas
The user to run the build process as
.. versionadded:: 2018.3.2
.. note::
using SHA256 as digest and minimum level dist el6
'''
_create_rpmmacros()
tree_base = _mk_tree()
_create_rpmmacros(runas)
tree_base = _mk_tree(runas)
spec_path = _get_spec(tree_base, spec, template, saltenv)
__salt__['file.chown'](path=spec_path, user=runas, group='mock')
__salt__['file.chown'](path=tree_base, user=runas, group='mock')
if isinstance(sources, six.string_types):
sources = sources.split(',')
for src in sources:
_get_src(tree_base, src, saltenv)
_get_src(tree_base, src, saltenv, runas)
# make source rpms for dist el6 with SHA256, usable with mock on other dists
cmd = 'rpmbuild --verbose --define "_topdir {0}" -bs --define "dist .el6" {1}'.format(tree_base, spec_path)
__salt__['cmd.run'](cmd)
retrc = __salt__['cmd.retcode'](cmd, runas=runas)
if retrc != 0:
raise SaltInvocationError(
'Make source package for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
srpms = os.path.join(tree_base, 'SRPMS')
ret = []
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
__salt__['file.makedirs_perms'](name=dest_dir, user=runas, group='mock')
for fn_ in os.listdir(srpms):
full = os.path.join(srpms, fn_)
tgt = os.path.join(dest_dir, fn_)
@ -232,14 +275,16 @@ def build(runas,
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion
'''
ret = {}
try:
os.makedirs(dest_dir)
__salt__['file.chown'](path=dest_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -247,7 +292,7 @@ def build(runas,
srpm_build_dir = tempfile.mkdtemp()
try:
srpms = make_src_pkg(srpm_build_dir, spec, sources,
env, template, saltenv)
env, template, saltenv, runas)
except Exception as exc:
shutil.rmtree(srpm_build_dir)
log.error('Failed to make src package')
@ -259,17 +304,18 @@ def build(runas,
deps_dir = tempfile.mkdtemp()
deps_list = _get_deps(deps, deps_dir, saltenv)
retrc = 0
for srpm in srpms:
dbase = os.path.dirname(srpm)
results_dir = tempfile.mkdtemp()
try:
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, dbase))
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, results_dir))
__salt__['file.chown'](path=dbase, user=runas, group='mock')
__salt__['file.chown'](path=results_dir, user=runas, group='mock')
cmd = 'mock --root={0} --resultdir={1} --init'.format(tgt, results_dir)
__salt__['cmd.run'](cmd, runas=runas)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
if deps_list and not deps_list.isspace():
cmd = 'mock --root={0} --resultdir={1} --install {2} {3}'.format(tgt, results_dir, deps_list, noclean)
__salt__['cmd.run'](cmd, runas=runas)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
noclean += ' --no-clean'
cmd = 'mock --root={0} --resultdir={1} {2} {3} {4}'.format(
@ -278,17 +324,20 @@ def build(runas,
distset,
noclean,
srpm)
__salt__['cmd.run'](cmd, runas=runas)
cmd = ['rpm', '-qp', '--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmd, python_shell=False)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
cmdlist = [
'rpm',
'-qp',
'--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmdlist, python_shell=False)
for filename in os.listdir(results_dir):
full = os.path.join(results_dir, filename)
if filename.endswith('src.rpm'):
sdest = os.path.join(srpm_dir, filename)
try:
os.makedirs(srpm_dir)
__salt__['file.makedirs_perms'](name=srpm_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -301,7 +350,7 @@ def build(runas,
else:
log_file = os.path.join(log_dest, filename)
try:
os.makedirs(log_dest)
__salt__['file.makedirs_perms'](name=log_dest, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
@ -311,6 +360,15 @@ def build(runas,
log.error('Error building from %s: %s', srpm, exc)
finally:
shutil.rmtree(results_dir)
if retrc != 0:
raise SaltInvocationError(
'Building packages for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
shutil.rmtree(deps_dir)
shutil.rmtree(srpm_build_dir)
return ret
@ -433,7 +491,7 @@ def make_repo(repodir,
phrase = ''
if keyid is not None:
## import_keys
# import_keys
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
@ -477,14 +535,21 @@ def make_repo(repodir,
# need to update rpm with public key
cmd = 'rpm --import {0}'.format(pkg_pub_key_file)
__salt__['cmd.run'](cmd, runas=runas, use_vt=True)
retrc = __salt__['cmd.retcode'](cmd, runas=runas, use_vt=True)
if retrc != 0:
raise SaltInvocationError(
'Failed to import public key from file {0} with return '
'error {1}, check logs for further details'.format(
pkg_pub_key_file,
retrc)
)
## sign_it_here
# sign_it_here
# interval of 0.125 is really too fast on some systems
interval = 0.5
for file in os.listdir(repodir):
if file.endswith('.rpm'):
abs_file = os.path.join(repodir, file)
for fileused in os.listdir(repodir):
if fileused.endswith('.rpm'):
abs_file = os.path.join(repodir, fileused)
number_retries = timeout / interval
times_looped = 0
error_msg = 'Failed to sign file {0}'.format(abs_file)

View File

@ -21,6 +21,7 @@ import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
import salt.utils.decorators as decorators
import salt.utils.versions
from salt.exceptions import CommandExecutionError, SaltInvocationError
# Import 3rd-party libs
@ -502,12 +503,11 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
return ret
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
def fcontext_add_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2017.7.0
.. versionadded:: Fluorine
Sets or deletes the SELinux policy for a given filespec and other
optional parameters.
Adds the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
@ -535,10 +535,116 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_policy my-policy
'''
return _fcontext_add_or_delete_policy('add', name, filetype, sel_type, sel_user, sel_level)
def fcontext_delete_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: Fluorine
Deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_delete_policy my-policy
'''
return _fcontext_add_or_delete_policy('delete', name, filetype, sel_type, sel_user, sel_level)
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2017.7.0
Adds or deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
.. warning::
Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`,
or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`.
.. deprecated:: Fluorine
action
The action to perform. Either ``add`` or ``delete``.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_or_delete_policy add my-policy
'''
salt.utils.versions.warn_until(
'Sodium',
'The \'selinux.fcontext_add_or_delete_policy\' module has been deprecated. Please use the '
'\'selinux.fcontext_add_policy\' and \'selinux.fcontext_delete_policy\' modules instead. '
'Support for the \'selinux.fcontext_add_or_delete_policy\' module will be removed in Salt '
'{version}.'
)
return _fcontext_add_or_delete_policy(action, name, filetype, sel_type, sel_user, sel_level)
def _fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: Fluorine
Performs the action as called from ``fcontext_add_policy`` or ``fcontext_delete_policy``.
Returns the result of the call to semanage.
'''
if action not in ['add', 'delete']:
raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action))
cmd = 'semanage fcontext --{0}'.format(action)
@ -642,13 +748,13 @@ def port_get_policy(name, sel_type=None, protocol=None, port=None):
* port (the port(s) and/or port range(s))
name
The protocol and port spec. Can be formatted as (tcp|udp)/(port|port-range).
The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.
sel_type
The SELinux Type.
protocol
The protocol for the port (tcp|udp). Required if name is not formatted.
The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.
port
The port or port range. Required if name is not formatted.
@ -680,25 +786,22 @@ def port_get_policy(name, sel_type=None, protocol=None, port=None):
'port': parts.group(3).strip(), }
def port_add_or_delete_policy(action, name, sel_type=None, protocol=None, port=None, sel_range=None):
def port_add_policy(name, sel_type=None, protocol=None, port=None, sel_range=None):
'''
.. versionadded:: Fluorine
Adds or deletes the SELinux policy for a given protocol and port.
Adds the SELinux policy for a given protocol and port.
Returns the result of the call to semanage.
action
The action to perform. Either 'add' or 'delete'.
name
The protocol and port spec. Can be formatted as (tcp|udp)/(port|port-range).
The protocol and port spec. Can be formatted as ``(tcp|udp)/(port|port-range)``.
sel_type
The SELinux Type. Required for 'add'.
The SELinux Type. Required.
protocol
The protocol for the port (tcp|udp). Required if name is not formatted.
The protocol for the port, ``tcp`` or ``udp``. Required if name is not formatted.
port
The port or port range. Required if name is not formatted.
@ -710,8 +813,46 @@ def port_add_or_delete_policy(action, name, sel_type=None, protocol=None, port=N
.. code-block:: bash
salt '*' selinux.port_add_or_delete_policy add tcp/8080 http_port_t
salt '*' selinux.port_add_or_delete_policy add foobar http_port_t protocol=tcp port=8091
salt '*' selinux.port_add_policy add tcp/8080 http_port_t
salt '*' selinux.port_add_policy add foobar http_port_t protocol=tcp port=8091
'''
return _port_add_or_delete_policy('add', name, sel_type, protocol, port, sel_range)
def port_delete_policy(name, protocol=None, port=None):
    '''
    .. versionadded:: Fluorine

    Delete the SELinux policy for a given protocol and port.

    Returns the result of the call to semanage.

    name
        The protocol and port spec. Can be formatted as
        ``(tcp|udp)/(port|port-range)``.

    protocol
        The protocol for the port, ``tcp`` or ``udp``. Required if name
        is not formatted.

    port
        The port or port range. Required if name is not formatted.

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.port_delete_policy tcp/8080
        salt '*' selinux.port_delete_policy foobar protocol=tcp port=8091
    '''
    # Delegate to the shared add/delete implementation. sel_type and
    # sel_range are meaningless for a delete, so pass None for both.
    return _port_add_or_delete_policy(
        'delete', name, None, protocol, port, None)
def _port_add_or_delete_policy(action, name, sel_type=None, protocol=None, port=None, sel_range=None):
'''
.. versionadded:: Fluorine
Performs the action as called from ``port_add_policy`` or ``port_delete_policy``.
Returns the result of the call to semanage.
'''
if action not in ['add', 'delete']:
raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action))

View File

@ -2084,9 +2084,9 @@ def pkg(pkg_path,
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if member.path.startswith((os.sep, '..{0}'.format(os.sep))):
if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))):
return {}
elif '..{0}'.format(os.sep) in member.path:
elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()

View File

@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""
'''
Docker Swarm Module using Docker's Python SDK
=============================================
@ -18,12 +17,17 @@ Dependencies
Docker Python SDK
-----------------
pip install -U docker
.. code-block:: bash
pip install -U docker
More information: https://docker-py.readthedocs.io/en/stable/
"""
'''
# Import python libraries
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.json
try:
@ -41,7 +45,7 @@ def __virtual__():
'''
if HAS_DOCKER:
return __virtualname__
return False, 'The swarm module failed to load: Docker python module is not avaialble.'
return False, 'The swarm module failed to load: Docker python module is not available.'
def __init__(self):
@ -58,7 +62,7 @@ def swarm_tokens():
.. code-block:: bash
salt '*' swarm.swarm_tokens
salt '*' swarm.swarm_tokens
'''
client = docker.APIClient(base_url='unix://var/run/docker.sock')
service = client.inspect_swarm()
@ -72,37 +76,39 @@ def swarm_init(advertise_addr=str,
Initalize Docker on Minion as a Swarm Manager
advertise_addr
The ip of the manager
The ip of the manager
listen_addr
Listen address used for inter-manager communication,
as well as determining the networking interface used
for the VXLAN Tunnel Endpoint (VTEP).
This can either be an address/port combination in
the form 192.168.1.1:4567,
or an interface followed by a port number,
like eth0:4567
Listen address used for inter-manager communication,
as well as determining the networking interface used
for the VXLAN Tunnel Endpoint (VTEP).
This can either be an address/port combination in
the form 192.168.1.1:4567,
or an interface followed by a port number,
like eth0:4567
force_new_cluster
Force a new cluster if True is passed
Force a new cluster if True is passed
CLI Example:
.. code-block:: bash
salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False
salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False
'''
try:
salt_return = {}
__context__['client'].swarm.init(advertise_addr,
listen_addr,
force_new_cluster)
output = 'Docker swarm has been Initalized on '+ __context__['server_name'] + ' and the worker/manager Join token is below'
output = 'Docker swarm has been initialized on {0} ' \
'and the worker/manager Join token is below'.format(__context__['server_name'])
salt_return.update({'Comment': output,
'Tokens': swarm_tokens()})
except TypeError:
salt_return = {}
salt_return.update({'Error': 'Please make sure your passing advertise_addr, listen_addr and force_new_cluster correctly.'})
salt_return.update({'Error': 'Please make sure you are passing advertise_addr, '
'listen_addr and force_new_cluster correctly.'})
return salt_return
@ -113,21 +119,22 @@ def joinswarm(remote_addr=int,
Join a Swarm Worker to the cluster
remote_addr
The manager node you want to connect to for the swarm
The manager node you want to connect to for the swarm
listen_addr
Listen address used for inter-manager communication if the node gets promoted to manager,
as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)
Listen address used for inter-manager communication if the node gets promoted to manager,
as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)
token
Either the manager join token or the worker join token.
You can get the worker or manager token via `salt '*' swarm.swarm_tokens`
Either the manager join token or the worker join token.
You can get the worker or manager token via ``salt '*' swarm.swarm_tokens``
CLI Example:
.. code-block:: bash
salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il'
salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \
token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il'
'''
try:
salt_return = {}
@ -138,7 +145,8 @@ def joinswarm(remote_addr=int,
salt_return.update({'Comment': output, 'Manager_Addr': remote_addr})
except TypeError:
salt_return = {}
salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and your passing remote_addr, listen_addr and token correctly.'})
salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are '
'passing remote_addr, listen_addr and token correctly.'})
return salt_return
@ -147,13 +155,13 @@ def leave_swarm(force=bool):
Force the minion to leave the swarm
force
Will force the minion/worker/manager to leave the swarm
Will force the minion/worker/manager to leave the swarm
CLI Example:
.. code-block:: bash
salt '*' swarm.leave_swarm force=False
salt '*' swarm.leave_swarm force=False
'''
salt_return = {}
__context__['client'].swarm.leave(force=force)
@ -173,30 +181,32 @@ def service_create(image=str,
Create Docker Swarm Service Create
image
The docker image
The docker image
name
Is the service name
Is the service name
command
The docker command to run in the container at launch
The docker command to run in the container at launch
hostname
The hostname of the containers
The hostname of the containers
replicas
How many replicas you want running in the swarm
How many replicas you want running in the swarm
target_port
The target port on the container
The target port on the container
published_port
port thats published on the host/os
port thats published on the host/os
CLI Example:
.. code-block:: bash
salt '*' swarm.service_create image=httpd name=Test_Service command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
salt '*' swarm.service_create image=httpd name=Test_Service \
command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
'''
try:
salt_return = {}
@ -219,7 +229,8 @@ def service_create(image=str,
'Published_Port': published_port})
except TypeError:
salt_return = {}
salt_return.update({'Error': 'Please make sure your passing arguments correctly [image, name, command, hostname, replicas, target_port and published_port]'})
salt_return.update({'Error': 'Please make sure you are passing arguments correctly '
'[image, name, command, hostname, replicas, target_port and published_port]'})
return salt_return
@ -228,12 +239,13 @@ def swarm_service_info(service_name=str):
Swarm Service Information
service_name
The name of the service that you want information on about the service
The name of the service that you want information on about the service
CLI Example:
.. code-block:: bash
salt '*' swarm.swarm_service_info service_name=Test_Service
salt '*' swarm.swarm_service_info service_name=Test_Service
'''
try:
salt_return = {}
@ -282,12 +294,13 @@ def remove_service(service=str):
Remove Swarm Service
service
The name of the service
The name of the service
CLI Example:
.. code-block:: bash
salt '*' swarm.remove_service service=Test_Service
salt '*' swarm.remove_service service=Test_Service
'''
try:
salt_return = {}
@ -306,12 +319,13 @@ def node_ls(server=str):
Displays Information about Swarm Nodes with passing in the server
server
The minion/server name
The minion/server name
CLI Example:
.. code-block:: bash
salt '*' swarm.node_ls server=minion1
salt '*' swarm.node_ls server=minion1
'''
try:
salt_return = {}
@ -327,7 +341,7 @@ def node_ls(server=str):
role = items['Spec']['Role']
availability = items['Spec']['Availability']
status = items['Status']
Version = items['Version']['Index']
version = items['Version']['Index']
salt_return.update({'Docker Version': docker_version,
'Platform': platform,
'Hostname': hostnames,
@ -335,7 +349,7 @@ def node_ls(server=str):
'Roles': role,
'Availability': availability,
'Status': status,
'Version': Version})
'Version': version})
except TypeError:
salt_return = {}
salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'})
@ -347,15 +361,16 @@ def remove_node(node_id=str, force=bool):
Remove a node from a swarm and the target needs to be a swarm manager
node_id
The node id from the return of swarm.node_ls
The node id from the return of swarm.node_ls
force
Forcefully remove the node/minion from the service
Forcefully remove the node/minion from the service
CLI Example:
.. code-block:: bash
salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false
salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false
'''
client = docker.APIClient(base_url='unix://var/run/docker.sock')
try:
@ -380,24 +395,26 @@ def update_node(availability=str,
Updates docker swarm nodes/needs to target a manager node/minion
availability
Drain or Active
Drain or Active
node_name
minion/node
minion/node
role
role of manager or worker
role of manager or worker
node_id
The Id and that can be obtained via swarm.node_ls
The Id and that can be obtained via swarm.node_ls
version
Is obtained by swarm.node_ls
Is obtained by swarm.node_ls
CLI Example:
.. code-block:: bash
salt '*' docker_util.update_node availability=drain node_name=minion2 role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19
salt '*' docker_util.update_node availability=drain node_name=minion2 \
role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19
'''
client = docker.APIClient(base_url='unix://var/run/docker.sock')
try:

View File

@ -60,17 +60,18 @@ def _get_gecos(name):
Retrieve GECOS field info and return it in dictionary form
'''
gecos_field = salt.utils.stringutils.to_unicode(
pwd.getpwnam(_quote_username(name)).pw_gecos).split(',', 3)
pwd.getpwnam(_quote_username(name)).pw_gecos).split(',', 4)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
while len(gecos_field) < 5:
gecos_field.append('')
return {'fullname': salt.utils.data.decode(gecos_field[0]),
'roomnumber': salt.utils.data.decode(gecos_field[1]),
'workphone': salt.utils.data.decode(gecos_field[2]),
'homephone': salt.utils.data.decode(gecos_field[3])}
'homephone': salt.utils.data.decode(gecos_field[3]),
'other': salt.utils.data.decode(gecos_field[4])}
def _build_gecos(gecos_dict):
@ -78,10 +79,11 @@ def _build_gecos(gecos_dict):
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''))
return '{0},{1},{2},{3},{4}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''),
gecos_dict.get('other', ''),).rstrip(',')
def _update_gecos(name, key, value, root=None):
@ -124,6 +126,7 @@ def add(name,
roomnumber='',
workphone='',
homephone='',
other='',
createhome=True,
loginclass=None,
root=None,
@ -237,6 +240,8 @@ def add(name,
chworkphone(name, workphone)
if homephone:
chhomephone(name, homephone)
if other:
chother(name, other)
return True
@ -507,6 +512,19 @@ def chhomephone(name, homephone):
return _update_gecos(name, 'homephone', homephone)
def chother(name, other):
    '''
    Change the user's "other" GECOS attribute

    CLI Example:

    .. code-block:: bash

        salt '*' user.chother foobar other_info
    '''
    # The heavy lifting (reading the current GECOS string, replacing the
    # requested field, and invoking usermod) lives in _update_gecos.
    return _update_gecos(name, 'other', other)
def chloginclass(name, loginclass, root=None):
'''
Change the default login class of the user
@ -588,9 +606,9 @@ def _format_info(data):
Return user information in a pretty way
'''
# Put GECOS info into a list
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 3)
# Make sure our list has at least four elements
while len(gecos_field) < 4:
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 4)
# Make sure our list has at least five elements
while len(gecos_field) < 5:
gecos_field.append('')
return {'gid': data.pw_gid,
@ -603,7 +621,8 @@ def _format_info(data):
'fullname': gecos_field[0],
'roomnumber': gecos_field[1],
'workphone': gecos_field[2],
'homephone': gecos_field[3]}
'homephone': gecos_field[3],
'other': gecos_field[4]}
@salt.utils.decorators.path.which('id')

View File

@ -324,7 +324,7 @@ def _get_nics(dom):
Get domain network interfaces from a libvirt domain object.
'''
nics = {}
doc = minidom.parse(_StringIO(dom.getXMLDesc(0)))
doc = minidom.parse(_StringIO(dom.XMLDesc(0)))
for node in doc.getElementsByTagName('devices'):
i_nodes = node.getElementsByTagName('interface')
for i_node in i_nodes:
@ -366,7 +366,7 @@ def _get_graphics(dom):
'listen': 'None',
'port': 'None',
'type': 'None'}
xml = dom.getXMLDesc(0)
xml = dom.XMLDesc(0)
ssock = _StringIO(xml)
doc = minidom.parse(ssock)
for node in doc.getElementsByTagName('domain'):
@ -382,7 +382,7 @@ def _get_disks(dom):
Get domain disks from a libvirt domain object.
'''
disks = {}
doc = minidom.parse(_StringIO(dom.getXMLDesc(0)))
doc = minidom.parse(_StringIO(dom.XMLDesc(0)))
for elem in doc.getElementsByTagName('disk'):
sources = elem.getElementsByTagName('source')
targets = elem.getElementsByTagName('target')
@ -1620,7 +1620,7 @@ def start(name, **kwargs):
salt '*' virt.start <domain>
'''
conn = __get_conn(**kwargs)
ret = _get_domain(conn, name).create == 0
ret = _get_domain(conn, name).create() == 0
conn.close()
return ret
@ -2266,7 +2266,7 @@ def vm_diskstats(vm_=None, **kwargs):
'''
Extract the disk devices names from the domain XML definition
'''
doc = minidom.parse(_StringIO(dom.getXMLDesc(0)))
doc = minidom.parse(_StringIO(dom.XMLDesc(0)))
disks = []
for elem in doc.getElementsByTagName('disk'):
targets = elem.getElementsByTagName('target')

View File

@ -30,7 +30,7 @@ import shutil # do not remove, used in imported file.py functions
import re # do not remove, used in imported file.py functions
import string # do not remove, used in imported file.py functions
import sys # do not remove, used in imported file.py functions
import fileinput # do not remove, used in imported file.py functions
import io # do not remove, used in imported file.py functions
import fnmatch # do not remove, used in imported file.py functions
import mmap # do not remove, used in imported file.py functions
import glob # do not remove, used in imported file.py functions
@ -58,8 +58,9 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
RE_FLAG_TABLE, blockreplace, prepend, seek_read, seek_write, rename,
lstat, path_exists_glob, write, pardir, join, HASHES, HASHES_REVMAP,
comment, uncomment, _add_flags, comment_line, _regex_to_static,
_get_line_indent, apply_template_on_contents, dirname, basename,
list_backups_dir, _assert_occurrence, _starts_till)
_set_line_indent, apply_template_on_contents, dirname, basename,
list_backups_dir, _assert_occurrence, _starts_till, _set_line_eol, _get_eol,
_insert_line_after, _insert_line_before)
from salt.modules.file import normpath as normpath_
from salt.utils.functools import namespaced_function as _namespaced_function
@ -116,8 +117,9 @@ def __virtual__():
global blockreplace, prepend, seek_read, seek_write, rename, lstat
global write, pardir, join, _add_flags, apply_template_on_contents
global path_exists_glob, comment, uncomment, _mkstemp_copy
global _regex_to_static, _get_line_indent, dirname, basename
global _regex_to_static, _set_line_indent, dirname, basename
global list_backups_dir, normpath_, _assert_occurrence, _starts_till
global _insert_line_before, _insert_line_after, _set_line_eol, _get_eol
replace = _namespaced_function(replace, globals())
search = _namespaced_function(search, globals())
@ -172,7 +174,11 @@ def __virtual__():
uncomment = _namespaced_function(uncomment, globals())
comment_line = _namespaced_function(comment_line, globals())
_regex_to_static = _namespaced_function(_regex_to_static, globals())
_get_line_indent = _namespaced_function(_get_line_indent, globals())
_set_line_indent = _namespaced_function(_set_line_indent, globals())
_set_line_eol = _namespaced_function(_set_line_eol, globals())
_get_eol = _namespaced_function(_get_eol, globals())
_insert_line_after = _namespaced_function(_insert_line_after, globals())
_insert_line_before = _namespaced_function(_insert_line_before, globals())
_mkstemp_copy = _namespaced_function(_mkstemp_copy, globals())
_add_flags = _namespaced_function(_add_flags, globals())
apply_template_on_contents = _namespaced_function(apply_template_on_contents, globals())

View File

@ -35,7 +35,7 @@ Current known limitations
- lxml
- uuid
- struct
- salt.modules.reg
- salt.utils.win_reg
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
@ -98,7 +98,7 @@ try:
import lxml
import struct
from lxml import etree
from salt.modules.reg import Registry as Registry
from salt.utils.win_reg import Registry
HAS_WINDOWS_MODULES = True
TRUE_VALUE_XPATH = etree.XPath('.//*[local-name() = "trueValue"]')
FALSE_VALUE_XPATH = etree.XPath('.//*[local-name() = "falseValue"]')
@ -3345,9 +3345,11 @@ def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.platform.is_windows() and HAS_WINDOWS_MODULES:
return __virtualname__
return False
if not salt.utils.platform.is_windows():
return False, 'win_lgpo: Not a Windows System'
if not HAS_WINDOWS_MODULES:
return False, 'win_lgpo: Required modules failed to load'
return __virtualname__
def _updateNamespace(item, new_namespace):
@ -6045,7 +6047,7 @@ def set_(computer_policy=None, user_policy=None,
else:
raise SaltInvocationError(msg)
if policy_namespace and policy_name in _admTemplateData[policy_namespace] and the_policy is not None:
log.debug('setting == %s', _admTemplateData[policy_namespace][policy_name].lower())
log.debug('setting == %s', six.text_type(_admTemplateData[policy_namespace][policy_name]).lower())
log.debug(six.text_type(_admTemplateData[policy_namespace][policy_name]).lower())
if six.text_type(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \
and six.text_type(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured':

View File

@ -9,16 +9,20 @@ import logging
# Import Salt libs
import salt.utils.platform
from salt.exceptions import SaltInvocationError
from salt.exceptions import SaltInvocationError, CommandExecutionError
# Import 3rd party libs
from salt.ext import six
_HKEY = 'HKLM'
_SNMP_KEY = r'SYSTEM\CurrentControlSet\Services\SNMP\Parameters'
_AGENT_KEY = r'{0}\RFC1156Agent'.format(_SNMP_KEY)
_COMMUNITIES_KEY = r'{0}\ValidCommunities'.format(_SNMP_KEY)
_SNMP_GPO_KEY = r'SOFTWARE\Policies\SNMP\Parameters'
_COMMUNITIES_GPO_KEY = r'{0}\ValidCommunities'.format(_SNMP_GPO_KEY)
_PERMISSION_TYPES = {'None': 1,
'Notify': 2,
'Read Only': 4,
@ -284,6 +288,21 @@ def get_community_names():
'''
Get the current accepted SNMP community names and their permissions.
If community names are being managed by Group Policy, those values will be
returned instead like this:
.. code-block:: bash
TestCommunity:
Managed by GPO
Community names managed normally will denote the permission instead:
.. code-block:: bash
TestCommunity:
Read Only
Returns:
dict: A dictionary of community names and permissions.
@ -294,25 +313,69 @@ def get_community_names():
salt '*' win_snmp.get_community_names
'''
ret = dict()
current_values = __salt__['reg.list_values'](
_HKEY, _COMMUNITIES_KEY, include_default=False)
# The communities are stored as the community name with a numeric permission
# value. Convert the numeric value to the text equivalent, as present in the
# Windows SNMP service GUI.
if isinstance(current_values, list):
for current_value in current_values:
# Look in GPO settings first
if __salt__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):
# Ignore error values
if not isinstance(current_value, dict):
continue
_LOG.debug('Loading communities from Group Policy settings')
permissions = six.text_type()
for permission_name in _PERMISSION_TYPES:
if current_value['vdata'] == _PERMISSION_TYPES[permission_name]:
permissions = permission_name
break
ret[current_value['vname']] = permissions
current_values = __salt__['reg.list_values'](
_HKEY, _COMMUNITIES_GPO_KEY, include_default=False)
# GPO settings are different in that they do not designate permissions
# They are a numbered list of communities like so:
#
# {1: "community 1",
# 2: "community 2"}
#
# Denote that it is being managed by Group Policy.
#
# community 1:
# Managed by GPO
# community 2:
# Managed by GPO
if isinstance(current_values, list):
for current_value in current_values:
# Ignore error values
if not isinstance(current_value, dict):
continue
ret[current_value['vdata']] = 'Managed by GPO'
if not ret:
_LOG.debug('Loading communities from SNMP settings')
current_values = __salt__['reg.list_values'](
_HKEY, _COMMUNITIES_KEY, include_default=False)
# The communities are stored as the community name with a numeric
# permission value. Like this (4 = Read Only):
#
# {"community 1": 4,
# "community 2": 4}
#
# Convert the numeric value to the text equivalent, as present in the
# Windows SNMP service GUI.
#
# community 1:
# Read Only
# community 2:
# Read Only
if isinstance(current_values, list):
for current_value in current_values:
# Ignore error values
if not isinstance(current_value, dict):
continue
permissions = six.text_type()
for permission_name in _PERMISSION_TYPES:
if current_value['vdata'] == _PERMISSION_TYPES[permission_name]:
permissions = permission_name
break
ret[current_value['vname']] = permissions
if not ret:
_LOG.debug('Unable to find existing communities.')
@ -323,6 +386,11 @@ def set_community_names(communities):
'''
Manage the SNMP accepted community names and their permissions.
.. note::
Settings managed by Group Policy will always take precedence over those
set using the SNMP interface. Therefore if this function finds Group
Policy settings it will raise a CommandExecutionError
Args:
communities (dict): A dictionary of SNMP community names and
permissions. The possible permissions can be found via
@ -331,6 +399,10 @@ def set_community_names(communities):
Returns:
bool: True if successful, otherwise False
Raises:
CommandExecutionError:
If SNMP settings are being managed by Group Policy
CLI Example:
.. code-block:: bash
@ -339,6 +411,11 @@ def set_community_names(communities):
'''
values = dict()
if __salt__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):
_LOG.debug('Communities on this system are managed by Group Policy')
raise CommandExecutionError(
'Communities on this system are managed by Group Policy')
current_communities = get_community_names()
if communities == current_communities:

View File

@ -511,7 +511,7 @@ def get_system_info():
.. code-block:: bash
salt 'minion-id' system.get_info
salt 'minion-id' system.get_system_info
'''
os_type = {1: 'Work Station',
2: 'Domain Controller',

View File

@ -96,6 +96,7 @@ TASK_TRIGGER_SESSION_STATE_CHANGE = 11
duration = {'Immediately': 'PT0M',
'Indefinitely': 'PT0M',
'Do not wait': 'PT0M',
'15 seconds': 'PT15S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
@ -1381,10 +1382,16 @@ def info(name, location='\\'):
trigger['end_date'] = end_date
trigger['end_time'] = end_time
trigger['enabled'] = triggerObj.Enabled
if triggerObj.RandomDelay == '':
trigger['random_delay'] = False
else:
trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay)
if hasattr(triggerObj, 'RandomDelay'):
if triggerObj.RandomDelay:
trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay)
else:
trigger['random_delay'] = False
if hasattr(triggerObj, 'Delay'):
if triggerObj.Delay:
trigger['delay'] = _reverse_lookup(duration, triggerObj.Delay)
else:
trigger['delay'] = False
triggers.append(trigger)
properties['settings'] = settings
@ -1623,6 +1630,7 @@ def add_trigger(name=None,
repeat_duration=None,
repeat_stop_at_duration_end=False,
execution_time_limit=None,
delay=None,
**kwargs):
r'''
@ -1687,9 +1695,9 @@ def add_trigger(name=None,
:param str random_delay: The delay time that is randomly added to the start
time of the trigger. Valid values are:
- 30 seconds
= 1 minute
- 1 minute
- 30 minutes
= 1 hour
- 1 hour
- 8 hours
- 1 day
@ -1725,6 +1733,16 @@ def add_trigger(name=None,
- 1 day
- 3 days (default)
:param str delay: The time the trigger waits after its activation to start the task.
Valid values are:
- 15 seconds
- 30 seconds
- 1 minute
- 30 minutes
- 1 hour
- 8 hours
- 1 day
**kwargs**
There are optional keyword arguments determined by the type of trigger
@ -1976,6 +1994,8 @@ def add_trigger(name=None,
# Settings
trigger.StartBoundary = start_boundary
# Advanced Settings
if delay:
trigger.Delay = _lookup_first(duration, delay)
if random_delay:
trigger.RandomDelay = _lookup_first(duration, random_delay)
if repeat_interval:

View File

@ -6,487 +6,224 @@ from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import re
from datetime import datetime
# Import Salt libs
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
log = logging.getLogger(__name__)
# Maybe put in a different file ... ? %-0
# http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml
LINTOWIN = {
'Africa/Abidjan': 'Greenwich Standard Time',
'Africa/Accra': 'Greenwich Standard Time',
'Africa/Addis_Ababa': 'E. Africa Standard Time',
'Africa/Algiers': 'W. Central Africa Standard Time',
'Africa/Asmera': 'E. Africa Standard Time',
'Africa/Bamako': 'Greenwich Standard Time',
'Africa/Bangui': 'W. Central Africa Standard Time',
'Africa/Banjul': 'Greenwich Standard Time',
'Africa/Bissau': 'Greenwich Standard Time',
'Africa/Blantyre': 'South Africa Standard Time',
'Africa/Brazzaville': 'W. Central Africa Standard Time',
'Africa/Bujumbura': 'South Africa Standard Time',
'Africa/Cairo': 'Egypt Standard Time',
'Africa/Casablanca': 'Morocco Standard Time',
'Africa/Conakry': 'Greenwich Standard Time',
'Africa/Dakar': 'Greenwich Standard Time',
'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
'Africa/Djibouti': 'E. Africa Standard Time',
'Africa/Douala': 'W. Central Africa Standard Time',
'Africa/El_Aaiun': 'Greenwich Standard Time',
'Africa/Freetown': 'Greenwich Standard Time',
'Africa/Gaborone': 'South Africa Standard Time',
'Africa/Harare': 'South Africa Standard Time',
'Africa/Johannesburg': 'South Africa Standard Time',
'Africa/Juba': 'E. Africa Standard Time',
'Africa/Kampala': 'E. Africa Standard Time',
'Africa/Khartoum': 'E. Africa Standard Time',
'Africa/Kigali': 'South Africa Standard Time',
'Africa/Kinshasa': 'W. Central Africa Standard Time',
'Africa/Lagos': 'W. Central Africa Standard Time',
'Africa/Libreville': 'W. Central Africa Standard Time',
'Africa/Lome': 'Greenwich Standard Time',
'Africa/Luanda': 'W. Central Africa Standard Time',
'Africa/Lubumbashi': 'South Africa Standard Time',
'Africa/Lusaka': 'South Africa Standard Time',
'Africa/Malabo': 'W. Central Africa Standard Time',
'Africa/Maputo': 'South Africa Standard Time',
'Africa/Maseru': 'South Africa Standard Time',
'Africa/Mbabane': 'South Africa Standard Time',
'Africa/Mogadishu': 'E. Africa Standard Time',
'Africa/Monrovia': 'Greenwich Standard Time',
'Africa/Nairobi': 'E. Africa Standard Time',
'Africa/Ndjamena': 'W. Central Africa Standard Time',
'Africa/Niamey': 'W. Central Africa Standard Time',
'Africa/Nouakchott': 'Greenwich Standard Time',
'Africa/Ouagadougou': 'Greenwich Standard Time',
'Africa/Porto-Novo': 'W. Central Africa Standard Time',
'Africa/Sao_Tome': 'Greenwich Standard Time',
'Africa/Tripoli': 'W. Europe Standard Time',
'Africa/Tunis': 'W. Central Africa Standard Time',
'Africa/Windhoek': 'Namibia Standard Time',
'America/Anchorage': 'Alaskan Standard Time',
'America/Juneau': 'Alaskan Standard Time',
'America/Nome': 'Alaskan Standard Time',
'America/Sitka': 'Alaskan Standard Time',
'America/Yakutat': 'Alaskan Standard Time',
'America/Anguilla': 'SA Western Standard Time',
'America/Antigua': 'SA Western Standard Time',
'America/Aruba': 'SA Western Standard Time',
'America/Asuncion': 'Paraguay Standard Time',
'America/Bahia': 'Bahia Standard Time',
'America/Barbados': 'SA Western Standard Time',
'America/Belize': 'Central America Standard Time',
'America/Blanc-Sablon': 'SA Western Standard Time',
'America/Bogota': 'SA Pacific Standard Time',
'America/Buenos_Aires': 'Argentina Standard Time',
'America/Argentina/La_Rioja': 'Argentina Standard Time',
'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
'America/Argentina/Salta': 'Argentina Standard Time',
'America/Argentina/San_Juan': 'Argentina Standard Time',
'America/Argentina/San_Luis': 'Argentina Standard Time',
'America/Argentina/Tucuman': 'Argentina Standard Time',
'America/Argentina/Ushuaia': 'Argentina Standard Time',
'America/Catamarca': 'Argentina Standard Time',
'America/Cordoba': 'Argentina Standard Time',
'America/Jujuy': 'Argentina Standard Time',
'America/Mendoza': 'Argentina Standard Time',
'America/Caracas': 'Venezuela Standard Time',
'America/Cayenne': 'SA Eastern Standard Time',
'America/Cayman': 'SA Pacific Standard Time',
'America/Chicago': 'Central Standard Time',
'America/Indiana/Knox': 'Central Standard Time',
'America/Indiana/Tell_City': 'Central Standard Time',
'America/Menominee': 'Central Standard Time',
'America/North_Dakota/Beulah': 'Central Standard Time',
'America/North_Dakota/Center': 'Central Standard Time',
'America/North_Dakota/New_Salem': 'Central Standard Time',
'America/Chihuahua': 'Mountain Standard Time (Mexico)',
'America/Mazatlan': 'Mountain Standard Time (Mexico)',
'America/Coral_Harbour': 'SA Pacific Standard Time',
'America/Costa_Rica': 'Central America Standard Time',
'America/Cuiaba': 'Central Brazilian Standard Time',
'America/Campo_Grande': 'Central Brazilian Standard Time',
'America/Curacao': 'SA Western Standard Time',
'America/Danmarkshavn': 'UTC',
'America/Dawson_Creek': 'US Mountain Standard Time',
'America/Creston': 'US Mountain Standard Time',
'America/Denver': 'Mountain Standard Time',
'America/Boise': 'Mountain Standard Time',
'America/Shiprock': 'Mountain Standard Time',
'America/Dominica': 'SA Western Standard Time',
'America/Edmonton': 'Mountain Standard Time',
'America/Cambridge_Bay': 'Mountain Standard Time',
'America/Inuvik': 'Mountain Standard Time',
'America/Yellowknife': 'Mountain Standard Time',
'America/El_Salvador': 'Central America Standard Time',
'America/Fortaleza': 'SA Eastern Standard Time',
'America/Belem': 'SA Eastern Standard Time',
'America/Maceio': 'SA Eastern Standard Time',
'America/Recife': 'SA Eastern Standard Time',
'America/Santarem': 'SA Eastern Standard Time',
'America/Godthab': 'Greenland Standard Time',
'America/Grand_Turk': 'Eastern Standard Time',
'America/Grenada': 'SA Western Standard Time',
'America/Guadeloupe': 'SA Western Standard Time',
'America/Guatemala': 'Central America Standard Time',
'America/Guayaquil': 'SA Pacific Standard Time',
'America/Guyana': 'SA Western Standard Time',
'America/Halifax': 'Atlantic Standard Time',
'America/Glace_Bay': 'Atlantic Standard Time',
'America/Goose_Bay': 'Atlantic Standard Time',
'America/Moncton': 'Atlantic Standard Time',
'America/Hermosillo': 'US Mountain Standard Time',
'America/Indianapolis': 'US Eastern Standard Time',
'America/Indiana/Marengo': 'US Eastern Standard Time',
'America/Indiana/Vevay': 'US Eastern Standard Time',
'America/Jamaica': 'SA Pacific Standard Time',
'America/Kralendijk': 'SA Western Standard Time',
'America/La_Paz': 'SA Western Standard Time',
'America/Lima': 'SA Pacific Standard Time',
'America/Los_Angeles': 'Pacific Standard Time',
'America/Lower_Princes': 'SA Western Standard Time',
'America/Managua': 'Central America Standard Time',
'America/Manaus': 'SA Western Standard Time',
'America/Boa_Vista': 'SA Western Standard Time',
'America/Eirunepe': 'SA Western Standard Time',
'America/Porto_Velho': 'SA Western Standard Time',
'America/Rio_Branco': 'SA Western Standard Time',
'America/Marigot': 'SA Western Standard Time',
'America/Martinique': 'SA Western Standard Time',
'America/Matamoros': 'Central Standard Time',
'America/Mexico_City': 'Central Standard Time (Mexico)',
'America/Bahia_Banderas': 'Central Standard Time (Mexico)',
'America/Cancun': 'Central Standard Time (Mexico)',
'America/Merida': 'Central Standard Time (Mexico)',
'America/Monterrey': 'Central Standard Time (Mexico)',
'America/Montevideo': 'Montevideo Standard Time',
'America/Montserrat': 'SA Western Standard Time',
'America/Nassau': 'Eastern Standard Time',
'America/New_York': 'Eastern Standard Time',
'America/Detroit': 'Eastern Standard Time',
'America/Indiana/Petersburg': 'Eastern Standard Time',
'America/Indiana/Vincennes': 'Eastern Standard Time',
'America/Indiana/Winamac': 'Eastern Standard Time',
'America/Kentucky/Monticello': 'Eastern Standard Time',
'America/Louisville': 'Eastern Standard Time',
'America/Noronha': 'UTC-02',
'America/Ojinaga': 'Mountain Standard Time',
'America/Panama': 'SA Pacific Standard Time',
'America/Paramaribo': 'SA Eastern Standard Time',
'America/Phoenix': 'US Mountain Standard Time',
'America/Port-au-Prince': 'SA Pacific Standard Time',
'America/Port_of_Spain': 'SA Western Standard Time',
'America/Puerto_Rico': 'SA Western Standard Time',
'America/Regina': 'Canada Central Standard Time',
'America/Swift_Current': 'Canada Central Standard Time',
'America/Santa_Isabel': 'Pacific Standard Time (Mexico)',
'America/Santiago': 'Pacific SA Standard Time',
'America/Santo_Domingo': 'SA Western Standard Time',
'America/Sao_Paulo': 'E. South America Standard Time',
'America/Araguaina': 'E. South America Standard Time',
'America/Scoresbysund': 'Azores Standard Time',
'America/St_Barthelemy': 'SA Western Standard Time',
'America/St_Johns': 'Newfoundland Standard Time',
'America/St_Kitts': 'SA Western Standard Time',
'America/St_Lucia': 'SA Western Standard Time',
'America/St_Thomas': 'SA Western Standard Time',
'America/St_Vincent': 'SA Western Standard Time',
'America/Tegucigalpa': 'Central America Standard Time',
'America/Thule': 'Atlantic Standard Time',
'America/Tijuana': 'Pacific Standard Time',
'America/Toronto': 'Eastern Standard Time',
'America/Iqaluit': 'Eastern Standard Time',
'America/Montreal': 'Eastern Standard Time',
'America/Nipigon': 'Eastern Standard Time',
'America/Pangnirtung': 'Eastern Standard Time',
'America/Thunder_Bay': 'Eastern Standard Time',
'America/Tortola': 'SA Western Standard Time',
'America/Whitehorse': 'Pacific Standard Time',
'America/Vancouver': 'Pacific Standard Time',
'America/Dawson': 'Pacific Standard Time',
'America/Winnipeg': 'Central Standard Time',
'America/Rainy_River': 'Central Standard Time',
'America/Rankin_Inlet': 'Central Standard Time',
'America/Resolute': 'Central Standard Time',
'Antarctica/Casey': 'W. Australia Standard Time',
'Antarctica/Davis': 'SE Asia Standard Time',
'Antarctica/DumontDUrville': 'West Pacific Standard Time',
'Antarctica/Macquarie': 'Central Pacific Standard Time',
'Antarctica/Mawson': 'West Asia Standard Time',
'Antarctica/Palmer': 'Pacific SA Standard Time',
'Antarctica/Rothera': 'SA Eastern Standard Time',
'Antarctica/South_Pole': 'New Zealand Standard Time',
'Antarctica/McMurdo': 'New Zealand Standard Time',
'Antarctica/Syowa': 'E. Africa Standard Time',
'Antarctica/Vostok': 'Central Asia Standard Time',
'Arctic/Longyearbyen': 'W. Europe Standard Time',
'Asia/Aden': 'Arab Standard Time',
'Asia/Almaty': 'Central Asia Standard Time',
'Asia/Qyzylorda': 'Central Asia Standard Time',
'Asia/Amman': 'Jordan Standard Time',
'Asia/Ashgabat': 'West Asia Standard Time',
'Asia/Baghdad': 'Arabic Standard Time',
'Asia/Bahrain': 'Arab Standard Time',
'Asia/Baku': 'Azerbaijan Standard Time',
'Asia/Bangkok': 'SE Asia Standard Time',
'Asia/Beirut': 'Middle East Standard Time',
'Asia/Bishkek': 'Central Asia Standard Time',
'Asia/Brunei': 'Singapore Standard Time',
'Asia/Calcutta': 'India Standard Time',
'Asia/Colombo': 'Sri Lanka Standard Time',
'Asia/Damascus': 'Syria Standard Time',
'Asia/Dhaka': 'Bangladesh Standard Time',
'Asia/Dili': 'Tokyo Standard Time',
'Asia/Dubai': 'Arabian Standard Time',
'Asia/Dushanbe': 'West Asia Standard Time',
'Asia/Gaza': 'Egypt Standard Time',
'Asia/Hebron': 'Egypt Standard Time',
'Asia/Hong_Kong': 'China Standard Time',
'Asia/Hovd': 'SE Asia Standard Time',
'Asia/Irkutsk': 'North Asia East Standard Time',
'Asia/Jakarta': 'SE Asia Standard Time',
'Asia/Pontianak': 'SE Asia Standard Time',
'Asia/Jayapura': 'Tokyo Standard Time',
'Asia/Jerusalem': 'Israel Standard Time',
'Asia/Kabul': 'Afghanistan Standard Time',
'Asia/Karachi': 'Pakistan Standard Time',
'Asia/Kathmandu': 'Nepal Standard Time',
'Asia/Katmandu': 'Nepal Standard Time',
'Asia/Krasnoyarsk': 'North Asia Standard Time',
'Asia/Kuala_Lumpur': 'Singapore Standard Time',
'Asia/Kuching': 'Singapore Standard Time',
'Asia/Kuwait': 'Arab Standard Time',
'Asia/Macau': 'China Standard Time',
'Asia/Magadan': 'Magadan Standard Time',
'Asia/Anadyr Asia/Kamchatka': 'Magadan Standard Time',
'Asia/Kamchatka': 'Magadan Standard Time',
'Asia/Makassar': 'Singapore Standard Time',
'Asia/Manila': 'Singapore Standard Time',
'Asia/Muscat': 'Arabian Standard Time',
'Asia/Nicosia': 'E. Europe Standard Time',
'Asia/Novosibirsk': 'N. Central Asia Standard Time',
'Asia/Novokuznetsk': 'N. Central Asia Standard Time',
'Asia/Omsk': 'N. Central Asia Standard Time',
'Asia/Oral': 'West Asia Standard Time',
'Asia/Aqtau': 'West Asia Standard Time',
'Asia/Aqtobe': 'West Asia Standard Time',
'Asia/Phnom_Penh': 'SE Asia Standard Time',
'Asia/Pyongyang': 'Korea Standard Time',
'Asia/Qatar': 'Arab Standard Time',
'Asia/Rangoon': 'Myanmar Standard Time',
'Asia/Riyadh': 'Arab Standard Time',
'Asia/Saigon': 'SE Asia Standard Time',
'Asia/Seoul': 'Korea Standard Time',
'Asia/Shanghai': 'China Standard Time',
'Asia/Chongqing': 'China Standard Time',
'Asia/Harbin': 'China Standard Time',
'Asia/Kashgar': 'China Standard Time',
'Asia/Urumqi': 'China Standard Time',
'Asia/Singapore': 'Singapore Standard Time',
'Asia/Taipei': 'Taipei Standard Time',
'Asia/Tashkent': 'West Asia Standard Time',
'Asia/Samarkand': 'West Asia Standard Time',
'Asia/Tbilisi': 'Georgian Standard Time',
'Asia/Tehran': 'Iran Standard Time',
'Asia/Thimphu': 'Bangladesh Standard Time',
'Asia/Tokyo': 'Tokyo Standard Time',
'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time',
'Asia/Choibalsan': 'Ulaanbaatar Standard Time',
'Asia/Vientiane': 'SE Asia Standard Time',
'Asia/Vladivostok': 'Vladivostok Standard Time',
'Asia/Ust-Nera': 'Vladivostok Standard Time',
'Asia/Sakhalin': 'Vladivostok Standard Time',
'Asia/Yakutsk': 'Yakutsk Standard Time',
'Asia/Khandyga': 'Yakutsk Standard Time',
'Asia/Yekaterinburg': 'Ekaterinburg Standard Time',
'Asia/Yerevan': 'Caucasus Standard Time',
'Atlantic/Azores': 'Azores Standard Time',
'Atlantic/Bermuda': 'Atlantic Standard Time',
'Atlantic/Canary': 'GMT Standard Time',
'Atlantic/Cape_Verde': 'Cape Verde Standard Time',
'Atlantic/Faeroe': 'GMT Standard Time',
'Atlantic/Reykjavik': 'Greenwich Standard Time',
'Atlantic/South_Georgia': 'UTC-02',
'Atlantic/St_Helena': 'Greenwich Standard Time',
'Atlantic/Stanley': 'SA Eastern Standard Time',
'Australia/Adelaide': 'Cen. Australia Standard Time',
'Australia/Broken_Hill': 'Cen. Australia Standard Time',
'Australia/Brisbane': 'E. Australia Standard Time',
'Australia/Lindeman': 'E. Australia Standard Time',
'Australia/Darwin': 'AUS Central Standard Time',
'Australia/Hobart': 'Tasmania Standard Time',
'Australia/Currie': 'Tasmania Standard Time',
'Australia/Perth': 'W. Australia Standard Time',
'Australia/Sydney': 'AUS Eastern Standard Time',
'Australia/Melbourne': 'AUS Eastern Standard Time',
'CST6CDT': 'Central Standard Time',
'EST5EDT': 'Eastern Standard Time',
'Etc/UTC': 'UTC',
'Etc/GMT': 'UTC',
'Etc/GMT+1': 'Cape Verde Standard Time',
'Etc/GMT+10': 'Hawaiian Standard Time',
'Etc/GMT+11': 'UTC-11',
'Etc/GMT+12': 'Dateline Standard Time',
'Etc/GMT+2': 'UTC-02',
'Etc/GMT+3': 'SA Eastern Standard Time',
'Etc/GMT+4': 'SA Western Standard Time',
'Etc/GMT+5': 'SA Pacific Standard Time',
'Etc/GMT+6': 'Central America Standard Time',
'Etc/GMT+7': 'US Mountain Standard Time',
'Etc/GMT-1': 'W. Central Africa Standard Time',
'Etc/GMT-10': 'West Pacific Standard Time',
'Etc/GMT-11': 'Central Pacific Standard Time',
'Etc/GMT-12': 'UTC+12',
'Etc/GMT-13': 'Tonga Standard Time',
'Etc/GMT-2': 'South Africa Standard Time',
'Etc/GMT-3': 'E. Africa Standard Time',
'Etc/GMT-4': 'Arabian Standard Time',
'Etc/GMT-5': 'West Asia Standard Time',
'Etc/GMT-6': 'Central Asia Standard Time',
'Etc/GMT-7': 'SE Asia Standard Time',
'Etc/GMT-8': 'Singapore Standard Time',
'Etc/GMT-9': 'Tokyo Standard Time',
'Europe/Amsterdam': 'W. Europe Standard Time',
'Europe/Andorra': 'W. Europe Standard Time',
'Europe/Athens': 'GTB Standard Time',
'Europe/Belgrade': 'Central Europe Standard Time',
'Europe/Berlin': 'W. Europe Standard Time',
'Europe/Busingen': 'W. Europe Standard Time',
'Europe/Bratislava': 'Central Europe Standard Time',
'Europe/Brussels': 'Romance Standard Time',
'Europe/Bucharest': 'GTB Standard Time',
'Europe/Budapest': 'Central Europe Standard Time',
'Europe/Chisinau': 'GTB Standard Time',
'Europe/Copenhagen': 'Romance Standard Time',
'Europe/Dublin': 'GMT Standard Time',
'Europe/Gibraltar': 'W. Europe Standard Time',
'Europe/Guernsey': 'GMT Standard Time',
'Europe/Helsinki': 'FLE Standard Time',
'Europe/Isle_of_Man': 'GMT Standard Time',
'Europe/Istanbul': 'Turkey Standard Time',
'Europe/Jersey': 'GMT Standard Time',
'Europe/Kaliningrad': 'Kaliningrad Standard Time',
'Europe/Kiev': 'FLE Standard Time',
'Europe/Simferopol': 'FLE Standard Time',
'Europe/Uzhgorod': 'FLE Standard Time',
'Europe/Zaporozhye': 'FLE Standard Time',
'Europe/Lisbon': 'GMT Standard Time',
'Atlantic/Madeira': 'GMT Standard Time',
'Europe/Ljubljana': 'Central Europe Standard Time',
'Europe/London': 'GMT Standard Time',
'Europe/Luxembourg': 'W. Europe Standard Time',
'Europe/Madrid': 'Romance Standard Time',
'Africa/Ceuta': 'Romance Standard Time',
'Europe/Malta': 'W. Europe Standard Time',
'Europe/Mariehamn': 'FLE Standard Time',
'Europe/Minsk': 'Kaliningrad Standard Time',
'Europe/Monaco': 'W. Europe Standard Time',
'Europe/Moscow': 'Russian Standard Time',
'Europe/Volgograd': 'Russian Standard Time',
'Europe/Samara': 'Russian Standard Time',
'Europe/Oslo': 'W. Europe Standard Time',
'Europe/Paris': 'Romance Standard Time',
'Europe/Podgorica': 'Central Europe Standard Time',
'Europe/Prague': 'Central Europe Standard Time',
'Europe/Riga': 'FLE Standard Time',
'Europe/Rome': 'W. Europe Standard Time',
'Europe/San_Marino': 'W. Europe Standard Time',
'Europe/Sarajevo': 'Central European Standard Time',
'Europe/Skopje': 'Central European Standard Time',
'Europe/Sofia': 'FLE Standard Time',
'Europe/Stockholm': 'W. Europe Standard Time',
'Europe/Tallinn': 'FLE Standard Time',
'Europe/Tirane': 'Central Europe Standard Time',
'Europe/Vaduz': 'W. Europe Standard Time',
'Europe/Vatican': 'W. Europe Standard Time',
'Europe/Vienna': 'W. Europe Standard Time',
'Europe/Vilnius': 'FLE Standard Time',
'Europe/Warsaw': 'Central European Standard Time',
'Europe/Zagreb': 'Central European Standard Time',
'Europe/Zurich': 'W. Europe Standard Time',
'Indian/Antananarivo': 'E. Africa Standard Time',
'Indian/Chagos': 'Central Asia Standard Time',
'Indian/Christmas': 'SE Asia Standard Time',
'Indian/Cocos': 'Myanmar Standard Time',
'Indian/Comoro': 'E. Africa Standard Time',
'Indian/Kerguelen': 'West Asia Standard Time',
'Indian/Mahe': 'Mauritius Standard Time',
'Indian/Maldives': 'West Asia Standard Time',
'Indian/Mauritius': 'Mauritius Standard Time',
'Indian/Mayotte': 'E. Africa Standard Time',
'Indian/Reunion': 'Mauritius Standard Time',
'MST7MDT': 'Mountain Standard Time',
'PST8PDT': 'Pacific Standard Time',
'Pacific/Apia': 'Samoa Standard Time',
'Pacific/Auckland': 'New Zealand Standard Time',
'Pacific/Efate': 'Central Pacific Standard Time',
'Pacific/Enderbury': 'Tonga Standard Time',
'Pacific/Fakaofo': 'Tonga Standard Time',
'Pacific/Fiji': 'Fiji Standard Time',
'Pacific/Funafuti': 'UTC+12',
'Pacific/Galapagos': 'Central America Standard Time',
'Pacific/Guadalcanal': 'Central Pacific Standard Time',
'Pacific/Guam': 'West Pacific Standard Time',
'Pacific/Honolulu': 'Hawaiian Standard Time',
'Pacific/Johnston': 'Hawaiian Standard Time',
'Pacific/Majuro Pacific/Kwajalein': 'UTC+12',
'Pacific/Midway': 'UTC-11',
'Pacific/Nauru': 'UTC+12',
'Pacific/Niue': 'UTC-11',
'Pacific/Noumea': 'Central Pacific Standard Time',
'Pacific/Pago_Pago': 'UTC-11',
'Pacific/Palau': 'Tokyo Standard Time',
'Pacific/Ponape': 'Central Pacific Standard Time',
'Pacific/Kosrae': 'Central Pacific Standard Time',
'Pacific/Port_Moresby': 'West Pacific Standard Time',
'Pacific/Rarotonga': 'Hawaiian Standard Time',
'Pacific/Saipan': 'West Pacific Standard Time',
'Pacific/Tahiti': 'Hawaiian Standard Time',
'Pacific/Tarawa': 'UTC+12',
'Pacific/Tongatapu': 'Tonga Standard Time',
'Pacific/Truk': 'West Pacific Standard Time',
'Pacific/Wake': 'UTC+12',
'Pacific/Wallis': 'UTC+12'
}
# Define the module's virtual name
__virtualname__ = 'timezone'
class TzMapper(object):
    '''
    Two-way, case-insensitive lookup between Windows timezone display names
    and their Unix (Olson/IANA) equivalents.
    '''
    def __init__(self, unix_to_win):
        '''
        Build both lookup directions from a single mapping. Keys are stored
        lower-cased so all lookups are case-insensitive; the returned names
        keep their original casing.
        '''
        self.win_to_unix = {win.lower(): unix for win, unix in unix_to_win.items()}
        self.unix_to_win = {unix.lower(): win for win, unix in unix_to_win.items()}

    def add(self, k, v):
        # Register a new pair in both directions
        self.unix_to_win[k.lower()] = v
        self.win_to_unix[v.lower()] = k

    def remove(self, k):
        # Drop the pair identified by key ``k`` from both tables
        other = self.unix_to_win.pop(k.lower())
        self.win_to_unix.pop(other.lower())

    def get_win(self, key, default=None):
        # Unix name -> Windows name (``default`` when unmapped)
        return self.unix_to_win.get(key.lower(), default)

    def get_unix(self, key, default=None):
        # Windows name -> Unix name (``default`` when unmapped)
        return self.win_to_unix.get(key.lower(), default)

    def list_win(self):
        # All known Windows zone names, sorted
        return sorted(self.unix_to_win.values())

    def list_unix(self):
        # All known Unix zone names, sorted
        return sorted(self.win_to_unix.values())
mapper = TzMapper({
'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Aleutian Standard Time': 'America/Adak',
'Altai Standard Time': 'Asia/Barnaul',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Astrakhan Standard Time': 'Europe/Astrakhan',
'Atlantic Standard Time': 'America/Halifax',
'Aus Central W. Standard Time': 'Australia/Eucla',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Belarus Standard Time': 'Europe/Minsk',
'Bougainville Standard Time': 'Pacific/Bougainville',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'Chatham Islands Standard Time': 'Pacific/Chatham',
'China Standard Time': 'Asia/Shanghai',
'Cuba Standard Time': 'America/Havana',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Europe/Chisinau',
'E. South America Standard Time': 'America/Sao_Paulo',
'Easter Island Standard Time': 'Pacific/Easter',
'Eastern Standard Time': 'America/New_York',
'Eastern Standard Time (Mexico)': 'America/Cancun',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Haiti Standard Time': 'America/Port-au-Prince',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Line Islands Standard Time': 'Pacific/Kiritimati',
'Lord Howe Standard Time': 'Australia/Lord_Howe',
'Magadan Standard Time': 'Asia/Magadan',
'Magallanes Standard Time': 'America/Punta_Arenas',
'Marquesas Standard Time': 'Pacific/Marquesas',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'Norfolk Standard Time': 'Pacific/Norfolk',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'North Korea Standard Time': 'Asia/Pyongyang',
'Omsk Standard Time': 'Asia/Omsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Tijuana',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russia Time Zone 10': 'Asia/Srednekolymsk',
'Russia Time Zone 11': 'Asia/Kamchatka',
'Russia Time Zone 3': 'Europe/Samara',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Saint Pierre Standard Time': 'America/Miquelon',
'Sakhalin Standard Time': 'Asia/Sakhalin',
'Samoa Standard Time': 'Pacific/Apia',
'Saratov Standard Time': 'Europe/Saratov',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tocantins Standard Time': 'America/Araguaina',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tomsk Standard Time': 'Asia/Tomsk',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Transbaikal Standard Time': 'Asia/Chita',
'Turkey Standard Time': 'Europe/Istanbul',
'Turks And Caicos Standard Time': 'America/Grand_Turk',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC+13': 'Etc/GMT-13',
'UTC-02': 'Etc/GMT+2',
'UTC-08': 'Etc/GMT+8',
'UTC-09': 'Etc/GMT+9',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'W. Mongolia Standard Time': 'Asia/Hovd',
'West Asia Standard Time': 'Asia/Tashkent',
'West Bank Standard Time': 'Asia/Hebron',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'})
def __virtual__():
    '''
    Only load on Windows systems where both the ``tzutil`` command and the
    ``pytz`` library are available.

    Returns:
        str: The virtual module name on success
        tuple: ``(False, reason)`` when the module cannot be loaded
    '''
    # NOTE: the merged diff left the pre-merge early-return here, making the
    # real checks unreachable; only the post-merge logic is kept.
    if not __utils__['platform.is_windows']():
        return False, "Module win_timezone: Not on Windows client"
    if not HAS_PYTZ:
        return False, "Module win_timezone: pytz not found"
    if not __utils__['path.which']('tzutil'):
        return False, "Module win_timezone: tzutil not found"
    return __virtualname__
def get_zone():
    '''
    Get the current timezone in Unix format (i.e. America/Denver).

    Returns:
        str: The current timezone in Unix format, or ``'Unknown'`` when the
            Windows zone name has no mapping

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    # Read the Windows zone name straight from the registry instead of
    # shelling out to ``tzutil /g`` (the dead pre-merge path removed here)
    win_zone = __utils__['reg.read_value'](
        hive='HKLM',
        key='SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation',
        vname='TimeZoneKeyName')['vdata']
    return mapper.get_unix(win_zone.lower(), 'Unknown')
def get_offset():
    '''
    Get the current numeric timezone offset from UTC (i.e. -0700).

    Returns:
        str: Offset from UTC (``%z`` format, e.g. ``-0700``)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    # localize/normalize dance per
    # http://craigglennie.com/programming/python/2013/07/21/working-with-timezones-using-Python-and-pytz-localize-vs-normalize/
    tz_object = pytz.timezone(get_zone())
    utc_time = pytz.utc.localize(datetime.utcnow())
    loc_time = utc_time.astimezone(tz_object)
    norm_time = tz_object.normalize(loc_time)
    return norm_time.strftime('%z')
def get_zonecode():
    '''
    Get the current timezone code (i.e. PST, MDT, etc)

    Returns:
        str: An abbreviated timezone code

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    # The dead pre-merge "still not implemented" return is removed; this is
    # the implemented post-merge path.
    tz_object = pytz.timezone(get_zone())
    loc_time = tz_object.localize(datetime.utcnow())
    return loc_time.tzname()
def set_zone(timezone):
    '''
    Sets the timezone using tzutil.

    Args:
        timezone (str): A valid timezone name, in either Windows or Unix
            format

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If an invalid timezone is passed, or if
            ``tzutil`` fails to apply it

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'
    '''
    # Accept either naming convention: a Windows name is used verbatim, a
    # Unix name is translated first.
    if timezone.lower() in mapper.win_to_unix:
        win_zone = timezone
    elif timezone.lower() in mapper.unix_to_win:
        win_zone = mapper.get_win(timezone)
    else:
        # Neither a known Windows nor a known Unix zone name
        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))

    # Set the value
    cmd = ['tzutil', '/s', win_zone]
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode']:
        raise CommandExecutionError('tzutil encountered an error setting '
                                    'timezone: {0}'.format(timezone),
                                    info=res)
    # Confirm the zone actually took effect
    return zone_compare(timezone)
def zone_compare(timezone):
    '''
    Compares the given timezone with the machine timezone. Mostly useful for
    running state checks.

    Args:
        timezone (str):
            The timezone to compare. This can be in Windows or Unix format.
            Can be any of the values returned by the ``timezone.list``
            function

    Returns:
        bool: ``True`` if they match, otherwise ``False``

    Raises:
        CommandExecutionError: If an invalid timezone is passed

    Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    # Accept either naming convention, same normalization as set_zone
    if timezone.lower() in mapper.win_to_unix:
        check_zone = timezone
    elif timezone.lower() in mapper.unix_to_win:
        check_zone = mapper.get_win(timezone)
    else:
        # Neither a known Windows nor a known Unix zone name
        raise CommandExecutionError('Invalid timezone passed: {0}'
                                    ''.format(timezone))

    return get_zone() == mapper.get_unix(check_zone, 'Unknown')
def list(unix_style=True):
    '''
    Return a list of Timezones that this module supports. These can be in
    either Unix or Windows format.

    .. versionadded:: 2018.3.2

    Args:
        unix_style (bool):
            ``True`` returns Unix-style timezones. ``False`` returns
            Windows-style timezones. Default is ``True``

    Returns:
        list: A sorted list of supported timezone names

    CLI Example:

    .. code-block:: bash

        # Unix-style timezones
        salt '*' timezone.list

        # Windows-style timezones
        salt '*' timezone.list unix_style=False
    '''
    # NOTE: intentionally shadows the ``list`` builtin inside this module;
    # the public function name is part of the CLI interface and must stay.
    if unix_style:
        return mapper.list_unix()
    return mapper.list_win()
def get_hwclock():
    '''
    Get the current hardware clock setting.

    .. note::
        The hardware clock is always local time on Windows, so this will
        always return ``'localtime'``

    Returns:
        str: Always ``'localtime'``

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    # The stale pre-merge "need to search for a way" comment contradicted
    # the code and is dropped: Windows keeps the hardware clock local.
    return 'localtime'
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime.

    .. note::
        The hardware clock is always local time on Windows, so it cannot be
        changed; this will always return ``False``

    Args:
        clock (str): The requested setting (``'UTC'`` or ``'localtime'``);
            ignored on Windows

    Returns:
        bool: Always ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    # The hardware clock cannot be set independently on Windows
    return False

Some files were not shown because too many files have changed in this diff Show More