Merge branch '2017.7' into 2017.7

commit cf411f8984
angeloudy, 2017-12-20 09:51:34 +11:00, committed by GitHub
235 changed files with 7264 additions and 2084 deletions


@@ -12,4 +12,10 @@ Remove this section if not relevant
Yes/No
### Commits signed with GPG?
Yes/No
Please review [Salt's Contributing Guide](https://docs.saltstack.com/en/latest/topics/development/contributing.html) for best practices.
See GitHub's [page on GPG signing](https://help.github.com/articles/signing-commits-using-gpg/) for more information about signing commits with GPG.

.gitignore vendored

@@ -91,3 +91,8 @@ tests/integration/cloud/providers/pki/minions
# Ignore tox virtualenvs
/.tox/
# Ignore kitchen stuff
.kitchen
.bundle
Gemfile.lock

.kitchen.yml Normal file

@@ -0,0 +1,193 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% version = '2017.7.1' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
<% if File.exists?(driverfile) %>
<%= ERB.new(File.read(driverfile)).result %>
<% else %>
driver:
name: docker
use_sudo: false
hostname: salt
privileged: true
username: root
volume:
- /var/run/docker.sock:/docker.sock
cap_add:
- sys_admin
disable_upstart: false
provision_command:
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
transport:
name: sftp
<% end %>
sudo: false
provisioner:
name: salt_solo
salt_install: bootstrap
salt_version: latest
salt_bootstrap_url: https://bootstrap.saltstack.com
salt_bootstrap_options: -X stable <%= version %>
log_level: info
require_chef: false
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: 2017.7
repo: git
testingdir: /testing
salt_copy_filter:
- .bundle
- .git
- .gitignore
- .kitchen
- .kitchen.yml
- Gemfile
- Gemfile.lock
- README.rst
- .travis.yml
state_top:
base:
"*":
- git.salt
- kitchen
<% if File.exists?(platformsfile) %>
<%= ERB.new(File.read(platformsfile)).result %>
<% else %>
platforms:
- name: fedora
driver_config:
image: fedora:latest
run_command: /usr/lib/systemd/systemd
provisioner:
salt_bootstrap_options: -X git v<%= version %> >/dev/null
- name: centos-7
driver_config:
run_command: /usr/lib/systemd/systemd
- name: centos-6
driver_config:
run_command: /sbin/init
provision_command:
- yum install -y upstart
provisioner:
salt_bootstrap_options: -P -y -x python2.7 -X git v<%= version %> >/dev/null
- name: ubuntu-rolling
driver_config:
image: ubuntu:rolling
run_command: /lib/systemd/systemd
provisioner:
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.sh
- name: ubuntu-16.04
driver_config:
run_command: /lib/systemd/systemd
- name: ubuntu-14.04
driver_config:
run_command: /sbin/init
provision_command:
- rm -f /sbin/initctl
- dpkg-divert --local --rename --remove /sbin/initctl
- name: debian-8
driver_config:
run_command: /lib/systemd/systemd
provision_command:
- apt-get install -y dbus
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
- name: debian-9
driver_config:
run_command: /lib/systemd/systemd
- name: arch
driver_config:
image: base/archlinux
run_command: /usr/lib/systemd/systemd
provision_command:
- pacman -Syu --noconfirm systemd
- systemctl enable sshd
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
provisioner:
salt_bootstrap_options: -X git v<%= version %> >/dev/null
- name: opensuse
driver_config:
run_command: /usr/lib/systemd/systemd
provision_command:
- systemctl enable sshd.service
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
provisioner:
salt_bootstrap_options: -X git v<%= version %> >/dev/null
<% if vagrant != false %>
- name: windows-2012r2
driver:
box: mwrock/Windows2012R2
communicator: winrm
name: vagrant
gui: true
username: administrator
password: Pass@word1
provisioner:
init_environment: |
Clear-Host
$AddedLocation ="c:\salt"
$Reg = "Registry::HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
$OldPath = (Get-ItemProperty -Path "$Reg" -Name PATH).Path
$NewPath= $OldPath + ';' + $AddedLocation
Set-ItemProperty -Path "$Reg" -Name PATH -Value $NewPath
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: ''
- name: windows-2016
driver:
box: mwrock/Windows2016
communicator: winrm
name: vagrant
username: Vagrant
password: vagrant
gui: true
provisioner:
init_environment: |
Clear-Host
$AddedLocation ="c:\salt;c:\salt\bin\Scripts"
$Reg = "Registry::HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
$OldPath = (Get-ItemProperty -Path "$Reg" -Name PATH).Path
$NewPath= $OldPath + ';' + $AddedLocation
Set-ItemProperty -Path "$Reg" -Name PATH -Value $NewPath
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: ''
<% end %>
<% end %>
suites:
- name: py2
provisioner:
pillars:
top.sls:
base:
"*":
- jenkins
jenkins.sls:
testing_dir: /tmp/kitchen/testing
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
- name: py3
excludes:
- centos-6
- ubuntu-14.04
provisioner:
pillars:
top.sls:
base:
"*":
- jenkins
jenkins.sls:
testing_dir: /tmp/kitchen/testing
clone_repo: false
py3: true
salttesting_namespec: salttesting==2017.6.1
verifier:
name: shell
remote_exec: true
sudo: false
live_stream: {}
<% if ENV['TESTOPTS'].nil? %>
command: '$(kitchen) /tmp/kitchen/testing/tests/runtests.py --run-destructive --sysinfo --transport=zeromq --output-columns=80 --ssh --coverage-xml=/tmp/coverage.xml --xml=/tmp/xml-unittests-output'
<% else %>
command: '$(kitchen) /tmp/kitchen/testing/tests/runtests.py --run-destructive --output-columns 80 <%= ENV["TESTOPTS"] %>'
<% end %>

Gemfile Normal file

@@ -0,0 +1,24 @@
# This file is only used for running the test suite with kitchen-salt.
source 'https://rubygems.org'
gem 'test-kitchen'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'kitchen-sync'
gem 'git'
group :docker do
gem 'kitchen-docker', :git => 'https://github.com/test-kitchen/kitchen-docker.git'
end
group :opennebula do
gem 'kitchen-opennebula', :git => 'https://github.com/gtmanfred/kitchen-opennebula.git'
gem 'xmlrpc'
end
group :windows do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
gem 'winrm', '~>2.0'
gem 'winrm-fs', '~>1.0'
end


@@ -67,8 +67,8 @@ Engage SaltStack
`SaltConf`_, **User Groups and Meetups** - SaltStack has a vibrant and `global
community`_ of customers, users, developers and enthusiasts. Connect with other
-Salted folks in your area of the world, or join `SaltConf16`_, the SaltStack
-annual user conference, April 19-21 in Salt Lake City. Please let us know if
+Salted folks in your area of the world, or join `SaltConf18`_, the SaltStack
+annual user conference, September 10-14 in Salt Lake City. Please let us know if
you would like to start a user group or if we should add your existing
SaltStack user group to this list by emailing: info@saltstack.com
@@ -91,7 +91,7 @@ services`_ offerings.
.. _SaltConf: http://www.youtube.com/user/saltstack
.. _global community: http://www.meetup.com/pro/saltstack/
-.. _SaltConf16: http://saltconf.com/
+.. _SaltConf18: http://saltconf.com/
.. _SaltStack education offerings: http://saltstack.com/training/
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
.. _SaltStack professional services: http://saltstack.com/services/


@@ -235,13 +235,13 @@
# cause sub minion process to restart.
#auth_safemode: False
-# Ping Master to ensure connection is alive (seconds).
+# Ping Master to ensure connection is alive (minutes).
#ping_interval: 0
# To auto recover minions if master changes IP address (DDNS)
# auth_tries: 10
# auth_safemode: False
-# ping_interval: 90
+# ping_interval: 2
#
# Minions won't know master is missing until a ping fails. After the ping fail,
# the minion will attempt authentication and likely fails out and cause a restart.


@@ -19,14 +19,18 @@ Salt SSH allows for salt routines to be executed using only SSH for transport
Options
=======
.. program:: salt-ssh
.. include:: _includes/common-options.rst
.. option:: --hard-crash
Raise any original exception rather than exiting gracefully. Default: False.
.. option:: -r, --raw, --raw-shell
Execute a raw shell command.
-.. option:: --priv
-Specify the SSH private key file to be used for authentication.
.. option:: --roster
Define which roster system to use, this defines if a database backend,
@@ -53,38 +57,117 @@ Options
the more running process the faster communication should be, default
is 25.
.. option:: --extra-filerefs=EXTRA_FILEREFS
Pass in extra files to include in the state tarball.
.. option:: --min-extra-modules=MIN_EXTRA_MODS
One or comma-separated list of extra Python modules to be included
into Minimal Salt.
.. option:: --thin-extra-modules=THIN_EXTRA_MODS
One or comma-separated list of extra Python modules to be included
into Thin Salt.
.. option:: -v, --verbose
Turn on command verbosity, display jid.
.. option:: -s, --static
Return the data from minions as a group after they all return.
.. option:: -w, --wipe
Remove the deployment of the salt files when done executing.
.. option:: -W, --rand-thin-dir
Select a random temp dir to deploy on the remote system. The dir
will be cleaned after the execution.
.. option:: -t, --regen-thin, --thin
Trigger a thin tarball regeneration. This is needed if custom
grains/modules/states have been added or updated.
.. option:: --python2-bin=PYTHON2_BIN
Path to a python2 binary which has salt installed.
.. option:: --python3-bin=PYTHON3_BIN
Path to a python3 binary which has salt installed.
.. option:: --jid=JID
Pass a JID to be used instead of generating one.
Authentication Options
----------------------
.. option:: --priv=SSH_PRIV
Specify the SSH private key file to be used for authentication.
.. option:: -i, --ignore-host-keys
-Disables StrictHostKeyChecking to relax acceptance of new and unknown
-host keys.
+By default ssh host keys are honored and connections will ask for
+approval. Use this option to disable StrictHostKeyChecking.
.. option:: --no-host-keys
Fully ignores ssh host keys which by default are honored and connections
would ask for approval. Useful if the host key of a remote server has
changed and would still error with --ignore-host-keys.
.. option:: --user=SSH_USER
Set the default user to attempt to use when authenticating.
.. option:: --passwd
Set the default password to attempt to use when authenticating.
.. option:: --askpass
Interactively ask for the SSH password with no echo - avoids password
in process args and stored in history.
.. option:: --key-deploy
Set this flag to attempt to deploy the authorized ssh key with all
minions. This combined with --passwd can make initial deployment of keys
very fast and easy.
-.. program:: salt
-.. include:: _includes/common-options.rst
-.. include:: _includes/target-selection-ssh.rst
+.. option:: --identities-only
+Use the only authentication identity files configured in the ssh_config
+files. See IdentitiesOnly flag in man ssh_config.
+.. option:: --sudo
+Run command via sudo.
Scan Roster Options
-------------------
.. option:: --scan-ports=SSH_SCAN_PORTS
Comma-separated list of ports to scan in the scan roster.
.. option:: --scan-timeout=SSH_SCAN_TIMEOUT
Scanning socket timeout for the scan roster.
.. include:: _includes/logging-options.rst
.. |logfile| replace:: /var/log/salt/ssh
.. |loglevel| replace:: ``warning``
.. include:: _includes/target-selection-ssh.rst
.. include:: _includes/output-options.rst


@@ -225,15 +225,16 @@ enclosing brackets ``[`` and ``]``:
Default: ``{}``
-This can be used to control logging levels more specifically. The example sets
-the main salt library at the 'warning' level, but sets ``salt.modules`` to log
-at the ``debug`` level:
+This can be used to control logging levels more specifically, based on log call name. The example sets
+the main salt library at the 'warning' level, sets ``salt.modules`` to log
+at the ``debug`` level, and sets a custom module to the ``all`` level:
.. code-block:: yaml
log_granular_levels:
'salt': 'warning'
'salt.modules': 'debug'
'salt.loader.saltmaster.ext.module.custom_module': 'all'
External Logging Handlers
-------------------------


@@ -303,6 +303,20 @@ option on the Salt master.
master_port: 4506
.. conf_minion:: publish_port
``publish_port``
----------------
Default: ``4505``
The port of the master publish server, this needs to coincide with the publish_port
option on the Salt master.
.. code-block:: yaml
publish_port: 4505
.. conf_minion:: user
``user``
@@ -869,7 +883,7 @@ restart.
Default: ``0``
-Instructs the minion to ping its master(s) every n number of seconds. Used
+Instructs the minion to ping its master(s) every n number of minutes. Used
primarily as a mitigation technique against minion disconnects.
.. code-block:: yaml
@@ -1164,7 +1178,7 @@ be able to execute a certain module. The ``sys`` module is built into the minion
and cannot be disabled.
This setting can also tune the minion. Because all modules are loaded into system
-memory, disabling modules will lover the minion's memory footprint.
+memory, disabling modules will lower the minion's memory footprint.
Modules should be specified according to their file name on the system and not by
their virtual name. For example, to disable ``cmd``, use the string ``cmdmod`` which


@@ -376,6 +376,22 @@ The above example will force the minion to use the :py:mod:`systemd
.. __: https://github.com/saltstack/salt/issues/new
Logging Restrictions
--------------------
As a rule, logging should not be done anywhere in a Salt module before it is
loaded. This rule applies to all code that would run before the ``__virtual__()``
function, as well as the code within the ``__virtual__()`` function itself.
If logging statements are made before the virtual function determines if
the module should be loaded, then those logging statements will be called
repeatedly. This clutters up log files unnecessarily.
Exceptions may be considered for logging statements made at the ``trace`` level.
However, it is better to provide the necessary information by another means.
One method is to :ref:`return error information <modules-error-info>` in the
``__virtual__()`` function.
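A rough sketch of that pattern follows; the module and dependency names are hypothetical illustrations, not part of this commit. Instead of logging before the module is loaded, ``__virtual__()`` returns a reason the loader can report.

.. code-block:: python

    # Hypothetical execution module showing the error-return pattern.
    # 'some_dependency' stands in for whatever third-party library the
    # module would actually need.
    try:
        import some_dependency
        HAS_DEPENDENCY = True
    except ImportError:
        HAS_DEPENDENCY = False

    __virtualname__ = 'mymod'


    def __virtual__():
        if HAS_DEPENDENCY:
            return __virtualname__
        # No log call here: returning (False, reason) lets the loader
        # report why the module was skipped, without cluttering the logs.
        return (False, 'The some_dependency Python module is not installed')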
.. _modules-virtual-name:
``__virtualname__``


@@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states
in parallel. This is accomplished very easily by adding the ``parallel: True``
option to your state declaration:
-.. code_block:: yaml
+.. code-block:: yaml
nginx:
service.running:
@@ -24,7 +24,7 @@ state to finish.
Given this example:
-.. code_block:: yaml
+.. code-block:: yaml
sleep 10:
cmd.run:
@@ -74,16 +74,16 @@ also complete.
Things to be Careful of
=======================
-Parallel States does not prevent you from creating parallel conflicts on your
+Parallel States do not prevent you from creating parallel conflicts on your
system. This means that if you start multiple package installs using Salt then
the package manager will block or fail. If you attempt to manage the same file
with multiple states in parallel then the result can produce an unexpected
file.
Make sure that the states you choose to run in parallel do not conflict, or
-else, like in and parallel programming environment, the outcome may not be
+else, like in any parallel programming environment, the outcome may not be
what you expect. Doing things like just making all states run in parallel
-will almost certinly result in unexpected behavior.
+will almost certainly result in unexpected behavior.
With that said, running states in parallel should be safe the vast majority
of the time and the most likely culprit for unexpected behavior is running


@@ -40,7 +40,7 @@ Set up an initial profile at /etc/salt/cloud.profiles or in the /etc/salt/cloud.
.. code-block:: yaml
-scalewa-ubuntu:
+scaleway-ubuntu:
provider: my-scaleway-config
image: Ubuntu Trusty (14.04 LTS)


@@ -95,19 +95,19 @@ globally available or passed in through function arguments, file data, etc.
Mocking Loader Modules
----------------------
Salt loader modules use a series of globally available dunder variables,
``__salt__``, ``__opts__``, ``__pillar__``, etc. To facilitate testing these
modules a mixin class was created, ``LoaderModuleMockMixin`` which can be found
-in ``tests/support/mixins.py``. The reason for the exitance of this class is
-because, historycally, and because it was easier, one would add these dunder
+in ``tests/support/mixins.py``. The reason for the existence of this class is
+because historically, and because it was easier, one would add these dunder
variables directly on the imported module. This however, introduces unexpected
behavior when running the full test suite since those attributes would not be
-removed once we were done testing the module and would therefor leak to other
+removed once we were done testing the module and would therefore leak to other
modules being tested with unpredictable results. This is the kind of work that
-should be defered to mock, and that's exactly what this mixin class does.
+should be deferred to mock, and that's exactly what this mixin class does.
As an example, if one needs to specify some options which should be available
-to the module being tests one should do:
+to the module being tested one should do:
.. code-block:: python
@@ -122,8 +122,8 @@ to the module being tests one should do:
}
}
Consider this more extensive example from
-``tests/unit/modules/test_libcloud_dns.py``::
+``tests/unit/modules/test_libcloud_dns.py``:
.. code-block:: python
@@ -173,10 +173,10 @@ Consider this more extensive example from
return {libcloud_dns: module_globals}
-What happens on the above example is that, we mock a call to
+What happens in the above example is we mock a call to
`__salt__['config.option']` to return the configuration needed for the
execution of the tests. Additionally, if the ``libcloud`` library is not
-available, since that's not actually part of whats being tested, we mocked that
+available, since that's not actually part of what's being tested, we mocked that
import by patching ``sys.modules`` when tests are running.
@@ -245,7 +245,7 @@ To understand how one might integrate Mock into writing a unit test for Salt,
let's imagine a scenario in which we're testing an execution module that's
designed to operate on a database. Furthermore, let's imagine two separate
methods, here presented in pseduo-code in an imaginary execution module called
-'db.py.
+'db.py'.
.. code-block:: python
@@ -319,7 +319,7 @@ function into ``__salt__`` that's actually a MagicMock instance.
def show_patch(self):
with patch.dict(my_module.__salt__,
-{'function.to_replace': MagicMock()}:
+{'function.to_replace': MagicMock()}):
# From this scope, carry on with testing, with a modified __salt__!
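For readers outside Salt's test suite, a minimal self-contained sketch of the same ``patch.dict`` idea, using only the standard library; ``my_module`` and ``function.to_replace`` are illustrative stand-ins, and in Salt the dunder would normally be injected by the loader or ``LoaderModuleMockMixin``:

.. code-block:: python

    from types import SimpleNamespace
    from unittest import TestCase, main
    from unittest.mock import MagicMock, patch

    # Stand-in for an imported execution module with an empty __salt__ dunder.
    my_module = SimpleNamespace(__salt__={})


    class ShowPatchTestCase(TestCase):
        def test_show_patch(self):
            with patch.dict(my_module.__salt__,
                            {'function.to_replace': MagicMock(return_value=42)}):
                # Inside this scope __salt__ carries the mocked function ...
                self.assertEqual(my_module.__salt__['function.to_replace'](), 42)
            # ... and it is removed again on exit, so nothing leaks into
            # other tests.
            self.assertEqual(my_module.__salt__, {})


    if __name__ == '__main__':
        main()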


@@ -218,6 +218,7 @@ Server configuration values and their defaults:
# Bind to LDAP anonymously to determine group membership
# Active Directory does not allow anonymous binds without special configuration
# In addition, if auth.ldap.anonymous is True, empty bind passwords are not permitted.
auth.ldap.anonymous: False
# FOR TESTING ONLY, this is a VERY insecure setting.
@@ -257,7 +258,11 @@ and groups, it re-authenticates as the user running the Salt commands.
If you are already aware of the structure of your DNs and permissions in your LDAP store are set such that
users can look up their own group memberships, then the first and second users can be the same. To tell Salt this is
-the case, omit the ``auth.ldap.bindpw`` parameter. You can template the ``binddn`` like this:
+the case, omit the ``auth.ldap.bindpw`` parameter. Note this is not the same thing as using an anonymous bind.
Most LDAP servers will not permit anonymous bind, and as mentioned above, if `auth.ldap.anonymous` is False you
cannot use an empty password.
You can template the ``binddn`` like this:
.. code-block:: yaml


@@ -80,12 +80,21 @@ same way as in the above example, only without a top-level ``grains:`` key:
.. note::
-The content of ``/etc/salt/grains`` is ignored if you specify grains in the minion config.
+Grains in ``/etc/salt/grains`` are ignored if you specify the same grains in the minion config.
.. note::
Grains are static, and since they are not often changed, they will need a grains refresh when they are updated. You can do this by calling: ``salt minion saltutil.refresh_modules``
.. note::
You can equally configure static grains for Proxy Minions.
As multiple Proxy Minion processes can run on the same machine, you need
to index the files using the Minion ID, under ``/etc/salt/proxy.d/<minion ID>/grains``.
For example, the grains for the Proxy Minion ``router1`` can be defined
under ``/etc/salt/proxy.d/router1/grains``, while the grains for the
Proxy Minion ``switch7`` can be put in ``/etc/salt/proxy.d/switch7/grains``.
Matching Grains in the Top File
===============================
@@ -305,3 +314,9 @@ Syncing grains can be done a number of ways, they are automatically synced when
above) the grains can be manually synced and reloaded by calling the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.
.. note::
When the :conf_minion:`grains_cache` is set to False, the grains dictionary is built
and stored in memory on the minion. Every time the minion restarts or
``saltutil.refresh_grains`` is run, the grain dictionary is rebuilt from scratch.


@@ -1526,6 +1526,54 @@ Returns:
.. jinja_ref:: jinja-in-files
Escape filters
--------------
.. jinja_ref:: regex_escape
``regex_escape``
----------------
.. versionadded:: 2017.7.0
Allows escaping of strings so they can be interpreted literally by another function.
Example:
.. code-block:: jinja
regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }}
will be rendered as:
.. code-block:: text
regex_escape = https\:\/\/example\.com\?foo\=bar\%20baz
Set Theory Filters
------------------
.. jinja_ref:: unique
``unique``
----------
.. versionadded:: 2017.7.0
Performs set math using Jinja filters.
Example:
.. code-block:: jinja
unique = {{ ['foo', 'foo', 'bar'] | unique }}
will be rendered as:
.. code-block:: text
unique = ['foo', 'bar']
Jinja in Files
==============


@@ -202,7 +202,7 @@ this.
# /srv/salt/orch/deploy.sls
{% set servers = salt['pillar.get']('servers', 'test') %}
-{% set master = salt['pillat.get']('master', 'salt') %}
+{% set master = salt['pillar.get']('master', 'salt') %}
create_instance:
salt.runner:
- name: cloud.profile


@@ -4,9 +4,21 @@ Salt 2016.11.8 Release Notes
Version 2016.11.8 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.]
Anonymous Binds and LDAP/Active Directory
-----------------------------------------
When auth.ldap.anonymous is set to False, the bind password can no longer be empty.
Changes for v2016.11.7..v2016.11.8
----------------------------------
Security Fix
============
CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2017-09-11T14:52:27Z*


@@ -0,0 +1,6 @@
============================
Salt 2016.11.9 Release Notes
============================
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.]


@@ -7,23 +7,9 @@ Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Changes for v2016.3.7..v2016.3.8
--------------------------------
-New master configuration option `allow_minion_key_revoke`, defaults to True. This option
-controls whether a minion can request that the master revoke its key. When True, a minion
-can request a key revocation and the master will comply. If it is False, the key will not
-be revoked by the msater.
-New master configuration option `require_minion_sign_messages`
-This requires that minions cryptographically sign the messages they
-publish to the master. If minions are not signing, then log this information
-at loglevel 'INFO' and drop the message without acting on it.
-New master configuration option `drop_messages_signature_fail`
-Drop messages from minions when their signatures do not validate.
-Note that when this option is False but `require_minion_sign_messages` is True
-minions MUST sign their messages but the validity of their signatures
-is ignored.
-New minion configuration option `minion_sign_messages`
-Causes the minion to cryptographically sign the payload of messages it places
-on the event bus for the master. The payloads are signed with the minion's
-private key so the master can verify the signature with its public key.
+Security Fix
+============
+CVE-2017-14695 Directory traversal vulnerability in minion id validation in SaltStack. Allows remote minions with incorrect credentials to authenticate to a master via a crafted minion ID. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
+CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)


@@ -0,0 +1,29 @@
===========================
Salt 2016.3.9 Release Notes
===========================
Version 2016.3.9 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
Changes for v2016.3.7..v2016.3.9
--------------------------------
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
controls whether a minion can request that the master revoke its key. When True, a minion
can request a key revocation and the master will comply. If it is False, the key will not
be revoked by the msater.
New master configuration option `require_minion_sign_messages`
This requires that minions cryptographically sign the messages they
publish to the master. If minions are not signing, then log this information
at loglevel 'INFO' and drop the message without acting on it.
New master configuration option `drop_messages_signature_fail`
Drop messages from minions when their signatures do not validate.
Note that when this option is False but `require_minion_sign_messages` is True
minions MUST sign their messages but the validity of their signatures
is ignored.
New minion configuration option `minion_sign_messages`
Causes the minion to cryptographically sign the payload of messages it places
on the event bus for the master. The payloads are signed with the minion's
private key so the master can verify the signature with its public key.


@@ -14,18 +14,33 @@ CVE-2017-14695 Directory traversal vulnerability in minion id validation in Salt
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
Known Issues
============
On 2017.7.2 when using salt-api and cherrypy version 5.6.0, issue `#43581`_ will occur when starting the salt-api service. We have patched the cherry-py packages for python-cherrypy-5.6.0-2 from repo.saltstack.com. If you are using python-cherrypy-5.6.0-1 please ensure to run `yum install python-cherrypy` to install the new patched version.
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
-*Generated at: 2017-09-26T21:06:19Z*
+*Generated at: 2017-10-02T21:10:14Z*
-Statistics:
+Statistics
+==========
-- Total Merges: **326**
-- Total Issue references: **133**
-- Total PR references: **389**
+- Total Merges: **328**
+- Total Issue references: **134**
+- Total PR references: **391**
-Changes:
+Changes
+=======
- **PR** `#43868`_: (*rallytime*) Back-port `#43847`_ to 2017.7.2
* Fix to module.run
- **PR** `#43756`_: (*gtmanfred*) split build and install for pkg osx
@ *2017-09-26T20:51:28Z*
* 88414d5 Merge pull request `#43756`_ from gtmanfred/2017.7.2
* f7df41f split build and install for pkg osx
- **PR** `#43585`_: (*rallytime*) Back-port `#43330`_ to 2017.7.2
@ *2017-09-19T17:33:34Z*
@@ -3104,6 +3119,13 @@ Changes:
.. _`#475`: https://github.com/saltstack/salt/issues/475
.. _`#480`: https://github.com/saltstack/salt/issues/480
.. _`#495`: https://github.com/saltstack/salt/issues/495
.. _`#43581`: https://github.com/saltstack/salt/issues/43581
.. _`#43756`: https://github.com/saltstack/salt/pull/43756
.. _`#43847`: https://github.com/saltstack/salt/pull/43847
.. _`#43868`: https://github.com/saltstack/salt/pull/43868
.. _`#475`: https://github.com/saltstack/salt/issues/475
.. _`#480`: https://github.com/saltstack/salt/issues/480
.. _`#495`: https://github.com/saltstack/salt/issues/495
.. _`bp-37424`: https://github.com/saltstack/salt/pull/37424
.. _`bp-39366`: https://github.com/saltstack/salt/pull/39366
.. _`bp-41543`: https://github.com/saltstack/salt/pull/41543


@@ -0,0 +1,6 @@
============================
Salt 2017.7.3 Release Notes
============================
Version 2017.7.3 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.


@@ -57,7 +57,15 @@ Writing Thorium Formulas
========================
Like some other Salt subsystems, Thorium uses its own directory structure. The
default location for this structure is ``/srv/thorium/``, but it can be changed
-using the ``thorium_roots_dir`` setting in the ``master`` configuration file.
+using the ``thorium_roots`` setting in the ``master`` configuration file.
Example ``thorium_roots`` configuration:
.. code-block:: yaml
thorium_roots:
base:
- /etc/salt/thorium
The Thorium top.sls File


@@ -27,7 +27,7 @@ Installing Dependencies
=======================
Both pygit2_ and GitPython_ are supported Python interfaces to git. If
-compatible versions of both are installed, pygit2_ will preferred. In these
+compatible versions of both are installed, pygit2_ will be preferred. In these
cases, GitPython_ can be forced using the :conf_master:`gitfs_provider`
parameter in the master config file.


@@ -33,3 +33,5 @@ Tutorials Index
* :ref:`The macOS (Maverick) Developer Step By Step Guide To Salt Installation <tutorial-macos-walk-through>`
* :ref:`SaltStack Walk-through <tutorial-salt-walk-through>`
* :ref:`Writing Salt Tests <tutorial-salt-testing>`
* :ref:`Running Salt States and Commands in Docker Containers <docker-sls>`
* :ref:`Preseed Minion with Accepted Key <tutorial-preseed-key>`


@@ -23,7 +23,7 @@ Supported Operating Systems
.. note::
In the event you do not see your distribution or version available please
-review the develop branch on GitHub as it main contain updates that are
+review the develop branch on GitHub as it may contain updates that are
not present in the stable release:
https://github.com/saltstack/salt-bootstrap/tree/develop


@@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER
echo -n -e "\033]0;Build: Install Salt\007"
sudo rm -rf $SRCDIR/build
sudo rm -rf $SRCDIR/dist
-sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install
+sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
+sudo $PYTHON $SRCDIR/setup.py install
############################################################################
# Build Package


@@ -67,7 +67,7 @@ _su_cmd() {
_get_pid() {
-netstat $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
+netstat -n $NS_NOTRIM -ap --protocol=unix 2>$ERROR_TO_DEVNULL \
| sed -r -e "\|\s${SOCK_DIR}/minion_event_${MINION_ID_HASH}_pub\.ipc$|"'!d; s|/.*||; s/.*\s//;' \
| uniq
}
@@ -155,7 +155,7 @@ start() {
printf "\nPROCESSES:\n" >&2
ps wwwaxu | grep '[s]alt-minion' >&2
printf "\nSOCKETS:\n" >&2
-netstat $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
+netstat -n $NS_NOTRIM -ap --protocol=unix | grep 'salt.*minion' >&2
printf "\nLOG_FILE:\n" >&2
tail -n 20 "$LOG_FILE" >&2
printf "\nENVIRONMENT:\n" >&2


@@ -110,6 +110,10 @@ class _LDAPConnection(object):
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
if not anonymous:
if self.bindpw is None or len(self.bindpw) < 1:
raise CommandExecutionError(
'LDAP bind password is not set: password cannot be empty if auth.ldap.anonymous is False'
)
self.ldap.simple_bind_s(self.binddn, self.bindpw)
except Exception as ldap_error:
raise CommandExecutionError(


@@ -186,19 +186,60 @@ class Beacon(object):
else:
self.opts['beacons'][name].append({'enabled': enabled_value})
-def list_beacons(self):
+def _get_beacons(self,
+include_opts=True,
+include_pillar=True):
+'''
+Return the beacons data structure
+'''
+beacons = {}
+if include_pillar:
+pillar_beacons = self.opts.get('pillar', {}).get('beacons', {})
+if not isinstance(pillar_beacons, dict):
+raise ValueError('Beacons must be of type dict.')
+beacons.update(pillar_beacons)
+if include_opts:
+opts_beacons = self.opts.get('beacons', {})
+if not isinstance(opts_beacons, dict):
+raise ValueError('Beacons must be of type dict.')
+beacons.update(opts_beacons)
+return beacons
+def list_beacons(self,
+include_pillar=True,
+include_opts=True):
'''
List the beacon items
+include_pillar: Whether to include beacons that are
+configured in pillar, default is True.
+include_opts: Whether to include beacons that are
+configured in opts, default is True.
'''
+beacons = self._get_beacons(include_pillar, include_opts)
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-b_conf = self.functions['config.merge']('beacons')
-self.opts['beacons'].update(b_conf)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': True, 'beacons': beacons},
tag='/salt/minion/minion_beacons_list_complete')
return True
def list_available_beacons(self):
'''
List the available beacons
'''
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
for _beacon in self.beacons if '.beacon' in _beacon]
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': _beacons},
tag='/salt/minion/minion_beacons_list_available_complete')
return True
def add_beacon(self, name, beacon_data):
'''
Add a beacon item
@@ -207,16 +248,23 @@ class Beacon(object):
data = {}
data[name] = beacon_data
-if name in self.opts['beacons']:
-log.info('Updating settings for beacon '
-'item: {0}'.format(name))
+if name in self._get_beacons(include_opts=False):
+comment = 'Cannot update beacon item {0}, ' \
+'because it is configured in pillar.'.format(name)
+complete = False
else:
-log.info('Added new beacon item {0}'.format(name))
-self.opts['beacons'].update(data)
+if name in self.opts['beacons']:
+comment = 'Updating settings for beacon ' \
+'item: {0}'.format(name)
+else:
+comment = 'Added new beacon item: {0}'.format(name)
+complete = True
+self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': complete, 'comment': comment,
+'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_add_complete')
return True
@@ -229,15 +277,21 @@ class Beacon(object):
data = {}
data[name] = beacon_data
-log.info('Updating settings for beacon '
-'item: {0}'.format(name))
-self.opts['beacons'].update(data)
+if name in self._get_beacons(include_opts=False):
+comment = 'Cannot modify beacon item {0}, ' \
+'it is configured in pillar.'.format(name)
+complete = False
+else:
+comment = 'Updating settings for beacon ' \
+'item: {0}'.format(name)
+complete = True
+self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': complete, 'comment': comment,
+'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_modify_complete')
return True
def delete_beacon(self, name):
@@ -245,13 +299,22 @@ class Beacon(object):
Delete a beacon item
'''
-if name in self.opts['beacons']:
-log.info('Deleting beacon item {0}'.format(name))
-del self.opts['beacons'][name]
+if name in self._get_beacons(include_opts=False):
+comment = 'Cannot delete beacon item {0}, ' \
+'it is configured in pillar.'.format(name)
+complete = False
+else:
+if name in self.opts['beacons']:
+del self.opts['beacons'][name]
+comment = 'Deleting beacon item: {0}'.format(name)
+else:
+comment = 'Beacon item {0} not found.'.format(name)
+complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': complete, 'comment': comment,
+'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_delete_complete')
return True
@@ -289,11 +352,19 @@ class Beacon(object):
Enable a beacon
'''
-self._update_enabled(name, True)
+if name in self._get_beacons(include_opts=False):
+comment = 'Cannot enable beacon item {0}, ' \
+'it is configured in pillar.'.format(name)
+complete = False
+else:
+self._update_enabled(name, True)
+comment = 'Enabling beacon item {0}'.format(name)
+complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': complete, 'comment': comment,
+'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_enabled_complete')
return True
@@ -303,11 +374,19 @@ class Beacon(object):
Disable a beacon
'''
-self._update_enabled(name, False)
+if name in self._get_beacons(include_opts=False):
+comment = 'Cannot disable beacon item {0}, ' \
+'it is configured in pillar.'.format(name)
+complete = False
+else:
+self._update_enabled(name, False)
+comment = 'Disabling beacon item {0}'.format(name)
+complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
-evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
+evt.fire_event({'complete': complete, 'comment': comment,
+'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_disabled_complete')
return True


@@ -3,6 +3,8 @@
Beacon to monitor temperature, humidity and pressure using the SenseHat
of a Raspberry Pi.
.. versionadded:: 2017.7.0
:maintainer: Benedikt Werner <1benediktwerner@gmail.com>
:maturity: new
:depends: sense_hat Python module


@@ -160,6 +160,7 @@ class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
# Clear out syndics from cachedir
for syndic_file in os.listdir(self.config['syndic_dir']):
@@ -280,6 +281,7 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)
@@ -467,6 +469,7 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: dis
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)
@@ -575,6 +578,7 @@ class Syndic(parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-in
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
)
except OSError as error:
self.environment_failure(error)


@@ -32,7 +32,10 @@ class SPM(parsers.SPMParser):
v_dirs = [
self.config['cachedir'],
]
-verify_env(v_dirs, self.config['user'],)
+verify_env(v_dirs,
+self.config['user'],
+root_dir=self.config['root_dir'],
+)
verify_log(self.config)
client = salt.spm.SPMClient(ui, self.config)
client.run(self.args)


@@ -1582,7 +1582,10 @@ class LocalClient(object):
timeout=timeout,
tgt=tgt,
tgt_type=tgt_type,
-expect_minions=(verbose or show_timeout),
+# (gtmanfred) expect_minions is popped here incase it is passed from a client
+# call. If this is not popped, then it would be passed twice to
+# get_iter_returns.
+expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),
**kwargs
):
log.debug('return event: %s', ret)
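A small standalone illustration (not part of the commit) of why the keyword is popped first: passing the same keyword both explicitly and via ``**kwargs`` raises a TypeError, so the explicit value has to be taken out of ``kwargs`` before the call. The names below are toy stand-ins.

# Toy stand-in for get_iter_returns(); names are illustrative only.
def get_iter_returns(expect_minions=False, **kwargs):
    return expect_minions

verbose = show_timeout = False
kwargs = {'expect_minions': True, 'timeout': 5}

try:
    # Without the pop, 'expect_minions' is supplied twice and Python raises.
    get_iter_returns(expect_minions=False, **kwargs)
except TypeError as exc:
    print(exc)  # ... got multiple values for keyword argument 'expect_minions'

# Popping it out of kwargs first mirrors the change above and avoids the clash.
expect = kwargs.pop('expect_minions', False) or verbose or show_timeout
print(get_iter_returns(expect_minions=expect, **kwargs))  # -> True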


@@ -240,7 +240,7 @@ class SyncClientMixin(object):
def low(self, fun, low, print_event=True, full_return=False):
'''
-Check for deprecated usage and allow until Salt Oxygen.
+Check for deprecated usage and allow until Salt Fluorine.
'''
msg = []
if 'args' in low:
@@ -251,7 +251,7 @@ class SyncClientMixin(object):
low['kwarg'] = low.pop('kwargs')
if msg:
-salt.utils.warn_until('Oxygen', ' '.join(msg))
+salt.utils.warn_until('Fluorine', ' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)


@@ -723,6 +723,7 @@ class Single(object):
self.thin_dir = kwargs['thin_dir']
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get('ssh_python_env')
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user)
@@ -782,6 +783,10 @@ class Single(object):
self.serial = salt.payload.Serial(opts)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd('powershell $ENV:PROCESSOR_ARCHITECTURE')
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir'])
def __arg_comps(self):
@@ -903,6 +908,8 @@ class Single(object):
ret = json.dumps({'local': opts_pkg})
return ret, retcode
if 'known_hosts_file' in self.opts:
opts_pkg['known_hosts_file'] = self.opts['known_hosts_file']
opts_pkg['file_roots'] = self.opts['file_roots']
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']


@@ -6,6 +6,7 @@ Create ssh executor system
from __future__ import absolute_import
# Import python libs
import os
import time
import copy
import json
import logging
@@ -21,6 +22,8 @@ import salt.loader
import salt.minion
import salt.log
from salt.ext.six import string_types
import salt.ext.six as six
from salt.exceptions import SaltInvocationError
__func_alias__ = {
'apply_': 'apply'
@@ -28,6 +31,47 @@ __func_alias__ = {
log = logging.getLogger(__name__)
def _set_retcode(ret, highstate=None):
'''
Set the return code based on the data back from the state system
'''
# Set default retcode to 0
__context__['retcode'] = 0
if isinstance(ret, list):
__context__['retcode'] = 1
return
if not salt.utils.check_state_result(ret, highstate=highstate):
__context__['retcode'] = 2
def _check_pillar(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True
def _wait(jid):
'''
Wait for all previously started state jobs to finish running
'''
if jid is None:
jid = salt.utils.jid.gen_jid()
states = _prior_running_states(jid)
while states:
time.sleep(1)
states = _prior_running_states(jid)
def _merge_extra_filerefs(*args):
'''
Takes a list of filerefs and returns a merged list
@@ -127,6 +171,100 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
return stdout
def running(concurrent=False):
'''
Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running
'''
ret = []
if concurrent:
return ret
active = __salt__['saltutil.is_running']('state.*')
for data in active:
err = (
'The function "{0}" is running as PID {1} and was started at '
'{2} with jid {3}'
).format(
data['fun'],
data['pid'],
salt.utils.jid.jid_to_time(data['jid']),
data['jid'],
)
ret.append(err)
return ret
def _prior_running_states(jid):
'''
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
'''
ret = []
active = __salt__['saltutil.is_running']('state.*')
for data in active:
try:
data_jid = int(data['jid'])
except ValueError:
continue
if data_jid < int(jid):
ret.append(data)
return ret
def _check_queue(queue, kwargs):
'''
Utility function to queue the state run if requested
and to check for conflicts in currently running states
'''
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent=kwargs.get('concurrent', False))
if conflict:
__context__['retcode'] = 1
return conflict
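
A minimal standalone sketch of the queue-versus-conflict decision that _check_queue() implements above; check_queue, wait_fn and running_fn are illustrative stand-ins, not Salt's actual API:

# Illustrative sketch of the queue-vs-conflict logic in _check_queue().
def check_queue(queue, kwargs, wait_fn, running_fn, context):
    if queue:
        # Queue behind any state job that started earlier (lower jid).
        wait_fn(kwargs.get('__pub_jid'))
        return None
    conflict = running_fn(concurrent=kwargs.get('concurrent', False))
    if conflict:
        # Another state run is in progress: report it and flag failure.
        context['retcode'] = 1
        return conflict
    return None

# With nothing running, the caller proceeds (None means "no conflict"):
assert check_queue(False, {}, lambda jid: None, lambda **kw: [], {}) is None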
def _get_opts(**kwargs):
'''
Return a copy of the opts for use, optionally load a local config on top
'''
opts = copy.deepcopy(__opts__)
if 'localconfig' in kwargs:
return salt.config.minion_config(kwargs['localconfig'], defaults=opts)
if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
if saltenv is not None and not isinstance(saltenv, six.string_types):
opts['environment'] = str(kwargs['saltenv'])
else:
opts['environment'] = kwargs['saltenv']
if 'pillarenv' in kwargs:
pillarenv = kwargs['pillarenv']
if pillarenv is not None and not isinstance(pillarenv, six.string_types):
opts['pillarenv'] = str(kwargs['pillarenv'])
else:
opts['pillarenv'] = kwargs['pillarenv']
return opts
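
_get_opts() quietly coerces non-string saltenv/pillarenv values (for example an integer passed on the CLI) to strings before storing them in the opts copy. A self-contained sketch of that coercion, with merge_env_kwargs as a made-up name:

import copy

def merge_env_kwargs(base_opts, **kwargs):
    opts = copy.deepcopy(base_opts)
    for kwarg, opt_key in (('saltenv', 'environment'), ('pillarenv', 'pillarenv')):
        if kwarg in kwargs:
            value = kwargs[kwarg]
            if value is not None and not isinstance(value, str):
                value = str(value)   # e.g. saltenv=2017 on the CLI
            opts[opt_key] = value
    return opts

# merge_env_kwargs({'environment': None, 'pillarenv': None}, saltenv=2017)
# -> {'environment': '2017', 'pillarenv': None}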
def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None
def low(data, **kwargs): def low(data, **kwargs):
''' '''
Execute a single low data call Execute a single low data call
@ -199,6 +337,21 @@ def low(data, **kwargs):
return stdout return stdout
def _get_test_value(test=None, **kwargs):
'''
Determine the correct value for the test flag.
'''
ret = True
if test is None:
if salt.utils.test_mode(test=test, **kwargs):
ret = True
else:
ret = __opts__.get('test', None)
else:
ret = test
return ret
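
The precedence implemented by _get_test_value() is: an explicit test argument wins, then kwargs that imply test mode, then the minion's configured test option. A simplified model, with resolve_test_flag and test_mode_fn as hypothetical stand-ins:

def resolve_test_flag(test, kwargs, opts, test_mode_fn):
    if test is not None:
        return test
    if test_mode_fn(test=test, **kwargs):
        return True
    return opts.get('test', None)

assert resolve_test_flag(False, {}, {'test': True}, lambda **kw: False) is False
assert resolve_test_flag(None, {}, {'test': True}, lambda **kw: False) is True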
def high(data, **kwargs): def high(data, **kwargs):
''' '''
Execute the compound calls stored in a single set of high data Execute the compound calls stored in a single set of high data
@ -289,6 +442,143 @@ def apply_(mods=None,
return highstate(**kwargs) return highstate(**kwargs)
def request(mods=None,
**kwargs):
'''
.. versionadded:: 2017.7.3
Request that the local admin execute a state run via
`salt-call state.run_request`
All arguments match state.apply
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
req = check_request()
req.update({kwargs.get('name', 'default'): {
'test_run': ret,
'mods': mods,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return ret
def check_request(name=None):
'''
.. versionadded:: 2017.7.3
Return the state request information, if any
CLI Example:
.. code-block:: bash
salt '*' state.check_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if os.path.isfile(notify_path):
with salt.utils.fopen(notify_path, 'rb') as fp_:
req = serial.load(fp_)
if name:
return req[name]
return req
return {}
def clear_request(name=None):
'''
.. versionadded:: 2017.7.3
Clear out the state execution request without executing it
CLI Example:
.. code-block:: bash
salt '*' state.clear_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if not os.path.isfile(notify_path):
return True
if not name:
try:
os.remove(notify_path)
except (IOError, OSError):
pass
else:
req = check_request()
if name in req:
req.pop(name)
else:
return False
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return True
def run_request(name='default', **kwargs):
'''
.. versionadded:: 2017.7.3
Execute the pending state request
CLI Example:
.. code-block:: bash
salt '*' state.run_request
'''
req = check_request()
if name not in req:
return {}
n_req = req[name]
if 'mods' not in n_req or 'kwargs' not in n_req:
return {}
req[name]['kwargs'].update(kwargs)
if 'test' in n_req['kwargs']:
n_req['kwargs'].pop('test')
if req:
ret = apply_(n_req['mods'], **n_req['kwargs'])
try:
os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
except (IOError, OSError):
pass
return ret
return {}
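
For orientation, this is roughly the shape of the request cache that state.request() serializes to <cachedir>/req_state.p and that check_request()/run_request() read back; the values, target and SLS names below are illustrative only:

# Rough shape of the cached request written by state.request().
pending_request = {
    'default': {
        'test_run': {},             # dry-run results from apply_(..., test=True)
        'mods': ['webserver'],      # the SLS targets that were requested
        'kwargs': {'test': True},   # original keyword arguments
    }
}
# Typical operator flow (hypothetical target/SLS names):
#   salt 'web01' state.request webserver      # record a dry run
#   salt 'web01' state.check_request          # inspect what would change
#   salt 'web01' state.run_request            # apply it for real ('test' is stripped)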
def highstate(test=None, **kwargs): def highstate(test=None, **kwargs):
''' '''
Retrieve the state data from the salt master for this minion and execute it Retrieve the state data from the salt master for this minion and execute it
@ -478,6 +768,99 @@ def show_lowstate():
return st_.compile_low_chunks() return st_.compile_low_chunks()
def sls_id(id_, mods, test=None, queue=False, **kwargs):
'''
Call a single ID from the named module(s) and handle all requisites
The state ID comes *before* the module ID(s) on the command line.
id
ID to call
mods
Comma-delimited list of modules to search for given id and its requisites
.. versionadded:: 2017.7.3
saltenv : base
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.sls_id my_state my_module
salt '*' state.sls_id my_state my_module,a_common_module
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
if isinstance(mods, six.string_types):
split_mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['environment']: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Apply requisites to high data
high_, req_in_errors = st_.state.requisite_in(high_)
if req_in_errors:
# This if statement should not be necessary if there were no errors,
# but it is required to get the unit tests to pass.
errors.extend(req_in_errors)
if errors:
__context__['retcode'] = 1
return errors
chunks = st_.state.compile_high_data(high_)
ret = {}
for chunk in chunks:
if chunk.get('__id__', '') == id_:
ret.update(st_.state.call_chunk(chunk, {}, chunks))
_set_retcode(ret, highstate=highstate)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
if not ret:
raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, opts['environment'])
)
return ret
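
The core of sls_id() is the __id__ filter over the compiled low chunks; a tiny self-contained illustration of that selection step (the chunk contents are made up):

chunks = [
    {'__id__': 'install_postgres', 'state': 'pkg', 'fun': 'installed', 'name': 'postgresql'},
    {'__id__': 'postgres_service', 'state': 'service', 'fun': 'running', 'name': 'postgresql'},
]
wanted = 'install_postgres'
# Only chunks whose __id__ matches are executed; everything else is dropped.
selected = [c for c in chunks if c.get('__id__', '') == wanted]
assert len(selected) == 1 and selected[0]['state'] == 'pkg'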
def show_sls(mods, saltenv='base', test=None, **kwargs): def show_sls(mods, saltenv='base', test=None, **kwargs):
''' '''
Display the state data from a specific sls or list of sls files on the Display the state data from a specific sls or list of sls files on the

View File

@ -66,7 +66,8 @@ class SaltCloud(parsers.SaltCloudParser):
if self.config['verify_env']: if self.config['verify_env']:
verify_env( verify_env(
[os.path.dirname(self.config['conf_file'])], [os.path.dirname(self.config['conf_file'])],
-                salt_master_user
+                salt_master_user,
+                root_dir=self.config['root_dir'],
) )
logfile = self.config['log_file'] logfile = self.config['log_file']
if logfile is not None and not logfile.startswith('tcp://') \ if logfile is not None and not logfile.startswith('tcp://') \

View File

@ -2400,9 +2400,10 @@ def create_attach_volumes(name, kwargs, call=None):
'-a or --action.' '-a or --action.'
) )
-    volumes = kwargs['volumes']
+    volumes = literal_eval(kwargs['volumes'])
     node = kwargs['node']
-    node_data = _expand_node(node)
+    conn = get_conn()
+    node_data = _expand_node(conn.ex_get_node(node))
     letter = ord('a') - 1
     for idx, volume in enumerate(volumes):
@ -2412,9 +2413,9 @@ def create_attach_volumes(name, kwargs, call=None):
             'disk_name': volume_name,
             'location': node_data['extra']['zone']['name'],
             'size': volume['size'],
-            'type': volume['type'],
-            'image': volume['image'],
-            'snapshot': volume['snapshot']
+            'type': volume.get('type', 'pd-standard'),
+            'image': volume.get('image', None),
+            'snapshot': volume.get('snapshot', None)
         }
         create_disk(volume_dict, 'function')
@ -2580,7 +2581,10 @@ def create(vm_=None, call=None):
     ssh_user, ssh_key = __get_ssh_credentials(vm_)
     vm_['ssh_host'] = __get_host(node_data, vm_)
     vm_['key_filename'] = ssh_key
-    __utils__['cloud.bootstrap'](vm_, __opts__)
+    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
+    ret.update(node_dict)
     log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
     log.trace(
@ -2598,7 +2602,7 @@ def create(vm_=None, call=None):
         transport=__opts__['transport']
     )
-    return node_dict
+    return ret
def update_pricing(kwargs=None, call=None): def update_pricing(kwargs=None, call=None):

View File

@ -462,18 +462,54 @@ def create(vm_):
         return ret
     except Exception as e:  # pylint: disable=broad-except
-        # Try to clean up in as much cases as possible
-        log.info('Cleaning up after exception clean up items: {0}'.format(cleanup))
-        for leftover in cleanup:
-            what = leftover['what']
-            item = leftover['item']
-            if what == 'domain':
-                destroy_domain(conn, item)
-            if what == 'volume':
-                item.delete()
+        do_cleanup(cleanup)
+        # throw the root cause after cleanup
         raise e
def do_cleanup(cleanup):
'''
Clean up clone domain leftovers as much as possible.
Extra robust clean up in order to deal with some small changes in libvirt
behavior over time. Passed in volumes and domains are deleted, any errors
are ignored. Used when cloning/provisioning a domain fails.
:param cleanup: list containing dictionaries with two keys: 'what' and 'item'.
If 'what' is domain the 'item' is a libvirt domain object.
If 'what' is volume then the item is a libvirt volume object.
Returns:
none
.. versionadded:: 2017.7.3
'''
log.info('Cleaning up after exception')
for leftover in cleanup:
what = leftover['what']
item = leftover['item']
if what == 'domain':
log.info('Cleaning up {0} {1}'.format(what, item.name()))
try:
item.destroy()
log.debug('{0} {1} forced off'.format(what, item.name()))
except libvirtError:
pass
try:
item.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE+
libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA+
libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
log.debug('{0} {1} undefined'.format(what, item.name()))
except libvirtError:
pass
if what == 'volume':
try:
item.delete()
log.debug('{0} {1} cleaned up'.format(what, item.name()))
except libvirtError:
pass
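
For reference, the cleanup argument consumed by do_cleanup() is built by create() as a list of small dicts; a sketch of its shape, with placeholder values standing in for live libvirt handles:

cleanup = [
    {'what': 'domain', 'item': None},   # a libvirt domain handle: forced off, then undefined
    {'what': 'volume', 'item': None},   # a libvirt storage volume handle: deleted
]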
def destroy(name, call=None): def destroy(name, call=None):
""" """
This function irreversibly destroys a virtual machine on the cloud provider. This function irreversibly destroys a virtual machine on the cloud provider.

View File

@ -4571,7 +4571,8 @@ def _list_nodes(full=False):
pass pass
         vms[name]['id'] = vm.find('ID').text
-        vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text
+        if vm.find('TEMPLATE').find('TEMPLATE_ID'):
+            vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text
vms[name]['name'] = name vms[name]['name'] = name
vms[name]['size'] = {'cpu': cpu_size, 'memory': memory_size} vms[name]['size'] = {'cpu': cpu_size, 'memory': memory_size}
vms[name]['state'] = vm.find('STATE').text vms[name]['state'] = vm.find('STATE').text

View File

@ -48,6 +48,10 @@ log = logging.getLogger(__name__)
# The name salt will identify the lib by # The name salt will identify the lib by
__virtualname__ = 'virtualbox' __virtualname__ = 'virtualbox'
#if no clone mode is specified in the virtualbox profile
#then default to 0 which was the old default value
DEFAULT_CLONE_MODE = 0
def __virtual__(): def __virtual__():
''' '''
@ -85,6 +89,30 @@ def get_configured_provider():
return configured return configured
def map_clonemode(vm_info):
"""
Convert the virtualbox config file values for clone_mode into the integers the API requires
"""
mode_map = {
'state': 0,
'child': 1,
'all': 2
}
if not vm_info:
return DEFAULT_CLONE_MODE
if 'clonemode' not in vm_info:
return DEFAULT_CLONE_MODE
if vm_info['clonemode'] in mode_map:
return mode_map[vm_info['clonemode']]
else:
raise SaltCloudSystemExit(
"Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(mode_map.keys()))
)
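
A standalone equivalent of the mapping performed by map_clonemode(), useful for seeing the accepted profile values at a glance; clone_mode_from_profile is an illustrative name and ValueError stands in for the driver's SaltCloudSystemExit:

MODE_MAP = {'state': 0, 'child': 1, 'all': 2}

def clone_mode_from_profile(vm_info, default=0):
    if not vm_info or 'clonemode' not in vm_info:
        return default
    try:
        return MODE_MAP[vm_info['clonemode']]
    except KeyError:
        raise ValueError(
            'Illegal clonemode; legal values are: {0}'.format(','.join(MODE_MAP)))

# clone_mode_from_profile({'clonemode': 'child'}) -> 1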
def create(vm_info): def create(vm_info):
""" """
Creates a virtual machine from the given VM information. Creates a virtual machine from the given VM information.
@ -102,6 +130,7 @@ def create(vm_info):
profile: <dict> profile: <dict>
driver: <provider>:<profile> driver: <provider>:<profile>
clonefrom: <vm_name> clonefrom: <vm_name>
clonemode: <mode> (default: state, choices: state, child, all)
} }
@type vm_info dict @type vm_info dict
@return dict of resulting vm. !!!Passwords can and should be included!!! @return dict of resulting vm. !!!Passwords can and should be included!!!
@ -133,6 +162,9 @@ def create(vm_info):
key_filename = config.get_cloud_config_value( key_filename = config.get_cloud_config_value(
'private_key', vm_info, __opts__, search_global=False, default=None 'private_key', vm_info, __opts__, search_global=False, default=None
) )
clone_mode = map_clonemode(vm_info)
wait_for_pattern = vm_info['waitforpattern'] if 'waitforpattern' in vm_info.keys() else None
interface_index = vm_info['interfaceindex'] if 'interfaceindex' in vm_info.keys() else 0
log.debug("Going to fire event: starting create") log.debug("Going to fire event: starting create")
__utils__['cloud.fire_event']( __utils__['cloud.fire_event'](
@ -147,7 +179,8 @@ def create(vm_info):
# to create the virtual machine. # to create the virtual machine.
request_kwargs = { request_kwargs = {
'name': vm_info['name'], 'name': vm_info['name'],
'clone_from': vm_info['clonefrom'] 'clone_from': vm_info['clonefrom'],
'clone_mode': clone_mode
} }
__utils__['cloud.fire_event']( __utils__['cloud.fire_event'](
@ -163,17 +196,17 @@ def create(vm_info):
# Booting and deploying if needed # Booting and deploying if needed
if power: if power:
vb_start_vm(vm_name, timeout=boot_timeout) vb_start_vm(vm_name, timeout=boot_timeout)
-        ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name)
+        ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)
         if len(ips):
-            ip = ips[0]
+            ip = ips[interface_index]
             log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
             # ssh or smb using ip and install salt only if deploy is True
             if deploy:
                 vm_info['key_filename'] = key_filename
                 vm_info['ssh_host'] = ip
-                res = __utils__['cloud.bootstrap'](vm_info)
+                res = __utils__['cloud.bootstrap'](vm_info, __opts__)
                 vm_result.update(res)
__utils__['cloud.fire_event']( __utils__['cloud.fire_event'](

View File

@ -938,7 +938,7 @@ VALID_OPTS = {
'queue_dirs': list, 'queue_dirs': list,
-    # Instructs the minion to ping its master(s) every n number of seconds. Used
+    # Instructs the minion to ping its master(s) every n number of minutes. Used
# primarily as a mitigation technique against minion disconnects. # primarily as a mitigation technique against minion disconnects.
'ping_interval': int, 'ping_interval': int,

View File

@ -607,6 +607,9 @@ class AsyncAuth(object):
raise tornado.gen.Return('retry') raise tornado.gen.Return('retry')
else: else:
raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error') raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error')
if not isinstance(payload, dict):
log.error('Sign-in attempt failed: %s', payload)
raise tornado.gen.Return(False)
if 'load' in payload: if 'load' in payload:
if 'ret' in payload['load']: if 'ret' in payload['load']:
if not payload['load']['ret']: if not payload['load']['ret']:

View File

@ -586,7 +586,18 @@ class RemoteFuncs(object):
ret = {} ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']): if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret return ret
-        match_type = load.get('tgt_type', 'glob')
+        expr_form = load.get('expr_form')
+        if expr_form is not None and 'tgt_type' not in load:
+            salt.utils.warn_until(
+                u'Neon',
+                u'_mine_get: minion {0} uses pre-Nitrogen API key '
+                u'"expr_form". Accepting for backwards compatibility '
+                u'but this is not guaranteed '
+                u'after the Neon release'.format(load['id'])
+            )
+            match_type = expr_form
+        else:
+            match_type = load.get('tgt_type', 'glob')
if match_type.lower() == 'pillar': if match_type.lower() == 'pillar':
match_type = 'pillar_exact' match_type = 'pillar_exact'
if match_type.lower() == 'compound': if match_type.lower() == 'compound':

View File

@ -10,6 +10,7 @@ import socket
import ctypes import ctypes
import os import os
import ipaddress import ipaddress
import salt.ext.six as six
class sockaddr(ctypes.Structure): class sockaddr(ctypes.Structure):
@ -36,7 +37,7 @@ def inet_pton(address_family, ip_string):
# This will catch IP Addresses such as 10.1.2 # This will catch IP Addresses such as 10.1.2
if address_family == socket.AF_INET: if address_family == socket.AF_INET:
try: try:
-            ipaddress.ip_address(ip_string.decode())
+            ipaddress.ip_address(six.u(ip_string))
except ValueError: except ValueError:
raise socket.error('illegal IP address string passed to inet_pton') raise socket.error('illegal IP address string passed to inet_pton')
return socket.inet_aton(ip_string) return socket.inet_aton(ip_string)

View File

@ -622,12 +622,15 @@ class Client(object):
def on_header(hdr): def on_header(hdr):
if write_body[1] is not False and write_body[2] is None: if write_body[1] is not False and write_body[2] is None:
if not hdr.strip() and 'Content-Type' not in write_body[1]: if not hdr.strip() and 'Content-Type' not in write_body[1]:
-                    # We've reached the end of the headers and not yet
-                    # found the Content-Type. Reset write_body[0] so that
-                    # we properly follow the redirect. Note that slicing is
-                    # used below to ensure that we re-use the same list
-                    # rather than creating a new one.
-                    write_body[0:2] = (None, False)
+                    # If write_body[0] is True, then we are not following a
+                    # redirect (initial response was a 200 OK). So there is
+                    # no need to reset write_body[0].
+                    if write_body[0] is not True:
+                        # We are following a redirect, so we need to reset
+                        # write_body[0] so that we properly follow it.
+                        write_body[0] = None
+                        # We don't need the HTTPHeaders object anymore
+                        write_body[1] = False
return return
# Try to find out what content type encoding is used if # Try to find out what content type encoding is used if
# this is a text file # this is a text file

View File

@ -716,12 +716,14 @@ def _virtual(osdata):
pass pass
if os.path.isfile('/proc/1/cgroup'): if os.path.isfile('/proc/1/cgroup'):
try: try:
-            with salt.utils.fopen('/proc/1/cgroup', 'r') as fhr:
-                if ':/lxc/' in fhr.read():
-                    grains['virtual_subtype'] = 'LXC'
             with salt.utils.fopen('/proc/1/cgroup', 'r') as fhr:
                 fhr_contents = fhr.read()
-                if ':/docker/' in fhr_contents or ':/system.slice/docker' in fhr_contents:
+                if ':/lxc/' in fhr_contents:
+                    grains['virtual_subtype'] = 'LXC'
+                else:
+                    if any(x in fhr_contents
+                           for x in (':/system.slice/docker', ':/docker/',
+                                     ':/docker-ce/')):
                         grains['virtual_subtype'] = 'Docker'
except IOError: except IOError:
pass pass
@ -1367,7 +1369,10 @@ def os_data():
.format(' '.join(init_cmdline)) .format(' '.join(init_cmdline))
) )
-    # Add lsb grains on any distro with lsb-release
+    # Add lsb grains on any distro with lsb-release. Note that this import
+    # can fail on systems with lsb-release installed if the system package
+    # does not install the python package for the python interpreter used by
+    # Salt (i.e. python2 or python3)
try: try:
import lsb_release # pylint: disable=import-error import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information() release = lsb_release.get_distro_information()
@ -1416,7 +1421,13 @@ def os_data():
if 'VERSION_ID' in os_release: if 'VERSION_ID' in os_release:
grains['lsb_distrib_release'] = os_release['VERSION_ID'] grains['lsb_distrib_release'] = os_release['VERSION_ID']
if 'PRETTY_NAME' in os_release: if 'PRETTY_NAME' in os_release:
-            grains['lsb_distrib_codename'] = os_release['PRETTY_NAME']
+            codename = os_release['PRETTY_NAME']
+            # https://github.com/saltstack/salt/issues/44108
+            if os_release['ID'] == 'debian':
+                codename_match = re.search(r'\((\w+)\)$', codename)
+                if codename_match:
+                    codename = codename_match.group(1)
+            grains['lsb_distrib_codename'] = codename
if 'CPE_NAME' in os_release: if 'CPE_NAME' in os_release:
if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']: if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']:
grains['os'] = "SUSE" grains['os'] = "SUSE"

View File

@ -12,6 +12,7 @@ import logging
# Import salt libs # Import salt libs
import salt.utils import salt.utils
__proxyenabled__ = ['*']
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -31,16 +32,33 @@ def config():
if 'conf_file' not in __opts__: if 'conf_file' not in __opts__:
return {} return {}
if os.path.isdir(__opts__['conf_file']): if os.path.isdir(__opts__['conf_file']):
-        gfn = os.path.join(
-            __opts__['conf_file'],
-            'grains'
-        )
+        if salt.utils.is_proxy():
+            gfn = os.path.join(
+                __opts__['conf_file'],
+                'proxy.d',
+                __opts__['id'],
+                'grains'
+            )
+        else:
+            gfn = os.path.join(
+                __opts__['conf_file'],
+                'grains'
+            )
     else:
-        gfn = os.path.join(
-            os.path.dirname(__opts__['conf_file']),
-            'grains'
-        )
+        if salt.utils.is_proxy():
+            gfn = os.path.join(
+                os.path.dirname(__opts__['conf_file']),
+                'proxy.d',
+                __opts__['id'],
+                'grains'
+            )
+        else:
+            gfn = os.path.join(
+                os.path.dirname(__opts__['conf_file']),
+                'grains'
+            )
if os.path.isfile(gfn): if os.path.isfile(gfn):
log.debug('Loading static grains from %s', gfn)
with salt.utils.fopen(gfn, 'rb') as fp_: with salt.utils.fopen(gfn, 'rb') as fp_:
try: try:
return yaml.safe_load(fp_.read()) return yaml.safe_load(fp_.read())

View File

@ -128,12 +128,12 @@ def setup_handlers():
callable(transport_registry.compute_scope)): callable(transport_registry.compute_scope)):
conf_extras = transport_registry.compute_scope(url, dsn_config) conf_extras = transport_registry.compute_scope(url, dsn_config)
dsn_config.update(conf_extras) dsn_config.update(conf_extras)
options.update({ options.update({
'project': dsn_config['SENTRY_PROJECT'], 'project': dsn_config['SENTRY_PROJECT'],
'servers': dsn_config['SENTRY_SERVERS'], 'servers': dsn_config['SENTRY_SERVERS'],
'public_key': dsn_config['SENTRY_PUBLIC_KEY'], 'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
'secret_key': dsn_config['SENTRY_SECRET_KEY'] 'secret_key': dsn_config['SENTRY_SECRET_KEY']
}) })
except ValueError as exc: except ValueError as exc:
log.info( log.info(
'Raven failed to parse the configuration provided ' 'Raven failed to parse the configuration provided '

View File

@ -862,6 +862,10 @@ class MinionManager(MinionBase):
failed = False failed = False
while True: while True:
try: try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed) yield minion.connect_master(failed=failed)
minion.tune_in(start=False) minion.tune_in(start=False)
break break
@ -935,7 +939,8 @@ class Minion(MinionBase):
# Flag meaning minion has finished initialization including first connect to the master. # Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events. # True means the Minion is fully functional and ready to handle events.
self.ready = False self.ready = False
self.jid_queue = jid_queue self.jid_queue = jid_queue or []
self.periodic_callbacks = {}
if io_loop is None: if io_loop is None:
if HAS_ZMQ: if HAS_ZMQ:
@ -967,6 +972,19 @@ class Minion(MinionBase):
# post_master_init # post_master_init
if not salt.utils.is_proxy(): if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts) self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager') log.info('Creating minion process manager')
@ -1070,19 +1088,22 @@ class Minion(MinionBase):
pillarenv=self.opts.get('pillarenv') pillarenv=self.opts.get('pillarenv')
).compile_pillar() ).compile_pillar()
-        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
-        self.serial = salt.payload.Serial(self.opts)
-        self.mod_opts = self._prep_mod_opts()
-        self.matcher = Matcher(self.opts, self.functions)
-        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
-        uid = salt.utils.get_uid(user=self.opts.get('user', None))
-        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
-        self.schedule = salt.utils.schedule.Schedule(
-            self.opts,
-            self.functions,
-            self.returners,
-            cleanup=[master_event(type='alive')])
+        if not self.ready:
+            self._setup_core()
+        elif self.connected and self.opts['pillar']:
+            # The pillar has changed due to the connection to the master.
+            # Reload the functions so that they can use the new pillar data.
+            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
+            if hasattr(self, 'schedule'):
+                self.schedule.functions = self.functions
+                self.schedule.returners = self.returners
+        if not hasattr(self, 'schedule'):
+            self.schedule = salt.utils.schedule.Schedule(
+                self.opts,
+                self.functions,
+                self.returners,
+                cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler # add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions: if self.opts['mine_enabled'] and 'mine.update' in self.functions:
@ -1136,9 +1157,6 @@ class Minion(MinionBase):
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self): def _return_retry_timer(self):
''' '''
Based on the minion configuration, either return a randomized timer or Based on the minion configuration, either return a randomized timer or
@ -1896,6 +1914,8 @@ class Minion(MinionBase):
func = data.get('func', None) func = data.get('func', None)
name = data.get('name', None) name = data.get('name', None)
beacon_data = data.get('beacon_data', None) beacon_data = data.get('beacon_data', None)
include_pillar = data.get(u'include_pillar', None)
include_opts = data.get(u'include_opts', None)
if func == 'add': if func == 'add':
self.beacons.add_beacon(name, beacon_data) self.beacons.add_beacon(name, beacon_data)
@ -1912,7 +1932,9 @@ class Minion(MinionBase):
elif func == 'disable_beacon': elif func == 'disable_beacon':
self.beacons.disable_beacon(name) self.beacons.disable_beacon(name)
elif func == 'list': elif func == 'list':
self.beacons.list_beacons() self.beacons.list_beacons(include_opts, include_pillar)
elif func == u'list_available':
self.beacons.list_available_beacons()
def environ_setenv(self, tag, data): def environ_setenv(self, tag, data):
''' '''
@ -2176,6 +2198,118 @@ class Minion(MinionBase):
except (ValueError, NameError): except (ValueError, NameError):
pass pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In # Main Minion Tune In
def tune_in(self, start=True): def tune_in(self, start=True):
''' '''
@ -2187,6 +2321,10 @@ class Minion(MinionBase):
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id'])) log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start: if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master() self.sync_connect_master()
if self.connected: if self.connected:
self._fire_master_minion_start() self._fire_master_minion_start()
@ -2201,31 +2339,9 @@ class Minion(MinionBase):
# On first startup execute a state run if configured to do so # On first startup execute a state run if configured to do so
self._state_run() self._state_run()
loop_interval = self.opts['loop_interval'] self.setup_beacons()
self.setup_scheduler()
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval # schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60 ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected: if ping_interval > 0 and self.connected:
@ -2243,30 +2359,7 @@ class Minion(MinionBase):
except Exception: except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['ping'].start()
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons, sync=False)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber # add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None: if hasattr(self, 'pub_channel') and self.pub_channel is not None:

View File

@ -125,7 +125,7 @@ def cert(name,
salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public
''' '''
-    cmd = [LEA, 'certonly', '--quiet']
+    cmd = [LEA, 'certonly', '--non-interactive']
cert_file = _cert_file(name, 'cert') cert_file = _cert_file(name, 'cert')
if not __salt__['file.file_exists'](cert_file): if not __salt__['file.file_exists'](cert_file):

View File

@ -29,7 +29,6 @@ import json
import yaml import yaml
# pylint: disable=no-name-in-module,import-error,redefined-builtin # pylint: disable=no-name-in-module,import-error,redefined-builtin
import salt.ext.six as six import salt.ext.six as six
from salt.ext.six.moves import range
from salt.ext.six.moves.urllib.error import HTTPError from salt.ext.six.moves.urllib.error import HTTPError
from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen
# pylint: enable=no-name-in-module,import-error,redefined-builtin # pylint: enable=no-name-in-module,import-error,redefined-builtin
@ -1558,7 +1557,7 @@ def _consolidate_repo_sources(sources):
combined_comps = set(repo.comps).union(set(combined.comps)) combined_comps = set(repo.comps).union(set(combined.comps))
consolidated[key].comps = list(combined_comps) consolidated[key].comps = list(combined_comps)
else: else:
-            consolidated[key] = sourceslist.SourceEntry(_strip_uri(repo.line))
+            consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line))
if repo.file != base_file: if repo.file != base_file:
delete_files.add(repo.file) delete_files.add(repo.file)
@ -1666,7 +1665,7 @@ def list_repos():
repo['dist'] = source.dist repo['dist'] = source.dist
repo['type'] = source.type repo['type'] = source.type
repo['uri'] = source.uri.rstrip('/') repo['uri'] = source.uri.rstrip('/')
-        repo['line'] = _strip_uri(source.line.strip())
+        repo['line'] = salt.utils.pkg.deb.strip_uri(source.line.strip())
repo['architectures'] = getattr(source, 'architectures', []) repo['architectures'] = getattr(source, 'architectures', [])
repos.setdefault(source.uri, []).append(repo) repos.setdefault(source.uri, []).append(repo)
return repos return repos
@ -2412,18 +2411,6 @@ def file_dict(*packages):
return __salt__['lowpkg.file_dict'](*packages) return __salt__['lowpkg.file_dict'](*packages)
def _strip_uri(repo):
'''
Remove the trailing slash from the URI in a repo definition
'''
splits = repo.split()
for idx in range(len(splits)):
if any(splits[idx].startswith(x)
for x in ('http://', 'https://', 'ftp://')):
splits[idx] = splits[idx].rstrip('/')
return ' '.join(splits)
def expand_repo_def(**kwargs): def expand_repo_def(**kwargs):
''' '''
Take a repository definition and expand it to the full pkg repository dict Take a repository definition and expand it to the full pkg repository dict
@ -2439,7 +2426,7 @@ def expand_repo_def(**kwargs):
_check_apt() _check_apt()
sanitized = {} sanitized = {}
-    repo = _strip_uri(kwargs['repo'])
+    repo = salt.utils.pkg.deb.strip_uri(kwargs['repo'])
if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'): if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
dist = __grains__['lsb_distrib_codename'] dist = __grains__['lsb_distrib_codename']
owner_name, ppa_name = repo[4:].split('/', 1) owner_name, ppa_name = repo[4:].split('/', 1)
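
The removed _strip_uri() helper (its replacement lives in salt.utils.pkg.deb as strip_uri) only normalises the URI field of a sources.list line. A standalone equivalent for reference:

def strip_uri(repo_line):
    # Drop the trailing slash from any URI component of the repo definition.
    parts = repo_line.split()
    for idx, part in enumerate(parts):
        if part.startswith(('http://', 'https://', 'ftp://')):
            parts[idx] = part.rstrip('/')
    return ' '.join(parts)

assert (strip_uri('deb http://archive.ubuntu.com/ubuntu/ xenial main')
        == 'deb http://archive.ubuntu.com/ubuntu xenial main')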

View File

@ -498,16 +498,16 @@ def tar(options, tarfile, sources=None, dest=None,
.. code-block:: bash .. code-block:: bash
-        salt '*' archive.tar -cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
+        salt '*' archive.tar cjvf /tmp/salt.tar.bz2 {{grains.saltpath}} template=jinja
     CLI Examples:
     .. code-block:: bash
         # Create a tarfile
-        salt '*' archive.tar -cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
+        salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 /tmp/file_1,/tmp/file_2
         # Create a tarfile using globbing (2017.7.0 and later)
-        salt '*' archive.tar -cjvf /tmp/tarfile.tar.bz2 '/tmp/file_*'
+        salt '*' archive.tar cjvf /tmp/tarfile.tar.bz2 '/tmp/file_*'
         # Unpack a tarfile
         salt '*' archive.tar xf foo.tar dest=/target/directory
''' '''

View File

@ -169,6 +169,9 @@ def atrm(*args):
if not args: if not args:
return {'jobs': {'removed': [], 'tag': None}} return {'jobs': {'removed': [], 'tag': None}}
# Convert all to strings
args = [str(arg) for arg in args]
if args[0] == 'all': if args[0] == 'all':
if len(args) > 1: if len(args) > 1:
opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']]))) opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']])))
@ -178,7 +181,7 @@ def atrm(*args):
ret = {'jobs': {'removed': opts, 'tag': None}} ret = {'jobs': {'removed': opts, 'tag': None}}
else: else:
-        opts = list(list(map(str, [i['job'] for i in atq()['jobs']
-                                   if i['job'] in args])))
+        opts = list(list(map(str, [i['job'] for i in atq()['jobs']
+                                   if str(i['job']) in args])))
ret = {'jobs': {'removed': opts, 'tag': None}} ret = {'jobs': {'removed': opts, 'tag': None}}
# Shim to produce output similar to what __virtual__() should do # Shim to produce output similar to what __virtual__() should do
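
The str() casts added to atrm() matter because atq() reports job ids as integers while CLI arguments arrive as strings; a small illustration of the normalised membership test (the job data is made up):

jobs_from_atq = [{'job': 104, 'tag': ''}, {'job': 105, 'tag': 'backup'}]
args = [str(arg) for arg in (104, '105')]          # normalise everything to str
removed = [str(j['job']) for j in jobs_from_atq if str(j['job']) in args]
assert removed == ['104', '105']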

View File

@ -27,12 +27,22 @@ __func_alias__ = {
} }
-def list_(return_yaml=True):
+def list_(return_yaml=True,
+          include_pillar=True,
+          include_opts=True):
     '''
     List the beacons currently configured on the minion
-    :param return_yaml: Whether to return YAML formatted output, default True
-    :return: List of currently configured Beacons.
+    :param return_yaml: Whether to return YAML formatted output,
+                        default True
+    :param include_pillar: Whether to include beacons that are
+                           configured in pillar, default is True.
+    :param include_opts: Whether to include beacons that are
+                         configured in opts, default is True.
+    :return: List of currently configured Beacons.
CLI Example: CLI Example:
@ -45,7 +55,10 @@ def list_(return_yaml=True):
try: try:
eventer = salt.utils.event.get_event('minion', opts=__opts__) eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'list'}, 'manage_beacons') res = __salt__['event.fire']({'func': 'list',
'include_pillar': include_pillar,
'include_opts': include_opts},
'manage_beacons')
if res: if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30) event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30)
log.debug('event_ret {0}'.format(event_ret)) log.debug('event_ret {0}'.format(event_ret))
@ -69,6 +82,47 @@ def list_(return_yaml=True):
return {'beacons': {}} return {'beacons': {}}
def list_available(return_yaml=True):
'''
List the beacons currently available on the minion
:param return_yaml: Whether to return YAML formatted output, default True
:return: List of currently configured Beacons.
CLI Example:
.. code-block:: bash
salt '*' beacons.list_available
'''
beacons = None
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_available_complete', wait=30)
if event_ret and event_ret['complete']:
beacons = event_ret['beacons']
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
ret['result'] = False
ret['comment'] = 'Event module not available. Beacon add failed.'
return ret
if beacons:
if return_yaml:
tmp = {'beacons': beacons}
yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
return yaml_out
else:
return beacons
else:
return {'beacons': {}}
def add(name, beacon_data, **kwargs): def add(name, beacon_data, **kwargs):
''' '''
Add a beacon on the minion Add a beacon on the minion
@ -91,6 +145,10 @@ def add(name, beacon_data, **kwargs):
ret['comment'] = 'Beacon {0} is already configured.'.format(name) ret['comment'] = 'Beacon {0} is already configured.'.format(name)
return ret return ret
if name not in list_available(return_yaml=False):
ret['comment'] = 'Beacon "{0}" is not available.'.format(name)
return ret
if 'test' in kwargs and kwargs['test']: if 'test' in kwargs and kwargs['test']:
ret['result'] = True ret['result'] = True
ret['comment'] = 'Beacon: {0} would be added.'.format(name) ret['comment'] = 'Beacon: {0} would be added.'.format(name)
@ -130,7 +188,10 @@ def add(name, beacon_data, **kwargs):
if name in beacons and beacons[name] == beacon_data: if name in beacons and beacons[name] == beacon_data:
ret['result'] = True ret['result'] = True
ret['comment'] = 'Added beacon: {0}.'.format(name) ret['comment'] = 'Added beacon: {0}.'.format(name)
return ret else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.' ret['comment'] = 'Event module not available. Beacon add failed.'
@ -215,7 +276,10 @@ def modify(name, beacon_data, **kwargs):
if name in beacons and beacons[name] == beacon_data: if name in beacons and beacons[name] == beacon_data:
ret['result'] = True ret['result'] = True
ret['comment'] = 'Modified beacon: {0}.'.format(name) ret['comment'] = 'Modified beacon: {0}.'.format(name)
return ret else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.' ret['comment'] = 'Event module not available. Beacon add failed.'
@ -257,6 +321,9 @@ def delete(name, **kwargs):
ret['result'] = True ret['result'] = True
ret['comment'] = 'Deleted beacon: {0}.'.format(name) ret['comment'] = 'Deleted beacon: {0}.'.format(name)
return ret return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.' ret['comment'] = 'Event module not available. Beacon add failed.'
@ -279,7 +346,7 @@ def save():
ret = {'comment': [], ret = {'comment': [],
'result': True} 'result': True}
beacons = list_(return_yaml=False) beacons = list_(return_yaml=False, include_pillar=False)
# move this file into an configurable opt # move this file into an configurable opt
sfn = '{0}/{1}/beacons.conf'.format(__opts__['config_dir'], sfn = '{0}/{1}/beacons.conf'.format(__opts__['config_dir'],
@ -332,7 +399,7 @@ def enable(**kwargs):
else: else:
ret['result'] = False ret['result'] = False
ret['comment'] = 'Failed to enable beacons on minion.' ret['comment'] = 'Failed to enable beacons on minion.'
return ret return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacons enable job failed.' ret['comment'] = 'Event module not available. Beacons enable job failed.'
@ -372,7 +439,7 @@ def disable(**kwargs):
else: else:
ret['result'] = False ret['result'] = False
ret['comment'] = 'Failed to disable beacons on minion.' ret['comment'] = 'Failed to disable beacons on minion.'
return ret return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacons enable job failed.' ret['comment'] = 'Event module not available. Beacons enable job failed.'
@ -435,7 +502,10 @@ def enable_beacon(name, **kwargs):
else: else:
ret['result'] = False ret['result'] = False
ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name) ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name)
return ret else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon enable job failed.' ret['comment'] = 'Event module not available. Beacon enable job failed.'
@ -488,7 +558,10 @@ def disable_beacon(name, **kwargs):
else: else:
ret['result'] = False ret['result'] = False
ret['comment'] = 'Failed to disable beacon on minion.' ret['comment'] = 'Failed to disable beacon on minion.'
return ret else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError: except KeyError:
# Effectively a no-op, since we can't really return without an event system # Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon disable job failed.' ret['comment'] = 'Event module not available. Beacon disable job failed.'

View File

@ -51,6 +51,7 @@ import datetime
import logging import logging
import json import json
import sys import sys
import time
import email.mime.multipart import email.mime.multipart
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -675,11 +676,23 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
''' '''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
-    policies = conn.get_all_policies(as_group=as_group)
-    for policy in policies:
-        if policy.name == scaling_policy_name:
-            return policy.policy_arn
-    log.error('Could not convert: {0}'.format(as_group))
+    retries = 30
+    while retries > 0:
+        retries -= 1
+        try:
+            policies = conn.get_all_policies(as_group=as_group)
+            for policy in policies:
+                if policy.name == scaling_policy_name:
+                    return policy.policy_arn
+            log.error('Could not convert: {0}'.format(as_group))
+            return None
+        except boto.exception.BotoServerError as e:
+            if e.error_code != 'Throttling':
+                raise
+            log.debug('Throttled by API, will retry in 5 seconds')
+            time.sleep(5)
+    log.error('Maximum number of retries exceeded')
return None return None
@ -761,11 +774,18 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
# get full instance info, so that we can return the attribute # get full instance info, so that we can return the attribute
instances = ec2_conn.get_only_instances(instance_ids=instance_ids) instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
     if attributes:
-        return [[getattr(instance, attr).encode("ascii") for attr in attributes] for instance in instances]
+        return [[_convert_attribute(instance, attr) for attr in attributes] for instance in instances]
     else:
         # properly handle case when not all instances have the requested attribute
-        return [getattr(instance, attribute).encode("ascii") for instance in instances if getattr(instance, attribute)]
-    return [getattr(instance, attribute).encode("ascii") for instance in instances]
+        return [_convert_attribute(instance, attribute) for instance in instances if getattr(instance, attribute)]
def _convert_attribute(instance, attribute):
if attribute == "tags":
tags = dict(getattr(instance, attribute))
return {key.encode("utf-8"): value.encode("utf-8") for key, value in six.iteritems(tags)}
return getattr(instance, attribute).encode("ascii")
def enter_standby(name, instance_ids, should_decrement_desired_capacity=False, def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,
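
A self-contained sketch of what _convert_attribute() does: the dict-valued tags attribute is encoded key by key, while every other attribute is encoded as a single value. The namedtuple below is only a stand-in for a boto EC2 instance object:

from collections import namedtuple

def convert_attribute(instance, attribute):
    value = getattr(instance, attribute)
    if attribute == 'tags':
        # Tags are a dict; encode keys and values separately.
        return {k.encode('utf-8'): v.encode('utf-8') for k, v in dict(value).items()}
    return value.encode('ascii')

Instance = namedtuple('Instance', ['id', 'tags'])
inst = Instance(id=u'i-0abc123', tags={u'Name': u'web01'})
assert convert_attribute(inst, 'id') == b'i-0abc123'
assert convert_attribute(inst, 'tags') == {b'Name': b'web01'}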

View File

@ -493,10 +493,17 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot",
param_list = [] param_list = []
for key, value in six.iteritems(parameters): for key, value in six.iteritems(parameters):
-        item = (key, value, apply_method)
+        item = odict.OrderedDict()
+        item.update({'ParameterName': key})
+        item.update({'ApplyMethod': apply_method})
+        if type(value) is bool:
+            item.update({'ParameterValue': 'on' if value else 'off'})
+        else:
+            item.update({'ParameterValue': str(value)})
         param_list.append(item)
-    if not len(param_list):
-        return {'results': False}
+
+    if not len(param_list):
+        return {'results': False}
try: try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@ -779,6 +786,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
'message': 'Could not establish a connection to RDS'} 'message': 'Could not establish a connection to RDS'}
kwargs = {} kwargs = {}
kwargs.update({'DBParameterGroupName': name})
for key in ('Marker', 'Source'): for key in ('Marker', 'Source'):
if locals()[key] is not None: if locals()[key] is not None:
kwargs[key] = str(locals()[key]) kwargs[key] = str(locals()[key])
@ -786,26 +794,23 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
if locals()['MaxRecords'] is not None: if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords']) kwargs['MaxRecords'] = int(locals()['MaxRecords'])
-    r = conn.describe_db_parameters(DBParameterGroupName=name, **kwargs)
-    if not r:
-        return {'result': False,
-                'message': 'Failed to get RDS parameters for group {0}.'
-                .format(name)}
-    results = r['Parameters']
+    pag = conn.get_paginator('describe_db_parameters')
+    pit = pag.paginate(**kwargs)
     keys = ['ParameterName', 'ParameterValue', 'Description',
             'Source', 'ApplyType', 'DataType', 'AllowedValues',
             'IsModifieable', 'MinimumEngineVersion', 'ApplyMethod']
     parameters = odict.OrderedDict()
     ret = {'result': True}
-    for result in results:
-        data = odict.OrderedDict()
-        for k in keys:
-            data[k] = result.get(k)
-        parameters[result.get('ParameterName')] = data
+    for p in pit:
+        for result in p['Parameters']:
+            data = odict.OrderedDict()
+            for k in keys:
+                data[k] = result.get(k)
+            parameters[result.get('ParameterName')] = data
ret['parameters'] = parameters ret['parameters'] = parameters
return ret return ret
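
The parameter entries built by update_parameter_group() above follow the ParameterName/ParameterValue/ApplyMethod shape expected by the boto3 RDS API, with booleans rendered as 'on'/'off'. A minimal sketch of that conversion (to_parameter_entry is a made-up name):

def to_parameter_entry(name, value, apply_method='pending-reboot'):
    entry = {'ParameterName': name, 'ApplyMethod': apply_method}
    if isinstance(value, bool):
        entry['ParameterValue'] = 'on' if value else 'off'   # booleans become on/off strings
    else:
        entry['ParameterValue'] = str(value)                 # everything else is str()-cast
    return entry

assert to_parameter_entry('autocommit', True)['ParameterValue'] == 'on'
assert to_parameter_entry('max_connections', 150)['ParameterValue'] == '150'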

View File

@ -3127,6 +3127,12 @@ def run_bg(cmd,
Note that ``env`` represents the environment variables for the command, and Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict. should be formatted as a dict, or a YAML string which resolves to a dict.
.. note::
If the init system is systemd and the backgrounded task should run even if the salt-minion process
is restarted, prepend ``systemd-run --scope`` to the command. This will reparent the process in its
own scope separate from salt-minion, and will not be affected by restarting the minion service.
:param str cmd: The command to run. ex: 'ls -lart /home' :param str cmd: The command to run. ex: 'ls -lart /home'
:param str cwd: The current working directory to execute the command in. :param str cwd: The current working directory to execute the command in.

View File

@ -147,8 +147,24 @@ def _render_tab(lst):
cron['cmd'] cron['cmd']
) )
) )
for spec in lst['special']: for cron in lst['special']:
ret.append('{0} {1}\n'.format(spec['spec'], spec['cmd'])) if cron['comment'] is not None or cron['identifier'] is not None:
comment = '#'
if cron['comment']:
comment += ' {0}'.format(
cron['comment'].rstrip().replace('\n', '\n# '))
if cron['identifier']:
comment += ' {0}:{1}'.format(SALT_CRON_IDENTIFIER,
cron['identifier'])
comment += '\n'
ret.append(comment)
ret.append('{0}{1} {2}\n'.format(
cron['commented'] is True and '#DISABLED#' or '',
cron['spec'],
cron['cmd']
)
)
return ret return ret
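A toy rendering of a special entry that now carries a comment and an identifier, mirroring the branch added above (all values are illustrative):

```python
entry = {'spec': '@hourly', 'cmd': '/usr/bin/foo',
         'comment': 'rotate logs', 'identifier': 'foo-job',
         'commented': False}
lines = []
if entry['comment'] is not None or entry['identifier'] is not None:
    tag = '#'
    if entry['comment']:
        tag += ' {0}'.format(entry['comment'].rstrip().replace('\n', '\n# '))
    if entry['identifier']:
        tag += ' SALT_CRON_IDENTIFIER:{0}'.format(entry['identifier'])
    lines.append(tag + '\n')
lines.append('{0}{1} {2}\n'.format(
    '#DISABLED#' if entry['commented'] else '', entry['spec'], entry['cmd']))
# lines == ['# rotate logs SALT_CRON_IDENTIFIER:foo-job\n',
#           '@hourly /usr/bin/foo\n']
```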
@ -317,7 +333,15 @@ def list_tab(user):
continue continue
dat['spec'] = comps[0] dat['spec'] = comps[0]
dat['cmd'] = ' '.join(comps[1:]) dat['cmd'] = ' '.join(comps[1:])
dat['identifier'] = identifier
dat['comment'] = comment
dat['commented'] = False
if commented_cron_job:
dat['commented'] = True
ret['special'].append(dat) ret['special'].append(dat)
identifier = None
comment = None
commented_cron_job = False
elif line.startswith('#'): elif line.startswith('#'):
# It's a comment! Catch it! # It's a comment! Catch it!
comment_line = line.lstrip('# ') comment_line = line.lstrip('# ')
@ -363,11 +387,17 @@ def list_tab(user):
ret['pre'].append(line) ret['pre'].append(line)
return ret return ret
# For consistency's sake # For consistency's sake
ls = salt.utils.alias_function(list_tab, 'ls') ls = salt.utils.alias_function(list_tab, 'ls')
def set_special(user, special, cmd): def set_special(user,
special,
cmd,
commented=False,
comment=None,
identifier=None):
''' '''
Set up a special command in the crontab. Set up a special command in the crontab.
@ -379,11 +409,60 @@ def set_special(user, special, cmd):
''' '''
lst = list_tab(user) lst = list_tab(user)
for cron in lst['special']: for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']: cid = _cron_id(cron)
if _cron_matched(cron, cmd, identifier):
test_setted_id = (
cron['identifier'] is None
and SALT_CRON_NO_IDENTIFIER
or cron['identifier'])
tests = [(cron['comment'], comment),
(cron['commented'], commented),
(identifier, test_setted_id),
(cron['spec'], special)]
if cid or identifier:
tests.append((cron['cmd'], cmd))
if any([_needs_change(x, y) for x, y in tests]):
rm_special(user, cmd, identifier=cid)
# Use old values when setting the new job if there was no
# change needed for a given parameter
if not _needs_change(cron['spec'], special):
special = cron['spec']
if not _needs_change(cron['commented'], commented):
commented = cron['commented']
if not _needs_change(cron['comment'], comment):
comment = cron['comment']
if not _needs_change(cron['cmd'], cmd):
cmd = cron['cmd']
if (
cid == SALT_CRON_NO_IDENTIFIER
):
if identifier:
cid = identifier
if (
cid == SALT_CRON_NO_IDENTIFIER
and cron['identifier'] is None
):
cid = None
cron['identifier'] = cid
if not cid or (
cid and not _needs_change(cid, identifier)
):
identifier = cid
jret = set_special(user, special, cmd, commented=commented,
comment=comment, identifier=identifier)
if jret == 'new':
return 'updated'
else:
return jret
return 'present' return 'present'
spec = {'spec': special, cron = {'spec': special,
'cmd': cmd} 'cmd': cmd,
lst['special'].append(spec) 'identifier': identifier,
'comment': comment,
'commented': commented}
lst['special'].append(cron)
comdat = _write_cron_lines(user, _render_tab(lst)) comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']: if comdat['retcode']:
# Failed to commit, return the error # Failed to commit, return the error
@ -536,7 +615,7 @@ def set_job(user,
return 'new' return 'new'
def rm_special(user, special, cmd): def rm_special(user, cmd, special=None, identifier=None):
''' '''
Remove a special cron job for a specified user. Remove a special cron job for a specified user.
@ -544,22 +623,28 @@ def rm_special(user, special, cmd):
.. code-block:: bash .. code-block:: bash
salt '*' cron.rm_job root @hourly /usr/bin/foo salt '*' cron.rm_special root /usr/bin/foo
''' '''
lst = list_tab(user) lst = list_tab(user)
ret = 'absent' ret = 'absent'
rm_ = None rm_ = None
for ind in range(len(lst['special'])): for ind in range(len(lst['special'])):
if lst['special'][ind]['cmd'] == cmd and \ if rm_ is not None:
lst['special'][ind]['spec'] == special: break
lst['special'].pop(ind) if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
rm_ = ind if special is None:
# No special param was specified
rm_ = ind
else:
if lst['special'][ind]['spec'] == special:
rm_ = ind
if rm_ is not None: if rm_ is not None:
lst['special'].pop(rm_)
ret = 'removed' ret = 'removed'
comdat = _write_cron_lines(user, _render_tab(lst)) comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']: if comdat['retcode']:
# Failed to commit # Failed to commit, return the error
return comdat['stderr'] return comdat['stderr']
return ret return ret
@ -610,6 +695,7 @@ def rm_job(user,
return comdat['stderr'] return comdat['stderr']
return ret return ret
rm = salt.utils.alias_function(rm_job, 'rm') rm = salt.utils.alias_function(rm_job, 'rm')
View File
@ -1388,7 +1388,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds', for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds',
'down_cmds', 'pre_down_cmds', 'post_down_cmds']: 'down_cmds', 'pre_down_cmds', 'post_down_cmds']:
if opt in opts: if opt in opts:
iface_data['inet'][opt] = opts[opt] iface_data[def_addrfam][opt] = opts[opt]
for addrfam in ['inet', 'inet6']: for addrfam in ['inet', 'inet6']:
if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam: if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam:
View File
@ -910,8 +910,8 @@ def compare_container(first, second, ignore=None):
ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else: else:
if item == 'Links': if item == 'Links':
val1 = _scrub_links(val1, first) val1 = sorted(_scrub_links(val1, first))
val2 = _scrub_links(val2, second) val2 = sorted(_scrub_links(val2, second))
if val1 != val2: if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container # Check for optionally-present items that were in the second container
@ -933,8 +933,8 @@ def compare_container(first, second, ignore=None):
ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else: else:
if item == 'Links': if item == 'Links':
val1 = _scrub_links(val1, first) val1 = sorted(_scrub_links(val1, first))
val2 = _scrub_links(val2, second) val2 = sorted(_scrub_links(val2, second))
if val1 != val2: if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret return ret
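Why the sort matters, with toy data: the same set of links in a different order should not be reported as a change.

```python
val1 = ['/other:/web/db', '/cache:/web/cache']
val2 = ['/cache:/web/cache', '/other:/web/db']
assert val1 != val2                   # order-sensitive compare -> false diff
assert sorted(val1) == sorted(val2)   # what compare_container now does
```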
View File
@ -1861,14 +1861,14 @@ def line(path, content=None, match=None, mode=None, location=None,
if changed: if changed:
if show_changes: if show_changes:
with salt.utils.fopen(path, 'r') as fp_: with salt.utils.fopen(path, 'r') as fp_:
path_content = _splitlines_preserving_trailing_newline( path_content = fp_.read().splitlines(True)
fp_.read()) changes_diff = ''.join(difflib.unified_diff(path_content, body.splitlines(True)))
changes_diff = ''.join(difflib.unified_diff(
path_content, _splitlines_preserving_trailing_newline(body)))
if __opts__['test'] is False: if __opts__['test'] is False:
fh_ = None fh_ = None
try: try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w') # Make sure we match the file mode from salt.utils.fopen
mode = 'wb' if six.PY2 and salt.utils.is_windows() else 'w'
fh_ = salt.utils.atomicfile.atomic_open(path, mode)
fh_.write(body) fh_.write(body)
finally: finally:
if fh_: if fh_:
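The diff construction above in isolation: splitlines(True) keeps the trailing newlines, so difflib does not flag a spurious end-of-file change (the sample bodies are made up):

```python
import difflib

old_body = 'alpha\nbeta\n'
new_body = 'alpha\nbeta\ngamma\n'
changes_diff = ''.join(difflib.unified_diff(
    old_body.splitlines(True), new_body.splitlines(True)))
print(changes_diff)
```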
@ -3368,7 +3368,11 @@ def stats(path, hash_type=None, follow_symlinks=True):
pstat = os.lstat(path) pstat = os.lstat(path)
except OSError: except OSError:
# Not a broken symlink, just a nonexistent path # Not a broken symlink, just a nonexistent path
return ret # NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('Path not found: {0}'.format(path))
else: else:
if follow_symlinks: if follow_symlinks:
pstat = os.stat(path) pstat = os.stat(path)
@ -3832,8 +3836,15 @@ def get_managed(
parsed_scheme = urlparsed_source.scheme parsed_scheme = urlparsed_source.scheme
parsed_path = os.path.join( parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep) urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')
if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz': if unix_local_source:
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path]) parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file' parsed_scheme = 'file'
@ -3841,9 +3852,10 @@ def get_managed(
source_sum = __salt__['cp.hash_file'](source, saltenv) source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum: if not source_sum:
return '', {}, 'Source file {0} not found'.format(source) return '', {}, 'Source file {0} not found'.format(source)
elif not source_hash and parsed_scheme == 'file': elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path) source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep): elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source) source_sum = _get_local_file_source_sum(source)
else: else:
if not skip_verify: if not skip_verify:
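How the unix_local_source detection behaves for a few illustrative sources (stdlib urlparse shown; Salt uses its vendored equivalent):

```python
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

for source in ('salt://files/app.conf', '/srv/files/app.conf',
               'file:///srv/files/app.conf'):
    scheme = urlparse(source).scheme
    print(source, 'unix_local_source =', scheme in ('file', ''))
```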
@ -4193,12 +4205,6 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
# Check permissions # Check permissions
perms = {} perms = {}
cur = stats(name, follow_symlinks=follow_symlinks) cur = stats(name, follow_symlinks=follow_symlinks)
if not cur:
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('{0} does not exist'.format(name))
perms['luser'] = cur['user'] perms['luser'] = cur['user']
perms['lgroup'] = cur['group'] perms['lgroup'] = cur['group']
perms['lmode'] = salt.utils.normalize_mode(cur['mode']) perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
@ -4498,11 +4504,18 @@ def check_file_meta(
''' '''
changes = {} changes = {}
if not source_sum: if not source_sum:
source_sum = {} source_sum = dict()
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
try:
lstats = stats(name, hash_type=source_sum.get('hash_type', None),
follow_symlinks=False)
except CommandExecutionError:
lstats = {}
if not lstats: if not lstats:
changes['newfile'] = name changes['newfile'] = name
return changes return changes
if 'hsum' in source_sum: if 'hsum' in source_sum:
if source_sum['hsum'] != lstats['sum']: if source_sum['hsum'] != lstats['sum']:
if not sfn and source: if not sfn and source:
@ -4741,21 +4754,22 @@ def manage_file(name,
if source_sum and ('hsum' in source_sum): if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower() source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn: if source:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn: if not sfn:
return _error( # File is not present, cache it
ret, 'Source file \'{0}\' not found'.format(source)) sfn = __salt__['cp.cache_file'](source, saltenv)
htype = source_sum.get('hash_type', __opts__['hash_type']) if not sfn:
# Recalculate source sum now that file has been cached return _error(
source_sum = { ret, 'Source file \'{0}\' not found'.format(source))
'hash_type': htype, htype = source_sum.get('hash_type', __opts__['hash_type'])
'hsum': get_hash(sfn, form=htype) # Recalculate source sum now that file has been cached
} source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode: if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \ if _urlparse(source).scheme in ('salt', 'file', ''):
or source.startswith('/'):
try: try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True) mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc: except Exception as exc:
@ -4785,7 +4799,7 @@ def manage_file(name,
# source, and we are not skipping checksum verification, then # source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum. # verify that it matches the specified checksum.
if not skip_verify \ if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''): and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type']) dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']: if dl_sum != source_sum['hsum']:
ret['comment'] = ( ret['comment'] = (
@ -4973,8 +4987,6 @@ def manage_file(name,
makedirs_(name, user=user, group=group, mode=dir_mode) makedirs_(name, user=user, group=group, mode=dir_mode)
if source: if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file # Apply the new file
if not sfn: if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv) sfn = __salt__['cp.cache_file'](source, saltenv)
@ -4998,6 +5010,8 @@ def manage_file(name,
) )
ret['result'] = False ret['result'] = False
return ret return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir): if not os.path.isdir(contain_dir):
if makedirs: if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group) _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
View File
@ -9,6 +9,7 @@ import copy
import logging import logging
import os import os
import re import re
import stat
# Import salt libs # Import salt libs
import salt.utils import salt.utils
@ -115,6 +116,22 @@ def _expand_path(cwd, user):
return os.path.join(os.path.expanduser(to_expand), str(cwd)) return os.path.join(os.path.expanduser(to_expand), str(cwd))
def _path_is_executable_others(path):
'''
Check every part of path for executable permission
'''
prevpath = None
while path and path != prevpath:
try:
if not os.stat(path).st_mode & stat.S_IXOTH:
return False
except OSError:
return False
prevpath = path
path, _ = os.path.split(path)
return True
def _format_opts(opts): def _format_opts(opts):
''' '''
Common code to inspect opts and split them if necessary Common code to inspect opts and split them if necessary
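The per-component test the new helper applies while walking up the path: every directory (and the wrapper itself) must carry the o+x bit for a runas user to reach it. A quick standalone check:

```python
import os
import stat

for path in ('/', '/usr', '/usr/share'):
    mode = os.stat(path).st_mode
    print(path, bool(mode & stat.S_IXOTH))  # all must be True for reachability
```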
@ -214,11 +231,12 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
} }
# copy wrapper to area accessible by ``runas`` user # copy wrapper to area accessible by ``runas`` user
# currently no suppport in windows for wrapping git ssh # currently no support in windows for wrapping git ssh
ssh_id_wrapper = os.path.join( ssh_id_wrapper = os.path.join(
salt.utils.templates.TEMPLATE_DIRNAME, salt.utils.templates.TEMPLATE_DIRNAME,
'git/ssh-id-wrapper' 'git/ssh-id-wrapper'
) )
tmp_ssh_wrapper = None
if salt.utils.is_windows(): if salt.utils.is_windows():
for suffix in ('', ' (x86)'): for suffix in ('', ' (x86)'):
ssh_exe = ( ssh_exe = (
@ -235,12 +253,14 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
# Use the windows batch file instead of the bourne shell script # Use the windows batch file instead of the bourne shell script
ssh_id_wrapper += '.bat' ssh_id_wrapper += '.bat'
env['GIT_SSH'] = ssh_id_wrapper env['GIT_SSH'] = ssh_id_wrapper
elif not user or _path_is_executable_others(ssh_id_wrapper):
env['GIT_SSH'] = ssh_id_wrapper
else: else:
tmp_file = salt.utils.files.mkstemp() tmp_ssh_wrapper = salt.utils.files.mkstemp()
salt.utils.files.copyfile(ssh_id_wrapper, tmp_file) salt.utils.files.copyfile(ssh_id_wrapper, tmp_ssh_wrapper)
os.chmod(tmp_file, 0o500) os.chmod(tmp_ssh_wrapper, 0o500)
os.chown(tmp_file, __salt__['file.user_to_uid'](user), -1) os.chown(tmp_ssh_wrapper, __salt__['file.user_to_uid'](user), -1)
env['GIT_SSH'] = tmp_file env['GIT_SSH'] = tmp_ssh_wrapper
if 'salt-call' not in _salt_cli \ if 'salt-call' not in _salt_cli \
and __salt__['ssh.key_is_encrypted'](id_file): and __salt__['ssh.key_is_encrypted'](id_file):
@ -270,13 +290,25 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
redirect_stderr=redirect_stderr, redirect_stderr=redirect_stderr,
**kwargs) **kwargs)
finally: finally:
if not salt.utils.is_windows() and 'GIT_SSH' in env: # Cleanup the temporary ssh wrapper file
os.remove(env['GIT_SSH']) try:
__salt__['file.remove'](tmp_ssh_wrapper)
log.debug('Removed ssh wrapper file %s', tmp_ssh_wrapper)
except AttributeError:
# No wrapper was used
pass
except (SaltInvocationError, CommandExecutionError) as exc:
log.warning('Failed to remove ssh wrapper file %s: %s', tmp_ssh_wrapper, exc)
# Cleanup the temporary identity file # Cleanup the temporary identity file
if tmp_identity_file and os.path.exists(tmp_identity_file): try:
log.debug('Removing identity file {0}'.format(tmp_identity_file))
__salt__['file.remove'](tmp_identity_file) __salt__['file.remove'](tmp_identity_file)
log.debug('Removed identity file %s', tmp_identity_file)
except AttributeError:
# No identity file was used
pass
except (SaltInvocationError, CommandExecutionError) as exc:
log.warning('Failed to remove identity file %s: %s', tmp_identity_file, exc)
# If the command was successful, no need to try additional IDs # If the command was successful, no need to try additional IDs
if result['retcode'] == 0: if result['retcode'] == 0:
View File
@ -1,6 +1,13 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
''' '''
Return/control aspects of the grains data Return/control aspects of the grains data
Grains set or altered with this module are stored in the 'grains'
file on the minions. By default, this file is located at: ``/etc/salt/grains``
.. Note::
This does **NOT** override any grains set in the minion config file.
''' '''
# Import python libs # Import python libs
@ -118,7 +125,7 @@ def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
def has_value(key): def has_value(key):
''' '''
Determine whether a named value exists in the grains dictionary. Determine whether a key exists in the grains dictionary.
Given a grains dictionary that contains the following structure:: Given a grains dictionary that contains the following structure::
@ -134,7 +141,10 @@ def has_value(key):
salt '*' grains.has_value pkg:apache salt '*' grains.has_value pkg:apache
''' '''
return True if salt.utils.traverse_dict_and_list(__grains__, key, False) else False return salt.utils.traverse_dict_and_list(
__grains__,
key,
KeyError) is not KeyError
def items(sanitize=False): def items(sanitize=False):
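Why the KeyError sentinel matters, with toy grains: a grain whose value is False (or 0, or an empty string) still exists and should make has_value return True; the old truthiness test got this wrong. A sketch assuming a Salt install on the path:

```python
import salt.utils

grains = {'virtual': False, 'roles': ['db']}

def has_value(key):
    return salt.utils.traverse_dict_and_list(
        grains, key, KeyError) is not KeyError

print(has_value('virtual'))   # True with the sentinel; the old check said False
print(has_value('missing'))   # False either way
```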
@ -219,20 +229,44 @@ def setvals(grains, destructive=False):
raise SaltException('setvals grains must be a dictionary.') raise SaltException('setvals grains must be a dictionary.')
grains = {} grains = {}
if os.path.isfile(__opts__['conf_file']): if os.path.isfile(__opts__['conf_file']):
gfn = os.path.join( if salt.utils.is_proxy():
os.path.dirname(__opts__['conf_file']), gfn = os.path.join(
'grains' os.path.dirname(__opts__['conf_file']),
) 'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
elif os.path.isdir(__opts__['conf_file']): elif os.path.isdir(__opts__['conf_file']):
gfn = os.path.join( if salt.utils.is_proxy():
__opts__['conf_file'], gfn = os.path.join(
'grains' __opts__['conf_file'],
) 'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
__opts__['conf_file'],
'grains'
)
else: else:
gfn = os.path.join( if salt.utils.is_proxy():
os.path.dirname(__opts__['conf_file']), gfn = os.path.join(
'grains' os.path.dirname(__opts__['conf_file']),
) 'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
if os.path.isfile(gfn): if os.path.isfile(gfn):
with salt.utils.fopen(gfn, 'rb') as fp_: with salt.utils.fopen(gfn, 'rb') as fp_:
View File
@ -585,7 +585,8 @@ def _parse_members(settype, members):
def _parse_member(settype, member, strict=False): def _parse_member(settype, member, strict=False):
subtypes = settype.split(':')[1].split(',') subtypes = settype.split(':')[1].split(',')
parts = member.split(' ') all_parts = member.split(' ', 1)
parts = all_parts[0].split(',')
parsed_member = [] parsed_member = []
for i in range(len(subtypes)): for i in range(len(subtypes)):
@ -610,8 +611,8 @@ def _parse_member(settype, member, strict=False):
parsed_member.append(part) parsed_member.append(part)
if len(parts) > len(subtypes): if len(all_parts) > 1:
parsed_member.append(' '.join(parts[len(subtypes):])) parsed_member.append(all_parts[1])
return parsed_member return parsed_member
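A toy walk-through of the new parsing: only the first space separates the member from its trailing options, and the member itself is comma-delimited per the set type's subtypes (the sample member is invented):

```python
settype = 'hash:ip,port'
member = '192.168.0.4,tcp:80 comment "web frontend"'
all_parts = member.split(' ', 1)
parts = all_parts[0].split(',')
print(parts)         # ['192.168.0.4', 'tcp:80']
print(all_parts[1])  # 'comment "web frontend"'
```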
View File
@ -123,7 +123,16 @@ def available():
salt '*' kmod.available salt '*' kmod.available
''' '''
ret = [] ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2]) mod_dir = os.path.join('/lib/modules/', os.uname()[2])
built_in_file = os.path.join(mod_dir, 'modules.builtin')
if os.path.exists(built_in_file):
with salt.utils.fopen(built_in_file, 'r') as f:
for line in f:
# Strip .ko from the basename
ret.append(os.path.basename(line)[:-4])
for root, dirs, files in os.walk(mod_dir): for root, dirs, files in os.walk(mod_dir):
for fn_ in files: for fn_ in files:
if '.ko' in fn_: if '.ko' in fn_:
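The modules.builtin handling in isolation: each line is a path ending in '.ko', so dropping the last four characters of the basename (the '.ko' plus the newline) leaves the module name:

```python
import os

line = 'kernel/fs/ext4/ext4.ko\n'   # sample line from modules.builtin
print(os.path.basename(line)[:-4])  # 'ext4'
```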
View File
@ -164,7 +164,7 @@ def _setup_conn(**kwargs):
if client_key_file: if client_key_file:
kubernetes.client.configuration.key_file = client_key_file kubernetes.client.configuration.key_file = client_key_file
if client_key: elif client_key:
with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k:
k.write(base64.b64decode(client_key)) k.write(base64.b64decode(client_key))
kubernetes.client.configuration.key_file = k.name kubernetes.client.configuration.key_file = k.name
View File
@ -70,7 +70,7 @@ def __init__(opts):
def _get_driver(profile): def _get_driver(profile):
config = __salt__['config.option']('libcloud_dns')[profile] config = __salt__['config.option']('libcloud_dns')[profile]
cls = get_driver(config['driver']) cls = get_driver(config['driver'])
args = config args = config.copy()
del args['driver'] del args['driver']
args['key'] = config.get('key') args['key'] = config.get('key')
args['secret'] = config.get('secret', None) args['secret'] = config.get('secret', None)
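Why the copy matters, with a toy profile dict: deleting keys from the original would mutate the loaded configuration for every later call.

```python
config = {'driver': 'godaddy', 'key': 'abc', 'secret': 'xyz'}
args = config.copy()
del args['driver']
assert 'driver' in config   # the original profile is untouched
```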
View File
@ -144,17 +144,17 @@ def _parse_acl(acl, user, group):
# Set the permissions fields # Set the permissions fields
octal = 0 octal = 0
vals['permissions'] = {} vals['permissions'] = {}
if 'r' in comps[2]: if 'r' in comps[-1]:
octal += 4 octal += 4
vals['permissions']['read'] = True vals['permissions']['read'] = True
else: else:
vals['permissions']['read'] = False vals['permissions']['read'] = False
if 'w' in comps[2]: if 'w' in comps[-1]:
octal += 2 octal += 2
vals['permissions']['write'] = True vals['permissions']['write'] = True
else: else:
vals['permissions']['write'] = False vals['permissions']['write'] = False
if 'x' in comps[2]: if 'x' in comps[-1]:
octal += 1 octal += 1
vals['permissions']['execute'] = True vals['permissions']['execute'] = True
else: else:
View File
@ -19,11 +19,12 @@ import logging
import time import time
# Import 3rd-party libs # Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import range, map # pylint: disable=import-error,redefined-builtin
from salt.ext.six import string_types from salt.ext.six import string_types
# Import salt libs # Import salt libs
import salt.utils import salt.utils
import salt.utils.files
import salt.utils.decorators as decorators import salt.utils.decorators as decorators
from salt.utils.locales import sdecode as _sdecode from salt.utils.locales import sdecode as _sdecode
from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.exceptions import CommandExecutionError, SaltInvocationError
@ -520,16 +521,72 @@ def get_auto_login():
return False if ret['retcode'] else ret['stdout'] return False if ret['retcode'] else ret['stdout']
def enable_auto_login(name): def _kcpassword(password):
'''
Internal function for obfuscating the password used for AutoLogin
This is later written as the contents of the ``/etc/kcpassword`` file
.. versionadded:: 2017.7.3
Adapted from:
https://github.com/timsutton/osx-vm-templates/blob/master/scripts/support/set_kcpassword.py
Args:
password(str):
The password to obfuscate
Returns:
str: The obfuscated password
'''
# The magic 11 bytes - these are just repeated
# 0x7D 0x89 0x52 0x23 0xD2 0xBC 0xDD 0xEA 0xA3 0xB9 0x1F
key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]
key_len = len(key)
# Convert each character to a byte
password = list(map(ord, password))
# pad password length out to an even multiple of key length
remainder = len(password) % key_len
if remainder > 0:
password = password + [0] * (key_len - remainder)
# Break the password into chunks the size of len(key) (11)
for chunk_index in range(0, len(password), len(key)):
# Reset the key_index to 0 for each iteration
key_index = 0
# Do an XOR on each character of that chunk of the password with the
# corresponding item in the key
# The length of the password, or the length of the key, whichever is
# smaller
for password_index in range(chunk_index,
min(chunk_index + len(key), len(password))):
password[password_index] = password[password_index] ^ key[key_index]
key_index += 1
# Convert each byte back to a character
password = list(map(chr, password))
return ''.join(password)
def enable_auto_login(name, password):
''' '''
.. versionadded:: 2016.3.0 .. versionadded:: 2016.3.0
Configures the machine to auto login with the specified user Configures the machine to auto login with the specified user
:param str name: The user account use for auto login Args:
:return: True if successful, False if not name (str): The user account to use for auto login
:rtype: bool
password (str): The password to use for auto login
.. versionadded:: 2017.7.3
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -537,6 +594,7 @@ def enable_auto_login(name):
salt '*' user.enable_auto_login stevej salt '*' user.enable_auto_login stevej
''' '''
# Make the entry into the defaults file
cmd = ['defaults', cmd = ['defaults',
'write', 'write',
'/Library/Preferences/com.apple.loginwindow.plist', '/Library/Preferences/com.apple.loginwindow.plist',
@ -544,6 +602,13 @@ def enable_auto_login(name):
name] name]
__salt__['cmd.run'](cmd) __salt__['cmd.run'](cmd)
current = get_auto_login() current = get_auto_login()
# Create/Update the kcpassword file with an obfuscated password
o_password = _kcpassword(password=password)
with salt.utils.files.set_umask(0o077):
with salt.utils.fopen('/etc/kcpassword', 'w') as fd:
fd.write(o_password)
return current if isinstance(current, bool) else current.lower() == name.lower() return current if isinstance(current, bool) else current.lower() == name.lower()
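A toy demonstration of the XOR scheme above (the padding step is omitted): applying the same key twice recovers the original, which is why kcpassword is an obfuscation rather than encryption.

```python
key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]

def xor_with_key(chars):
    return [c ^ key[i % len(key)] for i, c in enumerate(chars)]

plain = list(map(ord, 'hunter2'))        # example password
obfuscated = xor_with_key(plain)
assert list(map(chr, xor_with_key(obfuscated))) == list('hunter2')
```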
@ -553,8 +618,8 @@ def disable_auto_login():
Disables auto login on the machine Disables auto login on the machine
:return: True if successful, False if not Returns:
:rtype: bool bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -562,6 +627,11 @@ def disable_auto_login():
salt '*' user.disable_auto_login salt '*' user.disable_auto_login
''' '''
# Remove the kcpassword file
cmd = 'rm -f /etc/kcpassword'
__salt__['cmd.run'](cmd)
# Remove the entry from the defaults file
cmd = ['defaults', cmd = ['defaults',
'delete', 'delete',
'/Library/Preferences/com.apple.loginwindow.plist', '/Library/Preferences/com.apple.loginwindow.plist',
View File
@ -6,9 +6,10 @@ Module for sending messages to Mattermost
:configuration: This module can be used by either passing an api_url and hook :configuration: This module can be used by either passing an api_url and hook
directly or by specifying both in a configuration profile in the salt directly or by specifying both in a configuration profile in the salt
master/minion config. master/minion config. For example:
For example:
.. code-block:: yaml .. code-block:: yaml
mattermost: mattermost:
hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15 hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_url: https://example.com api_url: https://example.com
@ -35,6 +36,7 @@ __virtualname__ = 'mattermost'
def __virtual__(): def __virtual__():
''' '''
Return virtual name of the module. Return virtual name of the module.
:return: The virtual name of the module. :return: The virtual name of the module.
''' '''
return __virtualname__ return __virtualname__
@ -43,6 +45,7 @@ def __virtual__():
def _get_hook(): def _get_hook():
''' '''
Retrieves and return the Mattermost's configured hook Retrieves and return the Mattermost's configured hook
:return: String: the hook string :return: String: the hook string
''' '''
hook = __salt__['config.get']('mattermost.hook') or \ hook = __salt__['config.get']('mattermost.hook') or \
@ -56,6 +59,7 @@ def _get_hook():
def _get_api_url(): def _get_api_url():
''' '''
Retrieves and return the Mattermost's configured api url Retrieves and return the Mattermost's configured api url
:return: String: the api url string :return: String: the api url string
''' '''
api_url = __salt__['config.get']('mattermost.api_url') or \ api_url = __salt__['config.get']('mattermost.api_url') or \
@ -69,6 +73,7 @@ def _get_api_url():
def _get_channel(): def _get_channel():
''' '''
Retrieves the Mattermost's configured channel Retrieves the Mattermost's configured channel
:return: String: the channel string :return: String: the channel string
''' '''
channel = __salt__['config.get']('mattermost.channel') or \ channel = __salt__['config.get']('mattermost.channel') or \
@ -80,6 +85,7 @@ def _get_channel():
def _get_username(): def _get_username():
''' '''
Retrieves the Mattermost's configured username Retrieves the Mattermost's configured username
:return: String: the username string :return: String: the username string
''' '''
username = __salt__['config.get']('mattermost.username') or \ username = __salt__['config.get']('mattermost.username') or \
@ -95,14 +101,18 @@ def post_message(message,
hook=None): hook=None):
''' '''
Send a message to a Mattermost channel. Send a message to a Mattermost channel.
:param channel: The channel name, either will work. :param channel: The channel name, either will work.
:param username: The username of the poster. :param username: The username of the poster.
:param message: The message to send to the Mattermost channel. :param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration. :param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration. :param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully. :return: Boolean if message was sent successfully.
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
salt '*' mattermost.post_message message='Build is done' salt '*' mattermost.post_message message='Build is done'
''' '''
if not api_url: if not api_url:
View File
@ -29,6 +29,7 @@ log = logging.getLogger(__name__)
from salt.ext import six from salt.ext import six
import salt.utils.templates import salt.utils.templates
import salt.utils.napalm import salt.utils.napalm
import salt.utils.versions
from salt.utils.napalm import proxy_napalm_wrap from salt.utils.napalm import proxy_napalm_wrap
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
@ -153,6 +154,7 @@ def _config_logic(napalm_device,
loaded_result['diff'] = None loaded_result['diff'] = None
loaded_result['result'] = False loaded_result['result'] = False
loaded_result['comment'] = _compare.get('comment') loaded_result['comment'] = _compare.get('comment')
__context__['retcode'] = 1
return loaded_result return loaded_result
_loaded_res = loaded_result.get('result', False) _loaded_res = loaded_result.get('result', False)
@ -172,12 +174,15 @@ def _config_logic(napalm_device,
# make sure it notifies # make sure it notifies
# that something went wrong # that something went wrong
_explicit_close(napalm_device) _explicit_close(napalm_device)
__context__['retcode'] = 1
return loaded_result return loaded_result
loaded_result['comment'] += 'Configuration discarded.' loaded_result['comment'] += 'Configuration discarded.'
# loaded_result['result'] = False not necessary # loaded_result['result'] = False not necessary
# as the result can be true when test=True # as the result can be true when test=True
_explicit_close(napalm_device) _explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result return loaded_result
if not test and commit_config: if not test and commit_config:
@ -208,10 +213,13 @@ def _config_logic(napalm_device,
loaded_result['result'] = False loaded_result['result'] = False
# notify if anything goes wrong # notify if anything goes wrong
_explicit_close(napalm_device) _explicit_close(napalm_device)
__context__['retcode'] = 1
return loaded_result return loaded_result
loaded_result['already_configured'] = True loaded_result['already_configured'] = True
loaded_result['comment'] = 'Already configured.' loaded_result['comment'] = 'Already configured.'
_explicit_close(napalm_device) _explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result return loaded_result
@ -221,7 +229,7 @@ def _config_logic(napalm_device,
@proxy_napalm_wrap @proxy_napalm_wrap
def connected(**kwarvs): # pylint: disable=unused-argument def connected(**kwargs): # pylint: disable=unused-argument
''' '''
Specifies if the connection to the device succeeded. Specifies if the connection to the device succeeded.
@ -925,6 +933,7 @@ def load_config(filename=None,
debug=False, debug=False,
replace=False, replace=False,
inherit_napalm_device=None, inherit_napalm_device=None,
saltenv='base',
**kwargs): # pylint: disable=unused-argument **kwargs): # pylint: disable=unused-argument
''' '''
Applies configuration changes on the device. It can be loaded from a file or from inline string. Applies configuration changes on the device. It can be loaded from a file or from inline string.
@ -940,10 +949,21 @@ def load_config(filename=None,
To replace the config, set ``replace`` to ``True``. To replace the config, set ``replace`` to ``True``.
filename filename
Path to the file containing the desired configuration. By default is None. Path to the file containing the desired configuration.
This can be specified using the absolute path to the file,
or using one of the following URL schemes:
- ``salt://``, to fetch the template from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
.. versionchanged:: 2017.7.3
text text
String containing the desired configuration. String containing the desired configuration.
This argument is ignored when ``filename`` is specified.
test: False test: False
Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False``
@ -963,6 +983,11 @@ def load_config(filename=None,
.. versionadded:: 2016.11.2 .. versionadded:: 2016.11.2
saltenv: ``base``
Specifies the Salt environment name.
.. versionadded:: 2017.7.3
:return: a dictionary having the following keys: :return: a dictionary having the following keys:
* result (bool): if the config was applied successfully. It is ``False`` only in case of failure. In case \ * result (bool): if the config was applied successfully. It is ``False`` only in case of failure. In case \
@ -992,7 +1017,6 @@ def load_config(filename=None,
'diff': '[edit interfaces xe-0/0/5]+ description "Adding a description";' 'diff': '[edit interfaces xe-0/0/5]+ description "Adding a description";'
} }
''' '''
fun = 'load_merge_candidate' fun = 'load_merge_candidate'
if replace: if replace:
fun = 'load_replace_candidate' fun = 'load_replace_candidate'
@ -1005,21 +1029,28 @@ def load_config(filename=None,
# compare_config, discard / commit # compare_config, discard / commit
# which have to be over the same session # which have to be over the same session
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
if filename:
text = __salt__['cp.get_file_str'](filename, saltenv=saltenv)
if text is False:
# When using salt:// or https://, if the resource is not available,
# it will either raise an exception, or return False.
ret = {
'result': False,
'out': None
}
ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(filename)
log.error(ret['comment'])
return ret
_loaded = salt.utils.napalm.call( _loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable napalm_device, # pylint: disable=undefined-variable
fun, fun,
**{ **{
'filename': filename,
'config': text 'config': text
} }
) )
loaded_config = None loaded_config = None
if debug: if debug:
if filename: loaded_config = text
with salt.utils.fopen(filename) as rfh:
loaded_config = rfh.read()
else:
loaded_config = text
return _config_logic(napalm_device, # pylint: disable=undefined-variable return _config_logic(napalm_device, # pylint: disable=undefined-variable
_loaded, _loaded,
test=test, test=test,
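A hedged usage sketch of the new filename handling (assumes a NAPALM proxy minion and that the file exists on the Salt fileserver; the minion id and path are made up):

```python
# From the CLI:
#   salt 'edge01' net.load_config filename=salt://configs/ntp_servers.cfg test=True
# Or from another execution module running on the proxy minion:
changes = __salt__['net.load_config'](
    filename='salt://configs/ntp_servers.cfg', test=True)
```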
@ -1065,6 +1096,10 @@ def load_template(template_name,
To replace the config, set ``replace`` to ``True``. To replace the config, set ``replace`` to ``True``.
.. warning::
The support for native NAPALM templates will be dropped in Salt Fluorine.
Implicitly, the ``template_path`` argument will be removed.
template_name template_name
Identifies path to the template source. Identifies path to the template source.
The template can be either stored on the local machine, either remotely. The template can be either stored on the local machine, either remotely.
@ -1101,6 +1136,9 @@ def load_template(template_name,
in order to find the template, this argument must be provided: in order to find the template, this argument must be provided:
``template_path: /absolute/path/to/``. ``template_path: /absolute/path/to/``.
.. note::
This argument will be deprecated beginning with release codename ``Fluorine``.
template_hash: None template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}`` Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``
@ -1267,7 +1305,11 @@ def load_template(template_name,
'out': None 'out': None
} }
loaded_config = None loaded_config = None
if template_path:
salt.utils.versions.warn_until(
'Fluorine',
'Use of `template_path` detected. This argument will be removed in Salt Fluorine.'
)
# prechecks # prechecks
if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY: if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY:
_loaded.update({ _loaded.update({
View File
@ -129,7 +129,7 @@ def version(*names, **kwargs):
return __salt__['pkg_resource.version'](*names, **kwargs) return __salt__['pkg_resource.version'](*names, **kwargs)
def refresh_db(): def refresh_db(**kwargs): # pylint: disable=unused-argument
''' '''
Updates the opkg database to latest packages based upon repositories Updates the opkg database to latest packages based upon repositories
@ -456,7 +456,7 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
return remove(name=name, pkgs=pkgs) return remove(name=name, pkgs=pkgs)
def upgrade(refresh=True): def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument
''' '''
Upgrades all packages via ``opkg upgrade`` Upgrades all packages via ``opkg upgrade``
@ -739,7 +739,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret return ret
def list_upgrades(refresh=True): def list_upgrades(refresh=True, **kwargs): # pylint: disable=unused-argument
''' '''
List all available package upgrades. List all available package upgrades.
@ -908,7 +908,7 @@ def info_installed(*names, **kwargs):
return ret return ret
def upgrade_available(name): def upgrade_available(name, **kwargs): # pylint: disable=unused-argument
''' '''
Check whether or not an upgrade is available for a given package Check whether or not an upgrade is available for a given package
@ -921,7 +921,7 @@ def upgrade_available(name):
return latest_version(name) != '' return latest_version(name) != ''
def version_cmp(pkg1, pkg2, ignore_epoch=False): def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): # pylint: disable=unused-argument
''' '''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
@ -969,7 +969,7 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
return None return None
def list_repos(): def list_repos(**kwargs): # pylint: disable=unused-argument
''' '''
Lists all repos on /etc/opkg/*.conf Lists all repos on /etc/opkg/*.conf
@ -1006,7 +1006,7 @@ def list_repos():
return repos return repos
def get_repo(alias): def get_repo(alias, **kwargs): # pylint: disable=unused-argument
''' '''
Display a repo from the /etc/opkg/*.conf Display a repo from the /etc/opkg/*.conf
@ -1077,7 +1077,7 @@ def _mod_repo_in_file(alias, repostr, filepath):
fhandle.writelines(output) fhandle.writelines(output)
def del_repo(alias): def del_repo(alias, **kwargs): # pylint: disable=unused-argument
''' '''
Delete a repo from /etc/opkg/*.conf Delete a repo from /etc/opkg/*.conf
@ -1191,7 +1191,7 @@ def mod_repo(alias, **kwargs):
refresh_db() refresh_db()
def file_list(*packages): def file_list(*packages, **kwargs): # pylint: disable=unused-argument
''' '''
List the files that belong to a package. Not specifying any packages will List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not return a list of _every_ file on the system's package database (not
@ -1212,7 +1212,7 @@ def file_list(*packages):
return {'errors': output['errors'], 'files': files} return {'errors': output['errors'], 'files': files}
def file_dict(*packages): def file_dict(*packages, **kwargs): # pylint: disable=unused-argument
''' '''
List the files that belong to a package, grouped by package. Not List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's specifying any packages will return a list of _every_ file on the system's
@ -1254,7 +1254,7 @@ def file_dict(*packages):
return {'errors': errors, 'packages': ret} return {'errors': errors, 'packages': ret}
def owner(*paths): def owner(*paths, **kwargs): # pylint: disable=unused-argument
''' '''
Return the name of the package that owns the file. Multiple file paths can Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
View File
@ -245,7 +245,7 @@ def install_ruby(ruby, runas=None):
ret = {} ret = {}
ret = _rbenv_exec(['install', ruby], env=env, runas=runas, ret=ret) ret = _rbenv_exec(['install', ruby], env=env, runas=runas, ret=ret)
if ret['retcode'] == 0: if ret is not False and ret['retcode'] == 0:
rehash(runas=runas) rehash(runas=runas)
return ret['stderr'] return ret['stderr']
else: else:
View File
@ -9,7 +9,7 @@ Module to provide redis functionality to Salt
.. code-block:: yaml .. code-block:: yaml
redis.host: 'localhost' redis.host: 'salt'
redis.port: 6379 redis.port: 6379
redis.db: 0 redis.db: 0
redis.password: None redis.password: None
View File
@ -24,7 +24,7 @@ Values or Entries
Values/Entries are name/data pairs. There can be many values in a key. The Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs. (Default) value corresponds to the Key, the rest are their own value pairs.
:depends: - winreg Python module :depends: - PyWin32
''' '''
# When production windows installer is using Python 3, Python 2 code can be removed # When production windows installer is using Python 3, Python 2 code can be removed
@ -35,14 +35,13 @@ from __future__ import unicode_literals
import sys import sys
import logging import logging
from salt.ext.six.moves import range # pylint: disable=W0622,import-error from salt.ext.six.moves import range # pylint: disable=W0622,import-error
from salt.ext import six
# Import third party libs # Import third party libs
try: try:
from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module import win32gui
from win32con import HWND_BROADCAST, WM_SETTINGCHANGE import win32api
from win32api import RegCreateKeyEx, RegSetValueEx, RegFlushKey, \ import win32con
RegCloseKey, error as win32apiError, SendMessage import pywintypes
HAS_WINDOWS_MODULES = True HAS_WINDOWS_MODULES = True
except ImportError: except ImportError:
HAS_WINDOWS_MODULES = False HAS_WINDOWS_MODULES = False
@ -60,7 +59,7 @@ __virtualname__ = 'reg'
def __virtual__(): def __virtual__():
''' '''
Only works on Windows systems with the _winreg python module Only works on Windows systems with PyWin32
''' '''
if not salt.utils.is_windows(): if not salt.utils.is_windows():
return (False, 'reg execution module failed to load: ' return (False, 'reg execution module failed to load: '
@ -69,106 +68,76 @@ def __virtual__():
if not HAS_WINDOWS_MODULES: if not HAS_WINDOWS_MODULES:
return (False, 'reg execution module failed to load: ' return (False, 'reg execution module failed to load: '
'One of the following libraries did not load: ' 'One of the following libraries did not load: '
+ '_winreg, win32gui, win32con, win32api') + 'win32gui, win32con, win32api')
return __virtualname__ return __virtualname__
# winreg in python 2 is hard coded to use codex 'mbcs', which uses def _to_mbcs(vdata):
# encoding that the user has assign. The function _unicode_to_mbcs '''
# and _unicode_to_mbcs help with this. Converts unicode to the current user's character encoding. Use this for values
returned by reg functions
'''
return salt.utils.to_unicode(vdata, 'mbcs')
def _unicode_to_mbcs(instr): def _to_unicode(vdata):
''' '''
Converts unicode to to current users character encoding. Converts from the current user's character encoding to unicode. Use this for
parameters being passed to reg functions
''' '''
if isinstance(instr, six.text_type): return salt.utils.to_unicode(vdata, 'utf-8')
# unicode to windows utf8
return instr.encode('mbcs')
else:
# Assume its byte str or not a str/unicode
return instr
def _mbcs_to_unicode(instr):
'''
Converts from current users character encoding to unicode.
When instr has a value of None, the return value of the function
will also be None.
'''
if instr is None or isinstance(instr, six.text_type):
return instr
else:
return six.text_type(instr, 'mbcs')
def _mbcs_to_unicode_wrap(obj, vtype):
'''
Wraps _mbcs_to_unicode for use with registry vdata
'''
if vtype == 'REG_BINARY':
# We should be able to leave it alone if the user has passed binary data in yaml with
# binary !!
# In python < 3 this should have type str and in python 3+ this should be a byte array
return obj
if isinstance(obj, list):
return [_mbcs_to_unicode(x) for x in obj]
elif isinstance(obj, six.integer_types):
return obj
else:
return _mbcs_to_unicode(obj)
class Registry(object): # pylint: disable=R0903 class Registry(object): # pylint: disable=R0903
''' '''
Delay '_winreg' usage until this module is used Delay usage until this module is used
''' '''
def __init__(self): def __init__(self):
self.hkeys = { self.hkeys = {
'HKEY_CURRENT_USER': _winreg.HKEY_CURRENT_USER, 'HKEY_CURRENT_USER': win32con.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': _winreg.HKEY_LOCAL_MACHINE, 'HKEY_LOCAL_MACHINE': win32con.HKEY_LOCAL_MACHINE,
'HKEY_USERS': _winreg.HKEY_USERS, 'HKEY_USERS': win32con.HKEY_USERS,
'HKCU': _winreg.HKEY_CURRENT_USER, 'HKCU': win32con.HKEY_CURRENT_USER,
'HKLM': _winreg.HKEY_LOCAL_MACHINE, 'HKLM': win32con.HKEY_LOCAL_MACHINE,
'HKU': _winreg.HKEY_USERS, 'HKU': win32con.HKEY_USERS,
} }
self.vtype = { self.vtype = {
'REG_BINARY': _winreg.REG_BINARY, 'REG_BINARY': win32con.REG_BINARY,
'REG_DWORD': _winreg.REG_DWORD, 'REG_DWORD': win32con.REG_DWORD,
'REG_EXPAND_SZ': _winreg.REG_EXPAND_SZ, 'REG_EXPAND_SZ': win32con.REG_EXPAND_SZ,
'REG_MULTI_SZ': _winreg.REG_MULTI_SZ, 'REG_MULTI_SZ': win32con.REG_MULTI_SZ,
'REG_SZ': _winreg.REG_SZ 'REG_SZ': win32con.REG_SZ,
'REG_QWORD': win32con.REG_QWORD
} }
self.opttype = { self.opttype = {
'REG_OPTION_NON_VOLATILE': _winreg.REG_OPTION_NON_VOLATILE, 'REG_OPTION_NON_VOLATILE': 0,
'REG_OPTION_VOLATILE': _winreg.REG_OPTION_VOLATILE 'REG_OPTION_VOLATILE': 1
} }
# Return Unicode due to from __future__ import unicode_literals # Return Unicode due to from __future__ import unicode_literals
self.vtype_reverse = { self.vtype_reverse = {
_winreg.REG_BINARY: 'REG_BINARY', win32con.REG_BINARY: 'REG_BINARY',
_winreg.REG_DWORD: 'REG_DWORD', win32con.REG_DWORD: 'REG_DWORD',
_winreg.REG_EXPAND_SZ: 'REG_EXPAND_SZ', win32con.REG_EXPAND_SZ: 'REG_EXPAND_SZ',
_winreg.REG_MULTI_SZ: 'REG_MULTI_SZ', win32con.REG_MULTI_SZ: 'REG_MULTI_SZ',
_winreg.REG_SZ: 'REG_SZ', win32con.REG_SZ: 'REG_SZ',
# REG_QWORD isn't in the winreg library win32con.REG_QWORD: 'REG_QWORD'
11: 'REG_QWORD'
} }
self.opttype_reverse = { self.opttype_reverse = {
_winreg.REG_OPTION_NON_VOLATILE: 'REG_OPTION_NON_VOLATILE', 0: 'REG_OPTION_NON_VOLATILE',
_winreg.REG_OPTION_VOLATILE: 'REG_OPTION_VOLATILE' 1: 'REG_OPTION_VOLATILE'
} }
# delete_key_recursive uses this to check the subkey contains enough \ # delete_key_recursive uses this to check the subkey contains enough \
# as we do not want to remove all or most of the registry # as we do not want to remove all or most of the registry
self.subkey_slash_check = { self.subkey_slash_check = {
_winreg.HKEY_CURRENT_USER: 0, win32con.HKEY_CURRENT_USER: 0,
_winreg.HKEY_LOCAL_MACHINE: 1, win32con.HKEY_LOCAL_MACHINE: 1,
_winreg.HKEY_USERS: 1 win32con.HKEY_USERS: 1
} }
self.registry_32 = { self.registry_32 = {
True: _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY, True: win32con.KEY_READ | win32con.KEY_WOW64_32KEY,
False: _winreg.KEY_READ, False: win32con.KEY_READ,
} }
def __getattr__(self, k): def __getattr__(self, k):
@ -191,21 +160,16 @@ def _key_exists(hive, key, use_32bit_registry=False):
:return: Returns True if found, False if not found :return: Returns True if found, False if not found
:rtype: bool :rtype: bool
''' '''
local_hive = _to_unicode(hive)
if PY2: local_key = _to_unicode(key)
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
access_mask = registry.registry_32[use_32bit_registry] access_mask = registry.registry_32[use_32bit_registry]
try: try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask) handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
_winreg.CloseKey(handle) win32api.RegCloseKey(handle)
return True return True
except WindowsError: # pylint: disable=E0602 except WindowsError: # pylint: disable=E0602
return False return False
@ -224,7 +188,10 @@ def broadcast_change():
salt '*' reg.broadcast_change salt '*' reg.broadcast_change
''' '''
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx # https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 0)) _, res = win32gui.SendMessageTimeout(
win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
win32con.SMTO_ABORTIFHUNG, 5000)
return not bool(res)
def list_keys(hive, key=None, use_32bit_registry=False): def list_keys(hive, key=None, use_32bit_registry=False):
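For orientation, a minimal PyWin32 read of the kind the rewritten functions below perform (Windows only; the key and value name are just examples):

```python
import win32api
import win32con

handle = win32api.RegOpenKeyEx(
    win32con.HKEY_LOCAL_MACHINE,
    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion',
    0, win32con.KEY_READ)
try:
    vdata, vtype = win32api.RegQueryValueEx(handle, 'ProductName')
    print(vdata, vtype)
finally:
    win32api.RegCloseKey(handle)
```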
@ -253,12 +220,8 @@ def list_keys(hive, key=None, use_32bit_registry=False):
salt '*' reg.list_keys HKLM 'SOFTWARE' salt '*' reg.list_keys HKLM 'SOFTWARE'
''' '''
if PY2: local_hive = _to_unicode(hive)
local_hive = _mbcs_to_unicode(hive) local_key = _to_unicode(key)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
@ -266,12 +229,12 @@ def list_keys(hive, key=None, use_32bit_registry=False):
subkeys = [] subkeys = []
try: try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask) handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
for i in range(_winreg.QueryInfoKey(handle)[0]): for i in range(win32api.RegQueryInfoKey(handle)[0]):
subkey = _winreg.EnumKey(handle, i) subkey = win32api.RegEnumKey(handle, i)
if PY2: if PY2:
subkeys.append(_mbcs_to_unicode(subkey)) subkeys.append(_to_unicode(subkey))
else: else:
subkeys.append(subkey) subkeys.append(subkey)
@ -312,13 +275,8 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip' salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
''' '''
local_hive = _to_unicode(hive)
if PY2: local_key = _to_unicode(key)
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
@ -327,37 +285,21 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
values = list() values = list()
try: try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask) handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
for i in range(_winreg.QueryInfoKey(handle)[1]): for i in range(win32api.RegQueryInfoKey(handle)[1]):
vname, vdata, vtype = _winreg.EnumValue(handle, i) vname, vdata, vtype = win32api.RegEnumValue(handle, i)
if not vname:
vname = "(Default)"
value = {'hive': local_hive, value = {'hive': local_hive,
'key': local_key, 'key': local_key,
'vname': vname, 'vname': _to_mbcs(vname),
'vdata': vdata, 'vdata': _to_mbcs(vdata),
'vtype': registry.vtype_reverse[vtype], 'vtype': registry.vtype_reverse[vtype],
'success': True} 'success': True}
values.append(value) values.append(value)
if include_default:
# Get the default value for the key
value = {'hive': local_hive,
'key': local_key,
'vname': '(Default)',
'vdata': None,
'success': True}
try:
# QueryValueEx returns unicode data
vdata, vtype = _winreg.QueryValueEx(handle, '(Default)')
if vdata or vdata in [0, '']:
value['vtype'] = registry.vtype_reverse[vtype]
value['vdata'] = vdata
else:
value['comment'] = 'Empty Value'
except WindowsError: # pylint: disable=E0602
value['vdata'] = ('(value not set)')
value['vtype'] = 'REG_SZ'
values.append(value)
except WindowsError as exc: # pylint: disable=E0602 except WindowsError as exc: # pylint: disable=E0602
log.debug(exc) log.debug(exc)
log.debug(r'Cannot find key: {0}\{1}'.format(hive, key)) log.debug(r'Cannot find key: {0}\{1}'.format(hive, key))
@ -403,30 +345,19 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version' salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
''' '''
# If no name is passed, the default value of the key will be returned # If no name is passed, the default value of the key will be returned
# The value name is Default # The value name is Default
# Setup the return array # Setup the return array
if PY2: local_hive = _to_unicode(hive)
ret = {'hive': _mbcs_to_unicode(hive), local_key = _to_unicode(key)
'key': _mbcs_to_unicode(key), local_vname = _to_unicode(vname)
'vname': _mbcs_to_unicode(vname),
'vdata': None,
'success': True}
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
local_vname = _unicode_to_mbcs(vname)
else: ret = {'hive': local_hive,
ret = {'hive': hive, 'key': local_key,
'key': key, 'vname': local_vname,
'vname': vname, 'vdata': None,
'vdata': None, 'success': True}
'success': True}
local_hive = hive
local_key = key
local_vname = vname
if not vname: if not vname:
ret['vname'] = '(Default)' ret['vname'] = '(Default)'
@ -436,19 +367,22 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
access_mask = registry.registry_32[use_32bit_registry] access_mask = registry.registry_32[use_32bit_registry]
try: try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask) handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
try: try:
# QueryValueEx returns unicode data # RegQueryValueEx returns and accepts unicode data
vdata, vtype = _winreg.QueryValueEx(handle, local_vname) vdata, vtype = win32api.RegQueryValueEx(handle, local_vname)
if vdata or vdata in [0, '']: if vdata or vdata in [0, '']:
ret['vtype'] = registry.vtype_reverse[vtype] ret['vtype'] = registry.vtype_reverse[vtype]
ret['vdata'] = vdata if vtype == 7:
ret['vdata'] = [_to_mbcs(i) for i in vdata]
else:
ret['vdata'] = _to_mbcs(vdata)
else: else:
ret['comment'] = 'Empty Value' ret['comment'] = 'Empty Value'
except WindowsError: # pylint: disable=E0602 except WindowsError: # pylint: disable=E0602
ret['vdata'] = ('(value not set)') ret['vdata'] = ('(value not set)')
ret['vtype'] = 'REG_SZ' ret['vtype'] = 'REG_SZ'
except WindowsError as exc: # pylint: disable=E0602 except pywintypes.error as exc: # pylint: disable=E0602
log.debug(exc) log.debug(exc)
log.debug('Cannot find key: {0}\\{1}'.format(local_hive, local_key)) log.debug('Cannot find key: {0}\\{1}'.format(local_hive, local_key))
ret['comment'] = 'Cannot find key: {0}\\{1}'.format(local_hive, local_key) ret['comment'] = 'Cannot find key: {0}\\{1}'.format(local_hive, local_key)
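For the read path, a minimal sketch of what the new pywin32 calls return (illustrative only; the key, value name and function name are made up, and the module's `_to_mbcs` handling is left out):

    import win32api
    import win32con

    def read_reg_value(subkey=r'SOFTWARE\Salt', vname='version', use_32bit=False):
        access = win32con.KEY_READ
        if use_32bit:
            access |= win32con.KEY_WOW64_32KEY
        handle = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, subkey, 0, access)
        try:
            # RegQueryValueEx returns a (data, type) pair; REG_MULTI_SZ (type 7)
            # comes back as a list, which the patch converts element by element.
            vdata, vtype = win32api.RegQueryValueEx(handle, vname)
            return vdata, vtype
        finally:
            win32api.RegCloseKey(handle)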
@ -555,42 +489,47 @@ def set_value(hive,
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\ salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_LIST vdata='[a,b,c]' vtype=REG_LIST vdata='[a,b,c]'
''' '''
local_hive = _to_unicode(hive)
if PY2: local_key = _to_unicode(key)
try: local_vname = _to_unicode(vname)
local_hive = _mbcs_to_unicode(hive) local_vtype = _to_unicode(vtype)
local_key = _mbcs_to_unicode(key)
local_vname = _mbcs_to_unicode(vname)
local_vtype = _mbcs_to_unicode(vtype)
local_vdata = _mbcs_to_unicode_wrap(vdata, local_vtype)
except TypeError as exc: # pylint: disable=E0602
log.error(exc, exc_info=True)
return False
else:
local_hive = hive
local_key = key
local_vname = vname
local_vdata = vdata
local_vtype = vtype
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
vtype_value = registry.vtype[local_vtype] vtype_value = registry.vtype[local_vtype]
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
# Check data type and cast to expected type
# int will automatically become long on 64bit numbers
# https://www.python.org/dev/peps/pep-0237/
# String Types to Unicode
if vtype_value in [1, 2]:
local_vdata = _to_unicode(vdata)
# Don't touch binary...
elif vtype_value == 3:
local_vdata = vdata
# Make sure REG_MULTI_SZ is a list of strings
elif vtype_value == 7:
local_vdata = [_to_unicode(i) for i in vdata]
# Everything else is int
else:
local_vdata = int(vdata)
if volatile: if volatile:
create_options = registry.opttype['REG_OPTION_VOLATILE'] create_options = registry.opttype['REG_OPTION_VOLATILE']
else: else:
create_options = registry.opttype['REG_OPTION_NON_VOLATILE'] create_options = registry.opttype['REG_OPTION_NON_VOLATILE']
try: try:
handle, _ = RegCreateKeyEx(hkey, local_key, access_mask, handle, _ = win32api.RegCreateKeyEx(hkey, local_key, access_mask,
Options=create_options) Options=create_options)
RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata) win32api.RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
RegFlushKey(handle) win32api.RegFlushKey(handle)
RegCloseKey(handle) win32api.RegCloseKey(handle)
broadcast_change() broadcast_change()
return True return True
except (win32apiError, SystemError, ValueError, TypeError) as exc: # pylint: disable=E0602 except (win32api.error, SystemError, ValueError, TypeError) as exc: # pylint: disable=E0602
log.error(exc, exc_info=True) log.error(exc, exc_info=True)
return False return False
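The new type-coercion block above maps the requested vtype onto the data RegSetValueEx expects. Reduced to a plain function (the name is illustrative; the module uses its own `_to_unicode` helper where `str` appears here):

    def cast_vdata(vdata, vtype_value):
        if vtype_value in (1, 2):            # REG_SZ, REG_EXPAND_SZ -> text
            return str(vdata)
        if vtype_value == 3:                 # REG_BINARY -> leave untouched
            return vdata
        if vtype_value == 7:                 # REG_MULTI_SZ -> list of strings
            return [str(i) for i in vdata]
        return int(vdata)                    # everything else (REG_DWORD, ...)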
@ -626,18 +565,14 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
''' '''
if PY2: local_hive = _to_unicode(hive)
local_hive = _mbcs_to_unicode(hive) local_key = _to_unicode(key)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
# Instantiate the registry object # Instantiate the registry object
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
key_path = local_key key_path = local_key
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
if not _key_exists(local_hive, local_key, use_32bit_registry): if not _key_exists(local_hive, local_key, use_32bit_registry):
return False return False
@ -654,17 +589,17 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
i = 0 i = 0
while True: while True:
try: try:
subkey = _winreg.EnumKey(_key, i) subkey = win32api.RegEnumKey(_key, i)
yield subkey yield subkey
i += 1 i += 1
except WindowsError: # pylint: disable=E0602 except pywintypes.error: # pylint: disable=E0602
break break
def _traverse_registry_tree(_hkey, _keypath, _ret, _access_mask): def _traverse_registry_tree(_hkey, _keypath, _ret, _access_mask):
''' '''
Traverse the registry tree i.e. dive into the tree Traverse the registry tree i.e. dive into the tree
''' '''
_key = _winreg.OpenKey(_hkey, _keypath, 0, _access_mask) _key = win32api.RegOpenKeyEx(_hkey, _keypath, 0, _access_mask)
for subkeyname in _subkeys(_key): for subkeyname in _subkeys(_key):
subkeypath = r'{0}\{1}'.format(_keypath, subkeyname) subkeypath = r'{0}\{1}'.format(_keypath, subkeyname)
_ret = _traverse_registry_tree(_hkey, subkeypath, _ret, access_mask) _ret = _traverse_registry_tree(_hkey, subkeypath, _ret, access_mask)
@ -683,8 +618,8 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
# Delete all sub_keys # Delete all sub_keys
for sub_key_path in key_list: for sub_key_path in key_list:
try: try:
key_handle = _winreg.OpenKey(hkey, sub_key_path, 0, access_mask) key_handle = win32api.RegOpenKeyEx(hkey, sub_key_path, 0, access_mask)
_winreg.DeleteKey(key_handle, '') win32api.RegDeleteKey(key_handle, '')
ret['Deleted'].append(r'{0}\{1}'.format(hive, sub_key_path)) ret['Deleted'].append(r'{0}\{1}'.format(hive, sub_key_path))
except WindowsError as exc: # pylint: disable=E0602 except WindowsError as exc: # pylint: disable=E0602
log.error(exc, exc_info=True) log.error(exc, exc_info=True)
@ -723,23 +658,18 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
''' '''
if PY2: local_hive = _to_unicode(hive)
local_hive = _mbcs_to_unicode(hive) local_key = _to_unicode(key)
local_key = _unicode_to_mbcs(key) local_vname = _to_unicode(vname)
local_vname = _unicode_to_mbcs(vname)
else:
local_hive = hive
local_key = key
local_vname = vname
registry = Registry() registry = Registry()
hkey = registry.hkeys[local_hive] hkey = registry.hkeys[local_hive]
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
try: try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask) handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
_winreg.DeleteValue(handle, local_vname) win32api.RegDeleteValue(handle, local_vname)
_winreg.CloseKey(handle) win32api.RegCloseKey(handle)
broadcast_change() broadcast_change()
return True return True
except WindowsError as exc: # pylint: disable=E0602 except WindowsError as exc: # pylint: disable=E0602
@ -689,9 +689,24 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
if opt in opts: if opt in opts:
result[opt] = opts[opt] result[opt] = opts[opt]
for opt in ['ipaddrs', 'ipv6addrs']: if 'ipaddrs' in opts:
if opt in opts: result['ipaddrs'] = []
result[opt] = opts[opt] for opt in opts['ipaddrs']:
if salt.utils.validate.net.ipv4_addr(opt):
ip, prefix = [i.strip() for i in opt.split('/')]
result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix})
else:
msg = 'ipv4 CIDR is invalid'
log.error(msg)
raise AttributeError(msg)
if 'ipv6addrs' in opts:
for opt in opts['ipv6addrs']:
if not salt.utils.validate.net.ipv6_addr(opt):
msg = 'ipv6 CIDR is invalid'
log.error(msg)
raise AttributeError(msg)
result['ipv6addrs'] = opts['ipv6addrs']
if 'enable_ipv6' in opts: if 'enable_ipv6' in opts:
result['enable_ipv6'] = opts['enable_ipv6'] result['enable_ipv6'] = opts['enable_ipv6']
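The added validation above rejects anything that is not valid IPv4/IPv6 CIDR notation and splits IPv4 entries into address and prefix for the template. Sketched with the standard library instead of Salt's validate helpers (the function name is made up):

    import ipaddress

    def parse_ipaddrs(addrs):
        result = []
        for cidr in addrs:
            ip, _, prefix = cidr.partition('/')
            if not prefix:
                raise AttributeError('ipv4 CIDR is invalid')
            ipaddress.IPv4Interface(cidr)    # raises ValueError on a bad address
            result.append({'ipaddr': ip.strip(), 'prefix': prefix.strip()})
        return result

    parse_ipaddrs(['192.168.10.5/24'])       # [{'ipaddr': '192.168.10.5', 'prefix': '24'}]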
@ -1069,8 +1084,8 @@ def build_routes(iface, **settings):
log.debug("IPv4 routes:\n{0}".format(opts4)) log.debug("IPv4 routes:\n{0}".format(opts4))
log.debug("IPv6 routes:\n{0}".format(opts6)) log.debug("IPv6 routes:\n{0}".format(opts6))
routecfg = template.render(routes=opts4) routecfg = template.render(routes=opts4, iface=iface)
routecfg6 = template.render(routes=opts6) routecfg6 = template.render(routes=opts6, iface=iface)
if settings['test']: if settings['test']:
routes = _read_temp(routecfg) routes = _read_temp(routecfg)
@ -2,6 +2,8 @@
''' '''
Module for controlling the LED matrix or reading environment data on the SenseHat of a Raspberry Pi. Module for controlling the LED matrix or reading environment data on the SenseHat of a Raspberry Pi.
.. versionadded:: 2017.7.0
:maintainer: Benedikt Werner <1benediktwerner@gmail.com>, Joachim Werner <joe@suse.com> :maintainer: Benedikt Werner <1benediktwerner@gmail.com>, Joachim Werner <joe@suse.com>
:maturity: new :maturity: new
:depends: sense_hat Python module :depends: sense_hat Python module
@ -99,17 +99,16 @@ def _set_retcode(ret, highstate=None):
__context__['retcode'] = 2 __context__['retcode'] = 2
def _check_pillar(kwargs, pillar=None): def _get_pillar_errors(kwargs, pillar=None):
''' '''
Check the pillar for errors, refuse to run the state if there are errors Checks all pillars (external and internal) for errors.
in the pillar and return the pillar errors Return an error message if any are found, otherwise None.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
''' '''
if kwargs.get('force'): return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True
def _wait(jid): def _wait(jid):
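The condensed return statement in `_get_pillar_errors` is easier to follow when unrolled; something like the following sketch is equivalent (here `default_pillar` stands in for the module-global `__pillar__`):

    def _get_pillar_errors(kwargs, pillar=None, default_pillar=None):
        if kwargs.get('force'):
            return None
        # Prefer the externally supplied pillar; fall back to the in-memory one
        # only when it carries no '_errors' key at all.
        errors = (pillar or {}).get('_errors', (default_pillar or {}).get('_errors'))
        return errors or None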
@ -411,10 +410,10 @@ def template(tem, queue=False, **kwargs):
context=__context__, context=__context__,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
if not tem.endswith('.sls'): if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem) tem = '{sls}.sls'.format(sls=tem)
@ -493,6 +492,18 @@ def apply_(mods=None,
Values passed this way will override Pillar values set via Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source. ``pillar_roots`` or an external Pillar source.
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.apply exclude=bar,baz
salt '*' state.apply exclude=foo*
salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False queue : False
Instead of failing immediately when another state run is in progress, Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished. queue the new state run to begin running once the other has finished.
@ -758,6 +769,18 @@ def highstate(test=None, queue=False, **kwargs):
.. versionadded:: 2016.3.0 .. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv saltenv
Specify a salt fileserver environment to be used when applying states Specify a salt fileserver environment to be used when applying states
@ -872,11 +895,10 @@ def highstate(test=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False), mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:'] return ['Pillar failed to render with the following messages:'] + errors
err += __pillar__['_errors']
return err
st_.push_active() st_.push_active()
ret = {} ret = {}
@ -893,8 +915,8 @@ def highstate(test=None, queue=False, **kwargs):
finally: finally:
st_.pop_active() st_.pop_active()
if __salt__['config.option']('state_data', '') == 'terse' or \ if isinstance(ret, dict) and (__salt__['config.option']('state_data', '') == 'terse' or
kwargs.get('terse'): kwargs.get('terse')):
ret = _filter_running(ret) ret = _filter_running(ret)
serial = salt.payload.Serial(__opts__) serial = salt.payload.Serial(__opts__)
@ -935,6 +957,18 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
.. versionadded:: 2016.3.0 .. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.sls foo,bar,baz exclude=bar,baz
salt '*' state.sls foo,bar,baz exclude=ba*
salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False queue : False
Instead of failing immediately when another state run is in progress, Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished. queue the new state run to begin running once the other has finished.
@ -1071,11 +1105,10 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False), mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:'] return ['Pillar failed to render with the following messages:'] + errors
err += __pillar__['_errors']
return err
orchestration_jid = kwargs.get('orchestration_jid') orchestration_jid = kwargs.get('orchestration_jid')
umask = os.umask(0o77) umask = os.umask(0o77)
@ -1090,7 +1123,6 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mods = mods.split(',') mods = mods.split(',')
st_.push_active() st_.push_active()
ret = {}
try: try:
high_, errors = st_.render_highstate({opts['environment']: mods}) high_, errors = st_.render_highstate({opts['environment']: mods})
@ -1197,11 +1229,10 @@ def top(topfn, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
context=__context__, context=__context__,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:'] return ['Pillar failed to render with the following messages:'] + errors
err += __pillar__['_errors']
return err
st_.push_active() st_.push_active()
st_.opts['state_top'] = salt.utils.url.create(topfn) st_.opts['state_top'] = salt.utils.url.create(topfn)
@ -1259,10 +1290,10 @@ def show_highstate(queue=False, **kwargs):
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
st_.push_active() st_.push_active()
try: try:
@ -1293,10 +1324,10 @@ def show_lowstate(queue=False, **kwargs):
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
st_.push_active() st_.push_active()
try: try:
@ -1394,11 +1425,10 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts, st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:'] return ['Pillar failed to render with the following messages:'] + errors
err += __pillar__['_errors']
return err
if isinstance(mods, six.string_types): if isinstance(mods, six.string_types):
split_mods = mods.split(',') split_mods = mods.split(',')
@ -1480,10 +1510,10 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
if isinstance(mods, six.string_types): if isinstance(mods, six.string_types):
mods = mods.split(',') mods = mods.split(',')
@ -1567,10 +1597,10 @@ def show_sls(mods, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc, pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts)) initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
if isinstance(mods, six.string_types): if isinstance(mods, six.string_types):
mods = mods.split(',') mods = mods.split(',')
@ -1616,10 +1646,10 @@ def show_top(queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']): errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5 __context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render', raise CommandExecutionError('Pillar failed to render', info=errors)
info=st_.opts['pillar']['_errors'])
errors = [] errors = []
top_ = st_.get_top() top_ = st_.get_top()
@ -337,6 +337,10 @@ def zone_compare(timezone):
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']: if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone() return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_etc_localtime_path()):
return timezone == get_zone()
tzfile = _get_etc_localtime_path() tzfile = _get_etc_localtime_path()
zonepath = _get_zone_file(timezone) zonepath = _get_zone_file(timezone)
try: try:
@ -316,7 +316,7 @@ def get_site_packages(venv):
ret = __salt__['cmd.exec_code_all']( ret = __salt__['cmd.exec_code_all'](
bin_path, bin_path,
'from distutils import sysconfig; ' 'from distutils import sysconfig; '
'print sysconfig.get_python_lib()' 'print(sysconfig.get_python_lib())'
) )
if ret['retcode'] != 0: if ret['retcode'] != 0:
@ -58,7 +58,7 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
lstat, path_exists_glob, write, pardir, join, HASHES, HASHES_REVMAP, lstat, path_exists_glob, write, pardir, join, HASHES, HASHES_REVMAP,
comment, uncomment, _add_flags, comment_line, _regex_to_static, comment, uncomment, _add_flags, comment_line, _regex_to_static,
_get_line_indent, apply_template_on_contents, dirname, basename, _get_line_indent, apply_template_on_contents, dirname, basename,
list_backups_dir) list_backups_dir, _assert_occurrence, _starts_till)
from salt.modules.file import normpath as normpath_ from salt.modules.file import normpath as normpath_
from salt.utils import namespaced_function as _namespaced_function from salt.utils import namespaced_function as _namespaced_function
@ -116,7 +116,7 @@ def __virtual__():
global write, pardir, join, _add_flags, apply_template_on_contents global write, pardir, join, _add_flags, apply_template_on_contents
global path_exists_glob, comment, uncomment, _mkstemp_copy global path_exists_glob, comment, uncomment, _mkstemp_copy
global _regex_to_static, _get_line_indent, dirname, basename global _regex_to_static, _get_line_indent, dirname, basename
global list_backups_dir, normpath_ global list_backups_dir, normpath_, _assert_occurrence, _starts_till
replace = _namespaced_function(replace, globals()) replace = _namespaced_function(replace, globals())
search = _namespaced_function(search, globals()) search = _namespaced_function(search, globals())
@ -179,6 +179,8 @@ def __virtual__():
basename = _namespaced_function(basename, globals()) basename = _namespaced_function(basename, globals())
list_backups_dir = _namespaced_function(list_backups_dir, globals()) list_backups_dir = _namespaced_function(list_backups_dir, globals())
normpath_ = _namespaced_function(normpath_, globals()) normpath_ = _namespaced_function(normpath_, globals())
_assert_occurrence = _namespaced_function(_assert_occurrence, globals())
_starts_till = _namespaced_function(_starts_till, globals())
else: else:
return False, 'Module win_file: Missing Win32 modules' return False, 'Module win_file: Missing Win32 modules'
@ -789,7 +791,7 @@ def chgrp(path, group):
def stats(path, hash_type='sha256', follow_symlinks=True): def stats(path, hash_type='sha256', follow_symlinks=True):
''' '''
Return a dict containing the stats for a given file Return a dict containing the stats about a given file
Under Windows, `gid` will equal `uid` and `group` will equal `user`. Under Windows, `gid` will equal `uid` and `group` will equal `user`.
@ -818,6 +820,8 @@ def stats(path, hash_type='sha256', follow_symlinks=True):
salt '*' file.stats /etc/passwd salt '*' file.stats /etc/passwd
''' '''
# This is to mirror the behavior of file.py. `check_file_meta` expects an
# empty dictionary when the file does not exist
if not os.path.exists(path): if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path)) raise CommandExecutionError('Path not found: {0}'.format(path))
@ -1225,33 +1229,37 @@ def mkdir(path,
path (str): The full path to the directory. path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the owner (str):
account that created the directory, likely SYSTEM The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic grant_perms (dict):
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. A dictionary containing the user/group and the basic permissions to
You can also set the ``applies_to`` setting here. The default is grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
``this_folder_subfolders_files``. Specify another ``applies_to`` setting set the ``applies_to`` setting here. The default is
like this: ``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie: To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and deny_perms (dict):
permissions to deny along with the ``applies_to`` setting. Use the same A dictionary containing the user/group and permissions to deny along
format used for the ``grant_perms`` parameter. Remember, deny with the ``applies_to`` setting. Use the same format used for the
permissions supersede grant permissions. ``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the inheritance (bool):
parent, if False, inheritance will be disabled. Inheritance setting will If True the object will inherit permissions from the parent, if
not apply to parent directories if they must be created False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns: Returns:
bool: True if successful bool: True if successful
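This and the following reflowed docstrings all describe the same `grant_perms`/`deny_perms` structure; a concrete value matching that description looks roughly like this (user names and permissions are only examples):

    grant_perms = {
        'Administrators': {
            'perms': 'full_control',                       # basic permission
            'applies_to': 'this_folder_subfolders_files',  # the documented default
        },
        'jsnuffy': {
            'perms': ['read_attributes', 'read_ea'],       # advanced permissions
            'applies_to': 'this_folder',
        },
    }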
@ -1310,33 +1318,37 @@ def makedirs_(path,
path (str): The full path to the directory. path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the owner (str):
account that created the directly, likely SYSTEM The owner of the directory. If not passed, it will be the account
that created the directly, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic grant_perms (dict):
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. A dictionary containing the user/group and the basic permissions to
You can also set the ``applies_to`` setting here. The default is grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
``this_folder_subfolders_files``. Specify another ``applies_to`` setting set the ``applies_to`` setting here. The default is
like this: ``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie: To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and deny_perms (dict):
permissions to deny along with the ``applies_to`` setting. Use the same A dictionary containing the user/group and permissions to deny along
format used for the ``grant_perms`` parameter. Remember, deny with the ``applies_to`` setting. Use the same format used for the
permissions supersede grant permissions. ``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the inheritance (bool):
parent, if False, inheritance will be disabled. Inheritance setting will If True the object will inherit permissions from the parent, if
not apply to parent directories if they must be created False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
.. note:: .. note::
@ -1421,36 +1433,40 @@ def makedirs_perms(path,
path (str): The full path to the directory. path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the owner (str):
account that created the directory, likely SYSTEM The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic grant_perms (dict):
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. A dictionary containing the user/group and the basic permissions to
You can also set the ``applies_to`` setting here. The default is grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
``this_folder_subfolders_files``. Specify another ``applies_to`` setting set the ``applies_to`` setting here. The default is
like this: ``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie: To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and deny_perms (dict):
permissions to deny along with the ``applies_to`` setting. Use the same A dictionary containing the user/group and permissions to deny along
format used for the ``grant_perms`` parameter. Remember, deny with the ``applies_to`` setting. Use the same format used for the
permissions supersede grant permissions. ``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the inheritance (bool):
parent, if False, inheritance will be disabled. Inheritance setting will If True the object will inherit permissions from the parent, if
not apply to parent directories if they must be created False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns: Returns:
bool: True if successful, otherwise raise an error bool: True if successful, otherwise raises an error
CLI Example: CLI Example:
@ -1503,45 +1519,54 @@ def check_perms(path,
deny_perms=None, deny_perms=None,
inheritance=True): inheritance=True):
''' '''
Set owner and permissions for each directory created. Set owner and permissions for each directory created. Used mostly by the
state system.
Args: Args:
path (str): The full path to the directory. path (str): The full path to the directory.
ret (dict): A dictionary to append changes to and return. If not passed, ret (dict):
will create a new dictionary to return. A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
owner (str): The owner of the directory. If not passed, it will be the owner (str):
account that created the directory, likely SYSTEM The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic grant_perms (dict):
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``. A dictionary containing the user/group and the basic permissions to
You can also set the ``applies_to`` setting here. The default is grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
``this_folder_subfolders_files``. Specify another ``applies_to`` setting set the ``applies_to`` setting here. The default is
like this: ``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}} {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie: To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml .. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}} {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and deny_perms (dict):
permissions to deny along with the ``applies_to`` setting. Use the same A dictionary containing the user/group and permissions to deny along
format used for the ``grant_perms`` parameter. Remember, deny with the ``applies_to`` setting. Use the same format used for the
permissions supersede grant permissions. ``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the inheritance (bool):
parent, if False, inheritance will be disabled. Inheritance setting will If True the object will inherit permissions from the parent, if
not apply to parent directories if they must be created False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns: Returns:
bool: True if successful, otherwise raise an error dict: A dictionary of changes made to the object
Raises:
CommandExecutionError: If the object does not exist
CLI Example: CLI Example:
@ -1556,6 +1581,9 @@ def check_perms(path,
# Specify advanced attributes with a list # Specify advanced attributes with a list
salt '*' file.check_perms C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}" salt '*' file.check_perms C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
''' '''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
path = os.path.expanduser(path) path = os.path.expanduser(path)
if not ret: if not ret:
@ -36,6 +36,60 @@ def __virtual__():
return (False, "Module win_groupadd: module only works on Windows systems") return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace')
def add(name, **kwargs): def add(name, **kwargs):
''' '''
Add the specified group Add the specified group
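The new `_get_username` helper above normalises the ADSPath that the COM group object reports for each member. On a plain string the transformation amounts to the following (illustrative function name; the Python 2 `encode('ascii', 'backslashreplace')` step is omitted here):

    def ads_path_to_username(ads_path):
        # 'WinNT://DOMAIN/jsnuffy' -> 'DOMAIN\\jsnuffy'
        return ads_path.replace('WinNT://', '').replace('/', '\\')

    ads_path_to_username('WinNT://DOMAIN/jsnuffy')       # 'DOMAIN\\jsnuffy'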
@ -60,10 +114,8 @@ def add(name, **kwargs):
'comment': ''} 'comment': ''}
if not info(name): if not info(name):
pythoncom.CoInitialize() compObj = _get_computer_object()
nt = win32com.client.Dispatch('AdsNameSpaces')
try: try:
compObj = nt.GetObject('', 'WinNT://.,computer')
newGroup = compObj.Create('group', name) newGroup = compObj.Create('group', name)
newGroup.SetInfo() newGroup.SetInfo()
ret['changes'].append('Successfully created group {0}'.format(name)) ret['changes'].append('Successfully created group {0}'.format(name))
@ -104,10 +156,8 @@ def delete(name, **kwargs):
'comment': ''} 'comment': ''}
if info(name): if info(name):
pythoncom.CoInitialize() compObj = _get_computer_object()
nt = win32com.client.Dispatch('AdsNameSpaces')
try: try:
compObj = nt.GetObject('', 'WinNT://.,computer')
compObj.Delete('group', name) compObj.Delete('group', name)
ret['changes'].append(('Successfully removed group {0}').format(name)) ret['changes'].append(('Successfully removed group {0}').format(name))
except pywintypes.com_error as com_err: except pywintypes.com_error as com_err:
@ -144,17 +194,10 @@ def info(name):
salt '*' group.info foo salt '*' group.info foo
''' '''
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
try: try:
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') groupObj = _get_group_object(name)
gr_name = groupObj.Name gr_name = groupObj.Name
gr_mem = [] gr_mem = [_get_username(x) for x in groupObj.members()]
for member in groupObj.members():
gr_mem.append(
member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace'))
except pywintypes.com_error: except pywintypes.com_error:
return False return False
@ -193,20 +236,12 @@ def getent(refresh=False):
ret = [] ret = []
pythoncom.CoInitialize() results = _get_all_groups()
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
for result in results: for result in results:
member_list = [] group = {'gid': __salt__['file.group_to_gid'](result.Name),
for member in result.members(): 'members': [_get_username(x) for x in result.members()],
member_list.append( 'name': result.Name,
member.AdsPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace'))
group = {'gid': __salt__['file.group_to_gid'](result.name),
'members': member_list,
'name': result.name,
'passwd': 'x'} 'passwd': 'x'}
ret.append(group) ret.append(group)
__context__['group.getent'] = ret __context__['group.getent'] = ret
@ -240,17 +275,21 @@ def adduser(name, username, **kwargs):
'changes': {'Users Added': []}, 'changes': {'Users Added': []},
'comment': ''} 'comment': ''}
pythoncom.CoInitialize() try:
nt = win32com.client.Dispatch('AdsNameSpaces') groupObj = _get_group_object(name)
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') except pywintypes.com_error as com_err:
existingMembers = [] if len(com_err.excepinfo) >= 2:
for member in groupObj.members(): friendly_error = com_err.excepinfo[2].rstrip('\r\n')
existingMembers.append( ret['result'] = False
member.ADSPath.replace('WinNT://', '').replace( ret['comment'] = 'Failure accessing group {0}. {1}' \
'/', '\\').encode('ascii', 'backslashreplace').lower()) ''.format(name, friendly_error)
return ret
existingMembers = [_get_username(x) for x in groupObj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try: try:
if salt.utils.win_functions.get_sam_name(username) not in existingMembers: if username not in existingMembers:
if not __opts__['test']: if not __opts__['test']:
groupObj.Add('WinNT://' + username.replace('\\', '/')) groupObj.Add('WinNT://' + username.replace('\\', '/'))
@ -299,14 +338,17 @@ def deluser(name, username, **kwargs):
'changes': {'Users Removed': []}, 'changes': {'Users Removed': []},
'comment': ''} 'comment': ''}
pythoncom.CoInitialize() try:
nt = win32com.client.Dispatch('AdsNameSpaces') groupObj = _get_group_object(name)
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') except pywintypes.com_error as com_err:
existingMembers = [] if len(com_err.excepinfo) >= 2:
for member in groupObj.members(): friendly_error = com_err.excepinfo[2].rstrip('\r\n')
existingMembers.append( ret['result'] = False
member.ADSPath.replace('WinNT://', '').replace( ret['comment'] = 'Failure accessing group {0}. {1}' \
'/', '\\').encode('ascii', 'backslashreplace').lower()) ''.format(name, friendly_error)
return ret
existingMembers = [_get_username(x) for x in groupObj.members()]
try: try:
if salt.utils.win_functions.get_sam_name(username) in existingMembers: if salt.utils.win_functions.get_sam_name(username) in existingMembers:
@ -365,10 +407,8 @@ def members(name, members_list, **kwargs):
ret['comment'].append('Members is not a list object') ret['comment'].append('Members is not a list object')
return ret return ret
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
try: try:
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') groupObj = _get_group_object(name)
except pywintypes.com_error as com_err: except pywintypes.com_error as com_err:
if len(com_err.excepinfo) >= 2: if len(com_err.excepinfo) >= 2:
friendly_error = com_err.excepinfo[2].rstrip('\r\n') friendly_error = com_err.excepinfo[2].rstrip('\r\n')
@ -377,12 +417,7 @@ def members(name, members_list, **kwargs):
'Failure accessing group {0}. {1}' 'Failure accessing group {0}. {1}'
).format(name, friendly_error)) ).format(name, friendly_error))
return ret return ret
existingMembers = [] existingMembers = [_get_username(x) for x in groupObj.members()]
for member in groupObj.members():
existingMembers.append(
member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace').lower())
existingMembers.sort() existingMembers.sort()
members_list.sort() members_list.sort()
@ -448,18 +483,14 @@ def list_groups(refresh=False):
salt '*' group.list_groups salt '*' group.list_groups
''' '''
if 'group.list_groups' in __context__ and not refresh: if 'group.list_groups' in __context__ and not refresh:
return __context__['group.getent'] return __context__['group.list_groups']
results = _get_all_groups()
ret = [] ret = []
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
for result in results: for result in results:
ret.append(result.name) ret.append(result.Name)
__context__['group.list_groups'] = ret __context__['group.list_groups'] = ret
@ -39,10 +39,11 @@ import logging
import os import os
import re import re
import time import time
import sys
from functools import cmp_to_key from functools import cmp_to_key
# Import third party libs # Import third party libs
import salt.ext.six as six from salt.ext import six
# pylint: disable=import-error,no-name-in-module # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
@ -50,8 +51,12 @@ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
from salt.exceptions import (CommandExecutionError, from salt.exceptions import (CommandExecutionError,
SaltInvocationError, SaltInvocationError,
SaltRenderError) SaltRenderError)
import salt.utils import salt.utils # Can be removed once is_true, get_hash, compare_dicts are moved
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.pkg import salt.utils.pkg
import salt.utils.versions
import salt.syspaths import salt.syspaths
import salt.payload import salt.payload
from salt.exceptions import MinionError from salt.exceptions import MinionError
@ -98,7 +103,7 @@ def latest_version(*names, **kwargs):
salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ... salt '*' pkg.latest_version <package1> <package2> <package3> ...
''' '''
if len(names) == 0: if not names:
return '' return ''
# Initialize the return dict with empty strings # Initialize the return dict with empty strings
@ -123,6 +128,8 @@ def latest_version(*names, **kwargs):
if name in installed_pkgs: if name in installed_pkgs:
log.trace('Determining latest installed version of %s', name) log.trace('Determining latest installed version of %s', name)
try: try:
# installed_pkgs[name] Can be version number or 'Not Found'
# 'Not Found' occurs when version number is not found in the registry
latest_installed = sorted( latest_installed = sorted(
installed_pkgs[name], installed_pkgs[name],
key=cmp_to_key(_reverse_cmp_pkg_versions) key=cmp_to_key(_reverse_cmp_pkg_versions)
@ -139,6 +146,8 @@ def latest_version(*names, **kwargs):
# get latest available (from winrepo_dir) version of package # get latest available (from winrepo_dir) version of package
pkg_info = _get_package_info(name, saltenv=saltenv) pkg_info = _get_package_info(name, saltenv=saltenv)
log.trace('Raw winrepo pkg_info for {0} is {1}'.format(name, pkg_info)) log.trace('Raw winrepo pkg_info for {0} is {1}'.format(name, pkg_info))
# latest_available can be version number or 'latest' or even 'Not Found'
latest_available = _get_latest_pkg_version(pkg_info) latest_available = _get_latest_pkg_version(pkg_info)
if latest_available: if latest_available:
log.debug('Latest available version ' log.debug('Latest available version '
@ -146,9 +155,9 @@ def latest_version(*names, **kwargs):
# check, whether latest available version # check, whether latest available version
# is newer than latest installed version # is newer than latest installed version
if salt.utils.compare_versions(ver1=str(latest_available), if compare_versions(ver1=str(latest_available),
oper='>', oper='>',
ver2=str(latest_installed)): ver2=str(latest_installed)):
log.debug('Upgrade of {0} from {1} to {2} ' log.debug('Upgrade of {0} from {1} to {2} '
'is available'.format(name, 'is available'.format(name,
latest_installed, latest_installed,
@ -187,10 +196,9 @@ def upgrade_available(name, **kwargs):
# same default as latest_version # same default as latest_version
refresh = salt.utils.is_true(kwargs.get('refresh', True)) refresh = salt.utils.is_true(kwargs.get('refresh', True))
current = version(name, saltenv=saltenv, refresh=refresh).get(name) # if latest_version returns blank, the latest version is already installed or
latest = latest_version(name, saltenv=saltenv, refresh=False) # there is no package definition. This is a salt standard which could be improved.
return latest_version(name, saltenv=saltenv, refresh=refresh) != ''
return compare_versions(latest, '>', current)
def list_upgrades(refresh=True, **kwargs): def list_upgrades(refresh=True, **kwargs):
@ -221,9 +229,13 @@ def list_upgrades(refresh=True, **kwargs):
pkgs = {} pkgs = {}
for pkg in installed_pkgs: for pkg in installed_pkgs:
if pkg in available_pkgs: if pkg in available_pkgs:
# latest_version() will be blank if the latest version is installed.
# or the package name is wrong. Given we check available_pkgs, this
# should not be the case of wrong package name.
# Note: latest_version() is an expensive way to do this as it
# calls list_pkgs each time.
latest_ver = latest_version(pkg, refresh=False, saltenv=saltenv) latest_ver = latest_version(pkg, refresh=False, saltenv=saltenv)
install_ver = installed_pkgs[pkg] if latest_ver:
if compare_versions(latest_ver, '>', install_ver):
pkgs[pkg] = latest_ver pkgs[pkg] = latest_ver
return pkgs return pkgs
@ -240,7 +252,7 @@ def list_available(*names, **kwargs):
saltenv (str): The salt environment to use. Default ``base``. saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``True``. refresh (bool): Refresh package metadata. Default ``False``.
return_dict_always (bool): return_dict_always (bool):
Default ``False`` dict when a single package name is queried. Default ``False`` dict when a single package name is queried.
@ -263,11 +275,10 @@ def list_available(*names, **kwargs):
return '' return ''
saltenv = kwargs.get('saltenv', 'base') saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.is_true(kwargs.get('refresh', True)) refresh = salt.utils.is_true(kwargs.get('refresh', False))
_refresh_db_conditional(saltenv, force=refresh)
return_dict_always = \ return_dict_always = \
salt.utils.is_true(kwargs.get('return_dict_always', False)) salt.utils.is_true(kwargs.get('return_dict_always', False))
_refresh_db_conditional(saltenv, force=refresh)
if len(names) == 1 and not return_dict_always: if len(names) == 1 and not return_dict_always:
pkginfo = _get_package_info(names[0], saltenv=saltenv) pkginfo = _get_package_info(names[0], saltenv=saltenv)
if not pkginfo: if not pkginfo:
@ -292,7 +303,9 @@ def list_available(*names, **kwargs):
def version(*names, **kwargs): def version(*names, **kwargs):
''' '''
Returns a version if the package is installed, else returns an empty string Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
Args: Args:
name (str): One or more package names name (str): One or more package names
@ -302,10 +315,11 @@ def version(*names, **kwargs):
refresh (bool): Refresh package metadata. Default ``False``. refresh (bool): Refresh package metadata. Default ``False``.
Returns: Returns:
str: version string when a single package is specified.
dict: The package name(s) with the installed versions. dict: The package name(s) with the installed versions.
.. code-block:: cfg .. code-block:: cfg
{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]} {'<package name>': ['<version>', '<version>', ]}
CLI Example: CLI Example:
@ -314,19 +328,25 @@ def version(*names, **kwargs):
salt '*' pkg.version <package name> salt '*' pkg.version <package name>
salt '*' pkg.version <package name01> <package name02> salt '*' pkg.version <package name01> <package name02>
'''
saltenv = kwargs.get('saltenv', 'base')
installed_pkgs = list_pkgs(refresh=kwargs.get('refresh', False)) '''
available_pkgs = get_repo_data(saltenv).get('repo') # Standard is return empty string even if not a valid name
# TODO: Look at returning an error across all platforms with
# CommandExecutionError(msg,info={'errors': errors })
# available_pkgs = get_repo_data(saltenv).get('repo')
# for name in names:
# if name in available_pkgs:
# ret[name] = installed_pkgs.get(name, '')
saltenv = kwargs.get('saltenv', 'base')
installed_pkgs = list_pkgs(saltenv=saltenv, refresh=kwargs.get('refresh', False))
if len(names) == 1:
return installed_pkgs.get(names[0], '')
ret = {} ret = {}
for name in names: for name in names:
if name in available_pkgs: ret[name] = installed_pkgs.get(name, '')
ret[name] = installed_pkgs.get(name, '')
else:
ret[name] = 'not available'
return ret return ret
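After this change `version()` follows the documented convention: a bare string for a single name, a name/version mapping when several names are given, and an empty string for anything not installed. Reduced to the essentials (with `installed_pkgs` standing in for the `list_pkgs()` result):

    def version(installed_pkgs, *names):
        if len(names) == 1:
            return installed_pkgs.get(names[0], '')   # '' if not installed
        return {name: installed_pkgs.get(name, '') for name in names}

    version({'firefox': ['57.0']}, 'firefox')          # ['57.0']
    version({'firefox': ['57.0']}, 'firefox', '7zip')  # {'firefox': ['57.0'], '7zip': ''}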
@ -423,7 +443,7 @@ def _get_reg_software():
'(value not set)', '(value not set)',
'', '',
None] None]
#encoding = locale.getpreferredencoding()
reg_software = {} reg_software = {}
hive = 'HKLM' hive = 'HKLM'
@ -461,7 +481,7 @@ def _get_reg_software():
def _refresh_db_conditional(saltenv, **kwargs): def _refresh_db_conditional(saltenv, **kwargs):
''' '''
Internal use only in this module, has a different set of defaults and Internal use only in this module, has a different set of defaults and
returns True or False. And supports check the age of the existing returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with generated metadata db, as well as ensure metadata db exists to begin with
Args: Args:
@ -475,8 +495,7 @@ def _refresh_db_conditional(saltenv, **kwargs):
failhard (bool): failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary process.
containing the full results will be returned.
Returns: Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue bool: True Fetched or Cache uptodate, False to indicate an issue
@ -641,33 +660,10 @@ def _get_repo_details(saltenv):
# Do some safety checks on the repo_path as its contents can be removed, # Do some safety checks on the repo_path as its contents can be removed,
# this includes check for bad coding # this includes check for bad coding
system_root = os.environ.get('SystemRoot', r'C:\Windows') system_root = os.environ.get('SystemRoot', r'C:\Windows')
deny_paths = ( if not salt.utils.path.safe_path(
r'[a-z]\:\\$', # C:\, D:\, etc path=local_dest,
r'\\$', # \ allow_path='\\'.join([system_root, 'TEMP'])):
re.escape(system_root) # C:\Windows
)
# Since the above checks anything in C:\Windows, there are some
# directories we may want to make exceptions for
allow_paths = (
re.escape('\\'.join([system_root, 'TEMP'])), # C:\Windows\TEMP
)
# Check the local_dest to make sure it's not one of the bad paths
good_path = True
for d_path in deny_paths:
if re.match(d_path, local_dest, flags=re.IGNORECASE) is not None:
# Found deny path
good_path = False
# If local_dest is one of the bad paths, check for exceptions
if not good_path:
for a_path in allow_paths:
if re.match(a_path, local_dest, flags=re.IGNORECASE) is not None:
# Found exception
good_path = True
if not good_path:
raise CommandExecutionError( raise CommandExecutionError(
'Attempting to delete files from a possibly unsafe location: ' 'Attempting to delete files from a possibly unsafe location: '
'{0}'.format(local_dest) '{0}'.format(local_dest)
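The deleted block is now a single call to `salt.utils.path.safe_path`; its intent, distilled from the removed regexes into a standalone helper (illustrative name):

    import os
    import re

    def is_safe_repo_dest(local_dest):
        system_root = os.environ.get('SystemRoot', r'C:\Windows')
        deny = (r'[a-z]\:\\$',                # a bare drive root such as C:\
                r'\\$',                       # a bare backslash
                re.escape(system_root))       # anything under C:\Windows ...
        allow = (re.escape('\\'.join([system_root, 'TEMP'])),)  # ... except TEMP
        if any(re.match(p, local_dest, flags=re.IGNORECASE) for p in deny):
            return any(re.match(p, local_dest, flags=re.IGNORECASE) for p in allow)
        return True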
@ -717,8 +713,8 @@ def genrepo(**kwargs):
verbose (bool): verbose (bool):
Return verbose data structure which includes 'success_list', a list Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default of all sls files and the package names contained within.
'False' Default ``False``.
failhard (bool): failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed If ``True``, an error will be raised if any repo SLS files failed
@ -761,11 +757,13 @@ def genrepo(**kwargs):
successful_verbose successful_verbose
) )
serial = salt.payload.Serial(__opts__) serial = salt.payload.Serial(__opts__)
# TODO: 2016.11 has PY2 mode as 'w+b' develop has 'w+' ? PY3 is 'wb+'
# also the reading of this is 'rb' in get_repo_data()
mode = 'w+' if six.PY2 else 'wb+' mode = 'w+' if six.PY2 else 'wb+'
with salt.utils.fopen(repo_details.winrepo_file, mode) as repo_cache: with salt.utils.fopen(repo_details.winrepo_file, mode) as repo_cache:
repo_cache.write(serial.dumps(ret)) repo_cache.write(serial.dumps(ret))
# save reading it back again. ! this breaks due to utf8 issues # For some reason we can not save ret into __context__['winrepo.data'] as this breaks due to utf8 issues
#__context__['winrepo.data'] = ret
successful_count = len(successful_verbose) successful_count = len(successful_verbose)
error_count = len(ret['errors']) error_count = len(ret['errors'])
if verbose: if verbose:
@ -800,7 +798,7 @@ def genrepo(**kwargs):
return results return results
def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose): def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
renderers = salt.loader.render(__opts__, __salt__) renderers = salt.loader.render(__opts__, __salt__)
def _failed_compile(msg): def _failed_compile(msg):
@ -810,7 +808,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
try: try:
config = salt.template.compile_template( config = salt.template.compile_template(
file, filename,
renderers, renderers,
__opts__['renderer'], __opts__['renderer'],
__opts__.get('renderer_blacklist', ''), __opts__.get('renderer_blacklist', ''),
@ -825,7 +823,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if config: if config:
revmap = {} revmap = {}
errors = [] errors = []
pkgname_ok_list = []
for pkgname, versions in six.iteritems(config): for pkgname, versions in six.iteritems(config):
if pkgname in ret['repo']: if pkgname in ret['repo']:
log.error( log.error(
@ -834,12 +831,12 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
) )
errors.append('package \'{0}\' already defined'.format(pkgname)) errors.append('package \'{0}\' already defined'.format(pkgname))
break break
for version, repodata in six.iteritems(versions): for version_str, repodata in six.iteritems(versions):
# Ensure version is a string/unicode # Ensure version is a string/unicode
if not isinstance(version, six.string_types): if not isinstance(version_str, six.string_types):
msg = ( msg = (
'package \'{0}\'{{0}}, version number {1} ' 'package \'{0}\'{{0}}, version number {1} '
'is not a string'.format(pkgname, version) 'is not a string'.format(pkgname, version_str)
) )
log.error( log.error(
msg.format(' within \'{0}\''.format(short_path_name)) msg.format(' within \'{0}\''.format(short_path_name))
@ -851,7 +848,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
msg = ( msg = (
'package \'{0}\'{{0}}, repo data for ' 'package \'{0}\'{{0}}, repo data for '
'version number {1} is not defined as a dictionary ' 'version number {1} is not defined as a dictionary '
.format(pkgname, version) .format(pkgname, version_str)
) )
log.error( log.error(
msg.format(' within \'{0}\''.format(short_path_name)) msg.format(' within \'{0}\''.format(short_path_name))
@ -862,8 +859,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if errors: if errors:
ret.setdefault('errors', {})[short_path_name] = errors ret.setdefault('errors', {})[short_path_name] = errors
else: else:
if pkgname not in pkgname_ok_list:
pkgname_ok_list.append(pkgname)
ret.setdefault('repo', {}).update(config) ret.setdefault('repo', {}).update(config)
ret.setdefault('name_map', {}).update(revmap) ret.setdefault('name_map', {}).update(revmap)
successful_verbose[short_path_name] = config.keys() successful_verbose[short_path_name] = config.keys()
@ -938,7 +933,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
to install. (no spaces after the commas) to install. (no spaces after the commas)
refresh (bool): refresh (bool):
Boolean value representing whether or not to refresh the winrepo db Boolean value representing whether or not to refresh the winrepo db.
Default ``False``.
pkgs (list): pkgs (list):
A list of packages to install from a software repository. All A list of packages to install from a software repository. All
@ -1073,7 +1069,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
''' '''
ret = {} ret = {}
saltenv = kwargs.pop('saltenv', 'base') saltenv = kwargs.pop('saltenv', 'base')
refresh = salt.utils.is_true(refresh) refresh = salt.utils.is_true(refresh)
# no need to call _refresh_db_conditional as list_pkgs will do it # no need to call _refresh_db_conditional as list_pkgs will do it
@ -1094,7 +1089,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
for pkg in pkg_params: for pkg in pkg_params:
pkg_params[pkg] = {'version': pkg_params[pkg]} pkg_params[pkg] = {'version': pkg_params[pkg]}
if pkg_params is None or len(pkg_params) == 0: if not pkg_params:
log.error('No package definition found') log.error('No package definition found')
return {} return {}
@ -1136,11 +1131,12 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
version_num = str(version_num) version_num = str(version_num)
if not version_num: if not version_num:
# following can be version number or latest
version_num = _get_latest_pkg_version(pkginfo) version_num = _get_latest_pkg_version(pkginfo)
# Check if the version is already installed # Check if the version is already installed
if version_num in old.get(pkg_name, '').split(',') \ if version_num in old.get(pkg_name, '').split(',') \
or (old.get(pkg_name) == 'Not Found'): or (old.get(pkg_name, '') == 'Not Found'):
# Desired version number already installed # Desired version number already installed
ret[pkg_name] = {'current': version_num} ret[pkg_name] = {'current': version_num}
continue continue
@ -1259,6 +1255,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
log.debug('Source hash matches package hash.') log.debug('Source hash matches package hash.')
# Get install flags # Get install flags
install_flags = pkginfo[version_num].get('install_flags', '') install_flags = pkginfo[version_num].get('install_flags', '')
if options and options.get('extra_install_flags'): if options and options.get('extra_install_flags'):
install_flags = '{0} {1}'.format( install_flags = '{0} {1}'.format(
@ -1266,32 +1263,32 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
options.get('extra_install_flags', '') options.get('extra_install_flags', '')
) )
#Compute msiexec string # Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))
# Build cmd and arguments # Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler # cmd and arguments must be separated for use with the task scheduler
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
if use_msiexec: if use_msiexec:
cmd = msiexec arguments = '"{0}" /I "{1}"'.format(msiexec, cached_pkg)
arguments = ['/i', cached_pkg]
if pkginfo[version_num].get('allusers', True): if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"') arguments = '{0} ALLUSERS=1'.format(arguments)
arguments.extend(salt.utils.shlex_split(install_flags, posix=False))
else: else:
cmd = cached_pkg arguments = '"{0}"'.format(cached_pkg)
arguments = salt.utils.shlex_split(install_flags, posix=False)
if install_flags:
arguments = '{0} {1}'.format(arguments, install_flags)
# Install the software # Install the software
# Check Use Scheduler Option # Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False): if pkginfo[version_num].get('use_scheduler', False):
# Create Scheduled Task # Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software', __salt__['task.create_task'](name='update-salt-software',
user_name='System', user_name='System',
force=True, force=True,
action_type='Execute', action_type='Execute',
cmd=cmd, cmd=cmd_shell,
arguments=' '.join(arguments), arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path, start_in=cache_path,
trigger_type='Once', trigger_type='Once',
start_date='1975-01-01', start_date='1975-01-01',
@ -1333,14 +1330,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
log.error('Scheduled Task failed to run') log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'} ret[pkg_name] = {'install status': 'failed'}
else: else:
# Combine cmd and arguments
cmd = [cmd]
cmd.extend(arguments)
# Launch the command # Launch the command
result = __salt__['cmd.run_all'](cmd, result = __salt__['cmd.run_all']('"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
cache_path, cache_path,
output_loglevel='trace',
python_shell=False, python_shell=False,
redirect_stderr=True) redirect_stderr=True)
if not result['retcode']: if not result['retcode']:
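Taken together, the install branch now builds a single command string and hands it to the shell. A rough self-contained sketch of that assembly follows; the package path and flags are invented for the example, and the helper is not part of Salt:

.. code-block:: python

    import os

    def build_install_command(cached_pkg, install_flags='', use_msiexec=False,
                              msiexec='msiexec.exe', allusers=True):
        '''Mirror of the command assembly above, for illustration only.
        Everything is handed to ``cmd.exe /s /c`` so quoting and any
        environment variables in the path are resolved by the shell.'''
        cmd_shell = os.getenv(
            'ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
        if use_msiexec:
            arguments = '"{0}" /I "{1}"'.format(msiexec, cached_pkg)
            if allusers:
                arguments = '{0} ALLUSERS=1'.format(arguments)
        else:
            arguments = '"{0}"'.format(cached_pkg)
        if install_flags:
            arguments = '{0} {1}'.format(arguments, install_flags)
        return '"{0}" /s /c "{1}"'.format(cmd_shell, arguments)

    # A quiet MSI install for all users
    print(build_install_command(r'C:\cache\firefox.msi',
                                install_flags='/qn /norestart',
                                use_msiexec=True))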
@ -1419,14 +1412,17 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
.. versionadded:: 0.16.0 .. versionadded:: 0.16.0
Args: Args:
name (str): The name(s) of the package(s) to be uninstalled. Can be a name (str):
single package or a comma delimted list of packages, no spaces. The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
version (str): version (str):
The version of the package to be uninstalled. If this option is The version of the package to be uninstalled. If this option is
used to uninstall multiple packages, then this version will be used to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled. latest version will be uninstalled.
pkgs (list): pkgs (list):
A list of packages to delete. Must be passed as a python list. The A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed. ``name`` parameter will be ignored if this option is passed.
@ -1516,7 +1512,6 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
removal_targets.append(version_num) removal_targets.append(version_num)
for target in removal_targets: for target in removal_targets:
# Get the uninstaller # Get the uninstaller
uninstaller = pkginfo[target].get('uninstaller', '') uninstaller = pkginfo[target].get('uninstaller', '')
cache_dir = pkginfo[target].get('cache_dir', False) cache_dir = pkginfo[target].get('cache_dir', False)
@ -1541,6 +1536,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# If true, the entire directory will be cached instead of the # If true, the entire directory will be cached instead of the
# individual file. This is useful for installations that are not # individual file. This is useful for installations that are not
# single files # single files
if cache_dir and uninstaller.startswith('salt:'): if cache_dir and uninstaller.startswith('salt:'):
path, _ = os.path.split(uninstaller) path, _ = os.path.split(uninstaller)
__salt__['cp.cache_dir'](path, __salt__['cp.cache_dir'](path,
@ -1563,6 +1559,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# Compare the hash of the cached installer to the source only if # Compare the hash of the cached installer to the source only if
# the file is hosted on salt: # the file is hosted on salt:
# TODO cp.cache_file does cache and hash checking? So why do it again?
if uninstaller.startswith('salt:'): if uninstaller.startswith('salt:'):
if __salt__['cp.hash_file'](uninstaller, saltenv) != \ if __salt__['cp.hash_file'](uninstaller, saltenv) != \
__salt__['cp.hash_file'](cached_pkg): __salt__['cp.hash_file'](cached_pkg):
@ -1580,14 +1577,13 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
else: else:
# Run the uninstaller directly # Run the uninstaller directly
# (not hosted on salt:, https:, etc.) # (not hosted on salt:, https:, etc.)
cached_pkg = uninstaller cached_pkg = os.path.expandvars(uninstaller)
# Fix non-windows slashes # Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\') cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg) cache_path, _ = os.path.split(cached_pkg)
# Get parameters for cmd # os.path.expandvars is not required as we run everything through cmd.exe /s /c
expanded_cached_pkg = str(os.path.expandvars(cached_pkg))
# Get uninstall flags # Get uninstall flags
uninstall_flags = pkginfo[target].get('uninstall_flags', '') uninstall_flags = pkginfo[target].get('uninstall_flags', '')
@ -1596,30 +1592,32 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
uninstall_flags = '{0} {1}'.format( uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', '')) uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
#Compute msiexec string # Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False)) use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
# Build cmd and arguments # Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler # cmd and arguments must be separated for use with the task scheduler
if use_msiexec: if use_msiexec:
cmd = msiexec # Check if uninstaller is set to {guid}, if not we assume it's a remote msi file.
arguments = ['/x'] # which has already been downloaded.
arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False)) arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
else: else:
cmd = expanded_cached_pkg arguments = '"{0}"'.format(cached_pkg)
arguments = salt.utils.shlex_split(uninstall_flags, posix=False)
if uninstall_flags:
arguments = '{0} {1}'.format(arguments, uninstall_flags)
# Uninstall the software # Uninstall the software
# Check Use Scheduler Option # Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False): if pkginfo[target].get('use_scheduler', False):
# Create Scheduled Task # Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software', __salt__['task.create_task'](name='update-salt-software',
user_name='System', user_name='System',
force=True, force=True,
action_type='Execute', action_type='Execute',
cmd=cmd, cmd=cmd_shell,
arguments=' '.join(arguments), arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path, start_in=cache_path,
trigger_type='Once', trigger_type='Once',
start_date='1975-01-01', start_date='1975-01-01',
@ -1632,13 +1630,10 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
log.error('Scheduled Task failed to run') log.error('Scheduled Task failed to run')
ret[pkgname] = {'uninstall status': 'failed'} ret[pkgname] = {'uninstall status': 'failed'}
else: else:
# Build the install command
cmd = [cmd]
cmd.extend(arguments)
# Launch the command # Launch the command
result = __salt__['cmd.run_all']( result = __salt__['cmd.run_all'](
cmd, '"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
output_loglevel='trace',
python_shell=False, python_shell=False,
redirect_stderr=True) redirect_stderr=True)
if not result['retcode']: if not result['retcode']:
@ -1684,11 +1679,13 @@ def purge(name=None, pkgs=None, version=None, **kwargs):
name (str): The name of the package to be deleted. name (str): The name of the package to be deleted.
version (str): The version of the package to be deleted. If this option version (str):
is used in combination with the ``pkgs`` option below, then this The version of the package to be deleted. If this option is
used in combination with the ``pkgs`` option below, then this
version will be applied to all targeted packages. version will be applied to all targeted packages.
pkgs (list): A list of packages to delete. Must be passed as a python pkgs (list):
A list of packages to delete. Must be passed as a python
list. The ``name`` parameter will be ignored if this option is list. The ``name`` parameter will be ignored if this option is
passed. passed.
@ -1822,4 +1819,20 @@ def compare_versions(ver1='', oper='==', ver2=''):
salt '*' pkg.compare_versions 1.2 >= 1.3 salt '*' pkg.compare_versions 1.2 >= 1.3
''' '''
return salt.utils.compare_versions(ver1, oper, ver2) if not ver1:
raise SaltInvocationError('compare_version, ver1 is blank')
if not ver2:
raise SaltInvocationError('compare_version, ver2 is blank')
# Support version being the special meaning of 'latest'
if ver1 == 'latest':
ver1 = str(sys.maxsize)
if ver2 == 'latest':
ver2 = str(sys.maxsize)
# Support version being the special meaning of 'Not Found'
if ver1 == 'Not Found':
ver1 = '0.0.0.0.0'
if ver2 == 'Not Found':
ver2 = '0.0.0.0.0'
return salt.utils.compare_versions(ver1, oper, ver2, ignore_epoch=True)
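To see why the 'latest' and 'Not Found' substitutions order correctly, here is a small self-contained sketch using distutils' LooseVersion in place of salt.utils.compare_versions (illustrative only):

.. code-block:: python

    import sys
    from distutils.version import LooseVersion

    def normalize(ver):
        # 'latest' should always win a comparison, 'Not Found' should always lose
        if ver == 'latest':
            return str(sys.maxsize)
        if ver == 'Not Found':
            return '0.0.0.0.0'
        return ver

    def compare(ver1, oper, ver2):
        v1, v2 = LooseVersion(normalize(ver1)), LooseVersion(normalize(ver2))
        return {'==': v1 == v2, '!=': v1 != v2,
                '<': v1 < v2, '<=': v1 <= v2,
                '>': v1 > v2, '>=': v1 >= v2}[oper]

    print(compare('latest', '>=', '58.0.2'))   # True
    print(compare('Not Found', '<', '1.0'))    # True
    print(compare('1.2', '>=', '1.3'))         # False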
@ -444,8 +444,9 @@ def stop(name):
try: try:
win32serviceutil.StopService(name) win32serviceutil.StopService(name)
except pywintypes.error as exc: except pywintypes.error as exc:
raise CommandExecutionError( if exc[0] != 1062:
'Failed To Stop {0}: {1}'.format(name, exc[2])) raise CommandExecutionError(
'Failed To Stop {0}: {1}'.format(name, exc[2]))
attempts = 0 attempts = 0
while info(name)['Status'] in ['Running', 'Stop Pending'] \ while info(name)['Status'] in ['Running', 'Stop Pending'] \
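The change above ignores only Windows error 1062 (the service has not been started) and re-raises anything else. A platform-independent sketch of that pattern, using a stand-in exception since pywintypes is Windows-only:

.. code-block:: python

    class WinApiError(Exception):
        '''Stand-in for pywintypes.error; args are (code, function, message).'''

    def stop_service(name, stop_call):
        try:
            stop_call(name)
        except WinApiError as exc:
            if exc.args[0] != 1062:  # 1062: the service has not been started
                raise RuntimeError(
                    'Failed To Stop {0}: {1}'.format(name, exc.args[2]))
            # Already stopped: treat it as success and fall through

    def fake_stop(name):
        raise WinApiError(1062, 'StopService', 'The service has not been started.')

    stop_service('spooler', fake_stop)  # no exception is raised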
@ -6,8 +6,7 @@ or for problem solving if your minion is having problems.
.. versionadded:: 0.12.0 .. versionadded:: 0.12.0
:depends: - pythoncom :depends: - wmi
- wmi
''' '''
# Import Python Libs # Import Python Libs
@ -92,28 +92,31 @@ def halt(timeout=5, in_seconds=False):
Halt a running system. Halt a running system.
Args: Args:
timeout (int): Number of seconds before halting the system. Default is
5 seconds.
in_seconds (bool): Whether to treat timeout as seconds or minutes. timeout (int):
Number of seconds before halting the system. Default is 5 seconds.
in_seconds (bool):
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0 .. versionadded:: 2015.8.0
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
salt '*' system.halt 5 salt '*' system.halt 5 True
''' '''
return shutdown(timeout=timeout, in_seconds=in_seconds) return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel): # pylint: disable=unused-argument def init(runlevel): # pylint: disable=unused-argument
''' '''
Change the system runlevel on sysV compatible systems Change the system runlevel on sysV compatible systems. Not applicable to
Windows
CLI Example: CLI Example:
@ -136,14 +139,18 @@ def poweroff(timeout=5, in_seconds=False):
Power off a running system. Power off a running system.
Args: Args:
timeout (int): Number of seconds before powering off the system. Default
is 5 seconds.
in_seconds (bool): Whether to treat timeout as seconds or minutes. timeout (int):
Number of seconds before powering off the system. Default is 5
seconds.
in_seconds (bool):
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0 .. versionadded:: 2015.8.0
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -160,29 +167,35 @@ def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disabl
Reboot a running system. Reboot a running system.
Args: Args:
timeout (int): Number of minutes/seconds before rebooting the system.
Minutes vs seconds depends on the value of ``in_seconds``. Default timeout (int):
The number of minutes/seconds before rebooting the system. Use of
minutes or seconds depends on the value of ``in_seconds``. Default
is 5 minutes. is 5 minutes.
in_seconds (bool): Whether to treat timeout as seconds or minutes. in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0 .. versionadded:: 2015.8.0
wait_for_reboot (bool): Sleeps for timeout + 30 seconds after reboot has wait_for_reboot (bool)
been initiated. This may be useful for use in a highstate if a ``True`` will sleep for timeout + 30 seconds after reboot has been
reboot should be performed and the return data of the highstate is initiated. This is useful for use in a highstate. For example, you
not required. If return data is required, consider using the reboot may have states that you want to apply only after the reboot.
state instead of this module. Default is ``False``.
.. versionadded:: 2015.8.0 .. versionadded:: 2015.8.0
only_on_pending_reboot (bool): If this is set to True, then the reboot only_on_pending_reboot (bool):
will only proceed if the system reports a pending reboot. To If this is set to ``True``, then the reboot will only proceed
optionally reboot in a highstate, consider using the reboot state if the system reports a pending reboot. Setting this parameter to
instead of this module. ``True`` could be useful when calling this function from a final
housekeeping state intended to be executed at the end of a state run
(using *order: last*). Default is ``False``.
Returns: Returns:
bool: True if successful (a reboot will occur), otherwise False bool: ``True`` if successful (a reboot will occur), otherwise ``False``
CLI Example: CLI Example:
@ -191,20 +204,16 @@ def reboot(timeout=5, in_seconds=False, wait_for_reboot=False, # pylint: disabl
salt '*' system.reboot 5 salt '*' system.reboot 5
salt '*' system.reboot 5 True salt '*' system.reboot 5 True
As example of invoking this function from within a final housekeeping state Invoking this function from a final housekeeping state:
is as follows:
Example:
.. code-block:: yaml .. code-block:: yaml
final housekeeping: final_housekeeping:
module.run: module.run:
- name: system.reboot - name: system.reboot
- only_on_pending_reboot: True - only_on_pending_reboot: True
- order: last - order: last
''' '''
ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds, ret = shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds,
only_on_pending_reboot=only_on_pending_reboot) only_on_pending_reboot=only_on_pending_reboot)
@ -221,50 +230,63 @@ def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint:
Shutdown a running system. Shutdown a running system.
Args: Args:
message (str): A message to display to the user before shutting down.
timeout (int): The length of time that the shutdown dialog box should be message (str):
displayed, in seconds. While this dialog box is displayed, the The message to display to the user before shutting down.
shutdown can be stopped by the shutdown_abort function.
timeout (int):
The length of time (in seconds) that the shutdown dialog box should
be displayed. While this dialog box is displayed, the shutdown can
be aborted using the ``system.shutdown_abort`` function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box If timeout is not zero, InitiateSystemShutdown displays a dialog box
on the specified computer. The dialog box displays the name of the on the specified computer. The dialog box displays the name of the
user who called the function, displays the message specified by the user who called the function, the message specified by the lpMessage
lpMessage parameter, and prompts the user to log off. The dialog box parameter, and prompts the user to log off. The dialog box beeps
beeps when it is created and remains on top of other windows in the when it is created and remains on top of other windows (system
system. The dialog box can be moved but not closed. A timer counts modal). The dialog box can be moved but not closed. A timer counts
down the remaining time before a forced shutdown. down the remaining time before the shutdown occurs.
If timeout is zero, the computer shuts down without displaying the If timeout is zero, the computer shuts down immediately without
dialog box, and the shutdown cannot be stopped by shutdown_abort. displaying the dialog box and cannot be stopped by
``system.shutdown_abort``.
Default is 5 minutes Default is 5 minutes
in_seconds (bool): Whether to treat timeout as seconds or minutes. in_seconds (bool):
``True`` will cause the ``timeout`` parameter to be in seconds.
``False`` will be in minutes. Default is ``False``.
.. versionadded:: 2015.8.0 .. versionadded:: 2015.8.0
force_close (bool): True to force close all open applications. False force_close (bool):
displays a dialog box instructing the user to close the ``True`` will force close all open applications. ``False`` will
applications. display a dialog box instructing the user to close open
applications. Default is ``True``.
reboot (bool): True restarts the computer immediately after shutdown. reboot (bool):
False caches to disk and safely powers down the system. ``True`` restarts the computer immediately after shutdown. ``False``
powers down the system. Default is ``False``.
only_on_pending_reboot (bool): If this is set to True, then the shutdown only_on_pending_reboot (bool): If this is set to True, then the shutdown
will only proceed if the system reports a pending reboot. To will only proceed if the system reports a pending reboot. To
optionally shutdown in a highstate, consider using the shutdown optionally shutdown in a highstate, consider using the shutdown
state instead of this module. state instead of this module.
only_on_pending_reboot (bool):
If ``True`` the shutdown will only proceed if there is a reboot
pending. ``False`` will shutdown the system. Default is ``False``.
Returns: Returns:
bool: True if successful (a shutdown or reboot will occur), otherwise bool:
False ``True`` if successful (a shutdown or reboot will occur), otherwise
``False``
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
salt '*' system.shutdown 5 salt '*' system.shutdown "System will shutdown in 5 minutes"
''' '''
if six.PY2: if six.PY2:
message = _to_unicode(message) message = _to_unicode(message)
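Since the underlying Windows shutdown call always takes seconds, the in_seconds flag described above boils down to a unit conversion, roughly as follows (a sketch, not Salt's internal helper):

.. code-block:: python

    def normalize_timeout(timeout, in_seconds=False):
        '''Treat ``timeout`` as seconds when ``in_seconds`` is True,
        otherwise as minutes (the documented default).'''
        return timeout if in_seconds else timeout * 60

    print(normalize_timeout(5))                   # 300 seconds, i.e. 5 minutes
    print(normalize_timeout(5, in_seconds=True))  # 5 seconds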
@ -294,7 +316,7 @@ def shutdown_hard():
Shutdown a running system with no timeout or warning. Shutdown a running system with no timeout or warning.
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -312,7 +334,7 @@ def shutdown_abort():
aborted. aborted.
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -337,7 +359,7 @@ def lock():
Lock the workstation. Lock the workstation.
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -353,12 +375,14 @@ def set_computer_name(name):
Set the Windows computer name Set the Windows computer name
Args: Args:
name (str): The new name to give the computer. Requires a reboot to take
effect. name (str):
The new name to give the computer. Requires a reboot to take effect.
Returns: Returns:
dict: Returns a dictionary containing the old and new names if dict:
successful. False if not. Returns a dictionary containing the old and new names if successful.
``False`` if not.
CLI Example: CLI Example:
@ -389,7 +413,9 @@ def get_pending_computer_name():
error message will be logged to the minion log. error message will be logged to the minion log.
Returns: Returns:
str: The pending name if restart is pending, otherwise returns None. str:
Returns the pending name if pending restart. Returns ``None`` if not
pending restart.
CLI Example: CLI Example:
@ -412,7 +438,7 @@ def get_computer_name():
Get the Windows computer name Get the Windows computer name
Returns: Returns:
str: Returns the computer name if found. Otherwise returns False str: Returns the computer name if found. Otherwise returns ``False``.
CLI Example: CLI Example:
@ -429,10 +455,12 @@ def set_computer_desc(desc=None):
Set the Windows computer description Set the Windows computer description
Args: Args:
desc (str): The computer description
desc (str):
The computer description
Returns: Returns:
bool: True if successful, otherwise False str: Description if successful, otherwise ``False``
CLI Example: CLI Example:
@ -475,8 +503,8 @@ def get_system_info():
Get system information. Get system information.
Returns: Returns:
dict: Returns a Dictionary containing information about the system to dict: Dictionary containing information about the system to include
include name, description, version, etc... name, description, version, etc...
CLI Example: CLI Example:
@ -529,7 +557,8 @@ def get_computer_desc():
Get the Windows computer description Get the Windows computer description
Returns: Returns:
str: The computer description if found, otherwise False str: Returns the computer description if found. Otherwise returns
``False``.
CLI Example: CLI Example:
@ -546,12 +575,12 @@ get_computer_description = salt.utils.alias_function(get_computer_desc, 'get_com
def get_hostname(): def get_hostname():
''' '''
.. versionadded:: 2016.3.0
Get the hostname of the windows minion Get the hostname of the windows minion
.. versionadded:: 2016.3.0
Returns: Returns:
str: The hostname of the windows minion str: Returns the hostname of the windows minion
CLI Example: CLI Example:
@ -566,16 +595,16 @@ def get_hostname():
def set_hostname(hostname): def set_hostname(hostname):
''' '''
.. versionadded:: 2016.3.0 Set the hostname of the windows minion, requires a restart before this will
be updated.
Set the hostname of the windows minion, requires a restart before this .. versionadded:: 2016.3.0
will be updated.
Args: Args:
hostname (str): The hostname to set hostname (str): The hostname to set
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -597,37 +626,41 @@ def join_domain(domain,
account_exists=False, account_exists=False,
restart=False): restart=False):
''' '''
Join a computer to an Active Directory domain. Requires reboot. Join a computer to an Active Directory domain. Requires a reboot.
Args: Args:
domain (str): The domain to which the computer should be joined, e.g.
domain (str):
The domain to which the computer should be joined, e.g.
``example.com`` ``example.com``
username (str): Username of an account which is authorized to join username (str):
computers to the specified domain. Need to be either fully qualified Username of an account which is authorized to join computers to the
like ``user@domain.tld`` or simply ``user`` specified domain. Needs to be either fully qualified like
``user@domain.tld`` or simply ``user``
password (str): Password of the specified user password (str):
Password of the specified user
account_ou (str): The DN of the OU below which the account for this account_ou (str):
computer should be created when joining the domain, e.g. The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com`` ``ou=computers,ou=departm_432,dc=my-company,dc=com``
account_exists (bool): If set to ``True`` the computer will only join account_exists (bool):
the domain if the account already exists. If set to ``False`` the If set to ``True`` the computer will only join the domain if the
computer account will be created if it does not exist, otherwise it account already exists. If set to ``False`` the computer account
will use the existing account. Default is False. will be created if it does not exist, otherwise it will use the
existing account. Default is ``False``
restart (bool): Restarts the computer after a successful join restart (bool):
``True`` will restart the computer after a successful join. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7 .. versionadded:: 2015.8.2/2015.5.7
Returns: Returns:
dict: Dictionary if successful dict: Returns a dictionary if successful, otherwise ``False``
Raises:
CommandExecutionError: Raises an error if _join_domain returns anything
other than 0
CLI Example: CLI Example:
@ -741,33 +774,41 @@ def unjoin_domain(username=None,
disable=False, disable=False,
restart=False): restart=False):
r''' r'''
Unjoin a computer from an Active Directory Domain. Requires restart. Unjoin a computer from an Active Directory Domain. Requires a restart.
Args: Args:
username (str): Username of an account which is authorized to manage
computer accounts on the domain. Need to be fully qualified like
``user@domain.tld`` or ``domain.tld\user``. If domain not specified,
the passed domain will be used. If computer account doesn't need to
be disabled, can be None.
password (str): Password of the specified user username (str):
Username of an account which is authorized to manage computer
accounts on the domain. Needs to be a fully qualified name like
``user@domain.tld`` or ``domain.tld\user``. If the domain is not
specified, the passed domain will be used. If the computer account
doesn't need to be disabled after the computer is unjoined, this can
be ``None``.
domain (str): The domain from which to unjoin the computer. Can be None. password (str):
The password of the specified user
workgroup (str): The workgroup to join the computer to. Default is domain (str):
``WORKGROUP`` The domain from which to unjoin the computer. Can be ``None``
workgroup (str):
The workgroup to join the computer to. Default is ``WORKGROUP``
.. versionadded:: 2015.8.2/2015.5.7 .. versionadded:: 2015.8.2/2015.5.7
disable (bool): Disable the computer account in Active Directory. True disable (bool):
to disable. Default is False ``True`` to disable the computer account in Active Directory.
Default is ``False``
restart (bool): Restart the computer after successful unjoin restart (bool):
``True`` will restart the computer after successful unjoin. Default
is ``False``
.. versionadded:: 2015.8.2/2015.5.7 .. versionadded:: 2015.8.2/2015.5.7
Returns: Returns:
dict: Dictionary if successful, otherwise False dict: Returns a dictionary if successful, otherwise ``False``
CLI Example: CLI Example:
@ -859,15 +900,16 @@ def get_domain_workgroup():
def _try_parse_datetime(time_str, fmts): def _try_parse_datetime(time_str, fmts):
''' '''
Attempts to parse the input time_str as a date. A helper function that attempts to parse the input time_str as a date.
Args: Args:
time_str (str): A string representing the time time_str (str): A string representing the time
fmts (list): A list of date format strings fmts (list): A list of date format strings
Returns: Returns:
datetime: A datetime object if parsed properly, otherwise None datetime: Returns a datetime object if parsed properly, otherwise None
''' '''
result = None result = None
for fmt in fmts: for fmt in fmts:
@ -910,7 +952,9 @@ def set_system_time(newtime):
Set the system time. Set the system time.
Args: Args:
newtime (str): The time to set. Can be any of the following formats.
newtime (str):
The time to set. Can be any of the following formats:
- HH:MM:SS AM/PM - HH:MM:SS AM/PM
- HH:MM AM/PM - HH:MM AM/PM
@ -918,7 +962,7 @@ def set_system_time(newtime):
- HH:MM (24 hour) - HH:MM (24 hour)
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -951,24 +995,16 @@ def set_system_date_time(years=None,
system year will be used. (Used by set_system_date and set_system_time) system year will be used. (Used by set_system_date and set_system_time)
Args: Args:
years (int): Years digit, ie: 2015 years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12 months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31 days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23 hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59 minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59 seconds (int): Seconds digit: 0 - 59
Returns: Returns:
bool: True if successful bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: Raises an error if ``SetLocalTime`` function
fails
CLI Example: CLI Example:
@ -1037,7 +1073,7 @@ def get_system_date():
Get the Windows system date Get the Windows system date
Returns: Returns:
str: The system date str: Returns the system date
CLI Example: CLI Example:
@ -1054,7 +1090,8 @@ def set_system_date(newdate):
Set the Windows system date. Use <mm-dd-yy> format for the date. Set the Windows system date. Use <mm-dd-yy> format for the date.
Args: Args:
newdate (str): The date to set. Can be any of the following formats: newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD - YYYY-MM-DD
- MM-DD-YYYY - MM-DD-YYYY
@ -1063,6 +1100,9 @@ def set_system_date(newdate):
- MM/DD/YY - MM/DD/YY
- YYYY/MM/DD - YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash
@ -1087,7 +1127,7 @@ def start_time_service():
Start the Windows time service Start the Windows time service
Returns: Returns:
bool: True if successful, otherwise False. bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -1103,7 +1143,7 @@ def stop_time_service():
Stop the Windows time service Stop the Windows time service
Returns: Returns:
bool: True if successful, otherwise False bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -1122,7 +1162,8 @@ def get_pending_component_servicing():
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if a reboot is pending, otherwise False. bool: ``True`` if there are pending Component Based Servicing tasks,
otherwise ``False``
CLI Example: CLI Example:
@ -1146,12 +1187,14 @@ def get_pending_component_servicing():
def get_pending_domain_join(): def get_pending_domain_join():
''' '''
Determine whether there is a pending domain join action that requires a reboot. Determine whether there is a pending domain join action that requires a
reboot.
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if a reboot is pending, otherwise False. bool: ``True`` if there is a pending domain join action, otherwise
``False``
CLI Example: CLI Example:
@ -1193,7 +1236,8 @@ def get_pending_file_rename():
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if a reboot is pending, otherwise False. bool: ``True`` if there are pending file rename operations, otherwise
``False``
CLI Example: CLI Example:
@ -1228,7 +1272,8 @@ def get_pending_servermanager():
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if a reboot is pending, otherwise False. bool: ``True`` if there are pending Server Manager tasks, otherwise
``False``
CLI Example: CLI Example:
@ -1265,7 +1310,7 @@ def get_pending_update():
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if a reboot is pending, otherwise False. bool: ``True`` if there are pending updates, otherwise ``False``
CLI Example: CLI Example:
@ -1305,14 +1350,14 @@ def set_reboot_required_witnessed():
current boot session. Also, in the scope of this key, the name *'Reboot current boot session. Also, in the scope of this key, the name *'Reboot
required'* will be assigned the value of *1*. required'* will be assigned the value of *1*.
(For the time being, this this function is being used whenever an install For the time being, this function is being used whenever an install
completes with exit code 3010 and this usage can be extended where completes with exit code 3010 and can be extended where appropriate in the
appropriate in the future.) future.
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if registry entry set successfuly, otherwise False. bool: ``True`` if successful, otherwise ``False``
CLI Example: CLI Example:
@ -1330,16 +1375,18 @@ def set_reboot_required_witnessed():
def get_reboot_required_witnessed(): def get_reboot_required_witnessed():
''' '''
This tells us if, at any time during the current boot session the salt Determine if at any time during the current boot session the salt minion
minion witnessed an event indicating that a reboot is required. (For the witnessed an event indicating that a reboot is required.
time being, this function will return True if an install completed with exit
code 3010 during the current boot session and this usage can be extended This function will return ``True`` if an install completed with exit
where appropriate in the future) code 3010 during the current boot session and can be extended where
appropriate in the future.
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if reboot required, otherwise False. bool: ``True`` if the ``Requires reboot`` registry flag is set to ``1``,
otherwise ``False``
CLI Example: CLI Example:
@ -1361,7 +1408,7 @@ def get_pending_reboot():
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
Returns: Returns:
bool: True if pending reboot, otherwise False. bool: ``True`` if the system is pending reboot, otherwise ``False``
CLI Example: CLI Example:
@ -1414,24 +1414,26 @@ def install(name=None,
to_install.append((pkgname, pkgstr)) to_install.append((pkgname, pkgstr))
break break
else: else:
if re.match('kernel(-.+)?', name): if pkgname is not None:
# kernel and its subpackages support multiple if re.match('kernel(-.+)?', pkgname):
# installs as their paths do not conflict. # kernel and its subpackages support multiple
# Performing a yum/dnf downgrade will be a no-op # installs as their paths do not conflict.
# so just do an install instead. It will fail if # Performing a yum/dnf downgrade will be a
# there are other interdependencies that have # no-op so just do an install instead. It will
# conflicts, and that's OK. We don't want to force # fail if there are other interdependencies
# anything, we just want to properly handle it if # that have conflicts, and that's OK. We don't
# someone tries to install a kernel/kernel-devel of # want to force anything, we just want to
# a lower version than the currently-installed one. # properly handle it if someone tries to
# TODO: find a better way to determine if a package # install a kernel/kernel-devel of a lower
# supports multiple installs. # version than the currently-installed one.
to_install.append((pkgname, pkgstr)) # TODO: find a better way to determine if a
else: # package supports multiple installs.
# None of the currently-installed versions are to_install.append((pkgname, pkgstr))
# greater than the specified version, so this is a else:
# downgrade. # None of the currently-installed versions are
to_downgrade.append((pkgname, pkgstr)) # greater than the specified version, so this
# is a downgrade.
to_downgrade.append((pkgname, pkgstr))
def _add_common_args(cmd): def _add_common_args(cmd):
''' '''
@ -106,6 +106,13 @@ A REST API for Salt
expire_responses : True expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the Whether to check for and kill HTTP responses that have exceeded the
default timeout. default timeout.
.. deprecated:: 2016.11.9, 2017.7.3, Oxygen
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576`` max_request_body_size : ``1048576``
Maximum size for the HTTP request body. Maximum size for the HTTP request body.
collect_stats : False collect_stats : False
@ -490,11 +497,14 @@ logger = logging.getLogger(__name__)
import cherrypy import cherrypy
try: try:
from cherrypy.lib import cpstats from cherrypy.lib import cpstats
except ImportError: except AttributeError:
cpstats = None cpstats = None
logger.warn('Import of cherrypy.cpstats failed. ' logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: ' 'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444') 'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
import yaml import yaml
import salt.ext.six as six import salt.ext.six as six
@ -503,6 +513,7 @@ import salt.ext.six as six
# Import Salt libs # Import Salt libs
import salt import salt
import salt.auth import salt.auth
import salt.exceptions
import salt.utils import salt.utils
import salt.utils.event import salt.utils.event
@ -750,11 +761,18 @@ def hypermedia_handler(*args, **kwargs):
except (salt.exceptions.SaltDaemonNotRunning, except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc: salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror) raise cherrypy.HTTPError(503, exc.strerror)
except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout): except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504) raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException: except cherrypy.CherryPyException:
raise raise
except Exception as exc: except Exception as exc:
# The TimeoutError exception class was removed in CherryPy in 12.0.0, but
# Still check existence of TimeoutError and handle in CherryPy < 12.
# The check was moved down from the SaltClientTimeout error line because
# A one-line if statement throws a BaseException inheritance TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback import traceback
logger.debug("Error while processing request for: %s", logger.debug("Error while processing request for: %s",
@ -2728,8 +2746,6 @@ class API(object):
'server.socket_port': self.apiopts.get('port', 8000), 'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100), 'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30), 'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get( 'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576), 'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False), 'debug': self.apiopts.get('debug', False),
@ -2747,6 +2763,14 @@ class API(object):
}, },
} }
if salt.utils.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False): if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True conf['/']['tools.cpstats.on'] = True
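The same version gate can be reproduced outside of salt-api in a few lines, assuming both cherrypy and the salt.utils.version_cmp helper are importable:

.. code-block:: python

    import cherrypy
    import salt.utils

    conf = {'global': {'server.socket_port': 8000}}
    # Only CherryPy < 12 still ships the timeout monitor plugin
    if salt.utils.version_cmp(cherrypy.__version__, '12.0.0') < 0:
        conf['global']['engine.timeout_monitor.on'] = True
    print(conf['global'])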
@ -143,6 +143,17 @@ def get_printout(out, opts=None, **kwargs):
# See Issue #29796 for more information. # See Issue #29796 for more information.
out = opts['output'] out = opts['output']
# Handle setting the output when --static is passed.
if not out and opts.get('static'):
if opts.get('output'):
out = opts['output']
elif opts.get('fun', '').split('.')[0] == 'state':
# --static doesn't have an output set at this point, but if we're
# running a state function and "out" hasn't already been set, we
# should set the out variable to "highstate". Otherwise state runs
# are set to "nested" below. See Issue #44556 for more information.
out = 'highstate'
if out == 'text': if out == 'text':
out = 'txt' out = 'txt'
elif out is None or out == '': elif out is None or out == '':
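A condensed, standalone sketch of the selection logic added above (the opts keys mirror those in the snippet):

.. code-block:: python

    def pick_outputter(opts, out=None):
        # Prefer an explicit --out, fall back to 'highstate' for state
        # functions run with --static, otherwise leave the choice unset
        if not out and opts.get('static'):
            if opts.get('output'):
                out = opts['output']
            elif opts.get('fun', '').split('.')[0] == 'state':
                out = 'highstate'
        return out

    print(pick_outputter({'static': True, 'fun': 'state.highstate'}))  # highstate
    print(pick_outputter({'static': True, 'fun': 'test.ping'}))        # None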
@ -23,7 +23,14 @@ def output(ret, bar, **kwargs): # pylint: disable=unused-argument
Update the progress bar Update the progress bar
''' '''
if 'return_count' in ret: if 'return_count' in ret:
bar.update(ret['return_count']) val = ret['return_count']
# Avoid to fail if targets are behind a syndic. In this case actual return count will be
# higher than targeted by MoM itself.
# TODO: implement a way to get the proper target minions count and remove this workaround.
# Details are in #44239.
if val > bar.maxval:
bar.maxval = val
bar.update(val)
return '' return ''
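A self-contained illustration of the guard, using a tiny stand-in for the progress bar object (only maxval and update are assumed):

.. code-block:: python

    class Bar(object):
        '''Minimal stand-in for progressbar.ProgressBar.'''
        def __init__(self, maxval):
            self.maxval = maxval

        def update(self, value):
            print('{0}/{1} returns'.format(value, self.maxval))

    bar = Bar(maxval=10)        # 10 minions were targeted
    ret = {'return_count': 14}  # a syndic reported extra returns
    val = ret['return_count']
    if val > bar.maxval:
        bar.maxval = val        # grow the bar instead of overflowing it
    bar.update(val)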
@ -235,25 +235,25 @@ class PillarCache(object):
return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here
def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv)) log.debug('Scanning pillar cache for information about minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
log.debug('Scanning cache: {0}'.format(self.cache._dict)) log.debug('Scanning cache: {0}'.format(self.cache._dict))
# Check the cache! # Check the cache!
if self.minion_id in self.cache: # Keyed by minion_id if self.minion_id in self.cache: # Keyed by minion_id
# TODO Compare grains, etc? # TODO Compare grains, etc?
if self.saltenv in self.cache[self.minion_id]: if self.pillarenv in self.cache[self.minion_id]:
# We have a cache hit! Send it back. # We have a cache hit! Send it back.
log.debug('Pillar cache hit for minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv)) log.debug('Pillar cache hit for minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
return self.cache[self.minion_id][self.saltenv] return self.cache[self.minion_id][self.pillarenv]
else: else:
# We found the minion but not the env. Store it. # We found the minion but not the env. Store it.
fresh_pillar = self.fetch_pillar() fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id][self.saltenv] = fresh_pillar self.cache[self.minion_id][self.pillarenv] = fresh_pillar
log.debug('Pillar cache miss for saltenv {0} for minion {1}'.format(self.saltenv, self.minion_id)) log.debug('Pillar cache miss for pillarenv {0} for minion {1}'.format(self.pillarenv, self.minion_id))
return fresh_pillar return fresh_pillar
else: else:
# We haven't seen this minion yet in the cache. Store it. # We haven't seen this minion yet in the cache. Store it.
fresh_pillar = self.fetch_pillar() fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id] = {self.saltenv: fresh_pillar} self.cache[self.minion_id] = {self.pillarenv: fresh_pillar}
log.debug('Pillar cache miss for minion {0}'.format(self.minion_id)) log.debug('Pillar cache miss for minion {0}'.format(self.minion_id))
log.debug('Current pillar cache: {0}'.format(self.cache._dict)) # FIXME hack! log.debug('Current pillar cache: {0}'.format(self.cache._dict)) # FIXME hack!
return fresh_pillar return fresh_pillar
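The cache itself is effectively a dict keyed by minion id and then by pillarenv. A stripped-down sketch of the lookup path described above, using plain dicts instead of Salt's cache object:

.. code-block:: python

    cache = {}

    def compile_pillar(minion_id, pillarenv, fetch_pillar):
        minion_cache = cache.setdefault(minion_id, {})
        if pillarenv in minion_cache:
            # Cache hit: same minion, same pillarenv
            return minion_cache[pillarenv]
        # Cache miss: compile a fresh pillar and remember it
        minion_cache[pillarenv] = fetch_pillar()
        return minion_cache[pillarenv]

    print(compile_pillar('web01', 'base', lambda: {'role': 'web'}))    # miss
    print(compile_pillar('web01', 'base', lambda: {'role': 'other'}))  # hit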
@ -6,8 +6,11 @@ from __future__ import absolute_import
# Import python libs # Import python libs
import os import os
import logging
import pickle import pickle
import logging
# Import Salt modules
import salt.utils.files
# This must be present or the Salt loader won't load this module # This must be present or the Salt loader won't load this module
__proxyenabled__ = ['dummy'] __proxyenabled__ = ['dummy']
@ -19,7 +22,7 @@ DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'} DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'} DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = os.tmpnam() FILENAME = salt.utils.files.mkstemp()
# Want logging! # Want logging!
log = logging.getLogger(__file__) log = logging.getLogger(__file__)
@ -45,7 +48,7 @@ def _load_state():
pck = open(FILENAME, 'r') # pylint: disable=W8470 pck = open(FILENAME, 'r') # pylint: disable=W8470
DETAILS = pickle.load(pck) DETAILS = pickle.load(pck)
pck.close() pck.close()
except IOError: except EOFError:
DETAILS = {} DETAILS = {}
DETAILS['initialized'] = False DETAILS['initialized'] = False
_save_state(DETAILS) _save_state(DETAILS)
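The move away from os.tmpnam also explains the IOError to EOFError change: a mkstemp-style helper creates the file up front, so the first load hits an empty file rather than a missing one. A portable sketch using only the standard library:

.. code-block:: python

    import os
    import pickle
    import tempfile

    fd, FILENAME = tempfile.mkstemp()
    os.close(fd)  # the file now exists but is empty

    def save_state(details):
        with open(FILENAME, 'wb') as pck:
            pickle.dump(details, pck)

    def load_state():
        try:
            with open(FILENAME, 'rb') as pck:
                return pickle.load(pck)
        except EOFError:
            # Empty file on first run: initialise a default state
            return {'initialized': False}

    print(load_state())                              # {'initialized': False}
    save_state({'services': {'apache': 'running'}})
    print(load_state())                              # the persisted state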
@ -196,9 +196,7 @@ def __virtual__():
Only return if all the modules are available Only return if all the modules are available
''' '''
if not salt.utils.which('racadm'): if not salt.utils.which('racadm'):
log.critical('fx2 proxy minion needs "racadm" to be installed.') return False, 'fx2 proxy minion needs "racadm" to be installed.'
return False
return True return True
@ -16,9 +16,21 @@ Dependencies
The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm`` The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm``
Please check Installation_ for complete details. Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io .. _NAPALM: https://napalm-automation.net/
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html .. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html
.. note::
Beginning with Salt release 2017.7.3, it is recommended to use
``napalm`` >= ``2.0.0``. The library has been unified into a monolithic
package, as opposed to separate packages per driver. For more details
you can check `this document <https://napalm-automation.net/reunification/>`_.
While it will still work with the old packages, bear in mind that the NAPALM
core team will maintain only the main ``napalm`` package.
Moreover, for additional capabilities, the users can always define a
library that extends NAPALM's base capabilities and configure the
``provider`` option (see below).
Pillar Pillar
------ ------
@ -59,7 +71,7 @@ always_alive: ``True``
.. versionadded:: 2017.7.0 .. versionadded:: 2017.7.0
provider: ``napalm_base`` provider: ``napalm_base``
The module that provides the ``get_network_device`` function. The library that provides the ``get_network_device`` function.
This option is useful when the user has more specific needs and requires This option is useful when the user has more specific needs and requires
to extend the NAPALM capabilities using a private library implementation. to extend the NAPALM capabilities using a private library implementation.
The only constraint is that the alternative library needs to have the The only constraint is that the alternative library needs to have the
@ -129,17 +141,7 @@ from __future__ import absolute_import
import logging import logging
log = logging.getLogger(__file__) log = logging.getLogger(__file__)
# Import third party lib # Import Salt modules
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
import napalm_base
# pylint: enable=W0611
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
from salt.ext import six from salt.ext import six
import salt.utils.napalm import salt.utils.napalm
@ -163,7 +165,7 @@ DETAILS = {}
def __virtual__(): def __virtual__():
return HAS_NAPALM or (False, 'Please install the NAPALM library: `pip install napalm`!') return salt.utils.napalm.virtual(__opts__, 'napalm', __file__)
# ---------------------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported # helper functions -- will not be exported
@ -363,7 +363,8 @@ def statelist(states_dict, sid_excludes=frozenset(['include', 'exclude'])):
REQUISITES = set([ REQUISITES = set([
'require', 'require_in', 'watch', 'watch_in', 'use', 'use_in', 'listen', 'listen_in' 'require', 'require_in', 'watch', 'watch_in', 'use', 'use_in', 'listen', 'listen_in',
'onchanges', 'onchanges_in', 'onfail', 'onfail_in'
]) ])
@ -405,8 +406,8 @@ def rename_state_ids(data, sls, is_extend=False):
del data[sid] del data[sid]
REQUIRE = set(['require', 'watch', 'listen']) REQUIRE = set(['require', 'watch', 'listen', 'onchanges', 'onfail'])
REQUIRE_IN = set(['require_in', 'watch_in', 'listen_in']) REQUIRE_IN = set(['require_in', 'watch_in', 'listen_in', 'onchanges_in', 'onfail_in'])
EXTENDED_REQUIRE = {} EXTENDED_REQUIRE = {}
EXTENDED_REQUIRE_IN = {} EXTENDED_REQUIRE_IN = {}
@ -414,8 +415,8 @@ from itertools import chain
# To avoid cycles among states when each state requires the one before it: # To avoid cycles among states when each state requires the one before it:
# explicit require/watch/listen can only contain states before it # explicit require/watch/listen/onchanges/onfail can only contain states before it
# explicit require_in/watch_in/listen_in can only contain states after it # explicit require_in/watch_in/listen_in/onchanges_in/onfail_in can only contain states after it
def add_implicit_requires(data): def add_implicit_requires(data):
def T(sid, state): # pylint: disable=C0103 def T(sid, state): # pylint: disable=C0103
@ -449,7 +450,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs: for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_after: if T(rsid, rstate) in states_after:
raise SaltRenderError( raise SaltRenderError(
'State({0}) can\'t require/watch/listen a state({1}) defined ' 'State({0}) can\'t require/watch/listen/onchanges/onfail a state({1}) defined '
'after it!'.format(tag, T(rsid, rstate)) 'after it!'.format(tag, T(rsid, rstate))
) )
@ -459,7 +460,7 @@ def add_implicit_requires(data):
for _, rstate, rsid in reqs: for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_before: if T(rsid, rstate) in states_before:
raise SaltRenderError( raise SaltRenderError(
'State({0}) can\'t require_in/watch_in/listen_in a state({1}) ' 'State({0}) can\'t require_in/watch_in/listen_in/onchanges_in/onfail_in a state({1}) '
'defined before it!'.format(tag, T(rsid, rstate)) 'defined before it!'.format(tag, T(rsid, rstate))
) )
@ -571,7 +572,7 @@ def extract_state_confs(data, is_extend=False):
if not is_extend and state_id in STATE_CONF_EXT: if not is_extend and state_id in STATE_CONF_EXT:
extend = STATE_CONF_EXT[state_id] extend = STATE_CONF_EXT[state_id]
for requisite in 'require', 'watch', 'listen': for requisite in 'require', 'watch', 'listen', 'onchanges', 'onfail':
if requisite in extend: if requisite in extend:
extend[requisite] += to_dict[state_id].get(requisite, []) extend[requisite] += to_dict[state_id].get(requisite, [])
to_dict[state_id].update(STATE_CONF_EXT[state_id]) to_dict[state_id].update(STATE_CONF_EXT[state_id])
@ -309,7 +309,7 @@ def _format_job_instance(job):
'Arguments': list(job.get('arg', [])), 'Arguments': list(job.get('arg', [])),
# unlikely but safeguard from invalid returns # unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'), 'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []), 'Target-type': job.get('tgt_type', 'list'),
'User': job.get('user', 'root')} 'User': job.get('user', 'root')}
if 'metadata' in job: if 'metadata' in job:
Some files were not shown because too many files have changed in this diff.