Merge branch '2018.3' into issue47937

This commit is contained in:
Nicole Thomas 2018-06-27 13:41:34 -04:00 committed by GitHub
commit 34b24bb7fa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
196 changed files with 6283 additions and 2942 deletions

29
.ci/docs Normal file
View File

@ -0,0 +1,29 @@
// Jenkins declarative pipeline (.ci/docs): builds the Salt documentation
// with Sphinx and notifies the GitHub pull request of the result.
pipeline {
agent { label 'docs' }
environment {
// pyenv-managed Python so the job controls its own interpreter version.
PYENV_ROOT = "/usr/local/pyenv"
PATH = "$PYENV_ROOT/bin:$PATH"
}
stages {
stage('setup') {
steps {
// Install Python 2.7.14 under pyenv (tolerate "already installed"), then select it.
sh 'eval "$(pyenv init -)"; pyenv install 2.7.14 || echo "We already have this python."; pyenv local 2.7.14; pyenv shell 2.7.14'
// Install Sphinx plus Salt itself in editable mode (-e .) —
// presumably so the doc build can import salt modules; confirm against doc/conf.py.
sh 'eval "$(pyenv init -)"; pip install sphinx -e .'
}
}
stage('build') {
steps {
// Clean rebuild of the HTML docs; the output tree is archived for review.
sh 'eval "$(pyenv init -)"; make -C doc clean html'
archiveArtifacts artifacts: 'doc/_build/html'
}
}
}
post {
success {
githubNotify description: "The docs job has passed, artifacts have been saved", status: "SUCCESS"
}
failure {
githubNotify description: "The docs job has failed", status: "FAILURE"
}
}
}

48
.ci/kitchen-centos7-py2 Normal file
View File

@ -0,0 +1,48 @@
// Jenkins declarative pipeline (.ci/kitchen-centos7-py2): runs the Salt test
// suite under Python 2 on CentOS 7 via test-kitchen (EC2 driver) and notifies
// the GitHub pull request of the result.
pipeline {
    agent { label 'kitchen-slave' }
    environment {
        // Kitchen platform/driver configuration provided on the Jenkins slave.
        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
        RBENV_VERSION = "2.4.2"
        TEST_SUITE = "py2"
        TEST_PLATFORM = "centos-7"
    }
    stages {
        stage('setup') {
            steps {
                sh 'bundle install --with ec2 windows --without opennebula docker'
            }
        }
        stage('run kitchen') {
            steps {
                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                        // Converge is retried once: a single transient EC2/provisioning
                        // failure should not fail the whole job.
                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
                    }
                }}
                archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
            }
            post {
                always {
                    // Always tear down the EC2 instance, pass or fail.
                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
                            // Fixed: use the same key path as the setup step above.
                            // The previous '~/.ssh/jenkins/jenkins-testing.pem' was
                            // inconsistent with the path added before converge/verify.
                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
                        }
                    }}
                }
            }
        }
    }
    post {
        success {
            githubNotify description: "The centos7-py2 job has passed", status: "SUCCESS"
        }
        failure {
            githubNotify description: "The centos7-py2 job has failed", status: "FAILURE"
        }
    }
}

48
.ci/kitchen-centos7-py3 Normal file
View File

@ -0,0 +1,48 @@
// Jenkins declarative pipeline (.ci/kitchen-centos7-py3): runs the Salt test
// suite under Python 3 on CentOS 7 via test-kitchen (EC2 driver) and notifies
// the GitHub pull request of the result.
pipeline {
    agent { label 'kitchen-slave' }
    environment {
        // Kitchen platform/driver configuration provided on the Jenkins slave.
        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
        RBENV_VERSION = "2.4.2"
        TEST_SUITE = "py3"
        TEST_PLATFORM = "centos-7"
    }
    stages {
        stage('setup') {
            steps {
                sh 'bundle install --with ec2 windows --without opennebula docker'
            }
        }
        stage('run kitchen') {
            steps {
                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                        // Converge is retried once: a single transient EC2/provisioning
                        // failure should not fail the whole job.
                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
                    }
                }}
                archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
            }
            post {
                always {
                    // Always tear down the EC2 instance, pass or fail.
                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
                            // Fixed: use the same key path as the setup step above.
                            // The previous '~/.ssh/jenkins/jenkins-testing.pem' was
                            // inconsistent with the path added before converge/verify.
                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
                        }
                    }}
                }
            }
        }
    }
    post {
        success {
            githubNotify description: "The centos7-py3 job has passed", status: "SUCCESS"
        }
        failure {
            githubNotify description: "The centos7-py3 job has failed", status: "FAILURE"
        }
    }
}

View File

@ -0,0 +1,48 @@
// Jenkins declarative pipeline: runs the Salt test suite under Python 2 on
// Ubuntu 16.04 via test-kitchen (EC2 driver) and notifies the GitHub pull
// request of the result.
pipeline {
    agent { label 'kitchen-slave' }
    environment {
        // Kitchen platform/driver configuration provided on the Jenkins slave.
        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
        RBENV_VERSION = "2.4.2"
        TEST_SUITE = "py2"
        TEST_PLATFORM = "ubuntu-1604"
    }
    stages {
        stage('setup') {
            steps {
                sh 'bundle install --with ec2 windows --without opennebula docker'
            }
        }
        stage('run kitchen') {
            steps {
                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                        // Converge is retried once: a single transient EC2/provisioning
                        // failure should not fail the whole job.
                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
                    }
                }}
                archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
            }
            post {
                always {
                    // Always tear down the EC2 instance, pass or fail.
                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
                            // Fixed: use the same key path as the setup step above.
                            // The previous '~/.ssh/jenkins/jenkins-testing.pem' was
                            // inconsistent with the path added before converge/verify.
                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
                        }
                    }}
                }
            }
        }
    }
    post {
        success {
            githubNotify description: "The ubuntu-1604-py2 job has passed", status: "SUCCESS"
        }
        failure {
            githubNotify description: "The ubuntu-1604-py2 job has failed", status: "FAILURE"
        }
    }
}

View File

@ -0,0 +1,48 @@
// Jenkins declarative pipeline: runs the Salt test suite under Python 3 on
// Ubuntu 16.04 via test-kitchen (EC2 driver) and notifies the GitHub pull
// request of the result.
pipeline {
    agent { label 'kitchen-slave' }
    environment {
        // Kitchen platform/driver configuration provided on the Jenkins slave.
        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
        RBENV_VERSION = "2.4.2"
        TEST_SUITE = "py3"
        TEST_PLATFORM = "ubuntu-1604"
    }
    stages {
        stage('setup') {
            steps {
                sh 'bundle install --with ec2 windows --without opennebula docker'
            }
        }
        stage('run kitchen') {
            steps {
                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                        // Converge is retried once: a single transient EC2/provisioning
                        // failure should not fail the whole job.
                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
                    }
                }}
                archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
            }
            post {
                always {
                    // Always tear down the EC2 instance, pass or fail.
                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
                            // Fixed: use the same key path as the setup step above.
                            // The previous '~/.ssh/jenkins/jenkins-testing.pem' was
                            // inconsistent with the path added before converge/verify.
                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
                        }
                    }}
                }
            }
        }
    }
    post {
        success {
            githubNotify description: "The ubuntu-1604-py3 job has passed", status: "SUCCESS"
        }
        failure {
            githubNotify description: "The ubuntu-1604-py3 job has failed", status: "FAILURE"
        }
    }
}

41
.ci/lint Normal file
View File

@ -0,0 +1,41 @@
// Jenkins declarative pipeline (.ci/lint): pylint-checks the Salt code base
// and test suite in parallel, then notifies the GitHub pull request.
pipeline {
agent { label 'pr-lint-slave' }
environment {
// pyenv-managed Python so the job controls its own interpreter version.
PYENV_ROOT = "/usr/local/pyenv"
PATH = "$PYENV_ROOT/bin:$PATH"
}
stages {
stage('setup') {
steps {
// Install Python 2.7.14 under pyenv (tolerate "already installed"), then select it.
sh 'eval "$(pyenv init -)"; pyenv install 2.7.14 || echo "We already have this python."; pyenv local 2.7.14; pyenv shell 2.7.14'
sh 'eval "$(pyenv init -)"; pip install pylint SaltPyLint'
// Log which pylint is in use to aid debugging of environment issues.
sh 'eval "$(pyenv init -)"; which pylint; pylint --version'
}
}
stage('linting') {
// Let both lint runs finish even if one fails, so both reports are produced.
failFast false
parallel {
stage('salt linting') {
steps {
// Lint the main code base; tee so the report is both live in the
// console log and archived as an artifact.
sh 'eval "$(pyenv init -)"; pylint --rcfile=.testing.pylintrc --disable=W1307,str-format-in-logging setup.py salt/ | tee pylint-report.xml'
archiveArtifacts artifacts: 'pylint-report.xml'
}
}
stage('test linting') {
steps {
// Lint the test suite with extra checks disabled (W0232/E1002 relate
// to old-style classes, common in test scaffolding).
sh 'eval "$(pyenv init -)"; pylint --rcfile=.testing.pylintrc --disable=W0232,E1002,W1307,str-format-in-logging tests/ | tee pylint-report-tests.xml'
archiveArtifacts artifacts: 'pylint-report-tests.xml'
}
}
}
}
}
post {
success {
githubNotify description: "The lint job has passed", status: "SUCCESS"
}
failure {
githubNotify description: "The lint job has failed", status: "FAILURE"
}
}
}

View File

@ -43,23 +43,20 @@ provisioner:
repo: git
testingdir: /testing
salt_copy_filter:
- .bundle
- .kitchen
- .kitchen.yml
- artifacts
- Gemfile
- Gemfile.lock
- README.rst
- .travis.yml
- '*.pyc'
- __pycache__
- '*.pyc'
- .bundle
- .tox
- .kitchen
- artifacts
- Gemfile.lock
state_top:
base:
"os:Windows":
- match: grain
- prep_windows
"*":
- git.salt
- <%= ENV['KITCHEN_STATE'] || 'git.salt' %>
pillars:
top.sls:
base:

View File

@ -250,8 +250,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2018.3.0' # latest release
previous_release = '2017.7.5' # latest release from previous branch
latest_release = '2018.3.2' # latest release
previous_release = '2017.7.6' # latest release from previous branch
previous_release_dir = '2017.7' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch

View File

@ -1,7 +1,7 @@
.. _all-salt.cache:
=============
cache modules
Cache Modules
=============
.. currentmodule:: salt.cache
@ -10,6 +10,8 @@ cache modules
:toctree:
:template: autosummary.rst.tmpl
localfs
consul
etcd_cache
localfs
mysql_cache
redis_cache

View File

@ -1,5 +1,5 @@
salt.cache.consul module
========================
salt.cache.consul
=================
.. automodule:: salt.cache.consul
:members:

View File

@ -1,5 +1,5 @@
salt.cache.etcd_cache module
=============================
salt.cache.etcd_cache
=====================
.. automodule:: salt.cache.etcd_cache
:members:

View File

@ -1,5 +1,5 @@
salt.cache.localfs module
=========================
salt.cache.localfs
==================
.. automodule:: salt.cache.localfs
:members:

View File

@ -1,5 +1,5 @@
salt.cache.mysql_cache module
=============================
salt.cache.mysql_cache
======================
.. automodule:: salt.cache.mysql_cache
:members:

View File

@ -1,5 +1,5 @@
salt.cache.redis_cache module
=============================
salt.cache.redis_cache
======================
.. automodule:: salt.cache.redis_cache
:members:

View File

@ -23,6 +23,7 @@ cloud modules
lxc
msazure
nova
oneandone
opennebula
openstack
parallels

View File

@ -1,6 +1,6 @@
===============================
========================
salt.cloud.clouds.aliyun
===============================
========================
.. automodule:: salt.cloud.clouds.aliyun
:members:
:members:

View File

@ -1,5 +1,6 @@
salt.cloud.clouds.vultrpy module
================================
=========================
salt.cloud.clouds.vultrpy
=========================
.. automodule:: salt.cloud.clouds.vultrpy
:members:

View File

@ -428,7 +428,7 @@ to False.
.. conf_master:: color_theme
``color_theme``
---------
---------------
Default: ``""``
@ -728,31 +728,6 @@ master event bus. The value is expressed in bytes.
max_event_size: 1048576
.. conf_master:: ping_on_rotate
``ping_on_rotate``
------------------
.. versionadded:: 2014.7.0
Default: ``False``
By default, the master AES key rotates every 24 hours. The next command
following a key rotation will trigger a key refresh from the minion which may
result in minions which do not respond to the first command after a key refresh.
To tell the master to ping all minions immediately after an AES key refresh, set
ping_on_rotate to ``True``. This should mitigate the issue where a minion does not
appear to initially respond after a key is rotated.
Note that ping_on_rotate may cause high load on the master immediately after
the key rotation event as minions reconnect. Consider this carefully if this
salt master is managing a large number of minions.
.. code-block:: yaml
ping_on_rotate: False
.. conf_master:: master_job_cache
``master_job_cache``
@ -840,6 +815,8 @@ that connect to a master via localhost.
``ping_on_rotate``
------------------
.. versionadded:: 2014.7.0
Default: ``False``
By default, the master AES key rotates every 24 hours. The next command
@ -850,9 +827,9 @@ To tell the master to ping all minions immediately after an AES key refresh,
set ``ping_on_rotate`` to ``True``. This should mitigate the issue where a
minion does not appear to initially respond after a key is rotated.
Note that ping_on_rotate may cause high load on the master immediately after
the key rotation event as minions reconnect. Consider this carefully if this
salt master is managing a large number of minions.
Note that enabling this may cause high load on the master immediately after the
key rotation event as minions reconnect. Consider this carefully if this salt
master is managing a large number of minions.
If disabled, it is recommended to handle this event by listening for the
``aes_key_rotate`` event with the ``key`` tag and acting appropriately.
@ -1085,24 +1062,27 @@ Default settings which will be inherited by all rosters.
Default: ``/etc/salt/roster``
Pass in an alternative location for the salt-ssh `flat` roster file.
Pass in an alternative location for the salt-ssh :py:mod:`flat
<salt.roster.flat>` roster file.
.. code-block:: yaml
roster_file: /root/roster
.. conf_master:: roster_file
.. conf_master:: rosters
``rosters``
---------------
-----------
Default: None
Default: ``None``
Define locations for `flat` roster files so they can be chosen when using Salt API.
An administrator can place roster files into these locations.
Then when calling Salt API, parameter 'roster_file' should contain a relative path to these locations.
That is, "roster_file=/foo/roster" will be resolved as "/etc/salt/roster.d/foo/roster" etc.
This feature prevents passing insecure custom rosters through the Salt API.
Define locations for :py:mod:`flat <salt.roster.flat>` roster files so they can
be chosen when using Salt API. An administrator can place roster files into
these locations. Then, when calling Salt API, the :conf_master:`roster_file`
parameter should contain a relative path to these locations. That is,
``roster_file=/foo/roster`` will be resolved as
``/etc/salt/roster.d/foo/roster`` etc. This feature prevents passing insecure
custom rosters through the Salt API.
.. code-block:: yaml
@ -2179,6 +2159,7 @@ Example using line statements and line comments to increase ease of use:
If your configuration options are
.. code-block:: yaml
jinja_sls_env:
line_statement_prefix: '%'
line_comment_prefix: '##'
@ -2188,7 +2169,7 @@ as a jinja statement and will interpret anything after a ``##`` as a comment.
This allows the following more convenient syntax to be used:
.. code-block:: yaml
.. code-block:: jinja
## (this comment will not stay once rendered)
# (this comment remains in the rendered template)
@ -2202,7 +2183,7 @@ This allows the following more convenient syntax to be used:
The following less convenient but equivalent syntax would have to
be used if you had not set the line_statement and line_comment options:
.. code-block:: yaml
.. code-block:: jinja
{# (this comment will not stay once rendered) #}
# (this comment remains in the rendered template)

View File

@ -623,7 +623,7 @@ This directory may contain sensitive data and should be protected accordingly.
.. conf_master:: color_theme
``color_theme``
---------
---------------
Default: ``""``
@ -1347,6 +1347,39 @@ The password used for HTTP proxy access.
proxy_password: obolus
Docker Configuration
====================
.. conf_minion:: docker.update_mine
``docker.update_mine``
----------------------
.. versionadded:: 2017.7.8,2018.3.3
.. versionchanged:: Fluorine
The default value is now ``False``
Default: ``True``
If enabled, when containers are added, removed, stopped, started, etc., the
:ref:`mine <salt-mine>` will be updated with the results of :py:func:`docker.ps
verbose=True all=True host=True <salt.modules.dockermod.ps>`. This mine data is
used by :py:func:`mine.get_docker <salt.modules.mine.get_docker>`. Set this
option to ``False`` to keep Salt from updating the mine with this information.
.. note::
This option can also be set in Grains or Pillar data, with Grains
overriding Pillar and the minion config file overriding Grains.
.. note::
Disabling this will of course keep :py:func:`mine.get_docker
<salt.modules.mine.get_docker>` from returning any information for a given
minion.
.. code-block:: yaml
docker.update_mine: False
.. conf_minion:: docker.compare_container_networks
``docker.compare_container_networks``
@ -1381,6 +1414,7 @@ Specifies which keys are examined by
- GlobalIPv6Address
- IPv6Gateway
Minion Execution Module Management
==================================
@ -2674,7 +2708,7 @@ executed in a thread.
.. conf_minion:: process_count_max
``process_count_max``
-------
---------------------
.. versionadded:: 2018.3.0

View File

@ -368,6 +368,7 @@ execution modules
s3
s6
salt_proxy
saltcheck
saltcloudmod
saltutil
schedule
@ -498,5 +499,6 @@ execution modules
znc
zoneadm
zonecfg
zookeeper
zpool
zypper

View File

@ -1,5 +1,5 @@
salt.modules.libcloud_storage module
================================
salt.modules.libcloud_storage
=============================
.. automodule:: salt.modules.libcloud_storage
:members:

View File

@ -1,6 +1,6 @@
===================
=====================
salt.modules.opsgenie
===================
=====================
.. automodule:: salt.modules.opsgenie
:members:

View File

@ -0,0 +1,6 @@
======================
salt.modules.saltcheck
======================
.. automodule:: salt.modules.saltcheck
:members:

View File

@ -0,0 +1,6 @@
=====================
salt.modules.telegram
=====================
.. automodule:: salt.modules.telegram
:members:

View File

@ -321,4 +321,5 @@ state modules
zk_concurrency
zfs
zone
zookeeper
zpool

View File

@ -1,5 +1,5 @@
salt.states.infoblox_a module
===========================
salt.states.infoblox_a
======================
.. automodule:: salt.states.infoblox_a
:members:

View File

@ -1,5 +1,5 @@
salt.states.infoblox_cname module
===========================
salt.states.infoblox_cname
==========================
.. automodule:: salt.states.infoblox_cname
:members:

View File

@ -1,5 +1,5 @@
salt.states.infoblox_host_record module
===========================
salt.states.infoblox_host_record
================================
.. automodule:: salt.states.infoblox_host_record
:members:

View File

@ -1,5 +1,5 @@
salt.states.infoblox_range module
===========================
salt.states.infoblox_range
==========================
.. automodule:: salt.states.infoblox_range
:members:

View File

@ -125,7 +125,6 @@ Cloud Provider Specifics
Getting Started With Parallels <parallels>
Getting Started With ProfitBricks <profitbricks>
Getting Started With Proxmox <proxmox>
Getting Started With Rackspace <rackspace>
Getting Started With Scaleway <scaleway>
Getting Started With Saltify <saltify>
Getting Started With SoftLayer <softlayer>

View File

@ -9,11 +9,11 @@ libvirt with qemu-kvm.
http://www.libvirt.org/
Host Dependencies
============
=================
* libvirt >= 1.2.18 (older might work)
Salt-Cloud Dependencies
============
=======================
* libvirt-python
Provider Configuration

View File

@ -110,40 +110,51 @@ The typical lifecycle of a salt job from the perspective of the master
might be as follows:
1) A command is issued on the CLI. For example, 'salt my_minion test.ping'.
2) The 'salt' command uses LocalClient to generate a request to the salt master
by connecting to the ReqServer on TCP:4506 and issuing the job.
3) The salt-master ReqServer sees the request and passes it to an available
MWorker over workers.ipc.
4) A worker picks up the request and handles it. First, it checks to ensure
that the requested user has permissions to issue the command. Then, it sends
the publish command to all connected minions. For the curious, this happens
in ClearFuncs.publish().
5) The worker announces on the master event bus that it is about to publish
a job to connected minions. This happens by placing the event on the master
event bus (master_event_pull.ipc) where the EventPublisher picks it up and
distributes it to all connected event listeners on master_event_pub.ipc.
6) The message to the minions is encrypted and sent to the Publisher via IPC
on publish_pull.ipc.
7) Connected minions have a TCP session established with the Publisher on TCP
port 4505 where they await commands. When the Publisher receives the job over
publish_pull, it sends the jobs across the wire to the minions for processing.
8) After the minions receive the request, they decrypt it and perform any
requested work, if they determine that they are targeted to do so.
9) When the minion is ready to respond, it publishes the result of its job back
to the master by sending the encrypted result back to the master on TCP 4506
where it is again picked up by the ReqServer and forwarded to an available
MWorker for processing. (Again, this happens by passing this message across
workers.ipc to an available worker.)
10) When the MWorker receives the job it decrypts it and fires an event onto
the master event bus (master_event_pull.ipc). (Again for the curious, this
happens in AESFuncs._return().
11) The EventPublisher sees this event and re-publishes it on the bus to all
connected listeners of the master event bus (on master_event_pub.ipc). This
is where the LocalClient has been waiting, listening to the event bus for
minion replies. It gathers the job and stores the result.
12) When all targeted minions have replied or the timeout has been exceeded,
the salt client displays the results of the job to the user on the CLI.
2) The 'salt' command uses LocalClient to generate a request to the salt master
by connecting to the ReqServer on TCP:4506 and issuing the job.
3) The salt-master ReqServer sees the request and passes it to an available
MWorker over workers.ipc.
4) A worker picks up the request and handles it. First, it checks to ensure
that the requested user has permissions to issue the command. Then, it sends
the publish command to all connected minions. For the curious, this happens
in ClearFuncs.publish().
5) The worker announces on the master event bus that it is about to publish a
job to connected minions. This happens by placing the event on the master
event bus (master_event_pull.ipc) where the EventPublisher picks it up and
distributes it to all connected event listeners on master_event_pub.ipc.
6) The message to the minions is encrypted and sent to the Publisher via IPC on
publish_pull.ipc.
7) Connected minions have a TCP session established with the Publisher on TCP
port 4505 where they await commands. When the Publisher receives the job
over publish_pull, it sends the jobs across the wire to the minions for
processing.
8) After the minions receive the request, they decrypt it and perform any
requested work, if they determine that they are targeted to do so.
9) When the minion is ready to respond, it publishes the result of its job back
to the master by sending the encrypted result back to the master on TCP 4506
where it is again picked up by the ReqServer and forwarded to an available
MWorker for processing. (Again, this happens by passing this message across
workers.ipc to an available worker.)
10) When the MWorker receives the job it decrypts it and fires an event onto
the master event bus (master_event_pull.ipc). (Again for the curious, this
happens in AESFuncs._return().
11) The EventPublisher sees this event and re-publishes it on the bus to all
connected listeners of the master event bus (on master_event_pub.ipc). This
is where the LocalClient has been waiting, listening to the event bus for
minion replies. It gathers the job and stores the result.
12) When all targeted minions have replied or the timeout has been exceeded,
the salt client displays the results of the job to the user on the CLI.
Salt Minion
===========

View File

@ -236,13 +236,13 @@ repository be sure to communicate with any other contributors there on pull
requests that are large or have breaking changes.
In general it is best to have another Contributor review and merge any pull
requests that you open. Feel free to `at-mention`__ other regular contributors
requests that you open. Feel free to `at-mention`_ other regular contributors
to a repository and request a review. However, there are a lot of formula
repositories so if a repository does not yet have regular contributors or if
your pull request has stayed open for more than a couple days feel free to
"selfie-merge" your own pull request.
__: https://help.github.com/articles/basic-writing-and-formatting-syntax/#mentioning-users-and-teams
.. _`at-mention`: https://help.github.com/articles/basic-writing-and-formatting-syntax/#mentioning-users-and-teams
Style
-----

View File

@ -180,6 +180,404 @@ available, since that's not actually part of what's being tested, we mocked that
import by patching ``sys.modules`` when tests are running.
Mocking Filehandles
-------------------
.. note::
This documentation applies to the 2018.3 release cycle and newer. The
extended functionality for ``mock_open`` described below does not exist in
the 2017.7 and older release branches.
Opening files in Salt is done using ``salt.utils.files.fopen()``. When testing
code that reads from files, the ``mock_open`` helper can be used to mock
filehandles. Note that is not the same ``mock_open`` as
:py:func:`unittest.mock.mock_open` from the Python standard library, but rather
a separate implementation which has additional functionality.
.. code-block:: python
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON,
)
import salt.modules.mymod as mymod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MyAwesomeTestCase(TestCase):
def test_something(self):
fopen_mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('salt.utils.files.fopen', fopen_mock):
result = mymod.myfunc()
assert result is True
This will force any filehandle opened to mimic a filehandle which, when read,
produces the specified contents.
.. important::
**String Types**
When running tests on Python 2, ``mock_open`` will convert any ``unicode``
types to ``str`` types to more closely reproduce Python 2 behavior (file
reads are always ``str`` types in Python 2, irrespective of mode).
However, when configuring your read_data, make sure that you are using
bytestrings (e.g. ``b'foo\nbar\nbaz\n'``) when the code you are testing is
opening a file for binary reading, otherwise the tests will fail on Python
3. The mocked filehandles produced by ``mock_open`` will raise a
:py:obj:`TypeError` if you attempt to read a bytestring when opening for
non-binary reading, and similarly will not let you read a string when
opening a file for binary reading. They will also not permit bytestrings to
be "written" if the mocked filehandle was opened for non-binary writing,
and vice-versa when opened for non-binary writing. These enhancements force
test writers to write more accurate tests.
More Complex Scenarios
**********************
.. _unit-tests-multiple-file-paths:
Multiple File Paths
+++++++++++++++++++
What happens when the code being tested reads from more than one file? For
those cases, you can pass ``read_data`` as a dictionary:
.. code-block:: python
import textwrap
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON,
)
import salt.modules.mymod as mymod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MyAwesomeTestCase(TestCase):
def test_something(self):
contents = {
'/etc/foo.conf': textwrap.dedent('''\
foo
bar
baz
'''),
'/etc/b*.conf': textwrap.dedent('''\
one
two
three
'''),
}
fopen_mock = mock_open(read_data=contents)
with patch('salt.utils.files.fopen', fopen_mock):
result = mymod.myfunc()
assert result is True
This would make ``salt.utils.files.fopen()`` produce filehandles with different
contents depending on which file was being opened by the code being tested.
``/etc/foo.conf`` and any file matching the pattern ``/etc/b*.conf`` would
work, while opening any other path would result in a
:py:obj:`FileNotFoundError` being raised (in Python 2, an ``IOError``).
Since file patterns are supported, it is possible to use a pattern of ``'*'``
to define a fallback if no other patterns match the filename being opened. The
below two ``mock_open`` calls would produce identical results:
.. code-block:: python
mock_open(read_data='foo\n')
mock_open(read_data={'*': 'foo\n'})
.. note::
Take care when specifying the ``read_data`` as a dictionary, in cases where
the patterns overlap (e.g. when both ``/etc/b*.conf`` and ``/etc/bar.conf``
are in the ``read_data``). Dictionary iteration order will determine which
pattern is attempted first, second, etc., with the exception of ``*`` which
is used when no other pattern matches. If your test case calls for
specifying overlapping patterns, and you are not running Python 3.6 or
newer, then an ``OrderedDict`` can be used to ensure matching is handled in
the desired way:
.. code-block:: python
contents = OrderedDict()
contents['/etc/bar.conf'] = 'foo\nbar\nbaz\n'
contents['/etc/b*.conf'] = IOError(errno.EACCES, 'Permission denied')
contents['*'] = 'This is a fallback for files not beginning with "/etc/b"\n'
fopen_mock = mock_open(read_data=contents)
Raising Exceptions
++++++++++++++++++
Instead of a string, an exception can also be used as the ``read_data``:
.. code-block:: python
import errno
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON,
)
import salt.modules.mymod as mymod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MyAwesomeTestCase(TestCase):
def test_something(self):
exc = IOError(errno.EACCES, 'Permission denied')
fopen_mock = mock_open(read_data=exc)
with patch('salt.utils.files.fopen', fopen_mock):
mymod.myfunc()
The above example would raise the specified exception when any file is opened.
The expectation would be that ``mymod.myfunc()`` would gracefully handle the
IOError, so a failure to do that would result in it being raised and causing
the test to fail.
Multiple File Contents
++++++++++++++++++++++
For cases in which a file is being read more than once, and it is necessary to
test a function's behavior based on what the file looks like the second (or
third, etc.) time it is read, just specify the contents for that file as a
list. Each time the file is opened, ``mock_open`` will cycle through the list
and produce a mocked filehandle with the specified contents. For example:
.. code-block:: python
import errno
import textwrap
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
mock_open,
NO_MOCK,
NO_MOCK_REASON,
)
import salt.modules.mymod as mymod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MyAwesomeTestCase(TestCase):
def test_something(self):
contents = {
'/etc/foo.conf': [
textwrap.dedent('''\
foo
bar
'''),
textwrap.dedent('''\
foo
bar
baz
'''),
],
'/etc/b*.conf': [
IOError(errno.ENOENT, 'No such file or directory'),
textwrap.dedent('''\
one
two
three
'''),
],
}
fopen_mock = mock_open(read_data=contents)
with patch('salt.utils.files.fopen', fopen_mock):
result = mymod.myfunc()
assert result is True
Using this example, the first time ``/etc/foo.conf`` is opened, it will
simulate a file with the first string in the list as its contents, while the
second time it is opened, the simulated file's contents will be the second
string in the list.
If no more items remain in the list, then attempting to open the file will
raise a :py:obj:`RuntimeError`. In the example above, if ``/etc/foo.conf`` were
to be opened a third time, a :py:obj:`RuntimeError` would be raised.
Note that exceptions can also be mixed in with strings when using this
technique. In the above example, if ``/etc/bar.conf`` were to be opened twice,
the first time would simulate the file not existing, while the second time
would simulate a file with the string defined in the second element of the list.
.. note::
Notice that the second path in the ``contents`` dictionary above
(``/etc/b*.conf``) contains an asterisk. The items in the list are cycled
through for each match of a given pattern (*not* separately for each
individual file path), so this means that only two files matching that
pattern could be opened before the next one would raise a
:py:obj:`RuntimeError`.
Accessing the Mocked Filehandles in a Test
******************************************
.. note::
The code for the ``MockOpen``, ``MockCall``, and ``MockFH`` classes
(referenced below) can be found in ``tests/support/mock.py``. There are
extensive unit tests for them located in ``tests/unit/test_mock.py``.
The above examples simply show how to mock ``salt.utils.files.fopen()`` to
simulate files with the contents you desire, but you can also access the mocked
filehandles (and more), and use them to craft assertions in your tests. To do
so, just add an ``as`` clause to the end of the ``patch`` statement:
.. code-block:: python
fopen_mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('salt.utils.files.fopen', fopen_mock) as m_open:
# do testing here
...
...
When doing this, ``m_open`` will be a ``MockOpen`` instance. It will contain
several useful attributes:
- **read_data** - A dictionary containing the ``read_data`` passed when
``mock_open`` was invoked. In the event that :ref:`multiple file paths
<unit-tests-multiple-file-paths>` are not used, then this will be a
dictionary mapping ``*`` to the ``read_data`` passed to ``mock_open``.
- **call_count** - An integer representing how many times
``salt.utils.files.fopen()`` was called to open a file.
- **calls** - A list of ``MockCall`` objects. A ``MockCall`` object is a simple
class which stores the arguments passed to it, making the positional
arguments available via its ``args`` attribute, and the keyword arguments
available via its ``kwargs`` attribute.
.. code-block:: python
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
    patch,
mock_open,
MockCall,
NO_MOCK,
NO_MOCK_REASON,
)
import salt.modules.mymod as mymod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MyAwesomeTestCase(TestCase):
def test_something(self):
with patch('salt.utils.files.fopen', mock_open(read_data=b'foo\n')) as m_open:
mymod.myfunc()
# Assert that only two opens attempted
assert m_open.call_count == 2
# Assert that only /etc/foo.conf was opened
assert all(call.args[0] == '/etc/foo.conf' for call in m_open.calls)
            # Assert that the first open was for binary read, and the
# second was for binary write.
assert m_open.calls == [
MockCall('/etc/foo.conf', 'rb'),
MockCall('/etc/foo.conf', 'wb'),
]
Note that ``MockCall`` is imported from ``tests.support.mock`` in the above
example. Also, the second assert above is redundant since it is covered in
the final assert, but both are included simply as an example.
- **filehandles** - A dictionary mapping the unique file paths opened, to lists
of ``MockFH`` objects. Each open creates a unique ``MockFH`` object. Each
``MockFH`` object itself has a number of useful attributes:
- **filename** - The path to the file which was opened using
``salt.utils.files.fopen()``
- **call** - A ``MockCall`` object representing the arguments passed to
``salt.utils.files.fopen()``. Note that this ``MockCall`` is also available
in the parent ``MockOpen`` instance's **calls** list.
- The following methods are mocked using :py:class:`unittest.mock.Mock`
objects, and Mock's built-in asserts (as well as the call data) can be used
as you would with any other Mock object:
- **.read()**
- **.readlines()**
- **.readline()**
- **.close()**
- **.write()**
- **.writelines()**
- **.seek()**
- The read functions (**.read()**, **.readlines()**, **.readline()**) all
work as expected, as does iterating through the file line by line (i.e.
``for line in fh:``).
- The **.tell()** method is also implemented in such a way that it updates
after each time the mocked filehandle is read, and will report the correct
position. The one caveat here is that **.seek()** doesn't actually work
(it's simply mocked), and will not change the position. Additionally,
neither **.write()** or **.writelines()** will modify the mocked
filehandle's contents.
  - The attributes **.write_calls** and **.writelines_calls** (no parentheses)
are available as shorthands and correspond to lists containing the contents
passed for all calls to **.write()** and **.writelines()**, respectively.
Examples
++++++++
.. code-block:: python
with patch('salt.utils.files.fopen', mock_open(read_data=contents)) as m_open:
# Run the code you are unit testing
mymod.myfunc()
# Check that only the expected file was opened, and that it was opened
# only once.
assert m_open.call_count == 1
assert list(m_open.filehandles) == ['/etc/foo.conf']
# "opens" will be a list of all the mocked filehandles opened
opens = m_open.filehandles['/etc/foo.conf']
# Check that we wrote the expected lines ("expected" here is assumed to
# be a list of strings)
assert opens[0].write_calls == expected
.. code-block:: python
with patch('salt.utils.files.fopen', mock_open(read_data=contents)) as m_open:
# Run the code you are unit testing
mymod.myfunc()
# Check that .readlines() was called (remember, it's a Mock)
m_open.filehandles['/etc/foo.conf'][0].readlines.assert_called()
.. code-block:: python
with patch('salt.utils.files.fopen', mock_open(read_data=contents)) as m_open:
# Run the code you are unit testing
mymod.myfunc()
# Check that we read the file and also wrote to it
m_open.filehandles['/etc/foo.conf'][0].read.assert_called_once()
m_open.filehandles['/etc/foo.conf'][1].writelines.assert_called_once()
.. _`Mock()`: https://github.com/testing-cabal/mock
Naming Conventions
------------------
@ -198,7 +596,7 @@ prepended with the ``test_`` naming syntax, as described above.
If a function does not start with ``test_``, then the function acts as a "normal"
function and is not considered a testing function. It will not be included in the
test run or testing output. The same principle applies to unit test files that
do not have the ``test_*.py`` naming syntax. This test file naming convention
do not have the ``test_*.py`` naming syntax. This test file naming convention
is how the test runner recognizes that a test file contains unit tests.
@ -209,8 +607,7 @@ Most commonly, the following imports are necessary to create a unit test:
.. code-block:: python
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.unit import TestCase, skipIf
If you need mock support to your tests, please also import:
@ -513,7 +910,7 @@ This function contains two raise statements and one return statement, so we
know that we will need (at least) three tests. It has two function arguments
and many references to non-builtin functions. In the tests below you will see
that MagicMock's ``patch()`` method may be used as a context manager or as a
decorator. When patching the salt dunders however, please use the context
decorator. When patching the salt dunders however, please use the context
manager approach.
There are three test functions, one for each raise and return statement in the

View File

@ -64,7 +64,7 @@ populated with values from the existing config, but they will be grayed out.
There will also be a checkbox to use the existing config. If you continue, the
existing config will be used. If the checkbox is unchecked, default values are
displayed and can be changed. If you continue, the existing config file in
``c:\salt\conf`` will be removed along with the ``c:\salt\conf\minion.d`
``c:\salt\conf`` will be removed along with the ``c:\salt\conf\minion.d``
directory. The values entered will be used with the default config.
The final page allows you to start the minion service and optionally change its

View File

@ -21,8 +21,8 @@ New in Carbon (2016.11)
-----------------------
The methodologies for network automation have been introduced in
:ref:`Carbon <release-2016-11-0.network-automation-napalm>` based on proxy
minions:
:ref:`2016.11.0 <release-2016-11-0-network-automation-napalm>`. Network
automation support is based on proxy minions.
- :mod:`NAPALM proxy <salt.proxy.napalm>`
- :mod:`Junos proxy<salt.proxy.junos>`

View File

@ -288,6 +288,8 @@ Junos Module Changes
- zeroize - Remove all configuration information on the Routing Engines and reset all key values on a device.
- file_copy - Copy file from proxy to the Junos device.
.. _release-2016-11-0-network-automation-napalm:
Network Automation: NAPALM
==========================
@ -296,7 +298,9 @@ of Salt. It is based on a the `NAPALM <https://github.com/napalm-automation/napa
library and provides facilities to manage the configuration and retrieve data
from network devices running widely used operating systems such: JunOS, IOS-XR,
eOS, IOS, NX-OS etc.
- see `the complete list of supported devices <http://napalm.readthedocs.io/en/latest/support/index.html#supported-devices>`_.
- see `the complete list of supported devices
<http://napalm.readthedocs.io/en/latest/support/index.html#supported-devices>`_.
The connection is established via the :mod:`NAPALM proxy <salt.proxy.napalm>`.

View File

@ -1,11 +1,8 @@
========================================
In Progress: Salt 2017.7.6 Release Notes
========================================
Version 2017.7.6 is an **unreleased** bugfix release for :ref:`2017.7.0
<release-2017-7-0>`. This release is still in progress and has not been
released yet.
===========================
Salt 2017.7.6 Release Notes
===========================
Version 2017.7.6 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Statistics
==========
@ -16,6 +13,15 @@ Statistics
- Contributors: **47** (`Ch3LL`_, `DmitryKuzmenko`_, `GwiYeong`_, `Quarky9`_, `RichardW42`_, `UtahDave`_, `amaclean199`_, `arif-ali`_, `baniobloom`_, `bdrung`_, `benediktwerner`_, `bmiguel-teixeira`_, `cachedout`_, `dafenko`_, `damon-atkins`_, `dwoz`_, `ezh`_, `folti`_, `fpicot`_, `frogunder`_, `garethgreenaway`_, `gtmanfred`_, `isbm`_, `jeroennijhof`_, `jfindlay`_, `jfoboss`_, `kstreee`_, `lomeroe`_, `mattp-`_, `meaksh`_, `mirceaulinic`_, `myinitialsarepm`_, `mzbroch`_, `nages13`_, `paclat`_, `pcjeff`_, `pruiz`_, `psyer`_, `rallytime`_, `s0undt3ch`_, `skizunov`_, `smitty42`_, `terminalmage`_, `twangboy`_, `vutny`_, `yagnik`_, `yannj-fr`_)
Tornado 5.0 Support for Python 2 Only
-------------------------------------
Tornado 5.0 moves to using asyncio for all python3 versions. Because of this
and changes in asyncio between python 3.4 and 3.5 to only be able to use one
ioloop, which requires some rearchitecting, support for tornado 5.0 and python3
versions of salt has been delayed to a later release.
For now, to use tornado 5.0, the python 2 version of salt must be used.
Tornado 5.0 Support for Python 2 Only
-------------------------------------

View File

@ -5,15 +5,38 @@ In Progress: Salt 2017.7.7 Release Notes
Version 2017.7.7 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.
New win_snmp behavior
=====================
The ``2017.7.7`` release contains only a single fix for Issue `#48038`_, which
is a critical bug that occurs in a multi-syndic setup where the same job is run
multiple times on a minion.
- :py:func:`win_snmp.get_community_names
<salt.modules.win_snmp.get_community_names>` now returns the SNMP settings
actually in effect on the box. If settings are managed via GroupPolicy, those
settings will be returned. Otherwise, normal settings are returned.
Statistics
==========
- :py:func:`win_snmp.set_community_names
<salt.modules.win_snmp.set_community_names>` now raises an error when SNMP
settings are being managed by GroupPolicy.
- Total Merges: **1**
- Total Issue References: **1**
- Total PR References: **2**
- Contributors: **2** (`garethgreenaway`_, `rallytime`_)
Changelog for v2017.7.6..v2017.7.7
==================================
*Generated at: 2018-06-14 15:43:34 UTC*
* **ISSUE** `#48038`_: (`austinpapp`_) jobs are not dedup'ing minion side (refs: `#48075`_)
* **PR** `#48098`_: (`rallytime`_) Back-port `#48075`_ to 2017.7.7
@ *2018-06-14 12:53:42 UTC*
* **PR** `#48075`_: (`garethgreenaway`_) [2017.7] Ensure that the shared list of jids is passed (refs: `#48098`_)
* 084de927fe Merge pull request `#48098`_ from rallytime/bp-48075-2017.7.7
* e4e62e8b3a Ensure that the shared list of jids is passed when creating the Minion. Fixes an issue when minions are pointed at multiple syndics.
.. _`#48038`: https://github.com/saltstack/salt/issues/48038
.. _`#48075`: https://github.com/saltstack/salt/pull/48075
.. _`#48098`: https://github.com/saltstack/salt/pull/48098
.. _`austinpapp`: https://github.com/austinpapp
.. _`garethgreenaway`: https://github.com/garethgreenaway
.. _`rallytime`: https://github.com/rallytime

View File

@ -0,0 +1,31 @@
========================================
In Progress: Salt 2017.7.8 Release Notes
========================================
Version 2017.7.8 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.
New win_snmp behavior
=====================
- :py:func:`win_snmp.get_community_names
<salt.modules.win_snmp.get_community_names>` now returns the SNMP settings
actually in effect on the box. If settings are managed via GroupPolicy, those
settings will be returned. Otherwise, normal settings are returned.
- :py:func:`win_snmp.set_community_names
<salt.modules.win_snmp.set_community_names>` now raises an error when SNMP
settings are being managed by GroupPolicy.
Option Added to Disable Docker Mine Updates
===========================================
When a docker container is added, removed, started, stopped, etc., the results
of a :py:func:`docker.ps verbose=True all=True host=True
<salt.modules.dockermod.ps>` are sent to the :ref:`mine <salt-mine>`, to be
used by :py:func:`mine.get_docker <salt.modules.mine.get_docker>`.
A new config option (:conf_minion:`docker.update_mine`) has been added. When
set to ``False``, Salt will not send this information to the mine. This is
useful in cases where sensitive information is stored in the container's
environment.

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,8 @@
========================================
In Progress: Salt 2018.3.1 Release Notes
========================================
===========================
Salt 2018.3.1 Release Notes
===========================
Version 2018.3.1 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
Version 2018.3.1 is a bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
Statistics
==========
@ -47,7 +46,66 @@ to the flat roster file. This behavior can also be enabled by setting
Changelog for v2018.3.0..v2018.3.1
==================================
*Generated at: 2018-05-30 14:09:03 UTC*
*Generated at: 2018-06-06 17:43:01 UTC*
* **ISSUE** `#47955`_: (`frogunder`_) 2018.3.1 Creating Windows machine in Amazon using salt-cloud fails. (refs: `#47989`_)
* **PR** `#47998`_: (`rallytime`_) Back-port `#47989`_ to 2018.3.1
@ *2018-06-06 17:08:04 UTC*
* **PR** `#47989`_: (`dwoz`_) Properly decode password from aws using m2crypto (refs: `#47998`_)
* 605463ca0d Merge pull request `#47998`_ from rallytime/bp-47989
* 1b7e9ac2d3 Lint fixes
* 0545152ddd Properly decode password from aws using m2crypto
* **PR** `#47965`_: (`Ch3LL`_) Add PR 47924 from 2018.3 branch
@ *2018-06-06 13:54:09 UTC*
* dbc798ac68 Merge pull request `#47965`_ from Ch3LL/gitpy_mac_3.1
* bf608abd44 Catch all exceptions in git import for salt.utils.gitfs
* **PR** `#47973`_: (`terminalmage`_) salt.modules.testinframod: fix TypeError invoking types.FunctionType
@ *2018-06-06 13:53:46 UTC*
* 864d640633 Merge pull request `#47973`_ from terminalmage/fix-testinfra
* 4518c89484 Lint: Remove unused six import
* c6816b2149 salt.modules.testinframod: fix TypeError invoking types.FunctionType
* **ISSUE** `#47236`_: (`MorphBonehunter`_) x509.private_key_managed broken after upgrade to 2018.3.0 (refs: `#47957`_)
* **PR** `#47967`_: (`rallytime`_) Back-port `#47957`_ to 2018.3.1
@ *2018-06-06 13:53:28 UTC*
* **PR** `#47957`_: (`garethgreenaway`_) [2018.8] Ensure x509 passphrase is a string (refs: `#47967`_)
* 5ddcfff420 Merge pull request `#47967`_ from rallytime/bp-47957
* 9a55579af1 removing unnecessary change
* 329b2e5956 Ensuring that when a passphrase is passed in, it is returned as a string from the passphrase callback.
* **PR** `#47902`_: (`Ch3LL`_) Remove In Progress for 2018.3.1 Release Notes
@ *2018-05-30 18:26:49 UTC*
* 9c964fdbce Merge pull request `#47902`_ from Ch3LL/rn_in_progress
* f560a151cd Remove In Progress for 2018.3.1 Release Notes
* **PR** `#47897`_: (`Ch3LL`_) Add changelog to 2018.3.1 release notes
@ *2018-05-30 15:04:42 UTC*
* ea7b4fdc08 Merge pull request `#47897`_ from Ch3LL/rn_2018
* e27ee273a7 Add == line to changelog line for release notes
* 61e56d275d Add changelog to 2018.3.1 release notes
* **ISSUE** `#47784`_: (`jpsv`_) win_lgpo.py line 5368; AttributeError: 'OrderedDict' object has no attribute 'lower' (refs: `#47848`_)
@ -520,7 +578,7 @@ Changelog for v2018.3.0..v2018.3.1
* fd9bc06aab bytes file that decodes the same utf-8 and cp1252
* **ISSUE** `#46660`_: (`mruepp`_) top file merging same does produce conflicting ids with gitfs (refs: `#46751`_, `#47354`_)
* **ISSUE** `#46660`_: (`mruepp`_) top file merging same does produce conflicting ids with gitfs (refs: `#47354`_, `#46751`_)
* **PR** `#47465`_: (`rallytime`_) Back-port `#47354`_ to 2018.3
@ *2018-05-04 13:06:04 UTC*
@ -769,9 +827,9 @@ Changelog for v2018.3.0..v2018.3.1
* **PR** `#47368`_: (`rallytime`_) [2018.3] Merge forward from 2017.7 to 2018.3
@ *2018-05-01 18:56:20 UTC*
* **PR** `#47106`_: (`DmitryKuzmenko`_) Tornado50 compatibility fixes (refs: `#47368`_, `#47374`_, `#47433`_)
* **PR** `#47106`_: (`DmitryKuzmenko`_) Tornado50 compatibility fixes (refs: `#47374`_, `#47368`_, `#47433`_)
* **PR** `#46002`_: (`isbm`_) Pyzmq 17.0.0 proper handling (refs: `#47368`_, `#47374`_)
* **PR** `#46002`_: (`isbm`_) Pyzmq 17.0.0 proper handling (refs: `#47374`_, `#47368`_)
* 0bdfaa5ffe Merge pull request `#47368`_ from rallytime/merge-2018.3
@ -1038,9 +1096,9 @@ Changelog for v2018.3.0..v2018.3.1
* **PR** `#47374`_: (`DmitryKuzmenko`_) tornado50 merge forward for 2018.3
@ *2018-04-29 16:29:12 UTC*
* **PR** `#47106`_: (`DmitryKuzmenko`_) Tornado50 compatibility fixes (refs: `#47368`_, `#47374`_, `#47433`_)
* **PR** `#47106`_: (`DmitryKuzmenko`_) Tornado50 compatibility fixes (refs: `#47374`_, `#47368`_, `#47433`_)
* **PR** `#46002`_: (`isbm`_) Pyzmq 17.0.0 proper handling (refs: `#47368`_, `#47374`_)
* **PR** `#46002`_: (`isbm`_) Pyzmq 17.0.0 proper handling (refs: `#47374`_, `#47368`_)
* 3400f829c4 Merge pull request `#47374`_ from DSRCorporation/bugs/tornado50-2018.3
@ -1140,7 +1198,7 @@ Changelog for v2018.3.0..v2018.3.1
* cc2538e08f The grp modules is not available on windows
* **ISSUE** `#46862`_: (`kivoli`_) Setting locale.system fails in 2018.3 (refs: `#46869`_, `#47280`_)
* **ISSUE** `#46862`_: (`kivoli`_) Setting locale.system fails in 2018.3 (refs: `#47280`_, `#46869`_)
* **PR** `#47280`_: (`gtmanfred`_) make sure not to send invalid information
@ *2018-04-25 17:46:45 UTC*
@ -1264,18 +1322,18 @@ Changelog for v2018.3.0..v2018.3.1
* b8630a70be Fix virtual package detection
* **ISSUE** `#47225`_: (`pruiz`_) zfs.filesystem_present takes forever on a dataset with lots (10k+) of snapshots (refs: `#47226`_, `#47227`_, `#47228`_)
* **ISSUE** `#47225`_: (`pruiz`_) zfs.filesystem_present takes forever on a dataset with lots (10k+) of snapshots (refs: `#47228`_, `#47227`_, `#47226`_)
* **PR** `#47228`_: (`pruiz`_) Fix issue `#47225`_: avoid zfs.filesystem_present slowdown when dataset has lots of snapshots (2018.3 branch)
@ *2018-04-24 13:35:21 UTC*
* **PR** `#47226`_: (`pruiz`_) Fix issue `#47225`_: avoid zfs.filesystem_present slowdown when dataset has lots of snapshots (refs: `#47227`_, `#47228`_)
* **PR** `#47226`_: (`pruiz`_) Fix issue `#47225`_: avoid zfs.filesystem_present slowdown when dataset has lots of snapshots (refs: `#47228`_, `#47227`_)
* 428e915d6a Merge pull request `#47228`_ from pruiz/pruiz/zfs-dataset-present-slow-2018.3
* cfbf136ab2 Fix issue `#47225`_: avoid zfs.filesystem_present slowdown when dataset has lots of snapshots
* **ISSUE** `#46943`_: (`Auha`_) Slack.Engine could not start (refs: `#47109`_, `#47262`_)
* **ISSUE** `#46943`_: (`Auha`_) Slack.Engine could not start (refs: `#47262`_, `#47109`_)
* **PR** `#47262`_: (`garethgreenaway`_) [2018.3] Fixes to targeting in Slack engine
@ *2018-04-24 13:18:36 UTC*
@ -1643,7 +1701,7 @@ Changelog for v2018.3.0..v2018.3.1
* 92eeaa51bd Put some error checking in the shell command
* **ISSUE** `#46943`_: (`Auha`_) Slack.Engine could not start (refs: `#47109`_, `#47262`_)
* **ISSUE** `#46943`_: (`Auha`_) Slack.Engine could not start (refs: `#47262`_, `#47109`_)
* **PR** `#47109`_: (`garethgreenaway`_) [2018.3] fixes to Slack engine
@ *2018-04-17 13:56:27 UTC*
@ -1907,7 +1965,7 @@ Changelog for v2018.3.0..v2018.3.1
* b429fc3e74 Add tests for mac_utils
* b5f67130cc Used *args and **kwargs
* b5f67130cc Used \*args and \*\*kwargs
* ed061617a2 Fix unicode_literal issue in mac_assistive
@ -2011,7 +2069,7 @@ Changelog for v2018.3.0..v2018.3.1
* **ISSUE** `#46834`_: (`oeuftete`_) strftime filter not found in 2018.3.0 (refs: `#46848`_)
* **ISSUE** `#46668`_: (`anlutro`_) Jinja2 filter strftime stopped working in salt-ssh 2018.3 (refs: `#46744`_, `#46848`_)
* **ISSUE** `#46668`_: (`anlutro`_) Jinja2 filter strftime stopped working in salt-ssh 2018.3 (refs: `#46848`_, `#46744`_)
* **PR** `#46848`_: (`garethgreenaway`_) [2018.8] salt-ssh jinja filters tests
@ *2018-04-10 16:19:51 UTC*
@ -2249,7 +2307,7 @@ Changelog for v2018.3.0..v2018.3.1
* d9511d04d4 `#43499`_, zmq setsockopt need to adapt python3
* **ISSUE** `#46862`_: (`kivoli`_) Setting locale.system fails in 2018.3 (refs: `#46869`_, `#47280`_)
* **ISSUE** `#46862`_: (`kivoli`_) Setting locale.system fails in 2018.3 (refs: `#47280`_, `#46869`_)
* **PR** `#46869`_: (`gtmanfred`_) Always return dictionary for _localectl_status
@ *2018-04-05 13:25:14 UTC*
@ -2418,7 +2476,7 @@ Changelog for v2018.3.0..v2018.3.1
* 62d64c9230 Fix missing import
* 18b1730320 Skip test that requires pywin32 on *nix platforms
* 18b1730320 Skip test that requires pywin32 on \*nix platforms
* 45dce1a485 Add reg module to globals
@ -2783,7 +2841,7 @@ Changelog for v2018.3.0..v2018.3.1
* 19bd1d9db5 handle user-data for metadata grains
* **ISSUE** `#46668`_: (`anlutro`_) Jinja2 filter strftime stopped working in salt-ssh 2018.3 (refs: `#46744`_, `#46848`_)
* **ISSUE** `#46668`_: (`anlutro`_) Jinja2 filter strftime stopped working in salt-ssh 2018.3 (refs: `#46848`_, `#46744`_)
* **PR** `#46744`_: (`garethgreenaway`_) [2018.3] Ensure salt.utils.dateutils is available for templates via salt-ssh
@ *2018-03-28 21:09:46 UTC*
@ -3442,14 +3500,14 @@ Changelog for v2018.3.0..v2018.3.1
* e0940a9fc4 Properly detect use of the state.orch alias and add orch jid to kwargs
* **ISSUE** `#42932`_: (`bobrik`_) cmd.run with bg: true doesn't fail properly (refs: `#45932`_, `#46172`_)
* **ISSUE** `#42932`_: (`bobrik`_) cmd.run with bg: true doesn't fail properly (refs: `#46172`_, `#45932`_)
* **PR** `#46172`_: (`The-Loeki`_) cmdmod: reimplementation of `#45932`_ for Oxygen
@ *2018-02-28 19:14:26 UTC*
* **PR** `#45932`_: (`The-Loeki`_) Fix cmd run_all bg error (refs: `#46172`_)
* **PR** `#39980`_: (`vutny`_) [2016.3] Allow to use `bg` kwarg for `cmd.run` state function (refs: `#45932`_, `#46172`_)
* **PR** `#39980`_: (`vutny`_) [2016.3] Allow to use `bg` kwarg for `cmd.run` state function (refs: `#46172`_, `#45932`_)
* 20d869c228 Merge pull request `#46172`_ from The-Loeki/fix_cmd_run_all_bg_oxygen
@ -4504,6 +4562,7 @@ Changelog for v2018.3.0..v2018.3.1
.. _`#47226`: https://github.com/saltstack/salt/pull/47226
.. _`#47227`: https://github.com/saltstack/salt/pull/47227
.. _`#47228`: https://github.com/saltstack/salt/pull/47228
.. _`#47236`: https://github.com/saltstack/salt/issues/47236
.. _`#47239`: https://github.com/saltstack/salt/issues/47239
.. _`#47241`: https://github.com/saltstack/salt/pull/47241
.. _`#47242`: https://github.com/saltstack/salt/pull/47242
@ -4665,6 +4724,15 @@ Changelog for v2018.3.0..v2018.3.1
.. _`#47848`: https://github.com/saltstack/salt/pull/47848
.. _`#47874`: https://github.com/saltstack/salt/pull/47874
.. _`#47881`: https://github.com/saltstack/salt/pull/47881
.. _`#47897`: https://github.com/saltstack/salt/pull/47897
.. _`#47902`: https://github.com/saltstack/salt/pull/47902
.. _`#47955`: https://github.com/saltstack/salt/issues/47955
.. _`#47957`: https://github.com/saltstack/salt/pull/47957
.. _`#47965`: https://github.com/saltstack/salt/pull/47965
.. _`#47967`: https://github.com/saltstack/salt/pull/47967
.. _`#47973`: https://github.com/saltstack/salt/pull/47973
.. _`#47989`: https://github.com/saltstack/salt/pull/47989
.. _`#47998`: https://github.com/saltstack/salt/pull/47998
.. _`AmbicaY`: https://github.com/AmbicaY
.. _`Auha`: https://github.com/Auha
.. _`Ch3LL`: https://github.com/Ch3LL
@ -4674,6 +4742,7 @@ Changelog for v2018.3.0..v2018.3.1
.. _`Kimol`: https://github.com/Kimol
.. _`L4rS6`: https://github.com/L4rS6
.. _`LukeCarrier`: https://github.com/LukeCarrier
.. _`MorphBonehunter`: https://github.com/MorphBonehunter
.. _`OrlandoArcapix`: https://github.com/OrlandoArcapix
.. _`PhilippeAB`: https://github.com/PhilippeAB
.. _`SynPrime`: https://github.com/SynPrime
@ -4708,6 +4777,7 @@ Changelog for v2018.3.0..v2018.3.1
.. _`ezh`: https://github.com/ezh
.. _`femnad`: https://github.com/femnad
.. _`folti`: https://github.com/folti
.. _`frogunder`: https://github.com/frogunder
.. _`garethgreenaway`: https://github.com/garethgreenaway
.. _`gtmanfred`: https://github.com/gtmanfred
.. _`isbm`: https://github.com/isbm

View File

@ -1,16 +1,117 @@
========================================
In Progress: Salt 2018.3.2 Release Notes
========================================
===========================
Salt 2018.3.2 Release Notes
===========================
Version 2018.3.2 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
Version 2018.3.2 is a bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
Changes to win_timezone
=======================
The ``2018.3.2`` release contains only a small number of fixes, which are detailed
below.
Improves timezone detection by using the pytz module.
This release fixes two critical issues.
``timezone.get_offset`` and ``timezone.get_zonecode`` now work properly.
The first is Issue `#48038`_, which is a critical bug that occurs in a multi-syndic
setup where the same job is run multiple times on a minion.
Adds ``timezone.list`` to list supported timezones in either Windows or Unix
format.
The second issue is `#48130`_. This bug appears in certain setups where the Master
reports a Minion time-out, even though the job is still running on the Minion.
Both of these issues have been fixed with this release.
Statistics
==========
- Total Merges: **7**
- Total Issue References: **2**
- Total PR References: **10**
- Contributors: **4** (`cro`_, `garethgreenaway`_, `gtmanfred`_, `rallytime`_)
Changelog for v2018.3.1..v2018.3.2
==================================
*Generated at: 2018-06-17 19:17:16 UTC*
* **ISSUE** `#48130`_: (`rmarchei`_) Minion timeouts with 2018.3.1 (refs: `#48158`_)
* **PR** `#48158`_: (`gtmanfred`_) always listen when gathering job info
@ *2018-06-17 19:04:03 UTC*
* 521e926458 Merge pull request `#48158`_ from gtmanfred/2018.3.2
* cecf564433 always listen when gathering job info
* **PR** `#48138`_: (`rallytime`_) Update man pages for 2018.3.2
@ *2018-06-14 21:22:34 UTC*
* f154545aff Merge pull request `#48138`_ from rallytime/man-pages-2018.3.2
* 8c340134f5 Update man pages for 2018.3.2
* **PR** `#48137`_: (`gtmanfred`_) [2018.3.2] bootstrap kitchen branch tests with 2017.7.6
@ *2018-06-14 21:20:28 UTC*
* b49271b76d Merge pull request `#48137`_ from gtmanfred/2018.3.2
* 6128519e8b bootstrap kitchen branch tests with 2017.7.6
* **PR** `#48129`_: (`rallytime`_) Add release notes for 2018.3.2
@ *2018-06-14 15:48:36 UTC*
* 21aaf1cbc4 Merge pull request `#48129`_ from rallytime/release-notes-2018.3.2
* 0b13be0111 Add release notes for 2018.3.2
* **PR** `#48100`_: (`rallytime`_) Back-port `#48014`_ to 2018.3.2
@ *2018-06-14 12:54:52 UTC*
* **PR** `#48014`_: (`cro`_) Find job pause (refs: `#48100`_)
* 36b99ae80a Merge pull request `#48100`_ from rallytime/bp-48014
* 77feccc5c4 Lint: Add blank line
* 159b052962 One more case where returner doesn't respond
* 91b45b4cc4 Catch two cases when a returner is not able to be contacted--these would throw a stacktrace.
* **PR** `#48099`_: (`rallytime`_) Back-port `#47915`_ to 2018.3.2
@ *2018-06-14 12:54:23 UTC*
* **PR** `#47915`_: (`garethgreenaway`_) [2018.3] state runner pause resume kill (refs: `#48099`_)
* 40c1bfdec9 Merge pull request `#48099`_ from rallytime/bp-47915
* 3556850058 fixing typo in alias_function call.
* 4b0ff496fa Some fixes to the set_pause and rm_pause function in the state runner, renaming to in line with the functions in the state module. Including aliases to previous names for back-ward compatibility. Including a soft_kill function to kill running orchestration states. A new test to test soft_kill functionality.
* **ISSUE** `#48038`_: (`austinpapp`_) jobs are not dedup'ing minion side (refs: `#48075`_)
* **PR** `#48097`_: (`rallytime`_) Back-port `#48075`_ to 2018.3.2
@ *2018-06-14 12:52:44 UTC*
* **PR** `#48075`_: (`garethgreenaway`_) [2017.7] Ensure that the shared list of jids is passed (refs: `#48097`_)
* 074a97dcfa Merge pull request `#48097`_ from rallytime/bp-48075
* e4c719b55f Ensure that the shared list of jids is passed when creating the Minion. Fixes an issue when minions are pointed at multiple syndics.
.. _`#47915`: https://github.com/saltstack/salt/pull/47915
.. _`#48014`: https://github.com/saltstack/salt/pull/48014
.. _`#48038`: https://github.com/saltstack/salt/issues/48038
.. _`#48075`: https://github.com/saltstack/salt/pull/48075
.. _`#48097`: https://github.com/saltstack/salt/pull/48097
.. _`#48099`: https://github.com/saltstack/salt/pull/48099
.. _`#48100`: https://github.com/saltstack/salt/pull/48100
.. _`#48129`: https://github.com/saltstack/salt/pull/48129
.. _`#48130`: https://github.com/saltstack/salt/issues/48130
.. _`#48137`: https://github.com/saltstack/salt/pull/48137
.. _`#48138`: https://github.com/saltstack/salt/pull/48138
.. _`#48158`: https://github.com/saltstack/salt/pull/48158
.. _`austinpapp`: https://github.com/austinpapp
.. _`cro`: https://github.com/cro
.. _`garethgreenaway`: https://github.com/garethgreenaway
.. _`gtmanfred`: https://github.com/gtmanfred
.. _`rallytime`: https://github.com/rallytime
.. _`rmarchei`: https://github.com/rmarchei

View File

@ -0,0 +1,16 @@
========================================
In Progress: Salt 2018.3.3 Release Notes
========================================
Version 2018.3.3 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
Changes to win_timezone
=======================
Improves timezone detection by using the pytz module.
``timezone.get_offset`` and ``timezone.get_zonecode`` now work properly.
Adds ``timezone.list`` to list supported timezones in either Windows or Unix
format.

View File

@ -29,7 +29,7 @@ argument value in states.
Slot syntax looks close to the simple python function call.
.. code-block::
.. code-block:: text
__slot__:salt:<module>.<function>(<args>, ..., <kwargs...>, ...)

View File

@ -597,9 +597,8 @@ overrides all levels below it):
.. code-block:: yaml
gitfs_saltenv:
- saltenv:
- dev:
- mountpoint: salt://bar
- dev:
- mountpoint: salt://bar
3. Per-remote configuration parameter

View File

@ -1,6 +1,7 @@
[Unit]
Description=The Salt Syndic daemon
After=network.target
PartOf=salt-master.service
[Service]
Type=notify

View File

@ -2,6 +2,7 @@
Description=The Salt Master Server
Documentation=man:salt-syndic(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target
PartOf=salt-master.service
[Service]
Type=notify

View File

@ -197,17 +197,17 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
} else {
$p = New-Item $Env:SALT_PIP_LOCAL_CACHE -ItemType Directory -Force # Ensure directory exists
if ( (Get-ChildItem $Env:SALT_PIP_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_pip.txt into empty local cache SALT_REQ_PIP $Env:SALT_PIP_LOCAL_CACHE"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
#==============================================================================
@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check --no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python2Dir'])\python.exe -m pip --disable-pip-version-check install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================

View File

@ -197,17 +197,17 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check --no-cache-dir install -r $($script_path)\req_pip.txt" "python pip"
} else {
$p = New-Item $Env:SALT_PIP_LOCAL_CACHE -ItemType Directory -Force # Ensure directory exists
if ( (Get-ChildItem $Env:SALT_PIP_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_pip.txt into empty local cache SALT_REQ_PIP $Env:SALT_PIP_LOCAL_CACHE"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
#==============================================================================
@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --no-cache-dir install -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check --no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
Start_Process_and_test_exitcode "cmd" "/c $($ini['Settings']['Python3Dir'])\python.exe -m pip --disable-pip-version-check install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================

View File

@ -1,4 +0,0 @@
[pytest]
addopts = --ssh-tests -ra -sv
testpaths = tests
norecursedirs = tests/kitchen

View File

@ -1,10 +1,6 @@
-r base.txt
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
SaltPyLint>=v2017.3.6
pytest>=3.5.0
git+https://github.com/saltstack/pytest-salt.git@master#egg=pytest-salt

View File

@ -1,3 +1,4 @@
pytest>=3.5.0
pytest-helpers-namespace
pytest-tempdir
pytest-cov

36
requirements/tests.txt Normal file
View File

@ -0,0 +1,36 @@
-r zeromq.txt
-r dev.txt
-r pytest.txt
apache-libcloud>=1.0.0
boto>=2.32.1
boto3>=1.2.1
moto>=0.3.6
docker; sys.platform != 'win32'
docker==2.7.0; sys.platform == 'win32'
virtualenv
setuptools>=30
six>=1.10.0
timelib
coverage
keyring==5.7.1
python-gnupg
python-etcd==0.4.2
GitPython
supervisor; python_version < '3'
kubernetes<4.0
psutil
pyvmomi
setproctitle
cherrypy; sys.platform != 'win32' and sys.platform != 'darwin'
pyinotify; sys.platform != 'win32' and sys.platform != 'darwin'
PyMySQL; sys.platform != 'win32' and sys.platform != 'darwin'
jsonschema
strict_rfc3339
rfc3987
jinja2
pyOpenSSL
ioflo
dnspython
SaltTesting==2017.6.1
junos-eznc
jxmlease

View File

@ -283,7 +283,7 @@ class LoadAuth(object):
return False
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.debug('The eauth system "%s" is not enabled', load['eauth'])
log.warning('Authentication failure of type "eauth" occurred.')
return False
@ -361,6 +361,7 @@ class LoadAuth(object):
eauth = token['eauth'] if token else load['eauth']
if eauth not in self.opts['external_auth']:
# No matching module is allowed in config
log.debug('The eauth system "%s" is not enabled', eauth)
log.warning('Authorization failure occurred.')
return None
@ -371,6 +372,9 @@ class LoadAuth(object):
name = self.load_name(load) # The username we are attempting to auth with
groups = self.get_groups(load) # The groups this user belongs to
eauth_config = self.opts['external_auth'][eauth]
if not eauth_config:
log.debug('eauth "%s" configuration is empty', eauth)
if not groups:
groups = []
@ -690,6 +694,7 @@ class Resolver(object):
if fstr not in self.auth:
print(('The specified external authentication system "{0}" is '
'not available').format(eauth))
print("Available eauth types: {0}".format(", ".join(self.auth.file_mapping.keys())))
return ret
args = salt.utils.args.arg_lookup(self.auth[fstr])

View File

@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
'''
NAPALM functions
================
Watch NAPALM functions and fire events on specific triggers
===========================================================
.. versionadded:: 2018.3.0
Watch NAPALM functions and fire events on specific triggers.
.. note::
@ -14,7 +13,7 @@ Watch NAPALM functions and fire events on specific triggers.
Check the documentation for the
:mod:`NAPALM proxy module <salt.proxy.napalm>`.
_NAPALM: http://napalm.readthedocs.io/en/latest/index.html
.. _NAPALM: http://napalm.readthedocs.io/en/latest/index.html
The configuration accepts a list of Salt functions to be
invoked, and the corresponding output hierarchy that should
@ -134,7 +133,7 @@ Event structure example:
.. code-block:: json
salt/beacon/edge01.bjm01/napalm/junos/ntp.stats {
{
"_stamp": "2017-09-05T09:51:09.377202",
"args": [],
"data": {

View File

@ -20,7 +20,7 @@ import salt.utils.stringutils
import salt.utils.files
# Import 3rd-party libs
import salt.ext.six
from salt.ext import six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
@ -190,7 +190,7 @@ def beacon(config):
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], salt.ext.six.string_types):
if isinstance(event[field], six.string_types):
if isinstance(event[field], bytes):
event[field] = salt.utils.stringutils.to_unicode(event[field])
event[field] = event[field].strip('\x00')

View File

@ -120,7 +120,7 @@ class BaseCaller(object):
'''
Print out the grains
'''
grains = salt.loader.grains(self.opts)
grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts)
def run(self):

View File

@ -559,6 +559,11 @@ class LocalClient(object):
{'stewart': {...}}
'''
if 'expr_form' in kwargs:
# We need to re-import salt.utils.versions here
# even though it has already been imported.
# when cmd_batch is called via the NetAPI
# the module is unavailable.
import salt.utils.versions
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '

View File

@ -83,9 +83,10 @@ def get_configured_provider():
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('personal_access_token',)
opts=__opts__,
provider=__active_provider_name__ or __virtualname__,
aliases=__virtual_aliases__,
required_keys=('personal_access_token',)
)

View File

@ -2421,7 +2421,7 @@ def wait_for_instance(
)
pprint.pprint(console)
time.sleep(5)
output = console['output_decoded']
output = salt.utils.stringutils.to_unicode(console['output_decoded'])
comps = output.split('-----BEGIN SSH HOST KEY KEYS-----')
if len(comps) < 2:
# Fail; there are no host keys
@ -4762,7 +4762,7 @@ def get_password_data(
rsa_key = kwargs['key']
pwdata = base64.b64decode(pwdata)
if HAS_M2:
key = RSA.load_key_string(rsa_key)
key = RSA.load_key_string(rsa_key.encode('ascii'))
password = key.private_decrypt(pwdata, RSA.pkcs1_padding)
else:
dsize = Crypto.Hash.SHA.digest_size

View File

@ -2374,21 +2374,30 @@ def destroy(vm_name, call=None):
def create_attach_volumes(name, kwargs, call=None):
'''
.. versionadded:: 2017.7.0
Create and attach multiple volumes to a node. The 'volumes' and 'node'
arguments are required, where 'node' is a libcloud node, and 'volumes'
is a list of maps, where each map contains:
'size': The size of the new disk in GB. Required.
'type': The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard.
'image': An image to use for this new disk. Optional.
'snapshot': A snapshot to use for this new disk. Optional.
'auto_delete': An option(bool) to keep or remove the disk upon
instance deletion. Optional, defaults to False.
size
The size of the new disk in GB. Required.
type
The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard.
image
An image to use for this new disk. Optional.
snapshot
A snapshot to use for this new disk. Optional.
auto_delete
An option(bool) to keep or remove the disk upon instance deletion.
Optional, defaults to False.
Volumes are attached in the order in which they are given, thus on a new
node the first volume will be /dev/sdb, the second /dev/sdc, and so on.
.. versionadded:: 2017.7.0
'''
if call != 'action':
raise SaltCloudSystemExit(

View File

@ -3,16 +3,13 @@
1&1 Cloud Server Module
=======================
=======
The 1&1 SaltStack cloud module allows a 1&1 server to
be automatically deployed and bootstrapped with Salt.
The 1&1 SaltStack cloud module allows a 1&1 server to be automatically deployed
and bootstrapped with Salt.
:depends: 1and1 >= 1.2.0
The module requires the 1&1 api_token to be provided.
The server should also be assigned a public LAN, a private LAN,
or both along with SSH key pairs.
...
The module requires the 1&1 api_token to be provided. The server should also
be assigned a public LAN, a private LAN, or both along with SSH key pairs.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/oneandone.conf``:

View File

@ -542,7 +542,7 @@ def list_subnets(conn=None, call=None, kwargs=None):
network
network to list subnets of
.. code-block::
.. code-block:: bash
salt-cloud -f list_subnets myopenstack network=salt-net

View File

@ -3325,7 +3325,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
return value
def is_provider_configured(opts, provider, required_keys=(), log_message=True):
def is_provider_configured(opts, provider, required_keys=(), log_message=True, aliases=()):
'''
Check and return the first matching and fully configured cloud provider
configuration.
@ -3353,7 +3353,7 @@ def is_provider_configured(opts, provider, required_keys=(), log_message=True):
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
if driver != provider and driver not in aliases:
continue
# If we reached this far, we have a matching provider, let's see if

View File

@ -18,10 +18,11 @@ the saltmaster's minion pillar.
.. versionadded: 2016.3.0
:configuration: Example configuration using only a 'default' group. The default group is not special.
In addition, other groups are being loaded from pillars.
:configuration: Example configuration using only a 'default' group. The default
group is not special. In addition, other groups are being loaded from
pillars.
.. code-block:: yaml
.. code-block:: text
engines:
- slack:
@ -42,7 +43,7 @@ In addition, other groups are being loaded from pillars.
list_jobs:
cmd: jobs.list_jobs
list_commands:
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
cmd: 'pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list'
default_target:
target: saltmaster
tgt_type: glob
@ -54,12 +55,14 @@ In addition, other groups are being loaded from pillars.
target: saltmaster
tgt_type: list
:configuration: Example configuration using the 'default' group and a non-default group and a pillar that will be merged in
If the user is '*' (without the quotes) then the group's users or commands will match all users as appropriate
:configuration: Example configuration using the 'default' group and a
non-default group and a pillar that will be merged in If the user is '*'
(without the quotes) then the group's users or commands will match all
users as appropriate
.. versionadded: 2017.7.0
.. code-block:: yaml
.. code-block:: text
engines:
- slack:
@ -79,7 +82,7 @@ In addition, other groups are being loaded from pillars.
list_jobs:
cmd: jobs.list_jobs
list_commands:
cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list
cmd: 'pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list'
gods:
users:
- garethgreenaway
@ -401,13 +404,16 @@ class SlackClient(object):
input_valid_users = set
input_valid_commands = set
When the trigger_string prefixes the message text, yields a dictionary of {
'message_data': m_data,
'cmdline': cmdline_list, # this is a list
'channel': channel,
'user': m_data['user'],
'slack_client': sc
}
When the trigger_string prefixes the message text, yields a dictionary
of::
{
'message_data': m_data,
'cmdline': cmdline_list, # this is a list
'channel': channel,
'user': m_data['user'],
'slack_client': sc
}
else yields {'message_data': m_data} and the caller can handle that
@ -526,17 +532,18 @@ class SlackClient(object):
If no configured target is provided, the command line will be parsed
for target=foo and tgt_type=bar
Test for this:
h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {'target': '*', 'tgt_type': 'glob'},
'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}},
'users': {'dmangot', 'jmickle', 'pcn'}}
f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}}
Test for this::
g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {'target': '*', 'tgt_type': 'glob'},
'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}}
h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {'target': '*', 'tgt_type': 'glob'},
'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}},
'users': {'dmangot', 'jmickle', 'pcn'}}
f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}}
g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},
'default_target': {'target': '*', 'tgt_type': 'glob'},
'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}}
Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target

View File

@ -947,7 +947,7 @@ def _virtual(osdata):
if os.path.isfile('/sys/devices/virtual/dmi/id/product_name'):
try:
with salt.utils.files.fopen('/sys/devices/virtual/dmi/id/product_name', 'r') as fhr:
output = fhr.read()
output = salt.utils.stringutils.to_unicode(fhr.read())
if 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
elif 'RHEV Hypervisor' in output:
@ -1371,6 +1371,18 @@ _OS_FAMILY_MAP = {
'AIX': 'AIX'
}
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
_LSB_REGEX = re.compile((
'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
'([\\w\\s\\.\\-_]+)(?:\'|")?'
))
def _linux_bin_exists(binary):
'''
@ -1403,32 +1415,49 @@ def _get_interfaces():
return _INTERFACES
def _parse_os_release(os_release_files):
def _parse_lsb_release():
ret = {}
try:
log.trace('Attempting to parse /etc/lsb-release')
with salt.utils.files.fopen('/etc/lsb-release') as ifile:
for line in ifile:
try:
key, value = _LSB_REGEX.match(line.rstrip('\n')).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
ret['lsb_{0}'.format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
log.trace('Failed to parse /etc/lsb-release: %s', exc)
return ret
def _parse_os_release(*os_release_files):
'''
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
'''
data = dict()
ret = {}
for filename in os_release_files:
if os.path.isfile(filename):
try:
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r'\1', match.group(2)
)
break
else:
# None of the specified os-release files exist
return data
except (IOError, OSError):
pass
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash, backtick)
# are escaped with backslashes
data[match.group(1)] = re.sub(r'\\([$"\'\\`])', r'\1', match.group(2))
return data
return ret
def os_data():
@ -1491,6 +1520,7 @@ def os_data():
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists('selinuxenabled'):
log.trace('Adding selinux grains')
grains['selinux'] = {}
grains['selinux']['enabled'] = __salt__['cmd.retcode'](
'selinuxenabled'
@ -1502,6 +1532,7 @@ def os_data():
# Add systemd grain, if you have it
if _linux_bin_exists('systemctl') and _linux_bin_exists('localectl'):
log.trace('Adding systemd grains')
grains['systemd'] = {}
systemd_info = __salt__['cmd.run'](
'systemctl --version'
@ -1511,68 +1542,72 @@ def os_data():
# Add init grain
grains['init'] = 'unknown'
log.trace('Adding init grain')
try:
os.stat('/run/systemd/system')
grains['init'] = 'systemd'
except (OSError, IOError):
if os.path.exists('/proc/1/cmdline'):
try:
with salt.utils.files.fopen('/proc/1/cmdline') as fhr:
init_cmdline = fhr.read().replace('\x00', ' ').split()
except (IOError, OSError):
pass
else:
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
# Empty init_cmdline
init_bin = None
log.warning('Unable to fetch data from /proc/1/cmdline')
if init_bin is not None and init_bin.endswith('bin/init'):
supported_inits = (b'upstart', b'sysvinit', b'systemd')
edge_len = max(len(x) for x in supported_inits) - 1
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
# Empty init_cmdline
init_bin = None
log.warning(
"Unable to fetch data from /proc/1/cmdline"
)
if init_bin is not None and init_bin.endswith('bin/init'):
supported_inits = (b'upstart', b'sysvinit', b'systemd')
edge_len = max(len(x) for x in supported_inits) - 1
try:
buf_size = __opts__['file_buffer_size']
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, 'rb') as fp_:
buf = True
edge = b''
buf_size = __opts__['file_buffer_size']
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, 'rb') as fp_:
buf = True
edge = b''
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode('utf-8')
grains['init'] = item
buf = b''
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode('utf-8')
grains['init'] = item
buf = b''
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
'Unable to read from init_bin (%s): %s',
init_bin, exc
)
elif salt.utils.path.which('supervisord') in init_cmdline:
grains['init'] = 'supervisord'
elif init_cmdline == ['runit']:
grains['init'] = 'runit'
elif '/sbin/my_init' in init_cmdline:
#Phusion Base docker container use runit for srv mgmt, but my_init as pid1
grains['init'] = 'runit'
else:
log.info(
'Could not determine init system from command line: (%s)',
' '.join(init_cmdline)
except (IOError, OSError) as exc:
log.error(
'Unable to read from init_bin (%s): %s',
init_bin, exc
)
elif salt.utils.path.which('supervisord') in init_cmdline:
grains['init'] = 'supervisord'
elif init_cmdline == ['runit']:
grains['init'] = 'runit'
elif '/sbin/my_init' in init_cmdline:
# Phusion Base docker container use runit for srv mgmt, but
# my_init as pid1
grains['init'] = 'runit'
else:
log.info(
'Could not determine init system from command line: (%s)',
' '.join(init_cmdline)
)
# Add lsb grains on any distro with lsb-release. Note that this import
# can fail on systems with lsb-release installed if the system package
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
log.trace('Getting lsb_release distro information')
import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
@ -1585,35 +1620,21 @@ def os_data():
# Catch a NameError to workaround possible breakage in lsb_release
# See https://github.com/saltstack/salt/issues/37867
except (ImportError, NameError):
# if the python library isn't available, default to regex
if os.path.isfile('/etc/lsb-release'):
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
regex = re.compile((
'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
'([\\w\\s\\.\\-_]+)(?:\'|")?'
))
with salt.utils.files.fopen('/etc/lsb-release') as ifile:
for line in ifile:
match = regex.match(line.rstrip('\n'))
if match:
# Adds:
# lsb_distrib_{id,release,codename,description}
grains[
'lsb_{0}'.format(match.groups()[0].lower())
] = match.groups()[1].rstrip()
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
log.trace('lsb_release python bindings not available')
grains.update(_parse_lsb_release())
if grains.get('lsb_distrib_description', '').lower().startswith('antergos'):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
grains['osfullname'] = 'Antergos Linux'
elif 'lsb_distrib_id' not in grains:
os_release = _parse_os_release(['/etc/os-release', '/usr/lib/os-release'])
log.trace(
'Failed to get lsb_distrib_id, trying to parse os-release'
)
os_release = _parse_os_release('/etc/os-release', '/usr/lib/os-release')
if os_release:
if 'NAME' in os_release:
grains['lsb_distrib_id'] = os_release['NAME'].strip()
@ -1638,6 +1659,7 @@ def os_data():
elif os_release.get("VERSION") == "Tumbleweed":
grains['osfullname'] = os_release["VERSION"]
elif os.path.isfile('/etc/SuSE-release'):
log.trace('Parsing distrib info from /etc/SuSE-release')
grains['lsb_distrib_id'] = 'SUSE'
version = ''
patch = ''
@ -1659,6 +1681,7 @@ def os_data():
if not grains.get('lsb_distrib_codename'):
grains['lsb_distrib_codename'] = 'n.a'
elif os.path.isfile('/etc/altlinux-release'):
log.trace('Parsing distrib info from /etc/altlinux-release')
# ALT Linux
grains['lsb_distrib_id'] = 'altlinux'
with salt.utils.files.fopen('/etc/altlinux-release') as ifile:
@ -1674,6 +1697,7 @@ def os_data():
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
log.trace('Parsing distrib info from /etc/centos-release')
# CentOS Linux
grains['lsb_distrib_id'] = 'CentOS'
with salt.utils.files.fopen('/etc/centos-release') as ifile:
@ -1691,6 +1715,9 @@ def os_data():
elif os.path.isfile('/etc.defaults/VERSION') \
and os.path.isfile('/etc.defaults/synoinfo.conf'):
grains['osfullname'] = 'Synology'
log.trace(
'Parsing Synology distrib info from /etc.defaults/VERSION'
)
with salt.utils.files.fopen('/etc.defaults/VERSION', 'r') as fp_:
synoinfo = {}
for line in fp_:
@ -1714,6 +1741,10 @@ def os_data():
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
'Getting OS name, release, and codename from '
'platform.linux_distribution()'
)
(osname, osrelease, oscodename) = \
[x.strip('"').strip("'") for x in
linux_distribution(supported_dists=_supported_dists)]
@ -1723,8 +1754,7 @@ def os_data():
# so that linux_distribution() does the /etc/lsb-release parsing, but
# we do it anyway here for the sake for full portability.
if 'osfullname' not in grains:
grains['osfullname'] = \
grains.get('lsb_distrib_id', osname).strip()
grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
if 'osrelease' not in grains:
# NOTE: This is a workaround for CentOS 7 os-release bug
# https://bugs.centos.org/view.php?id=8359

View File

@ -342,8 +342,8 @@ class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, log_queue=None):
super(FileserverUpdate, self).__init__(log_queue=log_queue)
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
@ -356,11 +356,17 @@ class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def fill_buckets(self):
'''
@ -2040,6 +2046,8 @@ class ClearFuncs(object):
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}

View File

@ -1073,7 +1073,7 @@ class Minion(MinionBase):
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue or []
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
@ -1172,10 +1172,11 @@ class Minion(MinionBase):
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@ -1655,7 +1656,9 @@ class Minion(MinionBase):
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__)
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'

View File

@ -304,7 +304,7 @@ def install(name=None,
# We don't support installing specific version for now
# so transform the dict in list ignoring version provided
pkgs = [
p.keys()[0] for p in pkgs
next(iter(p)) for p in pkgs
if isinstance(p, dict)
]
pkg_to_install.extend(pkgs)

View File

@ -186,12 +186,13 @@ def list_(name,
else {'fileobj': cached.stdout, 'mode': 'r|'}
with contextlib.closing(tarfile.open(**open_kwargs)) as tar_archive:
for member in tar_archive.getmembers():
_member = salt.utils.data.decode(member.name)
if member.issym():
links.append(member.name)
links.append(_member)
elif member.isdir():
dirs.append(member.name + '/')
dirs.append(_member + '/')
else:
files.append(member.name)
files.append(_member)
return dirs, files, links
except tarfile.ReadError:

View File

@ -19,7 +19,7 @@ Management of Docker Containers
.. _lxc-attach: https://linuxcontainers.org/lxc/manpages/man1/lxc-attach.1.html
.. _nsenter: http://man7.org/linux/man-pages/man1/nsenter.1.html
.. _docker-exec: http://docs.docker.com/reference/commandline/cli/#exec
.. _`low-level API`: http://docker-py.readthedocs.io/en/stable/api.html
.. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html
.. _timelib: https://pypi.python.org/pypi/timelib
.. _`trusted builds`: https://blog.docker.com/2013/11/introducing-trusted-builds/
.. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate
@ -436,11 +436,20 @@ def _refresh_mine_cache(wrapped):
refresh salt mine on exit.
'''
returned = wrapped(*args, **__utils__['args.clean_kwargs'](**kwargs))
__salt__['mine.send']('docker.ps', verbose=True, all=True, host=True)
if _check_update_mine():
__salt__['mine.send']('docker.ps', verbose=True, all=True, host=True)
return returned
return wrapper
def _check_update_mine():
    '''
    Return whether the docker mine should be refreshed.

    Looks up the ``docker.update_mine`` minion config value (defaulting to
    ``True``) and memoizes the result in ``__context__`` under the key
    ``docker.update_mine`` so the config lookup is only performed once per
    context lifetime.
    '''
    if 'docker.update_mine' not in __context__:
        # First call in this context: resolve the config option and cache it.
        __context__['docker.update_mine'] = __salt__['config.get'](
            'docker.update_mine', default=True)
    return __context__['docker.update_mine']
# Helper functions
def _change_state(name, action, expected, *args, **kwargs):
'''
@ -788,7 +797,7 @@ def get_client_args(limit=None):
Added ability to limit the input to specific client functions
Many functions in Salt have been written to support the full list of
arguments for a given function in docker-py's `low-level API`_. However,
arguments for a given function in the `docker-py Low-level API`_. However,
depending on the version of docker-py installed on the minion, the
available arguments may differ. This function will get the arguments for
various functions in the installed version of docker-py, to be used as a
@ -996,17 +1005,17 @@ def compare_container_networks(first, second):
:py:func:`docker_container.running <salt.states.docker_container.running>`
state), automatic IP configuration will also be checked in these cases.
This function uses the :minion_opts:`docker.compare_container_networks`
This function uses the :conf_minion:`docker.compare_container_networks`
minion config option to determine which keys to examine. This provides
flexibility in the event that features added in a future Docker release
necessitate changes to how Salt compares networks. In these cases, rather
than waiting for a new Salt release one can just set
:minion_opts:`docker.compare_container_networks`.
:conf_minion:`docker.compare_container_networks`.
.. note::
The checks for automatic IP configuration described above only apply if
``IPAMConfig`` is among the keys set for static IP checks in
:minion_opts:`docker.compare_container_networks`.
:conf_minion:`docker.compare_container_networks`.
first
Name or ID of first container (old)
@ -5069,7 +5078,7 @@ def create_network(name,
other issues to be more easily worked around. See the following links
for more information:
- docker-py `low-level API`_
- `docker-py Low-level API`_
- `Docker Engine API`_
.. versionadded:: 2018.3.0
@ -5160,6 +5169,8 @@ def create_network(name,
get an error unless you have set up a fixed IPv6 subnet. Consult
the `Docker IPv6 docs`_ for information on how to do this.
.. _`Docker IPv6 docs`: https://docs.docker.com/v17.09/engine/userguide/networking/default_network/ipv6/
attachable : False
If ``True``, and the network is in the global scope, non-service
containers on worker nodes will be able to connect to the network.

View File

@ -575,30 +575,26 @@ def lsattr(path):
return results
def chattr(*args, **kwargs):
def chattr(*files, **kwargs):
'''
.. versionadded:: 2018.3.0
Change the attributes of files
*args
list of files to modify attributes of
**kwargs - the following are valid <key,value> pairs:
Change the attributes of files. This function accepts one or more files and
the following options:
operator
add|remove
determines whether attributes should be added or removed from files
Can be wither ``add`` or ``remove``. Determines whether attributes
should be added or removed from files
attributes
acdijstuADST
string of characters representing attributes to add/remove from files
One or more of the following characters: ``acdijstuADST``, representing
attributes to add to/remove from files
version
a version number to assign to the files
a version number to assign to the file(s)
flags
[RVf]
One or more of the following characters: ``RVf``, representing
flags to assign to chattr (recurse, verbose, suppress most errors)
CLI Example:
@ -608,34 +604,34 @@ def chattr(*args, **kwargs):
salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai
salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
'''
args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg)
for arg in args]
operator = kwargs.pop('operator', None)
attributes = kwargs.pop('attributes', None)
flags = kwargs.pop('flags', None)
version = kwargs.pop('version', None)
if (operator is None) or (operator not in ['add', 'remove']):
if (operator is None) or (operator not in ('add', 'remove')):
raise SaltInvocationError(
"Need an operator: 'add' or 'remove' to modify attributes.")
if attributes is None:
raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
cmd = ['chattr']
if operator == "add":
attrs = '+{0}'.format(attributes)
elif operator == "remove":
attrs = '-{0}'.format(attributes)
flgs = ''
cmd.append(attrs)
if flags is not None:
flgs = '-{0}'.format(flags)
cmd.append('-{0}'.format(flags))
vrsn = ''
if version is not None:
vrsn = '-v {0}'.format(version)
cmd.extend(['-v', version])
cmd.extend(files)
cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(args))
result = __salt__['cmd.run'](cmd, python_shell=False)
if bool(result):
@ -2043,7 +2039,12 @@ def line(path, content=None, match=None, mode=None, location=None,
fh_ = None
try:
# Make sure we match the file mode from salt.utils.files.fopen
mode = 'wb' if six.PY2 and salt.utils.platform.is_windows() else 'w'
if six.PY2 and salt.utils.platform.is_windows():
mode = 'wb'
body = salt.utils.stringutils.to_bytes(body)
else:
mode = 'w'
body = salt.utils.stringutils.to_str(body)
fh_ = salt.utils.atomicfile.atomic_open(path, mode)
fh_.write(body)
finally:
@ -4457,7 +4458,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
lattrs = lsattr(name)
if lattrs is not None:
# List attributes on file
perms['lattrs'] = ''.join(lattrs.get('name', ''))
perms['lattrs'] = ''.join(lattrs.get(name, ''))
# Remove attributes on file so changes can be enforced.
if perms['lattrs']:
chattr(name, operator='remove', attributes=perms['lattrs'])

View File

@ -1051,16 +1051,15 @@ def verify(text=None,
signature
Specify the filename of a detached signature.
.. versionadded:: 2018.3.0
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' gpg.verify text='Hello there. How are you?'
salt '*' gpg.verify filename='/path/to/important.file'
salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
'''

View File

@ -97,7 +97,8 @@ def diff_objects(obja, objb):
Diff two complex infoblox objects.
This is used from salt states to detect changes in objects.
Using `func:nextavailableip` will not cause a diff if the ipaddres is in range
Using ``func:nextavailableip`` will not cause a diff if the ipaddress is in
range
'''
return libinfoblox.diff_obj(obja, objb)
@ -108,6 +109,8 @@ def is_ipaddr_in_ipfunc_range(ipaddr, ipfunc):
CLI Example:
.. code-block:: bash
salt-call infoblox.is_ipaddr_in_ipfunc_range \
ipaddr="10.0.2.2" ipfunc="func:nextavailableip:10.0.0.0/8"
'''
@ -118,10 +121,12 @@ def update_host(name, data, **api_opts):
'''
Update host record. This is a helper call to update_object.
Find a hosts `_ref` then call update_object with the record data.
Find a hosts ``_ref`` then call update_object with the record data.
CLI Example:
.. code-block:: bash
salt-call infoblox.update_host name=fqdn data={}
'''
o = get_host(name=name, **api_opts)
@ -130,8 +135,7 @@ def update_host(name, data, **api_opts):
def update_object(objref, data, **api_opts):
'''
Update raw infoblox object.
This is a low level api call.
Update raw infoblox object. This is a low level api call.
CLI Example:
@ -147,11 +151,12 @@ def update_object(objref, data, **api_opts):
def delete_object(objref, **api_opts):
'''
Delete infoblox object.
This is a low level api call.
Delete infoblox object. This is a low level api call.
CLI Example:
.. code-block:: bash
salt-call infoblox.delete_object objref=[ref_of_object]
'''
if '__opts__' in globals() and __opts__['test']:
@ -162,11 +167,12 @@ def delete_object(objref, **api_opts):
def create_object(object_type, data, **api_opts):
'''
Create raw infoblox object
This is a low level api call.
Create raw infoblox object. This is a low level api call.
CLI Example:
.. code-block:: bash
salt-call infoblox.update_object object_type=record:host data={}
'''
if '__opts__' in globals() and __opts__['test']:
@ -178,11 +184,12 @@ def create_object(object_type, data, **api_opts):
def get_object(objref, data=None, return_fields=None, max_results=None,
ensure_none_or_one_result=False, **api_opts):
'''
Get raw infoblox object.
This is a low level api call.
Get raw infoblox object. This is a low level api call.
CLI Example:
.. code-block:: bash
salt-call infoblox.get_object objref=[_ref of object]
'''
if not data:
@ -198,6 +205,8 @@ def create_cname(data, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.create_cname data={ \
"comment": "cname to example server", \
"name": "example.example.com", \
@ -215,7 +224,9 @@ def get_cname(name=None, canonical=None, return_fields=None, **api_opts):
'''
Get CNAME information.
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_cname name=example.example.com
salt-call infoblox.get_cname canonical=example-ha-0.example.com
@ -229,10 +240,12 @@ def update_cname(name, data, **api_opts):
'''
Update CNAME. This is a helper call to update_object.
Find a CNAME `_ref` then call update_object with the record data.
Find a CNAME ``_ref`` then call update_object with the record data.
CLI Example:
.. code-block:: bash
salt-call infoblox.update_cname name=example.example.com data="{
'canonical':'example-ha-0.example.com',
'use_ttl':true,
@ -251,7 +264,9 @@ def delete_cname(name=None, canonical=None, **api_opts):
If record is not found, return True
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.delete_cname name=example.example.com
salt-call infoblox.delete_cname canonical=example-ha-0.example.com
@ -266,16 +281,13 @@ def get_host(name=None, ipv4addr=None, mac=None, return_fields=None, **api_opts)
'''
Get host information
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host hostname.domain.ca
salt-call infoblox.get_host ipv4addr=123.123.122.12
salt-call infoblox.get_host mac=00:50:56:84:6e:ae
return_fields=
https://INFOBLOX/wapidoc/objects/record.host.html#fields-list
return_fields='ipv4addrs,aliases,name,configure_for_dns,extattrs,disable,view,comment,zone'
'''
infoblox = _get_infoblox(**api_opts)
host = infoblox.get_host(name=name, mac=mac, ipv4addr=ipv4addr, return_fields=return_fields)
@ -288,6 +300,8 @@ def get_host_advanced(name=None, ipv4addr=None, mac=None, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.get_host_advanced hostname.domain.ca
'''
infoblox = _get_infoblox(**api_opts)
@ -299,7 +313,8 @@ def get_host_domainname(name, domains=None, **api_opts):
'''
Get host domain name
If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot.
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is returned.
@ -309,6 +324,8 @@ def get_host_domainname(name, domains=None, **api_opts):
CLI Example:
.. code-block:: bash
salt-call uwl.get_host_domainname name=localhost.t.domain.com \
domains=['domain.com', 't.domain.com.']
@ -337,15 +354,19 @@ def get_host_hostname(name, domains=None, **api_opts):
'''
Get hostname
If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot.
If no domains are passed, the hostname is checked for a zone in infoblox,
if no zone split on first dot.
If domains are provided, the best match out of the list is truncated from the fqdn leaving the hostname.
If domains are provided, the best match out of the list is truncated from
the fqdn leaving the hostname.
If no matching domains are found the fqdn is returned.
dots at end of names are ignored.
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com \
domains="['domain.com', 't.domain.com']"
@ -371,6 +392,8 @@ def get_host_mac(name=None, allow_array=False, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.get_host_mac host=localhost.domain.com
'''
data = get_host(name=name, **api_opts)
@ -392,7 +415,9 @@ def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts):
Use `allow_array` to return possibly multiple values.
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host_ipv4 host=localhost.domain.com
salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae
@ -416,14 +441,13 @@ def get_host_ipv4addr_info(ipv4addr=None, mac=None,
'''
Get host ipv4addr information
return_fields='mac,host,configure_for_dhcp,ipv4addr'
CLI Examples:
CLI Example:
.. code-block:: bash
salt-call infoblox.get_ipv4addr ipv4addr=123.123.122.12
salt-call infoblox.get_ipv4addr mac=00:50:56:84:6e:ae
salt-call infoblox.get_ipv4addr mac=00:50:56:84:6e:ae return_fields=host
return_fields='mac,host,configure_for_dhcp,ipv4addr'
salt-call infoblox.get_ipv4addr mac=00:50:56:84:6e:ae return_fields=host return_fields='mac,host,configure_for_dhcp,ipv4addr'
'''
infoblox = _get_infoblox(**api_opts)
return infoblox.get_host_ipv4addr_object(ipv4addr, mac, discovered_data, return_fields)
@ -437,6 +461,8 @@ def get_host_ipv6addr_info(ipv6addr=None, mac=None,
CLI Example:
.. code-block:: bash
salt-call infoblox.get_host_ipv6addr_info ipv6addr=2001:db8:85a3:8d3:1349:8a2e:370:7348
'''
infoblox = _get_infoblox(**api_opts)
@ -445,9 +471,8 @@ def get_host_ipv6addr_info(ipv6addr=None, mac=None,
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks.
This is helpful when looking up subnets to
use with func:nextavailableip
Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is often slow and not cached!
@ -456,6 +481,8 @@ def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network
'''
infoblox = _get_infoblox(**api_opts)
@ -468,6 +495,8 @@ def delete_host(name=None, mac=None, ipv4addr=None, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.delete_host name=example.domain.com
salt-call infoblox.delete_host ipv4addr=123.123.122.12
salt-call infoblox.delete_host ipv4addr=123.123.122.12 mac=00:50:56:84:6e:ae
@ -483,15 +512,18 @@ def create_host(data, **api_opts):
Add host record
Avoid race conditions, use func:nextavailableip for ipv[4,6]addrs:
- func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default
- func:nextavailableip:10.0.0.0/8
- func:nextavailableip:10.0.0.0/8,external
- func:nextavailableip:10.0.0.3-10.0.0.10
- func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default
- func:nextavailableip:10.0.0.0/8
- func:nextavailableip:10.0.0.0/8,external
- func:nextavailableip:10.0.0.3-10.0.0.10
See your infoblox API for full `data` format.
CLI Example:
.. code-block:: bash
salt-call infoblox.create_host \
data =
{'name': 'hostname.example.ca',
@ -514,6 +546,8 @@ def get_ipv4_range(start_addr=None, end_addr=None, return_fields=None, **api_opt
CLI Example:
.. code-block:: bash
salt-call infoblox.get_ipv4_range start_addr=123.123.122.12
'''
infoblox = _get_infoblox(**api_opts)
@ -526,7 +560,9 @@ def delete_ipv4_range(start_addr=None, end_addr=None, **api_opts):
CLI Example:
salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
.. code-block:: bash
salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
'''
r = get_ipv4_range(start_addr, end_addr, **api_opts)
if r:
@ -544,6 +580,8 @@ def create_ipv4_range(data, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.create_ipv4_range data={
start_addr: '129.97.150.160',
end_addr: '129.97.150.170'}
@ -560,6 +598,8 @@ def create_a(data, **api_opts):
CLI Example:
.. code-block:: bash
salt-call infoblox.create_a \
data =
name: 'fastlinux.math.example.ca'
@ -573,7 +613,9 @@ def get_a(name=None, ipv4addr=None, allow_array=True, **api_opts):
'''
Get A record
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_a name=abc.example.com
salt-call infoblox.get_a ipv4addr=192.168.3.5
@ -593,14 +635,16 @@ def delete_a(name=None, ipv4addr=None, allow_array=False, **api_opts):
'''
Delete A record
If the A record is used as a round robin you can set
`allow_array=true to delete all records for the hostname.
If the A record is used as a round robin you can set ``allow_array=True`` to
delete all records for the hostname.
CLI Example:
CLI Examples:
.. code-block:: bash
salt-call infoblox.delete_a name=abc.example.com
salt-call infoblox.delete_a ipv4addr=192.168.3.5
salt-call infoblox.delete_a name=acname.example.com allow_array=true
salt-call infoblox.delete_a name=acname.example.com allow_array=True
'''
r = get_a(name, ipv4addr, allow_array=False, **api_opts)
if not r:

View File

@ -114,7 +114,6 @@ def latest_installed():
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
@ -163,9 +162,9 @@ def upgrade(reboot=False, at_time=None):
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion
has a chance to return, resulting in errors. A minimal delay (1 minute)
is useful to ensure the result is delivered to the master.
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
'''
result = __salt__['pkg.install'](
name='{0}-{1}'.format(_package_prefix(), latest_available()))

View File

@ -108,7 +108,6 @@ def latest_installed():
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_yum.active` if a new kernel
has been installed and the system has not yet been rebooted.
@ -157,9 +156,9 @@ def upgrade(reboot=False, at_time=None):
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion
has a chance to return, resulting in errors. A minimal delay (1 minute)
is useful to ensure the result is delivered to the master.
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
'''
result = __salt__['pkg.upgrade'](name=_package_name())
_needs_reboot = needs_reboot()

View File

@ -411,7 +411,8 @@ def add(connect_spec, dn, attributes):
modlist = salt.utils.data.decode(
ldap.modlist.addModlist(attributes),
to_str=True
to_str=True,
preserve_tuples=True
)
try:
l.c.add_s(dn, modlist)
@ -512,7 +513,7 @@ def modify(connect_spec, dn, directives):
modlist[idx] = (mod[0], mod[1],
[_format_unicode_password(x) for x in mod[2]])
modlist = salt.utils.data.decode(modlist, to_str=True)
modlist = salt.utils.data.decode(modlist, to_str=True, preserve_tuples=True)
try:
l.c.modify_s(dn, modlist)
except ldap.LDAPError as e:
@ -581,7 +582,8 @@ def change(connect_spec, dn, before, after):
modlist = salt.utils.data.decode(
ldap.modlist.modifyModlist(before, after),
to_str=True
to_str=True,
preserve_tuples=True
)
try:
l.c.modify_s(dn, modlist)

View File

@ -249,7 +249,7 @@ def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
alias for log_file
conf_file : string
optional path to alternative configuration file
**kwargs : boolean|string|int
kwargs : boolean|string|int
optional additional flags and parameters
.. note::

View File

@ -3504,7 +3504,9 @@ def bootstrap(name,
configdir = '/var/tmp/.c_{0}'.format(rstr)
cmd = 'install -m 0700 -d {0}'.format(configdir)
if run(name, cmd, python_shell=False):
if run_all(
name, cmd, path=path, python_shell=False
)['retcode'] != 0:
log.error('tmpdir %s creation failed %s', configdir, cmd)
return False
@ -3514,6 +3516,7 @@ def bootstrap(name,
copy_to(name, bs_, script, path=path)
result = run_all(name,
'sh -c "chmod +x {0}"'.format(script),
path=path,
python_shell=True)
copy_to(name, cfg_files['config'],
@ -3539,6 +3542,7 @@ def bootstrap(name,
run_all(name,
'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''
''.format(script),
path=path,
ignore_retcode=True,
python_shell=True)
else:

View File

@ -179,13 +179,10 @@ def persist(name, value, config='/etc/sysctl.conf', apply_change=False):
rest = rest[len(rest_v):]
if rest_v == value:
return 'Already set'
new_line = '{0}={1}'.format(name, value)
nlines.append(new_line)
nlines.append('\n')
nlines.append('{0}={1}\n'.format(name, value))
edited = True
if not edited:
nlines.append('{0}={1}'.format(name, value))
nlines.append('\n')
nlines.append('{0}={1}\n'.format(name, value))
nlines = [salt.utils.stringutils.to_str(_l) for _l in nlines]
with salt.utils.files.fopen(config, 'w+') as ofile:
ofile.writelines(nlines)

View File

@ -374,10 +374,18 @@ def flush():
def get_docker(interfaces=None, cidrs=None, with_container_id=False):
'''
Get all mine data for 'docker.get_containers' and run an aggregation
routine. The "interfaces" parameter allows for specifying which network
interfaces to select ip addresses from. The "cidrs" parameter allows for
specifying a list of cidrs which the ip address must match.
.. versionchanged:: 2017.7.8,2018.3.3
When :conf_minion:`docker.update_mine` is set to ``False`` for a given
minion, no mine data will be populated for that minion, and thus none
will be returned for it.
.. versionchanged:: Fluorine
:conf_minion:`docker.update_mine` now defaults to ``False``
Get all mine data for :py:func:`docker.ps <salt.modules.dockermod.ps_>` and
run an aggregation routine. The ``interfaces`` parameter allows for
specifying the network interfaces from which to select IP addresses. The
``cidrs`` parameter allows for specifying a list of subnets which the IP
address must match.
with_container_id
Boolean, to expose container_id in the list of results

View File

@ -121,18 +121,30 @@ def _active_mounts_aix(ret):
'''
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
if comps and comps[0] == 'node' or comps[0] == '--------':
continue
if len(comps) < 8:
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': _resolve_user_group_names(comps[6].split(','))}
else:
ret[comps[2]] = {'node': comps[0],
'device': comps[1],
'fstype': comps[3],
'opts': _resolve_user_group_names(comps[7].split(','))}
if comps:
if comps[0] == 'node' or comps[0] == '--------':
continue
comps_len = len(comps)
if line.startswith((' ', '\t')):
curr_opts = _resolve_user_group_names(comps[6].split(',')) if 7 == comps_len else []
if curr_opts:
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': curr_opts}
else:
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2]}
else:
curr_opts = _resolve_user_group_names(comps[7].split(',')) if 8 == comps_len else []
if curr_opts:
ret[comps[2]] = {'node': comps[0],
'device': comps[1],
'fstype': comps[3],
'opts': curr_opts}
else:
ret[comps[2]] = {'node': comps[0],
'device': comps[1],
'fstype': comps[3]}
return ret
@ -228,7 +240,7 @@ def active(extended=False):
ret = {}
if __grains__['os'] == 'FreeBSD':
_active_mounts_freebsd(ret)
elif __grains__['kernel'] == 'AIX':
elif 'AIX' in __grains__['kernel']:
_active_mounts_aix(ret)
elif __grains__['kernel'] == 'SunOS':
_active_mounts_solaris(ret)
@ -1044,7 +1056,7 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None, util
return False
# Darwin doesn't expect defaults when mounting without other options
if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin']:
if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin', 'AIX']:
opts = None
if isinstance(opts, six.string_types):
@ -1057,7 +1069,9 @@ def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None, util
if opts is not None:
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
# use of fstype on AIX is with /etc/filesystems
if fstype and 'AIX' not in __grains__['os']:
args += ' -t {0}'.format(fstype)
cmd = 'mount {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd, runas=user, python_shell=False)
@ -1084,6 +1098,10 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
if fstype == 'smbfs':
force_mount = True
if 'AIX' in __grains__['os']:
if opts == 'defaults':
opts = ''
if isinstance(opts, six.string_types):
opts = opts.split(',')
mnts = active()
@ -1096,7 +1114,9 @@ def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
umount(name, device, user=user)
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
# use of fstype on AIX is with /etc/filesystems
if fstype and 'AIX' not in __grains__['os']:
args += ' -t {0}'.format(fstype)
if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount:
cmd = 'mount {0} {1} {2} '.format(args, device, name)
@ -1190,6 +1210,17 @@ def swaps():
'size': int(comps[3]),
'used': (int(comps[3]) - int(comps[4])),
'priority': '-'}
elif 'AIX' in __grains__['kernel']:
for line in __salt__['cmd.run_stdout']('swap -l').splitlines():
if line.startswith('device'):
continue
comps = line.split()
# AIX uses MB for units
ret[comps[0]] = {'type': 'device',
'size': int(comps[3][:-2]) * 1024,
'used': (int(comps[3][:-2]) - int(comps[4][:-2])) * 1024,
'priority': '-'}
elif __grains__['os'] != 'OpenBSD':
with salt.utils.files.fopen('/proc/swaps') as fp_:
for line in fp_:
@ -1242,7 +1273,7 @@ def swapon(name, priority=None):
return False
else:
cmd = 'swapon {0}'.format(name)
if priority:
if priority and 'AIX' not in __grains__['kernel']:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd, python_shell=False)

View File

@ -137,7 +137,9 @@ def db_create(database, containment='NONE', new_database_options=None, **kwargs)
new_database_options can only be a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.db_create DB_NAME
'''
if containment not in ['NONE', 'PARTIAL']:
@ -299,18 +301,22 @@ def login_exists(login, domain='', **kwargs):
def login_create(login, new_login_password=None, new_login_domain='', new_login_roles=None, new_login_options=None, **kwargs):
'''
Creates a new login.
Does not update password of existing logins.
For Windows authentication, provide new_login_domain.
For SQL Server authentication, prvide new_login_password.
Since hashed passwords are varbinary values, if the
new_login_password is 'int / long', it will be considered
to be HASHED.
new_login_roles can only be a list of SERVER roles
new_login_options can only be a list of strings
Creates a new login. Does not update password of existing logins. For
Windows authentication, provide ``new_login_domain``. For SQL Server
authentication, provide ``new_login_password``. Since hashed passwords are
*varbinary* values, if the ``new_login_password`` is 'int / long', it will
be considered to be HASHED.
new_login_roles
a list of SERVER roles
new_login_options
a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD]
'''
# One and only one of password and domain should be specifies
@ -408,13 +414,14 @@ def user_list(**kwargs):
def user_create(username, login=None, domain='', database=None, roles=None, options=None, **kwargs):
'''
Creates a new user.
If login is not specified, the user will be created without a login.
domain, if provided, will be prepended to username.
Creates a new user. If login is not specified, the user will be created
without a login. domain, if provided, will be prepended to username.
options can only be a list of strings
CLI Example:
.. code-block:: bash
salt minion mssql.user_create USERNAME database=DBNAME
'''
if domain and not login:

View File

@ -35,6 +35,7 @@ Module to provide MySQL compatibility to salt.
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import time
import logging
import re
@ -202,6 +203,12 @@ def __virtual__():
return (False, 'The mysql execution module cannot be loaded: neither MySQLdb nor PyMySQL is available.')
def __mysql_hash_password(password):
_password = hashlib.sha1(password).digest()
_password = '*{0}'.format(hashlib.sha1(_password).hexdigest().upper())
return _password
def __check_table(name, table, **connection_args):
dbc = _connect(**connection_args)
if dbc is None:
@ -1203,6 +1210,7 @@ def user_exists(user,
salt '*' mysql.user_exists 'username' passwordless=True
salt '*' mysql.user_exists 'username' password_column='authentication_string'
'''
server_version = version(**connection_args)
dbc = _connect(**connection_args)
# Did we fail to connect with the user we are checking
# Its password might have previously change with the same command/state
@ -1234,8 +1242,14 @@ def user_exists(user,
else:
qry += ' AND ' + password_column + ' = \'\''
elif password:
qry += ' AND ' + password_column + ' = PASSWORD(%(password)s)'
args['password'] = six.text_type(password)
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
# Hash the password before comparing
_password = __mysql_hash_password(password)
qry += ' AND ' + password_column + ' = %(password)s'
else:
_password = password
qry += ' AND ' + password_column + ' = PASSWORD(%(password)s)'
args['password'] = six.text_type(_password)
elif password_hash:
qry += ' AND ' + password_column + ' = %(password)s'
args['password'] = password_hash
@ -1333,6 +1347,7 @@ def user_create(user,
salt '*' mysql.user_create 'username' 'hostname' password_hash='hash'
salt '*' mysql.user_create 'username' 'hostname' allow_passwordless=True
'''
server_version = version(**connection_args)
if user_exists(user, host, **connection_args):
log.info('User \'%s\'@\'%s\' already exists', user, host)
return False
@ -1353,7 +1368,10 @@ def user_create(user,
qry += ' IDENTIFIED BY %(password)s'
args['password'] = six.text_type(password)
elif password_hash is not None:
qry += ' IDENTIFIED BY PASSWORD %(password)s'
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
qry += ' IDENTIFIED BY %(password)s'
else:
qry += ' IDENTIFIED BY PASSWORD %(password)s'
args['password'] = password_hash
elif salt.utils.data.is_true(allow_passwordless):
if salt.utils.data.is_true(unix_socket):
@ -1433,9 +1451,13 @@ def user_chpass(user,
salt '*' mysql.user_chpass frank localhost password_hash='hash'
salt '*' mysql.user_chpass frank localhost allow_passwordless=True
'''
server_version = version(**connection_args)
args = {}
if password is not None:
password_sql = 'PASSWORD(%(password)s)'
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
password_sql = '%(password)s'
else:
password_sql = 'PASSWORD(%(password)s)'
args['password'] = password
elif password_hash is not None:
password_sql = '%(password)s'

View File

@ -16,29 +16,7 @@ Configuration
The Namecheap username, API key and URL should be set in the minion configuration
file, or in the Pillar data.
* ``requests``
.. code-block:: bash
pip install requests
- As saltstack depends on ``requests`` this shouldn't be a problem
Prerequisite Configuration
--------------------------
- The namecheap username, api key and url should be set in a minion
configuration file or pillar
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3
namecheap.client_ip: 162.155.30.172
#Real url
namecheap.url: https://api.namecheap.com/xml.response
#Sandbox url
#namecheap.url: https://api.sandbox.namecheap.com/xml.response
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3

View File

@ -16,29 +16,7 @@ Configuration
The Namecheap username, API key and URL should be set in the minion configuration
file, or in the Pillar data.
* ``requests``
.. code-block:: bash
pip install requests
- As saltstack depends on ``requests`` this shouldn't be a problem
Prerequisite Configuration
--------------------------
- The namecheap username, api key and url should be set in a minion
configuration file or pillar
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3
namecheap.client_ip: 162.155.30.172
#Real url
namecheap.url: https://api.namecheap.com/xml.response
#Sandbox url
#namecheap.url: https://api.sandbox.namecheap.com/xml.response
.. code-block:: yaml
namecheap.name: companyname
namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3

View File

@ -110,16 +110,23 @@ def network_create(auth=None, **kwargs):
'''
Create a network
Parameters:
Defaults: shared=False, admin_state_up=True, external=False,
provider=None, project_id=None
name
Name of the network being created
name (string): Name of the network being created.
shared (bool): Set the network as shared.
admin_state_up (bool): Set the network administrative state to up.
external (bool): Whether this network is externally accessible.
provider (dict): A dict of network provider options.
project_id (string): Specify the project ID this network will be created on.
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
@ -144,16 +151,15 @@ def network_delete(auth=None, **kwargs):
'''
Delete a network
Parameters:
name: Name or ID of the network being deleted.
name_or_id
Name or ID of the network being deleted
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_delete name=network1
salt '*' neutronng.network_delete \
name=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.network_delete name_or_id=network1
salt '*' neutronng.network_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548
'''
cloud = get_operator_cloud(auth)
@ -165,10 +171,8 @@ def list_networks(auth=None, **kwargs):
'''
List networks
Parameters:
Defaults: filters=None
filters (dict): dict of filter conditions to push down
filters
A Python dictionary of filter conditions to push down
CLI Example:
@ -188,10 +192,8 @@ def network_get(auth=None, **kwargs):
'''
Get a single network
Parameters:
Defaults: filters=None
filters (dict): dict of filter conditions to push down
filters
A Python dictionary of filter conditions to push down
CLI Example:
@ -209,18 +211,57 @@ def subnet_create(auth=None, **kwargs):
'''
Create a subnet
Parameters:
Defaults: cidr=None, ip_version=4, enable_dhcp=False, subnet_name=None,
tenant_id=None, allocation_pools=None, gateway_ip=None,
disable_gateway_ip=False, dns_nameservers=None, host_routes=None,
ipv6_ra_mode=None, ipv6_address_mode=None,
use_default_subnetpool=False
network_name_or_id
The unique name or ID of the attached network. If a non-unique name is
supplied, an exception is raised.
allocation_pools:
A list of dictionaries of the start and end addresses for allocation pools.
cidr
The CIDR
dns_nameservers: A list of DNS name servers for the subnet.
host_routes: A list of host route dictionaries for the subnet.
ip_version
The IP version, which is 4 or 6.
enable_dhcp : False
Set to ``True`` if DHCP is enabled and ``False`` if disabled
subnet_name
The name of the subnet
tenant_id
The ID of the tenant who owns the network. Only administrative users
can specify a tenant ID other than their own.
allocation_pools
A list of dictionaries of the start and end addresses for the
allocation pools.
gateway_ip
The gateway IP address. When you specify both ``allocation_pools`` and
``gateway_ip``, you must ensure that the gateway IP does not overlap
with the specified allocation pools.
disable_gateway_ip : False
Set to ``True`` if gateway IP address is disabled and ``False`` if
enabled. It is not allowed with ``gateway_ip``.
dns_nameservers
A list of DNS name servers for the subnet
host_routes
A list of host route dictionaries for the subnet
ipv6_ra_mode
IPv6 Router Advertisement mode. Valid values are ``dhcpv6-stateful``,
``dhcpv6-stateless``, or ``slaac``.
ipv6_address_mode
IPv6 address mode. Valid values are ``dhcpv6-stateful``,
``dhcpv6-stateless``, or ``slaac``.
use_default_subnetpool
If ``True``, use the default subnetpool for ``ip_version`` to obtain a
CIDR. It is required to pass ``None`` to the ``cidr`` argument when
enabling this option.
CLI Example:
@ -248,19 +289,38 @@ def subnet_update(auth=None, **kwargs):
'''
Update a subnet
Parameters:
Defaults: subnet_name=None, enable_dhcp=None, gateway_ip=None,\
disable_gateway_ip=None, allocation_pools=None, \
dns_nameservers=None, host_routes=None
name_or_id
Name or ID of the subnet to update
name: Name or ID of the subnet to update.
subnet_name: The new name of the subnet.
subnet_name
The new name of the subnet
enable_dhcp
Set to ``True`` if DHCP is enabled and ``False`` if disabled
gateway_ip
The gateway IP address. When you specify both allocation_pools and
gateway_ip, you must ensure that the gateway IP does not overlap with
the specified allocation pools.
disable_gateway_ip : False
Set to ``True`` if gateway IP address is disabled and False if enabled.
It is not allowed with ``gateway_ip``.
allocation_pools
A list of dictionaries of the start and end addresses for the
allocation pools.
dns_nameservers
A list of DNS name servers for the subnet
host_routes
A list of host route dictionaries for the subnet
.. code-block:: bash
salt '*' neutronng.subnet_update name=subnet1 subnet_name=subnet2
salt '*' neutronng.subnet_update name=subnet1\
dns_nameservers='["8.8.8.8", "8.8.8.7"]'
salt '*' neutronng.subnet_update name=subnet1 dns_nameservers='["8.8.8.8", "8.8.8.7"]'
'''
cloud = get_operator_cloud(auth)
@ -272,8 +332,8 @@ def subnet_delete(auth=None, **kwargs):
'''
Delete a subnet
Parameters:
name: Name or ID of the subnet to update.
name
Name or ID of the subnet to update
CLI Example:
@ -293,10 +353,8 @@ def list_subnets(auth=None, **kwargs):
'''
List subnets
Parameters:
Defaults: filters=None
filters (dict): dict of filter conditions to push down
filters
A Python dictionary of filter conditions to push down
CLI Example:
@ -316,10 +374,8 @@ def subnet_get(auth=None, **kwargs):
'''
Get a single subnet
Parameters:
Defaults: filters=None
filters (dict): dict of filter conditions to push down
filters
A Python dictionary of filter conditions to push down
CLI Example:
@ -337,8 +393,8 @@ def security_group_create(auth=None, **kwargs):
'''
Create a security group. Use security_group_get to create default.
Parameters:
Defaults: project_id=None
project_id
The project ID on which this security group will be created
CLI Example:
@ -360,9 +416,14 @@ def security_group_update(secgroup=None, auth=None, **kwargs):
'''
Update a security group
secgroup: Name, ID or Raw Object of the security group to update.
name: New name for the security group.
description: New description for the security group.
secgroup
Name, ID or Raw Object of the security group to update
name
New name for the security group
description
New description for the security group
CLI Example:
@ -384,14 +445,14 @@ def security_group_delete(auth=None, **kwargs):
'''
Delete a security group
Parameters:
name: The name or unique ID of the security group.
name_or_id
The name or unique ID of the security group
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_delete name=secgroup1
salt '*' neutronng.security_group_delete name_or_id=secgroup1
'''
cloud = get_operator_cloud(auth)
@ -404,10 +465,8 @@ def security_group_get(auth=None, **kwargs):
Get a single security group. This will create a default security group
if one does not exist yet for a particular project id.
Parameters:
Defaults: filters=None
filters (dict): dict of filter conditions to push down
filters
A Python dictionary of filter conditions to push down
CLI Example:
@ -429,15 +488,48 @@ def security_group_rule_create(auth=None, **kwargs):
'''
Create a rule in a security group
Parameters:
Defaults: port_range_min=None, port_range_max=None, protocol=None,
remote_ip_prefix=None, remote_group_id=None, direction='ingress',
ethertype='IPv4', project_id=None
secgroup_name_or_id
The security group name or ID to associate with this security group
rule. If a non-unique group name is given, an exception is raised.
secgroup_name_or_id:
This is the Name or Id of security group you want to create a rule in.
However, it throws errors on non-unique security group names like
'default' even when you supply a project_id
port_range_min
The minimum port number in the range that is matched by the security
group rule. If the protocol is TCP or UDP, this value must be less than
or equal to the port_range_max attribute value. If nova is used by the
cloud provider for security groups, then a value of None will be
transformed to -1.
port_range_max
The maximum port number in the range that is matched by the security
group rule. The port_range_min attribute constrains the port_range_max
attribute. If nova is used by the cloud provider for security groups,
then a value of None will be transformed to -1.
protocol
The protocol that is matched by the security group rule. Valid values
are ``None``, ``tcp``, ``udp``, and ``icmp``.
remote_ip_prefix
The remote IP prefix to be associated with this security group rule.
This attribute matches the specified IP prefix as the source IP address
of the IP packet.
remote_group_id
The remote group ID to be associated with this security group rule
direction
Either ``ingress`` or ``egress``; the direction in which the security
group rule is applied. For a compute instance, an ingress security
group rule is applied to incoming (ingress) traffic for that instance.
An egress rule is applied to traffic leaving the instance
ethertype
Must be IPv4 or IPv6, and addresses represented in CIDR must match the
ingress or egress rules
project_id
Specify the project ID this security group will be created on
(admin-only)
CLI Example:
@ -464,15 +556,14 @@ def security_group_rule_delete(auth=None, **kwargs):
'''
Delete a security group
Parameters:
rule_id (string): The unique ID of the security group rule.
name_or_id
The unique ID of the security group rule
CLI Example:
.. code-block:: bash
salt '*' neutronng.security_group_rule_delete\
rule_id=1dcac318a83b4610b7a7f7ba01465548
salt '*' neutronng.security_group_rule_delete name_or_id=1dcac318a83b4610b7a7f7ba01465548
'''
cloud = get_operator_cloud(auth)

View File

@ -47,7 +47,7 @@ def out_format(data, out='nested', opts=None, **kwargs):
opts
Dictionary of configuration options. Default: ``__opts__``.
**kwargs
kwargs
Arguments to send to the outputter module.
CLI Example:
@ -74,7 +74,7 @@ def string_format(data, out='nested', opts=None, **kwargs):
opts
Dictionary of configuration options. Default: ``__opts__``.
**kwargs
kwargs
Arguments to send to the outputter module.
CLI Example:
@ -101,7 +101,7 @@ def html_format(data, out='nested', opts=None, **kwargs):
opts
Dictionary of configuration options. Default: ``__opts__``.
**kwargs
kwargs
Arguments to send to the outputter module.
CLI Example:

View File

@ -40,10 +40,10 @@ def get(key,
'''
.. versionadded:: 0.14
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string
except ``__opts__['pillar_raise_on_missing']`` is set to True, in which
case a ``KeyError`` exception will be raised.
Attempt to retrieve the named value from :ref:`in-memory pillar data
<pillar-in-memory>`. If the pillar key is not present in the in-memory
pillar, then the value specified in the ``default`` option (described
below) will be returned.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
@ -62,8 +62,12 @@ def get(key,
The pillar key to get value from
default
If specified, return this value in case when named pillar value does
not exist.
The value specified by this option will be returned if the desired
pillar key does not exist.
If a default value is not specified, then it will be an empty string,
unless :conf_minion:`pillar_raise_on_missing` is set to ``True``, in
which case an error will be raised.
merge : ``False``
If ``True``, the retrieved values will be merged into the passed

View File

@ -172,20 +172,7 @@ def _get_pip_bin(bin_env):
# If the python binary was passed, return it
if 'python' in os.path.basename(bin_env):
return [os.path.normpath(bin_env), '-m', 'pip']
# Try to find the python binary based on the location of pip in a
# virtual environment, should be relative
if 'pip' in os.path.basename(bin_env):
# Look in the same directory as the pip binary, and also its
# parent directories.
pip_dirname = os.path.dirname(bin_env)
pip_parent_dir = os.path.dirname(pip_dirname)
for bin_path in _search_paths(pip_dirname, pip_parent_dir):
if os.path.isfile(bin_path):
logger.debug('pip: Found python binary: %s', bin_path)
return [os.path.normpath(bin_path), '-m', 'pip']
# Couldn't find python, use the passed pip binary
# This has the limitation of being unable to update pip itself
# We have been passed a pip binary, use the pip binary.
return [os.path.normpath(bin_env)]
raise CommandExecutionError(
@ -465,6 +452,13 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
.. note::
For Windows, if the pip module is being used to upgrade the pip
package, bin_env should be the path to the virtualenv or to the
python binary that should be used. The pip command is unable to
upgrade itself in Windows.
use_wheel
Prefer wheel archives (requires pip>=1.4)

View File

@ -293,9 +293,10 @@ def set_main(key, value, path=MAIN_CF):
pairs, conf_list = _parse_main(path)
new_conf = []
key_line_match = re.compile("^{0}([\\s=]|$)".format(re.escape(key)))
if key in pairs:
for line in conf_list:
if line.startswith(key):
if re.match(key_line_match, line):
new_conf.append('{0} = {1}'.format(key, value))
else:
new_conf.append(line)

View File

@ -2,22 +2,20 @@
r'''
Manage the Windows registry
-----
Hives
-----
Hives are the main sections of the registry and all begin with the word HKEY.
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
----
Keys
----
Keys are the folders in the registry. Keys can have many nested subkeys. Keys
can have a value assigned to them under the (Default)
-----------------
Values or Entries
-----------------
@ -25,7 +23,6 @@ Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. The name is ``(Default)`` with a displayed value
of ``(value not set)``. The actual value is Null.
-------
Example
-------

View File

@ -9,6 +9,7 @@ import logging
import os
import re
import datetime
from salt.utils.versions import LooseVersion
# Import Salt libs
import salt.utils.decorators.path
@ -604,7 +605,7 @@ def info(*packages, **attr):
# pick only latest versions
# (in case multiple packages installed, e.g. kernel)
ret = dict()
for pkg_data in reversed(sorted(_ret, key=lambda x: x['edition'])):
for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))):
pkg_name = pkg_data.pop('name')
# Filter out GPG public keys packages
if pkg_name.startswith('gpg-pubkey'):

View File

@ -213,7 +213,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base
runas
The user to run the build process as
.. versionadded:: 2018.3.2
.. versionadded:: 2018.3.3
.. note::

View File

@ -2,65 +2,62 @@
'''
A module for testing the logic of states and highstates
Saltcheck provides unittest like functionality requiring only the knowledge of salt module execution and yaml.
In order to run state and highstate saltcheck tests a sub-folder of a state must be created and named "saltcheck-tests".
Tests for a state should be created in files ending in *.tst and placed in the saltcheck-tests folder.
Multiple tests can be created in a file.
Multiple *.tst files can be created in the saltcheck-tests folder.
Salt rendering is supported in test files e.g. yaml + jinja.
The "id" of a test works in the same manner as in salt state files.
They should be unique and descriptive.
Example file system layout:
/srv/salt/apache/
init.sls
config.sls
saltcheck-tests/
pkg_and_mods.tst
config.tst
Saltcheck Test Syntax:
Unique-ID:
module_and_function:
args:
kwargs:
assertion:
expected-return:
Example test 1:
echo-test-hello:
module_and_function: test.echo
args:
- "hello"
kwargs:
assertion: assertEqual
expected-return: 'hello'
:codeauthor: William Cannon <william.cannon@gmail.com>
:maturity: new
Saltcheck provides unittest like functionality requiring only the knowledge of
salt module execution and yaml.
In order to run state and highstate saltcheck tests a sub-folder of a state must
be created and named ``saltcheck-tests``.
Tests for a state should be created in files ending in ``*.tst`` and placed in
the ``saltcheck-tests`` folder.
Multiple tests can be created in a file. Multiple ``*.tst`` files can be
created in the ``saltcheck-tests`` folder. Salt rendering is supported in test
files (e.g. ``yaml + jinja``). The ``id`` of a test works in the same manner as
in salt state files. They should be unique and descriptive.
Example file system layout:
.. code-block: txt
/srv/salt/apache/
init.sls
config.sls
saltcheck-tests/
pkg_and_mods.tst
config.tst
Example:
.. code-block:: yaml
echo-test-hello:
module_and_function: test.echo
args:
- "hello"
kwargs:
assertion: assertEqual
expected-return: 'hello'
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import time
from json import loads, dumps
try:
import salt.utils.files
import salt.utils.path
import salt.utils.yaml
import salt.client
import salt.exceptions
from salt.ext import six
except ImportError:
pass
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.yaml
import salt.client
import salt.exceptions
from salt.ext import six
log = logging.getLogger(__name__)
@ -81,6 +78,9 @@ def update_master_cache():
Can be automated by setting "auto_update_master_cache: True" in minion config
CLI Example:
.. code-block:: bash
salt '*' saltcheck.update_master_cache
'''
__salt__['cp.cache_master']()
@ -92,7 +92,11 @@ def run_test(**kwargs):
Execute one saltcheck test and return result
:param keyword arg test:
CLI Example::
CLI Example:
.. code-block:: bash
salt '*' saltcheck.run_test
test='{"module_and_function": "test.echo",
"assertion": "assertEqual",
@ -115,8 +119,11 @@ def run_state_tests(state):
:param str state: the name of a user defined state
CLI Example::
salt '*' saltcheck.run_state_tests postfix
CLI Example:
.. code-block:: bash
salt '*' saltcheck.run_state_tests postfix
'''
scheck = SaltCheck()
paths = scheck.get_state_search_path_list()
@ -157,8 +164,11 @@ def run_highstate_tests():
'''
Execute all tests for a salt highstate and return results
CLI Example::
salt '*' saltcheck.run_highstate_tests
CLI Example:
.. code-block:: bash
salt '*' saltcheck.run_highstate_tests
'''
scheck = SaltCheck()
paths = scheck.get_state_search_path_list()
@ -203,7 +213,9 @@ def run_highstate_tests():
def _render_file(file_path):
'''call the salt utility to render a file'''
'''
call the salt utility to render a file
'''
# salt-call slsutil.renderer /srv/salt/jinjatest/saltcheck-tests/test1.tst
rendered = __salt__['slsutil.renderer'](file_path)
log.info("rendered: %s", rendered)
@ -211,19 +223,25 @@ def _render_file(file_path):
def _is_valid_module(module):
'''return a list of all modules available on minion'''
'''
Return a list of all modules available on minion
'''
modules = __salt__['sys.list_modules']()
return bool(module in modules)
def _get_auto_update_cache_value():
'''return the config value of auto_update_master_cache'''
'''
Return the config value of auto_update_master_cache
'''
__salt__['config.get']('auto_update_master_cache')
return True
def _is_valid_function(module_name, function):
'''Determine if a function is valid for a module'''
'''
Determine if a function is valid for a module
'''
try:
functions = __salt__['sys.list_functions'](module_name)
except salt.exceptions.SaltException:
@ -232,7 +250,9 @@ def _is_valid_function(module_name, function):
def _get_top_states():
''' equivalent to a salt cli: salt web state.show_top'''
'''
Equivalent to a salt cli: salt web state.show_top
'''
alt_states = []
try:
returned = __salt__['state.show_top']()
@ -245,7 +265,9 @@ def _get_top_states():
def _get_state_sls(state):
''' equivalent to a salt cli: salt web state.show_low_sls STATE'''
'''
Equivalent to a salt cli: salt web state.show_low_sls STATE
'''
sls_list_state = []
try:
returned = __salt__['state.show_low_sls'](state)
@ -281,11 +303,14 @@ class SaltCheck(object):
update_master_cache()
def __is_valid_test(self, test_dict):
'''Determine if a test contains:
a test name,
a valid module and function,
a valid assertion,
an expected return value'''
'''
Determine if a test contains:
- a test name
- a valid module and function
- a valid assertion
- an expected return value
'''
tots = 0 # need total of >= 6 to be a valid test
m_and_f = test_dict.get('module_and_function', None)
assertion = test_dict.get('assertion', None)
@ -314,7 +339,9 @@ class SaltCheck(object):
fun,
args,
kwargs):
'''Generic call of salt Caller command'''
'''
Generic call of salt Caller command
'''
value = False
try:
if args and kwargs:
@ -332,7 +359,9 @@ class SaltCheck(object):
return value
def run_test(self, test_dict):
'''Run a single saltcheck test'''
'''
Run a single saltcheck test
'''
if self.__is_valid_test(test_dict):
mod_and_func = test_dict['module_and_function']
args = test_dict.get('args', None)
@ -516,8 +545,9 @@ class SaltCheck(object):
@staticmethod
def get_state_search_path_list():
'''For the state file system, return a
list of paths to search for states'''
'''
For the state file system, return a list of paths to search for states
'''
# state cache should be updated before running this method
search_list = []
cachedir = __opts__.get('cachedir', None)
@ -533,7 +563,7 @@ class SaltCheck(object):
class StateTestLoader(object):
'''
Class loads in test files for a state
e.g. state_dir/saltcheck-tests/[1.tst, 2.tst, 3.tst]
e.g. state_dir/saltcheck-tests/[1.tst, 2.tst, 3.tst]
'''
def __init__(self, search_paths):
@ -543,7 +573,9 @@ class StateTestLoader(object):
self.test_dict = {}
def load_test_suite(self):
'''load tests either from one file, or a set of files'''
'''
Load tests either from one file, or a set of files
'''
self.test_dict = {}
for myfile in self.test_files:
# self.load_file(myfile)
@ -578,7 +610,9 @@ class StateTestLoader(object):
return
def gather_files(self, filepath):
'''gather files for a test suite'''
'''
Gather files for a test suite
'''
self.test_files = []
log.info("gather_files: %s", time.time())
filepath = filepath + os.sep + 'saltcheck-tests'
@ -594,7 +628,9 @@ class StateTestLoader(object):
@staticmethod
def convert_sls_to_paths(sls_list):
'''Converting sls to paths'''
'''
Converting sls to paths
'''
new_sls_list = []
for sls in sls_list:
sls = sls.replace(".", os.sep)
@ -603,12 +639,16 @@ class StateTestLoader(object):
@staticmethod
def convert_sls_to_path(sls):
'''Converting sls to paths'''
'''
Converting sls to paths
'''
sls = sls.replace(".", os.sep)
return sls
def add_test_files_for_sls(self, sls_path):
'''Adding test files'''
'''
Adding test files
'''
for path in self.search_paths:
full_path = path + os.sep + sls_path
rootdir = full_path

View File

@ -1991,9 +1991,9 @@ def pkg(pkg_path,
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if member.path.startswith((os.sep, '..{0}'.format(os.sep))):
if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))):
return {}
elif '..{0}'.format(os.sep) in member.path:
elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()

View File

@ -16,7 +16,6 @@ import types
log = logging.getLogger(__name__)
from salt.ext import six
try:
import testinfra
from testinfra import modules
@ -218,7 +217,7 @@ def _copy_function(module_name, name=None):
comparison: eq
```
"""
log.debug('Generating function for %s module', module_name)
log.debug('Generating function for testinfra.%s', module_name)
def _run_tests(name, **methods):
success = True
@ -278,9 +277,15 @@ def _copy_function(module_name, name=None):
))
return success, pass_msgs, fail_msgs
func = _run_tests
if name is not None:
# types.FunctionType requires a str for __name__ attribute, using a
# unicode type will result in a TypeError.
name = str(name) # future lint: disable=blacklisted-function
else:
name = func.__name__
return types.FunctionType(func.__code__,
func.__globals__,
name or func.__name__,
name,
func.__defaults__,
func.__closure__)
@ -297,7 +302,7 @@ def _register_functions():
modules_ = [module_ for module_ in modules.modules]
for mod_name in modules_:
mod_func = _copy_function(mod_name, six.text_type(mod_name))
mod_func = _copy_function(mod_name, mod_name)
mod_func.__doc__ = _build_doc(mod_name)
__all__.append(mod_name)
globals()[mod_name] = mod_func

View File

@ -495,61 +495,67 @@ def set_hwclock(clock):
salt '*' timezone.set_hwclock UTC
'''
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
if salt.utils.path.which('timedatectl'):
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
else:
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True

View File

@ -9059,8 +9059,7 @@ def create_vm(vm_name, cpu, memory, image, version, datacenter, datastore,
.. code-block:: bash
salt vm_minion vsphere.create_vm vm_name=vmname \
cpu='{count: 2, nested: True}' ...
salt vm_minion vsphere.create_vm vm_name=vmname cpu='{count: 2, nested: True}' ...
vm_name
Name of the virtual machine
@ -9090,7 +9089,9 @@ def create_vm(vm_name, cpu, memory, image, version, datacenter, datastore,
devices
interfaces
.. code-block:: bash
interfaces:
adapter: 'Network adapter 1'
name: vlan100
@ -9103,7 +9104,9 @@ def create_vm(vm_name, cpu, memory, image, version, datacenter, datastore,
start_connected: True
disks
.. code-block:: bash
disks:
adapter: 'Hard disk 1'
size: 16
@ -9116,14 +9119,18 @@ def create_vm(vm_name, cpu, memory, image, version, datacenter, datastore,
filename: 'vm/mydisk.vmdk'
scsi_devices
.. code-block:: bash
scsi_devices:
controller: 'SCSI controller 0'
type: paravirtual
bus_sharing: no_sharing
serial_ports
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
type: network
@ -9138,7 +9145,9 @@ def create_vm(vm_name, cpu, memory, image, version, datacenter, datastore,
yield: False
cd_drives
.. code-block:: bash
cd_drives:
adapter: 'CD/DVD drive 0'
controller: 'IDE 0'

Some files were not shown because too many files have changed in this diff Show More