Merge branch 'develop' into sd-test

Nicole Thomas 2018-04-04 14:32:56 -04:00 committed by GitHub
commit 2ffc3fd472
284 changed files with 5988 additions and 6492 deletions

.github/CODEOWNERS (8 changes)

@ -52,6 +52,14 @@ salt/**/thin.py @saltstack/team-ssh
# Team State
salt/state.py @saltstack/team-state
# Team SUSE
salt/**/*btrfs* @saltstack/team-suse
salt/**/*kubernetes* @saltstack/team-suse
salt/**/*pkg* @saltstack/team-suse
salt/**/*snapper* @saltstack/team-suse
salt/**/*xfs* @saltstack/team-suse
salt/**/*zypper* @saltstack/team-suse
# Team Transport
salt/transport/ @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport

.github/stale.yml (4 changes)

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 780 is approximately 2 years and 2 months
daysUntilStale: 780
# 760 is approximately 2 years and 1 month
daysUntilStale: 760
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7


@ -1,6 +1,6 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% version = '2017.7.1' %>
<% version = '2017.7.4' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
<% verifierfile = ENV['SALT_KITCHEN_VERIFIER'] || '.kitchen/verifier.yml' %>
@ -34,15 +34,16 @@ provisioner:
log_level: info
sudo: true
require_chef: false
retry_on_exit_code:
- 139
max_retries: 2
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: oxygen
branch: 2018.3
repo: git
testingdir: /testing
salt_copy_filter:
- .bundle
- .git
- .gitignore
- .kitchen
- .kitchen.yml
- Gemfile
@ -56,6 +57,20 @@ provisioner:
- prep_windows
"*":
- git.salt
pillars:
top.sls:
base:
"*":
- jenkins
"os:Windows":
- match: grain
- windows
jenkins.sls:
testing_dir: "{{salt.config.get('root_dir')|replace('\\', '\\\\')}}/testing"
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
windows.sls:
virtualenv_path: 'c:\Python27\Scripts\pip.exe'
<% if File.exists?(platformsfile) %>
<%= ERB.new(File.read(platformsfile)).result %>
<% else %>
@ -119,29 +134,6 @@ platforms:
provisioner:
salt_bootstrap_options: -X -p rsync git v<%= version %> >/dev/null
<% if vagrant != false %>
- name: windows-2012r2
driver:
box: mwrock/Windows2012R2
name: vagrant
gui: true
customize:
cpus: 4
memory: 8192
transport:
name: winrm
username: Vagrant
password: vagrant
provisioner:
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: -version <%= version %>
verifier:
windows: true
types:
- unit
coverage_xml: false
save:
$env:TEMP/salt-runtests.log: artifacts/logs/salt-runtests.log
/salt/var/log/salt/minion: artifacts/logs/minion
- name: windows-2016
driver:
box: mwrock/Windows2016
@ -157,6 +149,13 @@ platforms:
provisioner:
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: -version <%= version %>
init_environment: |
Clear-Host
$AddedLocation ="c:\salt;c:\salt\bin\Scripts"
$Reg = "Registry::HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
$OldPath = (Get-ItemProperty -Path $Reg -Name PATH).Path
$NewPath= $OldPath + ";" + $AddedLocation
Set-ItemProperty -Path $Reg -Value $NewPath -Name PATH
verifier:
windows: true
types:
@ -171,21 +170,6 @@ suites:
- name: py2
verifier:
python_bin: python2.7
provisioner:
pillars:
top.sls:
base:
"*":
- jenkins
"os:Windows":
- match: grain
- windows
jenkins.sls:
testing_dir: "{{salt.config.get('root_dir')|replace('\\', '\\\\')}}/testing"
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
windows.sls:
virtualenv_path: 'c:\Python27\Scripts\pip.exe'
- name: py3
excludes:
- centos-6
@ -194,18 +178,8 @@ suites:
python_bin: python3
provisioner:
pillars:
top.sls:
base:
"*":
- jenkins
"os:Windows":
- match: grain
- windows
jenkins.sls:
testing_dir: "{{salt.config.get('root_dir')|replace('\\', '\\\\')}}/testing"
clone_repo: false
py3: true
salttesting_namespec: salttesting==2017.6.1
windows.sls:
virtualenv_path: 'c:\Python35\Scripts\pip.exe'


@ -29,6 +29,25 @@ load-plugins=saltpylint.pep8,
# Don't bump these values on PyLint 1.4.0 - Known bug that ignores the passed --rcfile
jobs=1
# List of blacklisted functions and suggested replacements
#
# NOTE: This pylint check will infer the full name of the function by walking
# back up from the function name to the parent, to the parent's parent, etc.,
# and this means that functions which come from platform-specific modules need
# to be referenced using name of the module from which the function was
# imported. This happens a lot in the os and os.path modules. Functions from
# os.path should be defined using posixpath.funcname and ntpath.funcname, while
# functions from os should be defined using posix.funcname and nt.funcname.
#
# When defining a blacklisted function, the format is:
#
# <func_name>=<suggested_replacement>
#
# The replacement text will be included in the alert message.
#
blacklisted-functions=posix.umask=salt.utils.files.set_umask or get_umask,
nt.umask=salt.utils.files.set_umask or get_umask
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no


@ -2,7 +2,7 @@
source 'https://rubygems.org'
gem 'test-kitchen', :git => 'https://github.com/gtmanfred/test-kitchen.git'
gem 'test-kitchen', '~>1.20'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'kitchen-sync'
gem 'git'
@ -12,7 +12,7 @@ group :docker do
end
group :opennebula do
gem 'kitchen-opennebula', :git => 'https://github.com/gtmanfred/kitchen-opennebula.git'
gem 'kitchen-opennebula', '>=0.2.3'
gem 'xmlrpc'
end
@ -20,7 +20,7 @@ group :windows do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
gem 'winrm', '~>2.0'
gem 'winrm-fs', :git => 'https://github.com/gtmanfred/winrm-fs.git'
gem 'winrm-fs', '>=1.1.1'
end
group :ec2 do

Binary image file changed, not shown (438 KiB before, 240 KiB after).


@ -250,9 +250,9 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2017.7.4' # latest release
previous_release = '2016.11.9' # latest release from previous branch
previous_release_dir = '2016.11' # path on web server for previous branch
latest_release = '2018.3.0' # latest release
previous_release = '2017.7.5' # latest release from previous branch
previous_release_dir = '2017.7' # path on web server for previous branch
next_release = '' # next release
next_release_dir = '' # path on web server for next release branch
@ -341,10 +341,15 @@ rst_prolog = """\
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownload| raw:: html
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg"><strong>salt-{release}-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg.md5"><strong>md5</strong></a></p>
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=release)
@ -365,7 +370,7 @@ gettext_compact = False
### HTML options
html_theme = 'saltstack2' #change to 'saltstack' to use previous theme
html_theme = os.environ.get('HTML_THEME', 'saltstack2') # set 'HTML_THEME=saltstack' to use previous theme
html_theme_path = ['_themes']
html_title = u''
html_short_title = 'Salt'


@ -551,7 +551,7 @@ Default: ``0``
Memcache is an additional cache layer that keeps a limited amount of data
fetched from the minion data cache for a limited period of time in memory that
makes cache operations faster. It doesn't make much sence for the ``localfs``
makes cache operations faster. It doesn't make much sense for the ``localfs``
cache driver but helps for more complex drivers like ``consul``.
This option sets the memcache items expiration time. By default is set to ``0``
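A minimal master-config sketch of this cache layer, assuming the option being described here is ``memcache_expire_seconds`` (the name referenced later in these notes) and that 60 seconds is purely an illustrative value:

.. code-block:: yaml

    memcache_expire_seconds: 60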


@ -808,6 +808,35 @@ A value of 10 minutes is a reasonable default.
grains_refresh_every: 0
.. conf_minion:: fibre_channel_grains
``fibre_channel_grains``
------------------------
Default: ``False``
The ``fibre_channel_grains`` setting will enable the ``fc_wwn`` grain for
Fibre Channel WWNs on the minion. Since this grain is expensive, it is
disabled by default.
.. code-block:: yaml
fibre_channel_grains: True
.. conf_minion:: iscsi_grains
``iscsi_grains``
------------------------
Default: ``False``
The ``iscsi_grains`` setting will enable the ``iscsi_iqn`` grain on the
minion. Since this grain is expensive, it is disabled by default.
.. code-block:: yaml
iscsi_grains: True
.. conf_minion:: mine_enabled
``mine_enabled``


@ -108,7 +108,7 @@ The frequency of keepalive checks, in minutes. It requires the
Default: ``True``
Wheter the proxy should maintain the connection with the remote
Whether the proxy should maintain the connection with the remote
device. Similarly to :conf_proxy:`proxy_keep_alive`, this option
is very specific to the design of the proxy module.
When :conf_proxy:`proxy_always_alive` is set to ``False``,
@ -126,7 +126,7 @@ has to be closed after every command.
Default: ``False``.
Wheter the pillar data to be merged into the proxy configuration options.
Whether the pillar data to be merged into the proxy configuration options.
As multiple proxies can run on the same server, we may need different
configuration options for each, while there's one single configuration file.
The solution is merging the pillar data of each proxy minion into the opts.
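As a minimal sketch, the first of these options might appear in a proxy minion config like this (the value shown is illustrative, not a recommendation):

.. code-block:: yaml

    proxy_always_alive: False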


@ -6,7 +6,7 @@ The Salt Fileserver and Client
Introduction
------------
Salt has a modular fileserver, and mulitple client classes which are used to
Salt has a modular fileserver, and multiple client classes which are used to
interact with it. This page serves as a developer's reference, to help explain
how the fileserver and clients both work.


@ -334,6 +334,7 @@ execution modules
publish
puppet
purefa
purefb
pushbullet
pushover_notify
pw_group


@ -0,0 +1,6 @@
===================
salt.modules.purefb
===================
.. automodule:: salt.modules.purefb
:members:


@ -125,6 +125,10 @@ state modules
influxdb_retention_policy
influxdb_user
infoblox
infoblox_a
infoblox_cname
infoblox_host_record
infoblox_range
ini_manage
ipmi
ipset


@ -0,0 +1,6 @@
salt.states.infoblox_a module
=============================
.. automodule:: salt.states.infoblox_a
:members:
:undoc-members:


@ -0,0 +1,6 @@
salt.states.infoblox_cname module
=================================
.. automodule:: salt.states.infoblox_cname
:members:
:undoc-members:


@ -0,0 +1,6 @@
salt.states.infoblox_host_record module
=======================================
.. automodule:: salt.states.infoblox_host_record
:members:
:undoc-members:


@ -0,0 +1,6 @@
salt.states.infoblox_range module
=================================
.. automodule:: salt.states.infoblox_range
:members:
:undoc-members:


@ -12,7 +12,7 @@ option to your state declaration:
service.running:
- parallel: True
Now ``nginx`` will be started in a seperate process from the normal state run
Now ``nginx`` will be started in a separate process from the normal state run
and will therefore not block additional states.
Parallel States and Requisites


@ -263,7 +263,7 @@ The use of ``require_any`` demands that one of the required states executes befo
dependent state. The state containing the ``require_any`` requisite is defined as the
dependent state. The states specified in the ``require_any`` statement are defined as the
required states. If at least one of the required state's execution succeeds, the dependent state
will then execute. If at least one of the required state's execution fails, the dependent state
will then execute. If all of the executions by the required states fail, the dependent state
will not execute.
.. code-block:: yaml
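    # Hedged sketch; the original example was not captured in this view.
    # State "B" runs if at least one of "A" or "C" succeeds; the IDs and
    # commands are placeholders.
    A:
      cmd.run:
        - name: echo A

    C:
      cmd.run:
        - name: /bin/false

    B:
      cmd.run:
        - name: echo B
        - require_any:
          - cmd: A
          - cmd: C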


@ -156,7 +156,7 @@ security_group
~~~~~~~~~~~~~~
.. versionadded:: next-release
You can specifiy a list of security groups (by name or id) that should be
You can specify a list of security groups (by name or id) that should be
assigned to the VM.
.. code-block:: yaml
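    # Hedged sketch; the original example was not captured in this view.
    # The profile, provider, and group names are placeholders, and the
    # option key is inferred from the section title above.
    my-openstack-profile:
      provider: my-openstack-config
      security_group:
        - default
        - salt-minions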


@ -344,7 +344,35 @@ be set in the configuration file to enable interfacing with GoGrid:
OpenStack
---------
.. automodule:: salt.cloud.clouds.openstack
Salt's OpenStack support uses the `shade <https://docs.openstack.org/shade/latest/>`_ driver managed by the
openstack-infra team.
This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`_.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
cloud: mycloud
Or by just configuring the same auth block directly in the cloud provider config.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
auth:
username: 'demo'
password: secret
project_name: 'demo'
auth_url: 'http://openstack/identity'
Both of these methods support using the
`vendor <https://docs.openstack.org/os-client-config/latest/user/vendor-support.html>`_
options.
For more information, see the :mod:`OpenStack Cloud Driver Docs <salt.cloud.clouds.openstack>`.
DigitalOcean
------------


@ -38,7 +38,7 @@ Set up the provider cloud configuration file at ``/etc/salt/cloud.providers`` or
Profile Configuration
=====================
Linode profiles require a ``provider``, ``size``, ``image``, and ``location``. Set up an initial profile
at ``/etc/salt/cloud.profiles`` or in the ``/etc/salt/cloud.profiles.d/`` directory:
at ``/etc/salt/cloud.profiles`` or ``/etc/salt/cloud.profiles.d/*.conf``:
.. code-block:: yaml
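    # Hedged sketch; the original example was not captured in this view.
    # The size, image, and location values are illustrative placeholders.
    linode_2gb:
      provider: my-linode-config
      size: Linode 2GB
      image: Ubuntu 16.04 LTS
      location: Newark, NJ, USA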


@ -0,0 +1,5 @@
==============================
Getting Started with OpenStack
==============================
.. automodule:: salt.cloud.clouds.openstack


@ -104,13 +104,18 @@ Here is an example of a profile:
profitbricks_production:
provider: my-profitbricks-config
image: Ubuntu-15.10-server-2016-05-01
image_password: MyPassword1
disk_type: SSD
disk_size: 40
cores: 8
cpu_family: INTEL_XEON
ram: 32768
public_lan: 1
public_ips:
- 172.217.18.174
private_lan: 2
private_ips:
- 192.168.100.10
public_firewall_rules:
Allow SSH:
protocol: TCP
@ -151,6 +156,13 @@ command:
# salt-cloud --list-sizes my-profitbricks-config
.. versionadded:: Fluorine
One or more public IP address can be reserved with the following command:
.. code-block:: bash
# salt-cloud -f reserve_ipblock my-profitbricks-config location='us/ewr' size=1
Profile Specifics:
------------------
@ -185,6 +197,14 @@ disk_type
This option allows the disk type to be set to HDD or SSD. The default is
HDD.
.. versionadded:: Fluorine
image_password
A password is set on the image for the "root" or "Administrator" account.
This field may only be set during volume creation. Only valid with
ProfitBricks supplied HDD (not ISO) images. The password must contain at
least 8 and no more than 50 characters. Only these characters are
allowed: [a-z][A-Z][0-9]
cores
This option allows you to override the number of CPU cores as defined by
the size.
@ -199,6 +219,10 @@ public_lan
LAN exists, then a new public LAN will be created. The value accepts a LAN
ID (integer).
.. versionadded:: Fluorine
public_ips
Public IPs assigned to the NIC in the public LAN.
public_firewall_rules
This option allows for a list of firewall rules assigned to the public
network interface.
@ -218,6 +242,10 @@ private_lan
LAN exists, then a new private LAN will be created. The value accepts a LAN
ID (integer).
.. versionadded:: Fluorine
private_ips
Private IPs assigned in the private LAN. NAT setting is ignored when this setting is active.
private_firewall_rules
This option allows for a list of firewall rules assigned to the private
network interface.


@ -1,188 +0,0 @@
==============================
Getting Started With Rackspace
==============================
Rackspace is a major public cloud platform which may be configured using either
the `openstack` driver.
Dependencies
============
* Libcloud >= 0.13.2
Configuration
=============
To use the `openstack` driver (recommended), set up the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/rackspace.conf``:
.. code-block:: yaml
my-rackspace-config:
# Set the location of the salt-master
#
minion:
master: saltmaster.example.com
# Configure Rackspace using the OpenStack plugin
#
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
compute_name: cloudServersOpenStack
protocol: ipv4
# Set the compute region:
#
compute_region: DFW
# Configure Rackspace authentication credentials
#
user: myname
tenant: 123456
apikey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
driver: openstack
.. note::
.. versionchanged:: 2015.8.0
The ``provider`` parameter in cloud provider definitions was renamed to ``driver``. This
change was made to avoid confusion with the ``provider`` parameter that is used in cloud profile
definitions. Cloud provider definitions now use ``driver`` to refer to the Salt cloud module that
provides the underlying functionality to connect to a cloud host, while cloud profiles continue
to use ``provider`` to refer to provider configurations that you define.
Compute Region
==============
Rackspace currently has six compute regions which may be used:
.. code-block:: bash
DFW -> Dallas/Forth Worth
ORD -> Chicago
SYD -> Sydney
LON -> London
IAD -> Northern Virginia
HKG -> Hong Kong
Note: Currently the LON region is only available with a UK account, and UK accounts cannot access other regions
Authentication
==============
The ``user`` is the same user as is used to log into the Rackspace Control
Panel. The ``tenant`` and ``apikey`` can be found in the API Keys area of the
Control Panel. The ``apikey`` will be labeled as API Key (and may need to be
generated), and ``tenant`` will be labeled as Cloud Account Number.
An initial profile can be configured in ``/etc/salt/cloud.profiles`` or
``/etc/salt/cloud.profiles.d/rackspace.conf``:
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
To instantiate a machine based on this profile:
.. code-block:: bash
# salt-cloud -p openstack_512 myinstance
This will create a virtual machine at Rackspace with the name ``myinstance``.
This operation may take several minutes to complete, depending on the current
load at the Rackspace data center.
Once the instance has been created with salt-minion installed, connectivity to
it can be verified with Salt:
.. code-block:: bash
# salt myinstance test.ping
RackConnect Environments
------------------------
Rackspace offers a hybrid hosting configuration option called RackConnect that
allows you to use a physical firewall appliance with your cloud servers. When
this service is in use the public_ip assigned by nova will be replaced by a NAT
ip on the firewall. For salt-cloud to work properly it must use the newly
assigned "access ip" instead of the Nova assigned public ip. You can enable that
capability by adding this to your profiles:
.. code-block:: yaml
openstack_512:
provider: my-openstack-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
rackconnect: True
Managed Cloud Environments
--------------------------
Rackspace offers a managed service level of hosting. As part of the managed
service level you have the ability to choose from base of lamp installations on
cloud server images. The post build process for both the base and the lamp
installations used Chef to install things such as the cloud monitoring agent and
the cloud backup agent. It also takes care of installing the lamp stack if
selected. In order to prevent the post installation process from stomping over
the bootstrapping you can add the below to your profiles.
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
managedcloud: True
First and Next Generation Images
--------------------------------
Rackspace provides two sets of virtual machine images, *first*, and *next*
generation. As of ``0.8.9`` salt-cloud will default to using the *next*
generation images. To force the use of first generation images, on the profile
configuration please add:
.. code-block:: yaml
FreeBSD-9.0-512:
provider: my-rackspace-config
size: 512 MB Standard
image: FreeBSD 9.0
force_first_gen: True
Private Subnets
---------------
By default salt-cloud will not add Rackspace private networks to new servers. To enable
a private network to a server instantiated by salt cloud, add the following section
to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)
.. code-block:: yaml
networks:
- fixed:
# This is the private network
- private-network-id
# This is Rackspace's "PublicNet"
- 00000000-0000-0000-0000-000000000000
# This is Rackspace's "ServiceNet"
- 11111111-1111-1111-1111-111111111111
To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.
The order of the networks in the above code block does not map to the order of the
ethernet devices on newly created servers. Public IP will always be first ( eth0 )
followed by servicenet ( eth1 ) and then private networks.
Enabling the private network per above gives the option of using the private subnet for
all master-minion communication, including the bootstrap install of salt-minion. To
enable the minion to use the private subnet, update the master: line in the minion:
section of the providers file. To configure the master to only listen on the private
subnet IP, update the interface: line in the /etc/salt/master file to be the private
subnet IP of the salt master.


@ -436,7 +436,7 @@ external resource, like a cloud virtual machine. This decorator is not normally
used by developers outside of the Salt core team.
`@destructiveTest` -- Marks a test as potentially destructive. It will not be run
by the test runner unles the ``-run-destructive`` test is expressly passed.
by the test runner unless the ``-run-destructive`` test is expressly passed.
`@requires_network` -- Requires a network connection for the test to operate
successfully. If a network connection is not detected, the test will not run.


@ -98,7 +98,7 @@ Mocking Loader Modules
Salt loader modules use a series of globally available dunder variables,
``__salt__``, ``__opts__``, ``__pillar__``, etc. To facilitate testing these
modules a mixin class was created, ``LoaderModuleMockMixin`` which can be found
in ``tests/support/mixins.py``. The reason for the existance of this class is
in ``tests/support/mixins.py``. The reason for the existence of this class is
because, historically and because it was easier, one would add these dunder
variables directly on the imported module. This, however, introduces unexpected
behavior when running the full test suite since those attributes would not be


@ -8,7 +8,8 @@ Installation from the Official SaltStack Repository
===================================================
**Latest stable build from the selected branch**:
|osxdownload|
|osxdownloadpy2|
|osxdownloadpy3|
The output of ``md5 <salt pkg>`` should match the contents of the
corresponding md5 file.


@ -17,4 +17,4 @@ For example, to install the develop version of salt:
.. note::
SaltStack does offer commerical support for Solaris which includes packages.
SaltStack does offer commercial support for Solaris which includes packages.


@ -18,7 +18,7 @@ and others):
The major difference between these two mechanism is from where results are
returned (from the Salt Master or Salt Minion). Configuring either of these
options will also make the :py:mod:`Jobs Runner functions <salt.runners.jobs>`
to automatically query the remote stores for infomation.
to automatically query the remote stores for information.
External Job Cache - Minion-Side Returner
-----------------------------------------


@ -88,6 +88,33 @@ by their ``os`` grain:
- match: grain
- servers
Pillar definitions can also take a keyword argument ``ignore_missing``.
When the value of ``ignore_missing`` is ``True``, all errors for missing
pillar files are ignored. The default value for ``ignore_missing`` is
``False``.
Here is an example using the ``ignore_missing`` keyword parameter to ignore
errors for missing pillar files:
.. code-block:: yaml
base:
'*':
- servers
- systems
- ignore_missing: True
Assuming that the ``servers`` pillar exists in the fileserver backend
and the ``systems`` pillar does not, all pillar data from ``servers`` is
delivered to the minions, and no error for the missing ``systems`` pillar
is recorded under the ``_errors`` key in the pillar data delivered to
minions.
If the ``ignore_missing`` keyword parameter were instead set to ``False``,
the missing ``systems`` pillar would produce the error
``Specified SLS 'systems' in environment 'base' is not available on the salt master``
under the ``_errors`` key in the pillar data delivered to minions.
``/srv/pillar/packages.sls``
.. code-block:: jinja
@ -168,6 +195,28 @@ And the actual pillar file at '/srv/pillar/common_pillar.sls':
context.
Dynamic Pillar Environments
===========================
If environment ``__env__`` is specified in :conf_master:`pillar_roots`, all
environments that are not explicitly specified in :conf_master:`pillar_roots`
will map to the directories from ``__env__``. This allows one to use dynamic
git branch based environments for state/pillar files with the same file-based
pillar applying to all environments. For example:
.. code-block:: yaml
pillar_roots:
__env__:
- /srv/pillar
ext_pillar:
- git:
- __env__ https://example.com/git-pillar.git
.. versionadded:: 2017.7.5,2018.3.1
Pillar Namespace Flattening
===========================
@ -409,7 +458,7 @@ module. This module includes several functions, each of them with their own
use. These functions include:
- :py:func:`pillar.item <salt.modules.pillar.item>` - Retrieves the value of
one or more keys from the :ref:`in-memory pillar datj <pillar-in-memory>`.
one or more keys from the :ref:`in-memory pillar data <pillar-in-memory>`.
- :py:func:`pillar.items <salt.modules.pillar.items>` - Compiles a fresh pillar
dictionary and returns it, leaving the :ref:`in-memory pillar data
<pillar-in-memory>` untouched. If pillar keys are passed to this function


@ -68,7 +68,7 @@ and each event tag has a list of reactor SLS files to be run.
Reactor SLS files are similar to State and Pillar SLS files. They are by
default YAML + Jinja templates and are passed familiar context variables.
Click :ref:`here <reactor-jinja-context>` for more detailed information on the
variables availble in Jinja templating.
variables available in Jinja templating.
Here is the SLS for a simple reaction:
@ -178,7 +178,7 @@ The below two examples are equivalent:
| | fromrepo: updates |
+---------------------------------+-----------------------------+
This reaction would be equvalent to running the following Salt command:
This reaction would be equivalent to running the following Salt command:
.. code-block:: bash
@ -229,7 +229,7 @@ The below two examples are equivalent:
+-------------------------------------------------+-------------------------------------------------+
Assuming that the event tag is ``foo``, and the data passed to the event is
``{'bar': 'baz'}``, then this reaction is equvalent to running the following
``{'bar': 'baz'}``, then this reaction is equivalent to running the following
Salt command:
.. code-block:: bash
@ -294,7 +294,7 @@ The below two examples are equivalent:
| - name: /tmp/foo | - /tmp/foo |
+---------------------------------+---------------------------+
This reaction is equvalent to running the following Salt command:
This reaction is equivalent to running the following Salt command:
.. code-block:: bash


@ -21,7 +21,7 @@ Minion Data Cache Fixes
Added Memcache booster for the minion data cache.
Memcache is an additional cache layer that keeps a limited amount of data
fetched from the minion data cache for a limited period of time in memory that
makes cache operations faster. It doesn't make much sence for the ``localfs``
makes cache operations faster. It doesn't make much sense for the ``localfs``
cache driver but helps for more complex drivers like ``consul``.
For more details see ``memcache_expire_seconds`` and other ``memcache_*``
options in the master config reference.


@ -518,7 +518,7 @@ Changes:
* ef8e3ef569 Update win_pki.py
- **PR** `#41557`_: (*dmurphy18*) Add symbolic link for salt-proxy service similar to other serivce files
- **PR** `#41557`_: (*dmurphy18*) Add symbolic link for salt-proxy service similar to other service files
@ *2017-06-06T17:13:52Z*
* 3335fcbc7d Merge pull request `#41557`_ from dmurphy18/fix-proxy-service
@ -753,7 +753,7 @@ Changes:
* 66ab1e5184 Re-adding neutron dependency check
* cce07eefc2 Updating Neutron module to suport KeystoneAuth
* cce07eefc2 Updating Neutron module to support KeystoneAuth
- **PR** `#41409`_: (*garethgreenaway*) Fixes to ipc transport
@ *2017-05-25T21:06:27Z*
@ -926,7 +926,7 @@ Changes:
- **ISSUE** `#41306`_: (*lomeroe*) win_lgpo does not properly pack group policy version number in gpt.ini
| refs: `#41319`_ `#41307`_
- **PR** `#41307`_: (*lomeroe*) properly pack/unpack the verison numbers into a number
- **PR** `#41307`_: (*lomeroe*) properly pack/unpack the version numbers into a number
| refs: `#41319`_
* 140b0427e1 Merge pull request `#41319`_ from lomeroe/bp_41307
* 4f0aa577a5 backport 41307 to 2016.11, properly pack version numbers into single number


@ -632,7 +632,7 @@ Changes:
* 3072576 Merge pull request `#42629`_ from xiaoanyunfei/tornadoapi
* 1e13383 tornado api
- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy
- **PR** `#42655`_: (*whiteinge*) Re-enable cpstats for rest_cherrypy
@ *2017-08-03T20:44:10Z*
- **PR** `#33806`_: (*cachedout*) Work around upstream cherrypy bug
@ -640,7 +640,7 @@ Changes:
* f0f00fc Merge pull request `#42655`_ from whiteinge/rest_cherrypy-reenable-stats
* deb6316 Fix lint errors
* 6bd91c8 Reenable cpstats for rest_cherrypy
* 6bd91c8 Re-enable cpstats for rest_cherrypy
- **PR** `#42693`_: (*gilbsgilbs*) Fix RabbitMQ tags not properly set.
@ *2017-08-03T20:23:08Z*
@ -847,11 +847,11 @@ Changes:
* 42bb1a6 Merge pull request `#42350`_ from twangboy/win_fix_ver_grains_2016.11
* 8c04840 Detect Server OS with a desktop release name
- **PR** `#42356`_: (*meaksh*) Allow to check whether a function is available on the AliasesLoader wrapper
- **PR** `#42356`_: (*meaksh*) Allow checking whether a function is available on the AliasesLoader wrapper
@ *2017-07-19T16:56:41Z*
* 0a72e56 Merge pull request `#42356`_ from meaksh/2016.11-AliasesLoader-wrapper-fix
* 915d942 Allow to check whether a function is available on the AliasesLoader wrapper
* 915d942 Allow checking whether a function is available on the AliasesLoader wrapper
- **PR** `#42368`_: (*twangboy*) Remove build and dist directories before install (2016.11)
@ *2017-07-19T16:47:28Z*
@ -1392,7 +1392,7 @@ Changes:
* 7f69613 test and lint fixes
* 8ee4843 Suppress output of crypt context and be more specifc with whitespace vs. serial
* 8ee4843 Suppress output of crypt context and be more specific with whitespace vs. serial
* 61f817d Match serials based on output position (fix for non-English languages)


@ -29,7 +29,7 @@ Significate changes (PR #43708 & #45390, damon-atkins) have been made to the pkg
- ``pkg.install`` without a ``version`` parameter no longer upgrades software if the software is already installed. Use ``pkg.install version=latest`` or in a state use ``pkg.latest`` to get the old behavior.
- ``pkg.list_pkgs`` now returns multiple versions if software installed more than once.
- ``pkg.list_pkgs`` now returns 'Not Found' when the version is not found instead of '(value not set)' which matches the contents of the sls definitions.
- ``pkg.remove()`` will wait upto 3 seconds (normally about a second) to detect changes in the registry after removing software, improving reporting of version changes.
- ``pkg.remove()`` will wait up to 3 seconds (normally about a second) to detect changes in the registry after removing software, improving reporting of version changes.
- ``pkg.remove()`` can remove ``latest`` software, if ``latest`` is defined in sls definition.
- Documentation was updated for the execution module to match the style in new versions, with some corrections as well.
- All install/remove commands are prefixed with the cmd.exe shell and cmdmod is called with a command line string instead of a list. Some sls files in saltstack/salt-winrepo-ng expected the commands to be prefixed with cmd.exe (i.e. the use of ``&``).
@ -407,7 +407,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
@ *2017-11-28T21:50:19Z*
* 998d714ee7 Merge pull request `#44517`_ from whytewolf/publish_port_doc_missing
* 4b5855283a missed one place where i didnt chanbge master_port from my copy to publish_port
* 4b5855283a missed one place where i didn't change master_port from my copy to publish_port
* e4610baea5 update doc to have publish port
@ -598,7 +598,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
@ *2017-10-31T17:56:34Z*
* cab54e34b5 Merge pull request `#44173`_ from twangboy/win_system_docs
* 8e111b413d Fix some of the wording and grammer errors
* 8e111b413d Fix some of the wording and grammar errors
* a12bc5ae41 Use google style docstrings
@ -831,7 +831,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
- **ISSUE** `#43581`_: (*jcourington*) cherrypy stats issue
| refs: `#44021`_
- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy
- **PR** `#42655`_: (*whiteinge*) Re-enable cpstats for rest_cherrypy
| refs: `#44021`_
- **PR** `#33806`_: (*cachedout*) Work around upstream cherrypy bug
| refs: `#42655`_
@ -1001,13 +1001,13 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* ea8d273c2b Merge pull request `#43768`_ from vutny/fix-pylint-deprecation-warnings
* f8b3fa9da1 Merge branch '2016.11' into fix-pylint-deprecation-warnings
- **PR** `#43772`_: (*gtmanfred*) dont print Minion not responding with quiet
- **PR** `#43772`_: (*gtmanfred*) don't print Minion not responding with quiet
@ *2017-09-27T15:39:18Z*
- **ISSUE** `#40311`_: (*cralston0*) --hide-timeout used with --output json --static produces unparseable JSON
| refs: `#43772`_
* 1a8cc60bb4 Merge pull request `#43772`_ from gtmanfred/2016.11
* 0194c60960 dont print Minion not responding with quiet
* 0194c60960 don't print Minion not responding with quiet
- **PR** `#43747`_: (*rallytime*) Add GPG Verification section to Contributing Docs
@ *2017-09-26T21:25:37Z*


@ -41,7 +41,7 @@ Salt's policy has always been that when upgrading, the minion should never be
on a newer version than the master. Specifically with this update, because of
changes in the fileclient, the 2017.7 minion requires a 2017.7 master.
Backwards compatiblity is still maintained, so older minions can still be used.
Backwards compatibility is still maintained, so older minions can still be used.
More information can be found in the :ref:`Salt FAQ<which-version>`
@ -54,7 +54,7 @@ The :py:func:`service.masked <salt.states.service.masked>` and
added to allow Salt to manage masking of systemd units.
Additionally, the following functions in the :mod:`systemd
<salt.modules.systemd>` execution module have changed to accomodate the fact
<salt.modules.systemd>` execution module have changed to accommodate the fact
that indefinite and runtime masks can co-exist for the same unit:
- :py:func:`service.masked <salt.modules.systemd.masked>` - The return from
@ -152,7 +152,7 @@ State Module Changes
In the rare case that you have a function that needs to be called several times
but with different parameters, an additional "tagging" feature comes to the
rescue. In order to tag a function, use a colon delimeter. For example:
rescue. In order to tag a function, use a colon delimiter. For example:
.. code-block:: yaml
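    # Hedged sketch; the original example was not captured in this view.
    # With the new module.run syntax, appending ':<tag>' to a function name
    # lets the same function be listed more than once. The function, tag,
    # and arguments below are placeholders.
    copy_twice:
      module.run:
        - file.copy:
          - src: /tmp/foo
          - dst: /tmp/bar
        - file.copy:backup:
          - src: /tmp/foo
          - dst: /tmp/foo.bak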
@ -281,7 +281,7 @@ Minion Configuration Additions
salt-api Changes
================
The ``rest_cherrypy`` netapi module has recieved a few minor improvements:
The ``rest_cherrypy`` netapi module has received a few minor improvements:
* A CORS bugfix.
* A new ``/token`` convenience endpoint to generate Salt eauth tokens.
@ -557,7 +557,7 @@ of objects (users, databases, roles, etc.).
.. note::
With the `Moby announcement`_ coming at this year's DockerCon_, Salt's
:mod:`docker <salt.modules.dockermod>` execution module (as well as the
state modules) work interchangably when **docker** is replaced with
state modules) work interchangeably when **docker** is replaced with
**moby** (e.g. :py:func:`moby_container.running
<salt.states.docker_container.running>`, :py:func:`moby_image.present
<salt.states.docker_image.present>`, :py:func:`moby.inspect_container


@ -962,7 +962,7 @@ Changes
- **PR** `#42884`_: (*Giandom*) Convert to dict type the pillar string value passed from slack
@ *2017-08-16T22:30:43Z*
- **ISSUE** `#42842`_: (*Giandom*) retreive kwargs passed with slack engine
- **ISSUE** `#42842`_: (*Giandom*) retrieve kwargs passed with slack engine
| refs: `#42884`_
* 82be9dceb6 Merge pull request `#42884`_ from Giandom/2017.7.1-fix-slack-engine-pillar-args
* 80fd733c99 Update slack.py
@ -1235,13 +1235,13 @@ Changes
* 4ce96eb1a1 Merge pull request `#42778`_ from gtmanfred/spm
* 7ef691e8da make sure to use the correct out_file
- **PR** `#42857`_: (*gtmanfred*) use older name if _create_unverified_context is unvailable
- **PR** `#42857`_: (*gtmanfred*) use older name if _create_unverified_context is unavailable
@ *2017-08-11T13:37:59Z*
- **ISSUE** `#480`_: (*zyluo*) PEP8 types clean-up
| refs: `#42857`_
* 3d05d89e09 Merge pull request `#42857`_ from gtmanfred/vmware
* c1f673eca4 use older name if _create_unverified_context is unvailable
* c1f673eca4 use older name if _create_unverified_context is unavailable
- **PR** `#42866`_: (*twangboy*) Change to GitPython version 2.1.1
@ *2017-08-11T13:23:52Z*
@ -1448,7 +1448,7 @@ Changes
| refs: `#42574`_
- **PR** `#42693`_: (*gilbsgilbs*) Fix RabbitMQ tags not properly set.
- **PR** `#42669`_: (*garethgreenaway*) [2016.11] Fixes to augeas module
- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy
- **PR** `#42655`_: (*whiteinge*) Re-enable cpstats for rest_cherrypy
- **PR** `#42629`_: (*xiaoanyunfei*) tornado api
- **PR** `#42623`_: (*terminalmage*) Fix unicode constructor in custom YAML loader
- **PR** `#42574`_: (*sbojarski*) Fixed error reporting in "boto_cfn.present" function.
@ -1469,7 +1469,7 @@ Changes
* deb6316d67 Fix lint errors
* 6bd91c8b03 Reenable cpstats for rest_cherrypy
* 6bd91c8b03 Re-enable cpstats for rest_cherrypy
* 21cf15f9c3 Merge pull request `#42693`_ from gilbsgilbs/fix-rabbitmq-tags
@ -2031,7 +2031,7 @@ Changes
- **PR** `#42368`_: (*twangboy*) Remove build and dist directories before install (2016.11)
- **PR** `#42360`_: (*Ch3LL*) [2016.11] Update version numbers in doc config for 2017.7.0 release
- **PR** `#42359`_: (*Ch3LL*) [2016.3] Update version numbers in doc config for 2017.7.0 release
- **PR** `#42356`_: (*meaksh*) Allow to check whether a function is available on the AliasesLoader wrapper
- **PR** `#42356`_: (*meaksh*) Allow checking whether a function is available on the AliasesLoader wrapper
- **PR** `#42352`_: (*CorvinM*) Multiple documentation fixes
- **PR** `#42350`_: (*twangboy*) Fixes problem with Version and OS Release related grains on certain versions of Python (2016.11)
- **PR** `#42319`_: (*rallytime*) Add more documentation for config options that are missing from master/minion docs
@ -2046,7 +2046,7 @@ Changes
* 0a72e56f6b Merge pull request `#42356`_ from meaksh/2016.11-AliasesLoader-wrapper-fix
* 915d94219e Allow to check whether a function is available on the AliasesLoader wrapper
* 915d94219e Allow checking whether a function is available on the AliasesLoader wrapper
* 10eb7b7a79 Merge pull request `#42368`_ from twangboy/win_fix_build_2016.11


@ -30,7 +30,7 @@ Significate changes (PR #43708 & #45390, damon-atkins) have been made to the pkg
- ``pkg.install`` without a ``version`` parameter no longer upgrades software if the software is already installed. Use ``pkg.install version=latest`` or in a state use ``pkg.latest`` to get the old behavior.
- ``pkg.list_pkgs`` now returns multiple versions if software installed more than once.
- ``pkg.list_pkgs`` now returns 'Not Found' when the version is not found instead of '(value not set)' which matches the contents of the sls definitions.
- ``pkg.remove()`` will wait upto 3 seconds (normally about a second) to detect changes in the registry after removing software, improving reporting of version changes.
- ``pkg.remove()`` will wait up to 3 seconds (normally about a second) to detect changes in the registry after removing software, improving reporting of version changes.
- ``pkg.remove()`` can remove ``latest`` software, if ``latest`` is defined in sls definition.
- Documentation was updated for the execution module to match the style in new versions, with some corrections as well.
- All install/remove commands are prefixed with the cmd.exe shell and cmdmod is called with a command line string instead of a list. Some sls files in saltstack/salt-winrepo-ng expected the commands to be prefixed with cmd.exe (i.e. the use of ``&``).
@ -69,7 +69,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
- **PR** `#45664`_: (*rallytime*) Back-port `#45452`_ to 2017.7.3
@ *2018-01-24T15:33:13Z*
- **PR** `#45452`_: (*adelcast*) opkg.py: make owner fuction return value, instead of iterator
- **PR** `#45452`_: (*adelcast*) opkg.py: make owner function return value, instead of iterator
| refs: `#45664`_
* 0717f7a578 Merge pull request `#45664`_ from rallytime/`bp-45452`_
* 369720677b opkg.py: make owner function return value, instead of iterator
@ -359,7 +359,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 66da9b47bc Merge pull request `#45299`_ from garethgreenaway/config_gate_auth_events
* 9a15ec3430 Updating versionadded string. Fixing typo.
* edfc3dc078 Adding in documention for `auth_events` configuration option
* edfc3dc078 Adding in documentation for `auth_events` configuration option
* 3ee4eabffd Fixing small typo
@ -1007,7 +1007,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 4b60b1ec84 Merge remote branch 'refs/remotes/upstream/2017.7' into 2017.7_replace_with_newer_2016.11_win_pkg
* b46f818a57 Raise a PR to fix 2016 issues commited here, fixed issues with merge.
* b46f818a57 Raise a PR to fix 2016 issues committed here, fixed issues with merge.
* 32ef1e12ae Merge branch '2017.7' into 2017.7_replace_with_newer_2016.11_win_pkg
@ -1362,7 +1362,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 998d714ee7 Merge pull request `#44517`_ from whytewolf/publish_port_doc_missing
* 4b5855283a missed one place where i didnt chanbge master_port from my copy to publish_port
* 4b5855283a missed one place where i didn't change master_port from my copy to publish_port
* e4610baea5 update doc to have publish port
@ -1569,9 +1569,9 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 3bb385b44e removing debugging logging
* 7f0ff5a8b0 When passing IDs on the command line convert them all the strings for later comparision.
* 7f0ff5a8b0 When passing IDs on the command line convert them all the strings for later comparison.
* 99e436add4 When looking for job ids to remove based on the tag_name the comparision was comparing an INT to a STR, so the correct job id was not being returned.
* 99e436add4 When looking for job ids to remove based on the tag_name the comparison was comparing an INT to a STR, so the correct job id was not being returned.
- **PR** `#44695`_: (*gtmanfred*) pop None for runas and runas_password
@ *2017-12-01T14:35:01Z*
@ -1714,7 +1714,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 88ef9f18fc ignore lint error on import
* 25427d845e convert key iterator to list as python 3 wont index an iterator
* 25427d845e convert key iterator to list as python 3 won't index an iterator
* bce50154e5 Merge branch '2017.7' into improve-net-load
@ -1773,13 +1773,13 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* c6733ac1ee pop None
- **PR** `#44616`_: (*Ch3LL*) Add Non Base Environement salt:// source integration test
- **PR** `#44616`_: (*Ch3LL*) Add Non Base Environment salt:// source integration test
@ *2017-11-22T16:13:54Z*
* d6ccf4bb30 Merge pull request `#44616`_ from Ch3LL/nonbase_test
* 80b71652e3 Merge branch '2017.7' into nonbase_test
* c9ba33432e Add Non Base Environement salt:// source integration test
* c9ba33432e Add Non Base Environment salt:// source integration test
- **PR** `#44617`_: (*Ch3LL*) Add ssh thin_dir integration test
@ *2017-11-22T16:12:51Z*
@ -1896,7 +1896,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 1643bb7fd4 Merge pull request `#44551`_ from cloudflare/annoying-tmpnam
* ce1882943d Use salt.utils.files.mkstemp() instead
* 6689bd3b2d Dont use dangerous os.tmpnam
* 6689bd3b2d Don't use dangerous os.tmpnam
* 2d6176b0bc Fx2 proxy minion: clean return, like all the other modules
@ -2151,7 +2151,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* cab54e34b5 Merge pull request `#44173`_ from twangboy/win_system_docs
* 8e111b413d Fix some of the wording and grammer errors
* 8e111b413d Fix some of the wording and grammar errors
* a12bc5ae41 Use google style docstrings
@ -2728,7 +2728,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
@ *2017-10-17T15:24:19Z*
* 6252f82f58 Merge pull request `#44133`_ from cachedout/fix_paralell_docs
* 8d1c1e21f0 Fix typos in paralell states docs
* 8d1c1e21f0 Fix typos in parallel states docs
- **PR** `#44135`_: (*timfreund*) Insert missing verb in gitfs walkthrough
@ *2017-10-17T14:32:13Z*
@ -2814,7 +2814,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
- **PR** `#44021`_: (*whiteinge*) Also catch cpstats AttributeError for bad CherryPy release ~5.6.0
- **PR** `#44010`_: (*Ch3LL*) Security Fixes for 2016.3.8
- **PR** `#43977`_: (*Ch3LL*) Add Security Notes to 2016.3.8 Release Notes
- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy
- **PR** `#42655`_: (*whiteinge*) Re-enable cpstats for rest_cherrypy
| refs: `#44021`_
- **PR** `#33806`_: (*cachedout*) Work around upstream cherrypy bug
| refs: `#42655`_
@ -3286,7 +3286,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
- **ISSUE** `#40311`_: (*cralston0*) --hide-timeout used with --output json --static produces unparseable JSON
| refs: `#43772`_
- **PR** `#43772`_: (*gtmanfred*) dont print Minion not responding with quiet
- **PR** `#43772`_: (*gtmanfred*) don't print Minion not responding with quiet
- **PR** `#43747`_: (*rallytime*) Add GPG Verification section to Contributing Docs
* 9615ca32d5 Merge pull request `#43773`_ from rallytime/merge-2017.7
* f7035ed7da Merge branch '2017.7' into merge-2017.7
@ -3295,7 +3295,7 @@ Windows cmdmod forcing cmd to be a list (issue #43522) resolved by "cmdmod: Don'
* 1a8cc60bb4 Merge pull request `#43772`_ from gtmanfred/2016.11
* 0194c60960 dont print Minion not responding with quiet
* 0194c60960 don't print Minion not responding with quiet
* 9dee896fb9 Merge pull request `#43747`_ from rallytime/gpg-verification


@ -4,6 +4,22 @@
Salt 2018.3.0 Release Notes - Codename Oxygen
=============================================
Unicode/Python 3 Compatibility Improvements
-------------------------------------------
This release fixes a number of nagging issues with Unicode strings in Salt
under Python 2 (e.g. ``'ascii' codec can't decode byte 0xd0``). For best
results, use a UTF-8 locale, such as by setting the ``LANG`` environment
variable to one which supports UTF-8 (for example ``en_US.UTF-8``,
``de_DE.UTF-8``, ``ru_RU.UTF-8``, or ``C.UTF-8``).
Additionally, a number of Python 3 compatibility fixes have been made, many of
them having to do with file I/O and str/bytes mismatches.
We continue to work toward improving both Unicode and Python 3 compatibility
and welcome any feedback.
Lots of Docker Improvements
---------------------------
@ -465,7 +481,7 @@ Configuration
By default, automatic discovery is disabled.
..warning::
.. warning::
Due to current limitations that will change in a future release, make sure
your network is secured and trusted before you turn on auto-discovery.
@ -521,6 +537,8 @@ In addition to the ``mapping`` and ``port`` options, the following additional op
match a given Master. If set to ``any`` (the default), then any match to a
key/value mapping will constitute a match.
- ``pause`` - The interval in seconds between attempts (default: 5).
- ``fibre_channel_grains`` - Enables the ``fc_wwn`` grain. (Default: False)
- ``iscsi_grains`` - Enables the ``iscsi_iqn`` grain. (Default: False)
Connection to a type instead of DNS
===================================
@ -665,6 +683,37 @@ The Windows installer will now display command-line help when a help switch
Salt Cloud Features
-------------------
OpenStack Revamp
================
The OpenStack driver has been rewritten mostly from scratch. Salt now uses
the `shade driver <https://docs.openstack.org/shade/latest/>`_.
With this, the ``nova`` driver is being deprecated.
:mod:`openstack driver <salt.cloud.clouds.openstack>`
Several new modules and states have also been added for managing OpenStack
setups using shade.
:mod:`keystone <salt.modules.keystoneng>`
:mod:`keystone role grant <salt.states.keystone_role_grant>`
:mod:`keystone group <salt.states.keystone_group>`
:mod:`keystone role <salt.states.keystone_role>`
:mod:`keystone service <salt.states.keystone_service>`
:mod:`keystone user <salt.states.keystone_user>`
:mod:`keystone domain <salt.states.keystone_domain>`
:mod:`keystone project <salt.states.keystone_project>`
:mod:`keystone endpoint <salt.states.keystone_endpoint>`
:mod:`glance <salt.modules.glanceng>`
:mod:`glance_image <salt.states.glance_image>`
:mod:`neutron <salt.modules.neutronng>`
:mod:`neutron subnet <salt.states.neutron_subnet>`
:mod:`neutron secgroup <salt.states.neutron_secgroup>`
:mod:`neutron secgroup rule <salt.states.neutron_secgroup_rule>`
:mod:`neutron network <salt.states.neutron_network>`
Pre-Flight Commands
===================
@ -1491,7 +1540,7 @@ The use of ``require_any`` demands that one of the required states executes befo
dependent state. The state containing the ``require_any`` requisite is defined as the
dependent state. The states specified in the ``require_any`` statement are defined as the
required states. If at least one of the required state's execution succeeds, the dependent state
will then execute. If at least one of the required state's execution fails, the dependent state
will then execute. If all of the executions by the required states fail, the dependent state
will not execute.
- ``watch_any``
@ -1557,6 +1606,14 @@ PyCrypto is used as it was in the previous releases. M2Crypto is used in the
same way as PyCrypto, so there are no compatibility issues; different nodes
can use different backends.
NaCL Module and Runner changes
------------------------------
In addition to argument changes in both the NaCL module and runner (for future
deprecation in the Fluorine release), the default ``box_type`` has changed from
``secretbox`` to ``sealedbox``. SecretBox data is encrypted using the private
key ``sk``, and SealedBox data is encrypted using the public key ``pk``.
Deprecations
------------
@ -1617,6 +1674,15 @@ The ``win_service`` module had the following changes:
- The ``type`` option was removed from the ``create`` function. Please use
``service_type`` instead.
The ``nacl`` module had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
Runner Deprecations
===================
@ -1625,6 +1691,14 @@ The ``manage`` runner had the following changes:
- The ``root_user`` kwarg was removed from the ``bootstrap`` function. Please
use ``salt-ssh`` roster entries for the host instead.
The ``nacl`` runner had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
State Deprecations
==================


@ -9,24 +9,89 @@ Minion Startup Events
---------------------
When a minion starts up it sends a notification on the event bus with a tag
that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
that looks like this: ``salt/minion/<minion_id>/start``. For historical reasons
the minion also sends a similar event with an event tag like this:
`minion_start`. This duplication can cause a lot of clutter on the event bus
when there are many minions. Set `enable_legacy_startup_events: False` in the
minion config to ensure only the `salt/minion/<minion_id>/start` events are
``minion_start``. This duplication can cause a lot of clutter on the event bus
when there are many minions. Set ``enable_legacy_startup_events: False`` in the
minion config to ensure only the ``salt/minion/<minion_id>/start`` events are
sent.
The new :conf_minion:`enable_legacy_startup_events` minion config option
defaults to ``True``, but will be set to default to ``False`` beginning with
the Neon release of Salt.
The Salt Syndic currently sends an old style `syndic_start` event as well. The
The Salt Syndic currently sends an old style ``syndic_start`` event as well. The
syndic respects :conf_minion:`enable_legacy_startup_events` as well.
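As a minimal sketch, the option described above can be set in the minion config file (shown here with the non-default value that silences the legacy event):

.. code-block:: yaml

    enable_legacy_startup_events: False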
Pass Through Options to :py:func:`file.serialize <salt.states.file.serialize>` State
------------------------------------------------------------------------------------
This allows for more granular control over the way in which the dataset is
serialized. See the documentation for the new ``serializer_opts`` option in the
:py:func:`file.serialize <salt.states.file.serialize>` state for more
information.
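As a hypothetical sketch of how this might look, assuming the JSON serializer passes ``serializer_opts`` entries through to ``json.dumps`` (the target path and dataset below are placeholders):

.. code-block:: yaml

    /etc/app/config.json:
      file.serialize:
        - dataset:
            name: example
            enabled: true
        - formatter: json
        - serializer_opts:
          - indent: 2
          - sort_keys: True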
Deprecations
------------
API Deprecations
================
Support for :ref:`LocalClient <local-client>`'s ``expr_form`` argument has
been removed. Please use ``tgt_type`` instead. This change was made due to
numerous reports of confusion among community members, since the targeting
method is published to minions as ``tgt_type``, and appears as ``tgt_type``
in the job cache as well.
Those who are using the :ref:`LocalClient <local-client>` (either directly,
or implicitly via a :ref:`netapi module <all-netapi-modules>`) need to update
their code to use ``tgt_type``.
.. code-block:: python
>>> import salt.client
>>> local = salt.client.LocalClient()
>>> local.cmd('*', 'cmd.run', ['whoami'], tgt_type='glob')
{'jerry': 'root'}
Module Deprecations
===================
The ``napalm_network`` module had the following changes:
- Support for the ``template_path`` has been removed in the ``load_template``
function. This is because support for NAPALM native templates has been
dropped.
The ``trafficserver`` module had the following changes:
- Support for the ``match_var`` function was removed. Please use the
``match_metric`` function instead.
- Support for the ``read_var`` function was removed. Please use the
``read_config`` function instead.
- Support for the ``set_var`` function was removed. Please use the
``set_config`` function instead.
The ``win_update`` module has been removed. It has been replaced by the ``win_wua``
module.
The ``win_wua`` module had the following changes:
- Support for the ``download_update`` function has been removed. Please use the
``download`` function instead.
- Support for the ``download_updates`` function has been removed. Please use the
``download`` function instead.
- Support for the ``install_update`` function has been removed. Please use the
``install`` function instead.
- Support for the ``install_updates`` function has been removed. Please use the
``install`` function instead.
- Support for the ``list_update`` function has been removed. Please use the
``get`` function instead.
- Support for the ``list_updates`` function has been removed. Please use the
``list`` function instead.
Pillar Deprecations
===================
@ -49,6 +114,40 @@ The ``cache`` roster had the following changes:
State Deprecations
==================
The ``docker`` state has been removed. The following functions should be used
instead.
- The ``docker.running`` function was removed. Please update applicable SLS files
to use the ``docker_container.running`` function instead.
- The ``docker.stopped`` function was removed. Please update applicable SLS files
to use the ``docker_container.stopped`` function instead.
- The ``docker.absent`` function was removed. Please update applicable SLS files
to use the ``docker_container.absent`` function instead.
- The ``docker.network_present`` function was removed. Please update applicable
SLS files to use the ``docker_network.present`` function instead.
- The ``docker.network_absent`` function was removed. Please update applicable
SLS files to use the ``docker_network.absent`` function instead.
- The ``docker.image_present`` function was removed. Please update applicable SLS
files to use the ``docker_image.present`` function instead.
- The ``docker.image_absent`` function was removed. Please update applicable SLS
files to use the ``docker_image.absent`` function instead.
- The ``docker.volume_present`` function was removed. Please update applicable SLS
files to use the ``docker_volume.present`` function instead.
- The ``docker.volume_absent`` function was removed. Please update applicable SLS
files to use the ``docker_volume.absent`` function instead.
The ``docker_network`` state had the following changes:
- Support for the ``driver`` option has been removed from the ``absent`` function.
This option had no functionality in ``docker_network.absent``.
The ``git`` state had the following changes:
- Support for the ``ref`` option in the ``detached`` state has been removed.
Please use the ``rev`` option instead.
The ``k8s`` state has been removed. The following functions should be used
instead:
@ -58,3 +157,24 @@ instead:
files to use the ``kubernetes.node_label_present`` function instead.
- The ``k8s.label_folder_absent`` function was removed. Please update applicable
SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.
The ``netconfig`` state had the following changes:
- Support for the ``template_path`` option in the ``managed`` state has been
removed. This is because support for NAPALM native templates has been dropped.
The ``trafficserver`` state had the following changes:
- Support for the ``set_var`` function was removed. Please use the ``config``
function instead.
The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.
Utils Deprecations
==================
The ``vault`` utils module had the following changes:
- Support for specifying Vault connection data within a 'profile' has been removed.
Please see the :mod:`vault execution module <salt.modules.vault>` documentation for
details on the new configuration schema.

View File

@ -79,22 +79,12 @@ from the ``kevinopenstack`` profile above, you would use:
salt-call sdb.get sdb://kevinopenstack/password
Some drivers use slightly more complex URIs. For instance, the ``vault`` driver
requires the full path to where the key is stored, followed by a question mark,
followed by the key to be retrieved. If you were using a profile called
``myvault``, you would use a URI that looks like:
.. code-block:: bash
salt-call sdb.get 'sdb://myvault/secret/salt?saltstack'
Setting a value uses the same URI as would be used to retrieve it, followed
by the value as another argument. For the above ``myvault`` URI, you would set
a new value using a command like:
by the value as another argument.
.. code-block:: bash
salt-call sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-call sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
Deleting values (if supported by the driver) is done pretty much the same way as
getting them. Provided that you have a profile called ``mykvstore`` that uses
@ -109,8 +99,8 @@ the runner system:
.. code-block:: bash
salt-run sdb.get 'sdb://myvault/secret/salt?saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-run sdb.get 'sdb://myvault/secret/salt/saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
salt-run sdb.delete 'sdb://mykvstore/foobar'
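The same operations are available from the Python API; a small sketch, reusing
the ``myvault`` profile and secret path assumed in the examples above:
.. code-block:: python
    # Sketch: sdb.set/sdb.get from the minion-side Python API, mirroring the
    # CLI examples above. The 'myvault' profile is assumed to be configured.
    import salt.client
    caller = salt.client.Caller()
    caller.cmd('sdb.set', 'sdb://myvault/secret/salt/saltstack', 'super awesome')
    print(caller.cmd('sdb.get', 'sdb://myvault/secret/salt/saltstack'))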

View File

@ -178,7 +178,7 @@ that the text following it can be evaluated properly.
local States
~~~~~~~~~~~~
``local`` states are evaluated locally; this is analagous to issuing a state
``local`` states are evaluated locally; this is analogous to issuing a state
run using a ``salt-call --local`` command. These commands will be issued on the
local machine running the ``spm`` command, whether that machine is a master or
a minion.

View File

@ -1042,7 +1042,7 @@ Thu Sep 19 17:18:06 UTC 2013 - aboe76@gmail.com
* salt-ssh requires sshpass
* salt-syndic requires salt-master
Minor features:
- 0.17.0 release wil be last release for 0.XX.X numbering system
- 0.17.0 release will be last release for 0.XX.X numbering system
Next release will be <Year>.<Month>.<Minor>
-------------------------------------------------------------------

View File

@ -206,7 +206,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
@ -226,24 +226,16 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
# Install PyWin32 from wheel file
# Cleaning Up PyWin32
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyWin32 . . ."
Write-Output " - $script_name :: Cleaning Up PyWin32 . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyWin322'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."

View File

@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts3Dir'])".ToLower())))
#==============================================================================
# Update PIP and SetupTools
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
@ -206,13 +206,13 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_PIP_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_PIP_LOCAL_CACHE -r $($script_path)\req_pip.txt" "pip install"
}
#==============================================================================
# Install pypi resources using pip
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
@ -226,24 +226,16 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Write-Output " If a (new) resource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
# Install PyWin32 from wheel file
# Cleaning Up PyWin32
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyWin32 . . ."
Write-Output " - $script_name :: Cleaning Up PyWin32 . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyWin323'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
# The dlls have to be in Python directory and the site-packages\win32 directory

View File

@ -113,6 +113,23 @@ xcopy /Q /Y "%SrcDir%\conf\master" "%CnfDir%\"
xcopy /Q /Y "%SrcDir%\conf\minion" "%CnfDir%\"
@echo.
@echo Copying NSSM to buildenv
@echo ----------------------------------------------------------------------
:: Make sure the "prereq" directory exists
If NOT Exist "%PreDir%" mkdir "%PreDir%"
:: Set the location of the nssm to download
Set Url64="https://repo.saltstack.com/windows/dependencies/64/nssm-2.24-101-g897c7ad.exe"
Set Url32="https://repo.saltstack.com/windows/dependencies/32/nssm-2.24-101-g897c7ad.exe"
:: Check for 64 bit by finding the Program Files (x86) directory
If Defined ProgramFiles(x86) (
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url64%" -file "%BldDir%\nssm.exe"
) Else (
powershell -ExecutionPolicy RemoteSigned -File download_url_file.ps1 -url "%Url32%" -file "%BldDir%\nssm.exe"
)
@echo.
@echo Copying VCRedist to Prerequisites
@echo ----------------------------------------------------------------------
:: Make sure the "prereq" directory exists

Binary file not shown.

View File

@ -661,7 +661,6 @@ Section -Post
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1"
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"

View File

@ -2,37 +2,37 @@ backports-abc==0.5
backports.ssl-match-hostname==3.5.0.1
certifi
cffi==1.10.0
cryptography==2.1.4
CherryPy==10.2.1
cryptography==1.8.1
enum34==1.1.6
futures==3.1.1
gitdb==0.6.4
GitPython==2.1.7
idna==2.6
GitPython==2.1.3
idna==2.5
ioloop==0.1a0
ipaddress==1.0.18
Jinja2==2.10
Jinja2==2.9.6
libnacl==1.6.1
lxml==4.1.1
Mako==1.0.7
lxml==3.7.3
Mako==1.0.6
MarkupSafe==1.0
msgpack-python==0.4.8
psutil==5.4.1
pyasn1==0.4.2
pycparser==2.18
psutil==5.2.2
pyasn1==0.2.3
pycparser==2.17
pycrypto==2.6.1
pycurl==7.43.0
PyMySQL==0.7.11
pyOpenSSL==17.5.0
python-dateutil==2.6.1
python-gnupg==0.4.1
pythonnet==2.3.0
pyyaml==3.12
pyzmq==16.0.3
requests==2.18.4
pyOpenSSL==17.0.0
python-dateutil==2.6.0
python-gnupg==0.4.0
pywin32==223
PyYAML==3.12
pyzmq==16.0.2
requests==2.13.0
singledispatch==3.4.0.3
six==1.11.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.2
wheel==0.30.0
tornado==4.5.1
wheel==0.30.0a0
WMI==1.4.9

View File

@ -1,6 +1,6 @@
-r base.txt
mock
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1

View File

@ -1,6 +1,6 @@
-r base.txt
mock
mock>=2.0.0
apache-libcloud>=0.14.0
boto>=2.32.1
boto3>=1.2.1

View File

@ -361,7 +361,7 @@ def groups(username, **kwargs):
[salt.utils.stringutils.to_str(_config('accountattributename')), str('cn')]) # future lint: disable=blacklisted-function
for entry, result in search_results:
for user in result[_config('accountattributename'), _config('groupattribute')]:
for user in result[_config('accountattributename')]:
if username == salt.utils.stringutils.to_unicode(user).split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])

View File

@ -79,7 +79,7 @@ def beacon(config):
The second one will match disks from A:\ to Z:\ on a Windows system
Note that regular expressions are evaluated after static mount points,
which means that if a regular expression matches an other defined mount point,
which means that if a regular expression matches another defined mount point,
it will override the previously defined threshold.
'''

View File

@ -119,7 +119,7 @@ def beacon(config):
for k in ['1m', '5m', '15m']:
LAST_STATUS[k] = avg_dict[k]
if not config['emitatstartup']:
log.debug('Dont emit because emitatstartup is False')
log.debug("Don't emit because emitatstartup is False")
return ret
send_beacon = False

View File

@ -392,7 +392,7 @@ def flush(bank, key=None):
An improvement for this would be loading a custom Lua script in the Redis instance of the user
(using the ``register_script`` feature) and call it whenever we flush.
This script would only need to build this sub-tree causing problems. It can be added later and the behaviour
should not change as the user needs to explicitely allow Salt inject scripts in their Redis instance.
should not change as the user needs to explicitly allow Salt inject scripts in their Redis instance.
'''
redis_server = _get_redis_server()
redis_pipe = redis_server.pipeline()

View File

@ -9,11 +9,11 @@
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
# Import Salt libs
import salt.client.netapi
import salt.utils.files
import salt.utils.parsers as parsers
from salt.utils.verify import check_user, verify_files, verify_log
@ -42,9 +42,8 @@ class SaltAPI(parsers.SaltAPIParser):
'udp://',
'file://')):
# Logfile is not using Syslog, verify
current_umask = os.umask(0o027)
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
os.umask(current_umask)
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
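# Minimal sketch of how a set_umask-style helper can be implemented; several
# hunks in this changeset swap manual os.umask() juggling for the
# salt.utils.files.set_umask context manager, whose shipped implementation may
# differ in details from this sketch.
import contextlib
import os
@contextlib.contextmanager
def set_umask(mask):
    '''Temporarily apply ``mask`` as the process umask, then restore the old one.'''
    if mask is None:
        # Nothing to change; still behave like a context manager.
        yield
        return
    old_mask = os.umask(mask)
    try:
        yield
    finally:
        # Restored even if the body raises, which the open-coded
        # os.umask()/os.umask() pattern did not always guarantee.
        os.umask(old_mask)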

View File

@ -30,7 +30,7 @@ class SPM(parsers.SPMParser):
self.parse_args()
self.setup_logfile_logger()
v_dirs = [
self.config['cachedir'],
self.config['spm_cache_dir'],
]
verify_env(v_dirs,
self.config['user'],

View File

@ -41,7 +41,6 @@ import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.versions
import salt.utils.zeromq
import salt.syspaths as syspaths
from salt.exceptions import (
@ -249,7 +248,7 @@ class LocalClient(object):
return pub_data
def _check_pub_data(self, pub_data):
def _check_pub_data(self, pub_data, listen=True):
'''
Common checks on the pub_data data structure returned from running pub
'''
@ -282,7 +281,13 @@ class LocalClient(object):
print('No minions matched the target. '
'No command was sent, no jid was assigned.')
return {}
else:
# don't install event subscription listeners when the request is async
# and doesn't care. this is important as it will create event leaks otherwise
if not listen:
return pub_data
if self.opts.get('order_masters'):
self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')
self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))
@ -315,15 +320,6 @@ class LocalClient(object):
>>> local.run_job('*', 'test.sleep', [300])
{'jid': '20131219215650131543', 'minions': ['jerry']}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
try:
@ -346,7 +342,7 @@ class LocalClient(object):
# Convert to generic client error and pass along message
raise SaltClientError(general_exception)
return self._check_pub_data(pub_data)
return self._check_pub_data(pub_data, listen=listen)
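# A short illustration (assumes a running master and a minion named 'jerry')
# of the effect of publishing with listen=False: cmd_async no longer creates
# an event subscription for a job whose results the caller never waits on,
# so only the jid comes back and no subscription is leaked.
import salt.client
local = salt.client.LocalClient()
jid = local.cmd_async('jerry', 'test.sleep', [30])
print('published job', jid)
# Results, if needed later, can be looked up by jid (for example via the jobs
# runner) without an event subscription having been held open in the meantime.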
def gather_minions(self, tgt, expr_form):
_res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
@ -380,15 +376,6 @@ class LocalClient(object):
>>> local.run_job_async('*', 'test.sleep', [300])
{'jid': '20131219215650131543', 'minions': ['jerry']}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
try:
@ -412,7 +399,7 @@ class LocalClient(object):
# Convert to generic client error and pass along message
raise SaltClientError(general_exception)
raise tornado.gen.Return(self._check_pub_data(pub_data))
raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen))
def cmd_async(
self,
@ -437,15 +424,6 @@ class LocalClient(object):
>>> local.cmd_async('*', 'test.sleep', [300])
'20131219215921857715'
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
pub_data = self.run_job(tgt,
fun,
@ -453,6 +431,7 @@ class LocalClient(object):
tgt_type,
ret,
jid=jid,
listen=False,
**kwargs)
try:
return pub_data['jid']
@ -484,15 +463,6 @@ class LocalClient(object):
>>> SLC.cmd_subset('*', 'test.ping', sub=1)
{'jerry': True}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
minion_ret = self.cmd(tgt,
'sys.list_functions',
tgt_type=tgt_type,
@ -547,15 +517,6 @@ class LocalClient(object):
{'dave': {...}}
{'stewart': {...}}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
import salt.cli.batch
arg = salt.utils.args.condition_input(arg, kwarg)
opts = {'tgt': tgt,
@ -705,15 +666,6 @@ class LocalClient(object):
minion ID. A compound command will return a sub-dictionary keyed by
function name.
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
was_listening = self.event.cpub
@ -774,15 +726,6 @@ class LocalClient(object):
:param verbose: Print extra information about the running command
:returns: A generator
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
was_listening = self.event.cpub
@ -861,15 +804,6 @@ class LocalClient(object):
{'dave': {'ret': True}}
{'stewart': {'ret': True}}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
was_listening = self.event.cpub
@ -937,15 +871,6 @@ class LocalClient(object):
None
{'stewart': {'ret': True}}
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
was_listening = self.event.cpub
@ -994,15 +919,6 @@ class LocalClient(object):
'''
Execute a salt command and return
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
arg = salt.utils.args.condition_input(arg, kwarg)
was_listening = self.event.cpub
@ -1045,15 +961,6 @@ class LocalClient(object):
:returns: all of the information for the JID
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
if verbose:
msg = 'Executing job with jid {0}'.format(jid)
print(msg)
@ -1124,15 +1031,6 @@ class LocalClient(object):
:returns: all of the information for the JID
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
if not isinstance(minions, set):
if isinstance(minions, six.string_types):
minions = set([minions])
@ -1571,15 +1469,6 @@ class LocalClient(object):
'''
log.trace('func get_cli_event_returns()')
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
if verbose:
msg = 'Executing job with jid {0}'.format(jid)
print(msg)
@ -1766,15 +1655,6 @@ class LocalClient(object):
minions:
A set, the targets that the tgt passed should match.
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
# Make sure the publisher is running by checking the unix socket
if (self.opts.get('ipc_mode', '') != 'tcp' and
not os.path.exists(os.path.join(self.opts['sock_dir'],
@ -1882,15 +1762,6 @@ class LocalClient(object):
minions:
A set, the targets that the tgt passed should match.
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
# Make sure the publisher is running by checking the unix socket
if (self.opts.get('ipc_mode', '') != 'tcp' and
not os.path.exists(os.path.join(self.opts['sock_dir'],

View File

@ -13,7 +13,6 @@ import logging
import salt.config
import salt.client
import salt.utils.kinds as kinds
import salt.utils.versions
import salt.syspaths as syspaths
try:
@ -49,15 +48,6 @@ class LocalClient(salt.client.LocalClient):
'''
Publish the command!
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
payload_kwargs = self._prep_pub(
tgt,
fun,

View File

@ -223,8 +223,8 @@ class SSH(object):
if self.opts['regen_thin']:
self.opts['ssh_wipe'] = True
if not salt.utils.path.which('ssh'):
raise salt.exceptions.SaltSystemExit('No ssh binary found in path -- ssh must be '
'installed for salt-ssh to run. Exiting.')
raise salt.exceptions.SaltSystemExit(code=-1,
msg='No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.')
self.opts['_ssh_version'] = ssh_version()
self.tgt_type = self.opts['selected_target_option'] \
if self.opts['selected_target_option'] else 'glob'
@ -1031,6 +1031,7 @@ class Single(object):
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']
opts_pkg['extension_modules'] = self.opts['extension_modules']
opts_pkg['module_dirs'] = self.opts['module_dirs']
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
opts_pkg['__master_opts__'] = self.context['master_opts']
if '_caller_cachedir' in self.opts:

View File

@ -9,7 +9,7 @@ import random
# Import Salt libs
import salt.config
import salt.utils.versions
import salt.utils.args
import salt.syspaths as syspaths
from salt.exceptions import SaltClientError # Temporary
@ -52,15 +52,6 @@ class SSHClient(object):
'''
Prepare the arguments
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
@ -88,15 +79,6 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
fun,
@ -122,15 +104,6 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
fun,
@ -226,14 +199,6 @@ class SSHClient(object):
.. versionadded:: 2017.7.0
'''
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop('expr_form')
minion_ret = self.cmd(tgt,
'sys.list_functions',
tgt_type=tgt_type,

View File

@ -102,13 +102,15 @@ def is_windows():
def need_deployment():
'''
Salt thin needs to be deployed - prep the target directory and emit the
delimeter and exit code that signals a required deployment.
delimiter and exit code that signals a required deployment.
'''
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
try:
os.makedirs(OPTIONS.saltdir)
os.umask(old_umask)
finally:
os.umask(old_umask) # pylint: disable=blacklisted-function
# Verify perms on saltdir
if not is_windows():
euid = os.geteuid()
@ -158,10 +160,10 @@ def unpack_thin(thin_path):
Unpack the Salt thin archive.
'''
tfile = tarfile.TarFile.gzopen(thin_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
except OSError:
@ -189,10 +191,10 @@ def unpack_ext(ext_path):
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
@ -299,7 +301,7 @@ def main(argv): # pylint: disable=W0613
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
@ -313,7 +315,7 @@ def main(argv): # pylint: disable=W0613
else:
subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -18,7 +18,6 @@ import logging
import salt.client.ssh
import salt.runner
import salt.utils.args
import salt.utils.versions
log = logging.getLogger(__name__)
@ -112,8 +111,7 @@ def publish(tgt,
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
roster=None):
'''
Publish a command "from the minion out to other minions". In reality, the
minion does not execute this function, it is executed by the master. Thus,
@ -172,17 +170,6 @@ def publish(tgt,
'''
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
return _publish(tgt,
fun,
arg=arg,
@ -199,8 +186,7 @@ def full_data(tgt,
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
roster=None):
'''
Return the full data about the publication, this is invoked in the same
way as the publish function
@ -222,17 +208,6 @@ def full_data(tgt,
salt-ssh '*' publish.full_data test.kwarg arg='cheese=spam'
'''
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
return _publish(tgt,
fun,
arg=arg,

View File

@ -491,7 +491,7 @@ def request(mods=None,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -499,9 +499,10 @@ def request(mods=None,
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return ret
@ -557,7 +558,7 @@ def clear_request(name=None):
req.pop(name)
else:
return False
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -565,9 +566,10 @@ def clear_request(name=None):
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return True

View File

@ -1499,8 +1499,8 @@ class Cloud(object):
vm_name = vm_details['id']
else:
log.debug(
'vm:{0} in provider:{1} is not in name '
'list:\'{2}\''.format(vm_name, driver, names)
'vm:%s in provider:%s is not in name '
'list:\'%s\'', vm_name, driver, names
)
continue

View File

@ -533,10 +533,10 @@ def list_nodes_full(call=None):
image_ref['sku'],
image_ref['version'],
])
except TypeError:
except (TypeError, KeyError):
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except TypeError:
except (TypeError, KeyError):
node['image'] = None
try:
netifaces = node['network_profile']['network_interfaces']

View File

@ -264,6 +264,12 @@ def __virtual__():
if get_dependencies() is False:
return False
__utils__['versions.warn_until'](
'Neon',
'This driver has been deprecated and will be removed in the '
'{version} release of Salt. Please use the openstack driver instead.'
)
return __virtualname__

View File

@ -72,6 +72,7 @@ Or if you need to use a profile to setup some extra stuff, it can be passed as a
username: rackusername
api_key: myapikey
region_name: ORD
auth_type: rackspace_apikey
And this will pull in the profile for rackspace and setup all the correct
options for the auth_url and different api versions for services.
@ -101,6 +102,23 @@ The salt specific ones are:
This is the minimum setup required.
If metadata should be used to make sure that the host has finished setting up,
`wait_for_metadata` can be set.
.. code-block:: yaml
centos:
provider: myopenstack
image: CentOS 7
size: ds1G
ssh_key_name: mykey
ssh_key_file: /root/.ssh/id_rsa
meta:
build_config: rack_user_only
wait_for_metadata:
rax_service_level_automation: Complete
rackconnect_automation_status: DEPLOYED
Anything else from the create_server_ docs can be passed through here.
- **image**: Image dict, name or ID to boot with. image is required
@ -678,12 +696,18 @@ def create(vm_):
data = request_instance(conn=conn, call='action', vm_=vm_)
log.debug('VM is now running')
def __query_node_ip(vm_):
def __query_node(vm_):
data = show_instance(vm_['name'], conn=conn, call='action')
if 'wait_for_metadata' in vm_:
for key, value in six.iteritems(vm_.get('wait_for_metadata', {})):
log.debug('Waiting for metadata: {0}={1}'.format(key, value))
if data['metadata'].get(key, None) != value:
log.debug('Metadata is not ready: {0}={1}'.format(key, data['metadata'].get(key, None)))
return False
return preferred_ip(vm_, data[ssh_interface(vm_)])
try:
ip_address = __utils__['cloud.wait_for_ip'](
__query_node_ip,
ip_address = __utils__['cloud.wait_for_fun'](
__query_node,
update_args=(vm_,)
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:

View File

@ -120,7 +120,7 @@ try:
import profitbricks
from profitbricks.client import (
ProfitBricksService, Server,
NIC, Volume, FirewallRule,
NIC, Volume, FirewallRule, IPBlock,
Datacenter, LoadBalancer, LAN,
PBNotFoundError, PBError
)
@ -348,7 +348,8 @@ def get_size(vm_):
return sizes['Small Instance']
for size in sizes:
if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):
combinations = (six.text_type(sizes[size]['id']), six.text_type(size))
if vm_size and six.text_type(vm_size) in combinations:
return sizes[size]
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
@ -568,7 +569,8 @@ def list_nodes(conn=None, call=None):
try:
nodes = conn.list_servers(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get nodes list from datacenter: %s', datacenter_id)
log.error('Failed to get nodes list '
'from datacenter: %s', datacenter_id)
raise
for item in nodes['items']:
@ -624,6 +626,39 @@ def list_nodes_full(conn=None, call=None):
return ret
def reserve_ipblock(call=None, kwargs=None):
'''
Reserve the IP Block
'''
if call == 'action':
raise SaltCloudSystemExit(
'The reserve_ipblock function must be called with -f or '
'--function.'
)
conn = get_conn()
if kwargs is None:
kwargs = {}
ret = {}
ret['ips'] = []
if kwargs.get('location') is None:
raise SaltCloudExecutionFailure('The "location" parameter is required')
location = kwargs.get('location')
size = 1
if kwargs.get('size') is not None:
size = kwargs.get('size')
block = conn.reserve_ipblock(IPBlock(size=size, location=location))
for item in block['properties']['ips']:
ret['ips'].append(item)
return ret
def show_instance(name, call=None):
'''
Show the details from the provider concerning an instance
@ -675,12 +710,14 @@ def _get_nics(vm_):
firewall_rules = []
# Set LAN to public if it already exists, otherwise create a new
# public LAN.
lan_id = set_public_lan(int(vm_['public_lan']))
if 'public_firewall_rules' in vm_:
firewall_rules = _get_firewall_rules(vm_['public_firewall_rules'])
nics.append(NIC(lan=lan_id,
nic = NIC(lan=set_public_lan(int(vm_['public_lan'])),
name='public',
firewall_rules=firewall_rules))
firewall_rules=firewall_rules)
if 'public_ips' in vm_:
nic.ips = _get_ip_addresses(vm_['public_ips'])
nics.append(nic)
if 'private_lan' in vm_:
firewall_rules = []
@ -689,7 +726,9 @@ def _get_nics(vm_):
nic = NIC(lan=int(vm_['private_lan']),
name='private',
firewall_rules=firewall_rules)
if 'nat' in vm_:
if 'private_ips' in vm_:
nic.ips = _get_ip_addresses(vm_['private_ips'])
if 'nat' in vm_ and 'private_ips' not in vm_:
nic.nat = vm_['nat']
nics.append(nic)
return nics
@ -1112,8 +1151,6 @@ def _get_system_volume(vm_):
'''
Construct VM system volume list from cloud profile config
'''
# Retrieve list of SSH public keys
ssh_keys = get_public_keys(vm_)
# Override system volume size if 'disk_size' is defined in cloud profile
disk_size = get_size(vm_)['disk']
@ -1124,10 +1161,17 @@ def _get_system_volume(vm_):
volume = Volume(
name='{0} Storage'.format(vm_['name']),
size=disk_size,
disk_type=get_disk_type(vm_),
ssh_keys=ssh_keys
disk_type=get_disk_type(vm_)
)
if 'image_password' in vm_:
image_password = vm_['image_password']
volume.image_password = image_password
# Retrieve list of SSH public keys
ssh_keys = get_public_keys(vm_)
volume.ssh_keys = ssh_keys
if 'image_alias' in vm_.keys():
volume.image_alias = vm_['image_alias']
else:
@ -1173,6 +1217,17 @@ def _get_data_volumes(vm_):
return ret
def _get_ip_addresses(ip_addresses):
'''
Construct a list of ip address
'''
ret = []
for item in ip_addresses:
ret.append(item)
return ret
def _get_firewall_rules(firewall_rules):
'''
Construct a list of optional firewall rules from the cloud profile.

View File

@ -2684,14 +2684,15 @@ def create(vm_):
non_hostname_chars = compile(r'[^\w-]')
if search(non_hostname_chars, vm_name):
hostName = split(non_hostname_chars, vm_name, maxsplit=1)[0]
domainName = split(non_hostname_chars, vm_name, maxsplit=1)[-1]
else:
hostName = vm_name
domainName = hostName.split('.', 1)[-1]
domainName = domain
if 'Windows' not in object_ref.config.guestFullName:
identity = vim.vm.customization.LinuxPrep()
identity.hostName = vim.vm.customization.FixedName(name=hostName)
identity.domain = domainName if hostName != domainName else domain
identity.domain = domainName
else:
identity = vim.vm.customization.Sysprep()
identity.guiUnattended = vim.vm.customization.GuiUnattended()

View File

@ -653,10 +653,11 @@ VALID_OPTS = {
's3fs_update_interval': int,
'svnfs_update_interval': int,
'git_pillar_base': six.string_types,
'git_pillar_branch': six.string_types,
'git_pillar_env': six.string_types,
'git_pillar_root': six.string_types,
# NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
# git_pillar_root omitted here because their values could conceivably be
# loaded as non-string types, which is OK because git_pillar will normalize
# them to strings. But rather than include all the possible types they
# could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': six.string_types,
@ -668,12 +669,11 @@ VALID_OPTS = {
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
# NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
# their values could conceivably be loaded as non-string types, which is OK
# because gitfs will normalize them to strings. But rather than include all
# the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
'gitfs_mountpoint': six.string_types,
'gitfs_root': six.string_types,
'gitfs_base': six.string_types,
'gitfs_user': six.string_types,
'gitfs_password': six.string_types,
'gitfs_insecure_auth': bool,
'gitfs_privkey': six.string_types,
'gitfs_pubkey': six.string_types,
@ -888,11 +888,14 @@ VALID_OPTS = {
'winrepo_dir': six.string_types,
'winrepo_dir_ng': six.string_types,
'winrepo_cachefile': six.string_types,
# NOTE: winrepo_branch omitted here because its value could conceivably be
# loaded as a non-string type, which is OK because winrepo will normalize
# them to strings. But rather than include all the possible types it could
# be, we'll just skip type-checking.
'winrepo_cache_expire_max': int,
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
'winrepo_branch': six.string_types,
'winrepo_ssl_verify': bool,
'winrepo_user': six.string_types,
'winrepo_password': six.string_types,
@ -1639,6 +1642,7 @@ DEFAULT_MASTER_OPTS = {
'eauth_acl_module': '',
'eauth_tokens': 'localfs',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,

View File

@ -84,8 +84,7 @@ def dropfile(cachedir, user=None):
'''
dfn = os.path.join(cachedir, '.dfn')
# set a mask (to avoid a race condition on file creation) and store original.
mask = os.umask(191)
try:
with salt.utils.files.set_umask(0o277):
log.info('Rotating AES key')
if os.path.isfile(dfn):
log.info('AES key rotation already requested')
@ -103,8 +102,6 @@ def dropfile(cachedir, user=None):
os.chown(dfn, uid, -1)
except (KeyError, ImportError, OSError, IOError):
pass
finally:
os.umask(mask) # restore original umask
def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
@ -138,17 +135,19 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
if not os.access(keydir, os.W_OK):
raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser()))
cumask = os.umask(0o277)
with salt.utils.files.set_umask(0o277):
if HAS_M2:
# if passphrase is empty or None use no cipher
if not passphrase:
gen.save_pem(priv, cipher=None)
else:
gen.save_pem(priv, cipher='des_ede3_cbc', callback=lambda x: six.b(passphrase))
gen.save_pem(
priv,
cipher='des_ede3_cbc',
callback=lambda x: salt.utils.stringutils.to_bytes(passphrase))
else:
with salt.utils.files.fopen(priv, 'wb+') as f:
f.write(gen.exportKey('PEM', passphrase))
os.umask(cumask)
if HAS_M2:
gen.save_pub_key(pub)
else:

View File

@ -202,10 +202,9 @@ def mk_key(opts, user):
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.files.set_umask(0o277):
with salt.utils.files.fopen(keyfile, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(key))
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.

View File

@ -69,11 +69,11 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
Execute the given engine in a new process
'''
def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
def __init__(self, opts, fun, config, funcs, runners, proxy, **kwargs):
'''
Set up the process executor
'''
super(Engine, self).__init__(log_queue=log_queue)
super(Engine, self).__init__(**kwargs)
self.opts = opts
self.config = config
self.fun = fun
@ -93,17 +93,21 @@ class Engine(SignalHandlingMultiprocessingProcess):
state['funcs'],
state['runners'],
state['proxy'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'fun': self.fun,
'config': self.config,
'funcs': self.funcs,
'runners': self.runners,
'proxy': self.proxy,
'log_queue': self.log_queue}
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''

View File

@ -244,13 +244,13 @@ def start(token,
- ``html``: send the output as HTML
- ``code``: send the output as code
This can be overriden when executing a command, using the ``--out-type`` argument.
This can be overridden when executing a command, using the ``--out-type`` argument.
.. versionadded:: 2017.7.0
outputter: ``nested``
The format to display the data, using the outputters available on the CLI.
This argument can also be overriden when executing a command, using the ``--out`` option.
This argument can also be overridden when executing a command, using the ``--out`` option.
.. versionadded:: 2017.7.0

View File

@ -143,8 +143,7 @@ class Client(object):
saltenv,
path)
destdir = os.path.dirname(dest)
cumask = os.umask(63)
with salt.utils.files.set_umask(0o077):
# remove destdir if it is a regular file to avoid an OSError when
# running os.makedirs below
if os.path.isfile(destdir):
@ -158,7 +157,6 @@ class Client(object):
raise
yield dest
os.umask(cumask)
def get_cachedir(self, cachedir=None):
if cachedir is None:

View File

@ -557,7 +557,7 @@ class Fileserver(object):
if '../' in path:
return fnd
if salt.utils.url.is_escaped(path):
# don't attempt to find URL query arguements in the path
# don't attempt to find URL query arguments in the path
path = salt.utils.url.unescape(path)
else:
if '?' in path:

View File

@ -15,7 +15,6 @@ from __future__ import absolute_import, print_function, unicode_literals
import os
import socket
import sys
import glob
import re
import platform
import logging
@ -71,7 +70,6 @@ __salt__ = {
'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
'cmd.run_ps': salt.modules.cmdmod.powershell,
}
log = logging.getLogger(__name__)
@ -83,9 +81,8 @@ if salt.utils.platform.is_windows():
import wmi # pylint: disable=import-error
import salt.utils.winapi
import win32api
import salt.modules.reg
import salt.utils.win_reg
HAS_WMI = True
__salt__['reg.read_value'] = salt.modules.reg.read_value
except ImportError:
log.exception(
'Unable to import Python wmi module, some core grains '
@ -110,10 +107,10 @@ def _windows_cpudata():
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = __salt__['reg.read_value'](
"HKEY_LOCAL_MACHINE",
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"ProcessorNameString").get('vdata')
grains['cpu_model'] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString").get('vdata')
return grains
@ -821,6 +818,10 @@ def _virtual(osdata):
fhr_contents = fhr.read()
if ':/lxc/' in fhr_contents:
grains['virtual_subtype'] = 'LXC'
elif ':/kubepods/' in fhr_contents:
grains['virtual_subtype'] = 'kubernetes'
elif ':/libpod_parent/' in fhr_contents:
grains['virtual_subtype'] = 'libpod'
else:
if any(x in fhr_contents
for x in (':/system.slice/docker', ':/docker/',
@ -1494,6 +1495,12 @@ def os_data():
)
elif salt.utils.path.which('supervisord') in init_cmdline:
grains['init'] = 'supervisord'
elif salt.utils.path.which('dumb-init') in init_cmdline:
# https://github.com/Yelp/dumb-init
grains['init'] = 'dumb-init'
elif salt.utils.path.which('tini') in init_cmdline:
# https://github.com/krallin/tini
grains['init'] = 'tini'
elif init_cmdline == ['runit']:
grains['init'] = 'runit'
elif '/sbin/my_init' in init_cmdline:
@ -1921,13 +1928,18 @@ def fqdns():
interface_data=_INTERFACES)
addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False,
interface_data=_INTERFACES))
err_message = 'Exception during resolving address: %s'
for ip in addresses:
try:
fqdns.add(socket.gethostbyaddr(ip)[0])
except (socket.error, socket.herror,
socket.gaierror, socket.timeout) as e:
log.error("Exception during resolving address: " + str(e))
fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0]))
except socket.herror as err:
if err.errno == 1:
# No FQDN for this IP address, so we don't need to know this all the time.
log.debug("Unable to resolve address %s: %s", ip, err)
else:
log.error(err_message, err)
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, err)
grains['fqdns'] = sorted(list(fqdns))
return grains
@ -2494,123 +2506,3 @@ def default_gateway():
except Exception:
continue
return grains
def fc_wwn():
'''
Return list of fiber channel HBA WWNs
'''
grains = {}
grains['fc_wwn'] = False
if salt.utils.platform.is_linux():
grains['fc_wwn'] = _linux_wwns()
elif salt.utils.platform.is_windows():
grains['fc_wwn'] = _windows_wwns()
return grains
def iscsi_iqn():
'''
Return iSCSI IQN
'''
grains = {}
grains['iscsi_iqn'] = False
if salt.utils.platform.is_linux():
grains['iscsi_iqn'] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains['iscsi_iqn'] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains['iscsi_iqn'] = _aix_iqn()
return grains
def _linux_iqn():
'''
Return iSCSI IQN from a Linux host.
'''
ret = []
initiator = '/etc/iscsi/initiatorname.iscsi'
try:
with salt.utils.files.fopen(initiator, 'r') as _iscsi:
for line in _iscsi:
line = line.strip()
if line.startswith('InitiatorName='):
ret.append(line.split('=', 1)[1])
except IOError as ex:
if ex.errno != os.errno.ENOENT:
log.debug("Error while accessing '%s': %s", initiator, ex)
return ret
def _aix_iqn():
'''
Return iSCSI IQN from an AIX host.
'''
ret = []
aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name'
aixret = __salt__['cmd.run'](aixcmd)
if aixret[0].isalpha():
try:
ret.append(aixret.split()[1].rstrip())
except IndexError:
pass
return ret
def _linux_wwns():
'''
Return Fibre Channel port WWNs from a Linux host.
'''
ret = []
for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
with salt.utils.files.fopen(fcfile, 'r') as _wwn:
for line in _wwn:
ret.append(line.rstrip()[2:])
return ret
def _windows_iqn():
'''
Return iSCSI IQN from a Windows host.
'''
ret = []
wmic = salt.utils.path.which('wmic')
if not wmic:
return ret
namespace = r'\\root\WMI'
mspath = 'MSiSCSIInitiator_MethodClass'
get = 'iSCSINodeName'
cmdret = __salt__['cmd.run_all'](
'{0} /namespace:{1} path {2} get {3} /format:table'.format(
wmic, namespace, mspath, get))
for line in cmdret['stdout'].splitlines():
if line.startswith('iqn.'):
line = line.rstrip()
ret.append(line.rstrip())
return ret
def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -ErrorAction Stop -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []
cmdret = __salt__['cmd.run_ps'](ps_cmd)
for line in cmdret:
ret.append(line.rstrip())
return ret

View File

@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
'''
Grains for Fibre Channel WWNs. On Windows this runs a PowerShell command that
queries WMI to get the available Fibre Channel WWNs.
.. versionadded:: 2018.3.0
To enable these grains set ``fibre_channel_grains: True``.
.. code-block:: yaml
fibre_channel_grains: True
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import glob
import logging
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
import salt.utils.files
__virtualname__ = 'fibre_channel'
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
if __opts__.get('fibre_channel_grains', False) is False:
return False
else:
return __virtualname__
def _linux_wwns():
'''
Return Fibre Channel port WWNs from a Linux host.
'''
ret = []
for fc_file in glob.glob('/sys/class/fc_host/*/port_name'):
with salt.utils.files.fopen(fc_file, 'r') as _wwn:
content = _wwn.read()
for line in content.splitlines():
ret.append(line.rstrip()[2:])
return ret
def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -ErrorAction Stop ' \
r'-class MSFC_FibrePortHBAAttributes ' \
r'-namespace "root\WMI" | ' \
r'Select -Expandproperty Attributes | ' \
r'%{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []
cmd_ret = salt.modules.cmdmod.powershell(ps_cmd)
for line in cmd_ret:
ret.append(line.rstrip())
return ret
def fibre_channel_wwns():
'''
Return list of fiber channel HBA WWNs
'''
grains = {'fc_wwn': False}
if salt.utils.platform.is_linux():
grains['fc_wwn'] = _linux_wwns()
elif salt.utils.platform.is_windows():
grains['fc_wwn'] = _windows_wwns()
return grains
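# A short usage sketch (default minion config assumed): once
# fibre_channel_grains: True is set in the minion config, the resulting grain
# can be refreshed and read like any other grain.
import salt.client
caller = salt.client.Caller()
caller.cmd('saltutil.refresh_grains')   # pick up the newly enabled grain module
print(caller.cmd('grains.get', 'fc_wwn'))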

salt/grains/iscsi.py Normal file
View File

@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
'''
Grains for iSCSI Qualified Names (IQN).
.. versionadded:: 2018.3.0
To enable these grains set `iscsi_grains: True`.
.. code-block:: yaml
iscsi_grains: True
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
import salt.utils.platform
__virtualname__ = 'iscsi'
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
if __opts__.get('iscsi_grains', False) is False:
return False
else:
return __virtualname__
def iscsi_iqn():
'''
Return iSCSI IQN
'''
grains = {}
grains['iscsi_iqn'] = False
if salt.utils.platform.is_linux():
grains['iscsi_iqn'] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains['iscsi_iqn'] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains['iscsi_iqn'] = _aix_iqn()
return grains
def _linux_iqn():
'''
Return iSCSI IQN from a Linux host.
'''
ret = []
initiator = '/etc/iscsi/initiatorname.iscsi'
try:
with salt.utils.files.fopen(initiator, 'r') as _iscsi:
for line in _iscsi:
line = line.strip()
if line.startswith('InitiatorName='):
ret.append(line.split('=', 1)[1])
except IOError as ex:
if ex.errno != os.errno.ENOENT:
log.debug("Error while accessing '%s': %s", initiator, ex)
return ret
def _aix_iqn():
'''
Return iSCSI IQN from an AIX host.
'''
ret = []
aix_cmd = 'lsattr -E -l iscsi0 | grep initiator_name'
aix_ret = salt.modules.cmdmod.run(aix_cmd)
if aix_ret[0].isalpha():
try:
ret.append(aix_ret.split()[1].rstrip())
except IndexError:
pass
return ret
def _windows_iqn():
'''
Return iSCSI IQN from a Windows host.
'''
ret = []
wmic = salt.utils.path.which('wmic')
if not wmic:
return ret
namespace = r'\\root\WMI'
path = 'MSiSCSIInitiator_MethodClass'
get = 'iSCSINodeName'
cmd_ret = salt.modules.cmdmod.run_all(
'{0} /namespace:{1} path {2} get {3} /format:table'
''.format(wmic, namespace, path, get))
for line in cmd_ret['stdout'].splitlines():
if line.startswith('iqn.'):
line = line.rstrip()
ret.append(line.rstrip())
return ret
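For the Windows branch the interesting step is trimming the wmic table output down to the IQN rows; a small hypothetical helper (not part of the module above) showing just that filter:

    def parse_wmic_iqns(stdout):
        # keep only rows that look like IQNs, e.g. 'iqn.1991-05.com.microsoft:minion'
        return [line.strip() for line in stdout.splitlines()
                if line.strip().startswith('iqn.')]

    print(parse_wmic_iqns('iSCSINodeName\niqn.1991-05.com.microsoft:minion\n'))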

View File

@ -57,7 +57,10 @@ def _groupname():
Grain for the minion groupname
'''
if grp:
try:
groupname = grp.getgrgid(os.getgid()).gr_name
except KeyError:
groupname = ''
else:
groupname = ''
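grp.getgrgid raises KeyError when the process GID has no entry in the group database (common in minimal containers), which is exactly what the new guard covers. A compact sketch of the same pattern, POSIX only:

    import grp
    import os

    def minion_groupname():
        # fall back to an empty name when the GID is not in the group database
        try:
            return grp.getgrgid(os.getgid()).gr_name
        except KeyError:
            return ''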

View File

@ -326,7 +326,7 @@ def host(proxy=None):
.. note::
The diference betwen ``host`` and ``hostname`` is that
The difference between ``host`` and ``hostname`` is that
``host`` provides the physical location - either domain name or IP address,
while ``hostname`` provides the hostname as configured on the device.
They are not necessarily the same.

View File

@ -1044,7 +1044,7 @@ class RaetKey(Key):
'''
Use libnacl to generate and safely save a private key
'''
import libnacl.dual # pylint: disable=3rd-party-module-not-gated
import libnacl.dual # pylint: disable=import-error,3rd-party-module-not-gated
d_key = libnacl.dual.DualSecret()
keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname,
keysize, user)
@ -1440,14 +1440,13 @@ class RaetKey(Key):
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
c_umask = os.umask(191)
with salt.utils.files.set_umask(0o277):
if os.path.exists(path):
#mode = os.stat(path).st_mode
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
with salt.utils.files.fopen(path, 'w+b') as fp_:
with salt.utils.files.fopen(path, 'w+') as fp_:
fp_.write(self.serial.dumps(keydata))
os.chmod(path, stat.S_IRUSR)
os.umask(c_umask)
def delete_local(self):
'''
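The change above swaps the manual save/restore of os.umask for a context manager. A minimal sketch of such a helper, assuming the real salt.utils.files.set_umask behaves along these lines:

    import contextlib
    import os

    @contextlib.contextmanager
    def set_umask(mask):
        # apply the mask for the duration of the block, then restore the old one
        orig = os.umask(mask)
        try:
            yield
        finally:
            os.umask(orig)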

View File

@ -22,6 +22,7 @@ from zipimport import zipimporter
import salt.config
import salt.syspaths
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
@ -651,7 +652,7 @@ def _load_cached_grains(opts, cfn):
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
cached_grains = serial.load(fp_)
cached_grains = salt.utils.data.decode(serial.load(fp_), preserve_tuples=True)
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
@ -791,7 +792,7 @@ def grains(opts, force_refresh=False, proxy=None):
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
@ -813,13 +814,12 @@ def grains(opts, force_refresh=False, proxy=None):
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
os.umask(cumask)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
return grains_data
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
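The cached-grains path now round-trips the deserialized payload through salt.utils.data.decode(..., preserve_tuples=True). As a rough illustration only (not the actual Salt helper), a recursive decoder that keeps tuples as tuples could look like:

    def decode(data, encoding='utf-8', preserve_tuples=True):
        # recursively turn bytes into text, keeping container types intact
        if isinstance(data, bytes):
            return data.decode(encoding)
        if isinstance(data, dict):
            return {decode(k, encoding, preserve_tuples): decode(v, encoding, preserve_tuples)
                    for k, v in data.items()}
        if isinstance(data, tuple):
            decoded = (decode(i, encoding, preserve_tuples) for i in data)
            return tuple(decoded) if preserve_tuples else list(decoded)
        if isinstance(data, list):
            return [decode(i, encoding, preserve_tuples) for i in data]
        return data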

View File

@ -117,6 +117,7 @@ __EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
__MP_LOGGING_LEVEL = GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
@ -820,6 +821,34 @@ def set_multiprocessing_logging_queue(queue):
__MP_LOGGING_QUEUE = queue
def get_multiprocessing_logging_level():
return __MP_LOGGING_LEVEL
def set_multiprocessing_logging_level(log_level):
global __MP_LOGGING_LEVEL
__MP_LOGGING_LEVEL = log_level
def set_multiprocessing_logging_level_by_opts(opts):
'''
This will set the multiprocessing logging level to the lowest
logging level of all the types of logging that are configured.
'''
global __MP_LOGGING_LEVEL
log_levels = [
LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR),
LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
]
for level in six.itervalues(opts.get('log_granular_levels', {})):
log_levels.append(
LOG_LEVELS.get(level.lower(), logging.ERROR)
)
__MP_LOGGING_LEVEL = min(log_levels)
def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
@ -883,11 +912,13 @@ def setup_multiprocessing_logging(queue=None):
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest to get all messages
logging.root.setLevel(logging.GARBAGE)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
'under PID: %s', os.getpid()
'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal
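set_multiprocessing_logging_level_by_opts picks the most verbose (numerically lowest) level among all configured outputs, so the queue forwards everything any handler might need. A standalone sketch of that selection, assuming a simple LOG_LEVELS name-to-number map:

    import logging

    LOG_LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO,
                  'warning': logging.WARNING, 'error': logging.ERROR}

    def lowest_configured_level(opts):
        levels = [
            LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR),
            LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR),
        ]
        for level in opts.get('log_granular_levels', {}).values():
            levels.append(LOG_LEVELS.get(level.lower(), logging.ERROR))
        return min(levels)

    assert lowest_configured_level({'log_level': 'warning',
                                    'log_granular_levels': {'salt': 'debug'}}) == logging.DEBUG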

View File

@ -139,13 +139,13 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(log_queue=log_queue)
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
@ -159,11 +159,18 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
@ -578,9 +585,8 @@ class Master(SMaster):
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
prev_umask = os.umask(0o077)
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
os.umask(prev_umask)
except OSError:
pass
@ -709,6 +715,7 @@ class Master(SMaster):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
@ -758,13 +765,13 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, log_queue=None):
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(log_queue=log_queue)
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
@ -772,11 +779,18 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['hopts'], log_queue=state['log_queue'])
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'hopts': self.hopts,
'log_queue': self.log_queue}
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
@ -791,7 +805,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
@ -802,7 +816,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(log_queue=log_queue)
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
@ -814,15 +828,24 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], state['key'], state['mkey'],
log_queue=state['log_queue'], secrets=state['secrets'])
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'secrets': self.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
@ -834,6 +857,8 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
@ -864,6 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
@ -945,7 +971,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(log_queue=state['log_queue'])
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
@ -954,13 +983,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'secrets': SMaster.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
@ -1452,7 +1484,7 @@ class AESFuncs(object):
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
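The pattern repeated across Maintenance, Halite, ReqServer and MWorker is the same: carry both the logging queue and its level through __getstate__/__setstate__ so a spawned Windows child can re-initialize logging. A stripped-down sketch with an illustrative class (not one of the Salt classes):

    import pickle

    class Worker(object):
        def __init__(self, opts, log_queue=None, log_queue_level=None):
            self.opts = opts
            self.log_queue = log_queue
            self.log_queue_level = log_queue_level

        def __getstate__(self):
            return {'opts': self.opts,
                    'log_queue': self.log_queue,
                    'log_queue_level': self.log_queue_level}

        def __setstate__(self, state):
            self.__init__(state['opts'],
                          log_queue=state['log_queue'],
                          log_queue_level=state['log_queue_level'])

    # the level survives the pickle round-trip that Windows process spawning relies on
    clone = pickle.loads(pickle.dumps(Worker({'id': 'master'}, log_queue_level=10)))
    assert clone.log_queue_level == 10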

View File

@ -699,7 +699,7 @@ def cmd_zip(zip_file, sources, template=None, cwd=None, runas=None):
@salt.utils.decorators.depends('zipfile', fallback_function=cmd_zip)
def zip_(zip_file, sources, template=None, cwd=None, runas=None):
def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False):
'''
Uses the ``zipfile`` Python module to create zip files
@ -744,6 +744,14 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
Create the zip file as the specified user. Defaults to the user under
which the minion is running.
zip64 : False
Used to enable ZIP64 support, necessary to create archives larger than
4 GByte in size.
If true, will create ZIP file with the ZIP64 extension when the zipfile
is larger than 2 GB.
ZIP64 extension is disabled by default in the Python native zip support
because the default zip and unzip commands on Unix (the InfoZIP utilities)
don't support these extensions.
CLI Example:
@ -788,7 +796,7 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
try:
exc = None
archived_files = []
with contextlib.closing(zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)) as zfile:
with contextlib.closing(zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, zip64)) as zfile:
for src in sources:
if cwd:
src = os.path.join(cwd, src)
@ -828,6 +836,12 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
if exc is not None:
# Wait to raise the exception until euid/egid are restored to avoid
# permission errors in writing to minion log.
if exc == zipfile.LargeZipFile:
raise CommandExecutionError(
'Resulting zip file too large, would require ZIP64 support '
'which has not been enabled. Rerun command with zip64=True'
)
else:
raise CommandExecutionError(
'Exception encountered creating zipfile: {0}'.format(exc)
)
@ -1077,8 +1091,7 @@ def unzip(zip_file,
if not salt.utils.platform.is_windows():
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
umask_ = os.umask(0)
os.umask(umask_)
umask_ = salt.utils.files.get_umask()
if target.endswith('/'):
perm = 0o777 & ~umask_
else:
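The fourth positional argument to zipfile.ZipFile is allowZip64, which is what the new zip64 flag feeds through, and zipfile.LargeZipFile is the exception translated into the friendlier error above. A minimal sketch outside Salt:

    import contextlib
    import zipfile

    def make_zip(zip_file, sources, zip64=False):
        try:
            with contextlib.closing(
                    zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, zip64)) as zfile:
                for src in sources:
                    zfile.write(src)
        except zipfile.LargeZipFile:
            raise RuntimeError(
                'Resulting zip file too large, would require ZIP64 support '
                'which has not been enabled. Rerun with zip64=True')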

View File

@ -927,7 +927,7 @@ def _wipe(dev):
def _wait(lfunc, log_lvl=None, log_msg=None, tries=10):
'''
Wait for lfunc to be True
:return: True if lfunc succeeded within tries, False if it didnt
:return: True if lfunc succeeded within tries, False if it didn't
'''
i = 0
while i < tries:

View File

@ -169,9 +169,7 @@ def get_all_alarms(region=None, prefix=None, key=None, keyid=None,
continue
name = prefix + alarm["name"]
del alarm["name"]
alarm_sls = []
alarm_sls.append({"name": name})
alarm_sls.append({"attributes": alarm})
alarm_sls = [{"name": name}, {"attributes": alarm}]
results["manage alarm " + name] = {"boto_cloudwatch_alarm.present":
alarm_sls}
return _safe_dump(results)

View File

@ -657,31 +657,25 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
'''
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
filter_parameters = {'filters': {}}
if image_ids:
filter_parameters['image_ids'] = [image_ids]
if executable_by:
filter_parameters['executable_by'] = [executable_by]
if owners:
filter_parameters['owners'] = [owners]
if ami_name:
filter_parameters['filters']['name'] = ami_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria %s matched the following '
'images:%s', filter_parameters, images)
if images:
if return_objs:
return images
@ -689,7 +683,13 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
else:
return False
except boto.exception.BotoServerError as exc:
log.error(exc)
if exc.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False
return False
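The loop above retries only on the AWS 'Throttling' error code and gives up after 30 attempts. Reduced to its essentials (hypothetical describe callable, not the boto API itself):

    import time

    def call_with_throttle_retry(describe, retries=30, delay=5):
        # retry only while AWS reports throttling; re-raise anything else
        while retries:
            try:
                return describe()
            except Exception as exc:  # boto.exception.BotoServerError in the module above
                if getattr(exc, 'error_code', None) == 'Throttling':
                    time.sleep(delay)
                    retries -= 1
                    continue
                raise
        return False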

View File

@ -2642,7 +2642,7 @@ def _maybe_set_tags(tags, obj):
def _maybe_set_dns(conn, vpcid, dns_support, dns_hostnames):
if dns_support:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_support=dns_support)
log.debug('DNS spport was set to: %s on vpc %s', dns_support, vpcid)
log.debug('DNS support was set to: %s on vpc %s', dns_support, vpcid)
if dns_hostnames:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_hostnames=dns_hostnames)
log.debug('DNS hostnames was set to: %s on vpc %s', dns_hostnames, vpcid)

View File

@ -425,7 +425,7 @@ def _merge_list_of_dict(first, second, prepend=True):
if first and not second:
return first
# Determine overlaps
# So we dont change the position of the existing terms/filters
# So we don't change the position of the existing terms/filters
overlaps = []
merged = []
appended = []

View File

@ -919,6 +919,7 @@ def version(name, check_remote=False, source=None, pre_versions=False):
salt "*" chocolatey.version <package name> check_remote=True
'''
installed = list_(narrow=name, local_only=True)
installed = {k.lower(): v for k, v in installed.items()}
packages = {}
lower_name = name.lower()
@ -928,6 +929,7 @@ def version(name, check_remote=False, source=None, pre_versions=False):
if check_remote:
available = list_(narrow=name, pre_versions=pre_versions, source=source)
available = {k.lower(): v for k, v in available.items()}
for pkg in packages:
packages[pkg] = {'installed': installed[pkg],
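Both the installed and the available maps are lower-cased before lookup, so 'Git' and 'git' resolve to the same entry. A tiny sketch of that normalisation (hypothetical helper):

    def lookup(name, packages):
        # case-insensitive package lookup
        lowered = {k.lower(): v for k, v in packages.items()}
        return lowered.get(name.lower())

    assert lookup('Git', {'git': '2.16.2'}) == '2.16.2'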

View File

@ -36,6 +36,7 @@ import salt.utils.timed_subprocess
import salt.utils.user
import salt.utils.versions
import salt.utils.vt
import salt.utils.win_reg
import salt.grains.extra
from salt.ext import six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
@ -529,7 +530,7 @@ def _run(cmd,
if python_shell is None:
python_shell = False
kwargs = {'cwd': cwd,
new_kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env if six.PY3 else salt.utils.data.encode(run_env),
'stdin': six.text_type(stdin) if stdin is not None else stdin,
@ -540,6 +541,9 @@ def _run(cmd,
'bg': bg,
}
if 'stdin_raw_newlines' in kwargs:
new_kwargs['stdin_raw_newlines'] = kwargs['stdin_raw_newlines']
if umask is not None:
_umask = six.text_type(umask).lstrip('0')
@ -555,7 +559,7 @@ def _run(cmd,
_umask = None
if runas or group or umask:
kwargs['preexec_fn'] = functools.partial(
new_kwargs['preexec_fn'] = functools.partial(
salt.utils.user.chugid_and_umask,
runas,
_umask,
@ -564,9 +568,9 @@ def _run(cmd,
if not salt.utils.platform.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if kwargs['shell'] is True:
kwargs['executable'] = shell
kwargs['close_fds'] = True
if new_kwargs['shell'] is True:
new_kwargs['executable'] = shell
new_kwargs['close_fds'] = True
if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
@ -594,14 +598,13 @@ def _run(cmd,
if not use_vt:
# This is where the magic happens
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
proc = salt.utils.timed_subprocess.TimedProc(cmd, **new_kwargs)
except (OSError, IOError) as exc:
msg = (
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: '.format(
cmd if output_loglevel is not None
else 'REDACTED',
kwargs
cmd if output_loglevel is not None else 'REDACTED',
new_kwargs
)
)
try:
@ -677,11 +680,11 @@ def _run(cmd,
ret['stdout'] = out
ret['stderr'] = err
else:
to = ''
formatted_timeout = ''
if timeout:
to = ' (timeout: {0}s)'.format(timeout)
formatted_timeout = ' (timeout: {0}s)'.format(timeout)
if output_loglevel is not None:
msg = 'Running {0} in VT{1}'.format(cmd, to)
msg = 'Running {0} in VT{1}'.format(cmd, formatted_timeout)
log.debug(log_callback(msg))
stdout, stderr = '', ''
now = time.time()
@ -690,18 +693,20 @@ def _run(cmd,
else:
will_timeout = -1
try:
proc = salt.utils.vt.Terminal(cmd,
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=kwargs.get('preexec_fn', None),
preexec_fn=new_kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True)
stream_stderr=True
)
ret['pid'] = proc.pid
while proc.has_unread_data:
try:
@ -1027,6 +1032,14 @@ def run(cmd,
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -1260,6 +1273,15 @@ def shell(cmd,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -1466,6 +1488,15 @@ def run_stdout(cmd,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -1655,6 +1686,15 @@ def run_stderr(cmd,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -1868,6 +1908,15 @@ def run_all(cmd,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -2048,6 +2097,15 @@ def retcode(cmd,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -2287,6 +2345,15 @@ def script(source,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -2521,6 +2588,15 @@ def script_retcode(source,
the return code will be overridden with zero.
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
@ -3027,9 +3103,9 @@ def shell_info(shell, list_modules=False):
# Ensure ret['installed'] always has a value of True, False or None (not sure)
ret = {'installed': False}
if salt.utils.platform.is_windows() and shell == 'powershell':
pw_keys = __salt__['reg.list_keys'](
'HKEY_LOCAL_MACHINE',
'Software\\Microsoft\\PowerShell')
pw_keys = salt.utils.win_reg.list_keys(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell')
pw_keys.sort(key=int)
if len(pw_keys) == 0:
return {
@ -3038,15 +3114,15 @@ def shell_info(shell, list_modules=False):
'installed': False,
}
for reg_ver in pw_keys:
install_data = __salt__['reg.read_value'](
'HKEY_LOCAL_MACHINE',
'Software\\Microsoft\\PowerShell\\{0}'.format(reg_ver),
'Install')
install_data = salt.utils.win_reg.read_value(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell\\{0}'.format(reg_ver),
vname='Install')
if install_data.get('vtype') == 'REG_DWORD' and \
install_data.get('vdata') == 1:
details = __salt__['reg.list_values'](
'HKEY_LOCAL_MACHINE',
'Software\\Microsoft\\PowerShell\\{0}\\'
details = salt.utils.win_reg.list_values(
hive='HKEY_LOCAL_MACHINE',
key='Software\\Microsoft\\PowerShell\\{0}\\'
'PowerShellEngine'.format(reg_ver))
# reset data, want the newest version details only as powershell
@ -3319,6 +3395,14 @@ def powershell(cmd,
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
:returns:
:dict: A dictionary of data returned by the powershell command.
@ -3334,6 +3418,9 @@ def powershell(cmd,
python_shell = True
# Append PowerShell Object formatting
# ConvertTo-JSON is only available on PowerShell 3.0 and later
psversion = shell_info('powershell')['psversion']
if salt.utils.versions.version_cmp(psversion, '2.0') == 1:
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
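ConvertTo-JSON only exists on PowerShell 3.0 and later, hence the version gate above; the try/catch wrapper in the next hunk then emits a literal "{}" (an empty JSON object) on failure. A standalone sketch of how the command string could be assembled (illustrative helper, not the Salt function):

    def wrap_powershell(cmd, psversion, depth=None):
        # append JSON conversion only when the running PowerShell supports it
        major = int(str(psversion).split('.')[0])
        if major > 2:
            cmd += ' | ConvertTo-JSON'
            if depth is not None:
                cmd += ' -Depth {0}'.format(depth)
        # non-terminating errors still need -ErrorAction Stop inside cmd to hit the catch
        return 'try {' + cmd + '} catch { "{}" }'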
@ -3353,7 +3440,7 @@ def powershell(cmd,
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = 'try {' + cmd + '} catch { "{}" | ConvertTo-JSON}'
cmd = 'try {' + cmd + '} catch { "{}" }'
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
@ -3612,6 +3699,14 @@ def powershell_all(cmd,
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
:return: A dictionary with the following entries:
result
@ -3863,6 +3958,14 @@ def run_bg(cmd,
.. versionadded:: Fluorine
:param bool stdin_raw_newlines : False
Normally, newlines present in ``stdin`` as ``\\n`` will be 'unescaped',
i.e. replaced with a ``\n``. Set this parameter to ``True`` to leave
the newlines as-is. This should be used when you are supplying data
using ``stdin`` that should not be modified.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash

View File

@ -289,12 +289,14 @@ def raw_cron(user):
# Preserve line endings
lines = sdecode(__salt__['cmd.run_stdout'](cmd,
runas=user,
ignore_retcode=True,
rstrip=False,
python_shell=False)).splitlines(True)
else:
cmd = 'crontab -u {0} -l'.format(user)
# Preserve line endings
lines = sdecode(__salt__['cmd.run_stdout'](cmd,
ignore_retcode=True,
rstrip=False,
python_shell=False)).splitlines(True)

View File

@ -1369,7 +1369,7 @@ def login(*registries):
# information is added to the config.json, since docker-py isn't designed
# to do so.
registry_auth = __pillar__.get('docker-registries', {})
ret = {}
ret = {'retcode': 0}
errors = ret.setdefault('Errors', [])
if not isinstance(registry_auth, dict):
errors.append('\'docker-registries\' Pillar value must be a dictionary')
@ -1427,6 +1427,8 @@ def login(*registries):
errors.append(login_cmd['stderr'])
elif login_cmd['stdout']:
errors.append(login_cmd['stdout'])
if errors:
ret['retcode'] = 1
return ret
@ -3798,6 +3800,7 @@ def rm_(name, force=False, volumes=False, **kwargs):
kwargs = __utils__['args.clean_kwargs'](**kwargs)
stop_ = kwargs.pop('stop', False)
timeout = kwargs.pop('timeout', None)
auto_remove = False
if kwargs:
__utils__['args.invalid_kwargs'](kwargs)
@ -3807,8 +3810,18 @@ def rm_(name, force=False, volumes=False, **kwargs):
'remove this container'.format(name)
)
if stop_ and not force:
inspect_results = inspect_container(name)
try:
auto_remove = inspect_results['HostConfig']['AutoRemove']
except KeyError:
log.error(
'Failed to find AutoRemove in inspect results, Docker API may '
'have changed. Full results: %s', inspect_results
)
stop(name, timeout=timeout)
pre = ps_(all=True)
if not auto_remove:
_client_wrapper('remove_container', name, v=volumes, force=force)
_clear_context()
return [x for x in pre if x not in ps_(all=True)]
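When a container was started with HostConfig.AutoRemove, the daemon deletes it as soon as it stops, so a second explicit removal would fail; that is what the inspection guard above avoids. A compact sketch of the same flow using docker-py style client methods (assumed, hedged):

    def stop_and_remove(client, name, force=False, volumes=False, timeout=None):
        info = client.inspect_container(name)
        auto_remove = info.get('HostConfig', {}).get('AutoRemove', False)
        client.stop(name, timeout=timeout)
        if not auto_remove:
            # the daemon has not already removed it, so do it explicitly
            client.remove_container(name, v=volumes, force=force)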
@ -4505,7 +4518,7 @@ def pull(image,
time_started = time.time()
response = _client_wrapper('pull', image, **kwargs)
ret = {'Time_Elapsed': time.time() - time_started}
ret = {'Time_Elapsed': time.time() - time_started, 'retcode': 0}
_clear_context()
if not response:
@ -4538,6 +4551,7 @@ def pull(image,
if errors:
ret['Errors'] = errors
ret['retcode'] = 1
return ret
@ -4600,7 +4614,7 @@ def push(image,
time_started = time.time()
response = _client_wrapper('push', image, **kwargs)
ret = {'Time_Elapsed': time.time() - time_started}
ret = {'Time_Elapsed': time.time() - time_started, 'retcode': 0}
_clear_context()
if not response:
@ -4632,6 +4646,7 @@ def push(image,
if errors:
ret['Errors'] = errors
ret['retcode'] = 1
return ret
@ -4703,9 +4718,11 @@ def rmi(*names, **kwargs):
_clear_context()
ret = {'Layers': [x for x in pre_images if x not in images(all=True)],
'Tags': [x for x in pre_tags if x not in list_tags()]}
'Tags': [x for x in pre_tags if x not in list_tags()],
'retcode': 0}
if errors:
ret['Errors'] = errors
ret['retcode'] = 1
return ret
@ -6848,7 +6865,7 @@ def sls_build(repository,
.. versionadded:: 2018.3.0
dryrun: False
when set to True the container will not be commited at the end of
when set to True the container will not be committed at the end of
the build. The dry run succeeds even when the state contains errors.
**RETURN DATA**

View File

@ -220,7 +220,7 @@ def rm_(key, recurse=False, profile=None, **kwargs):
'''
.. versionadded:: 2014.7.0
Delete a key from etcd. Returns True if the key was deleted, False if it wasn
Delete a key from etcd. Returns True if the key was deleted, False if it was
not and None if there was a failure.
CLI Example:

View File

@ -5414,9 +5414,7 @@ def manage_file(name,
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
with salt.utils.files.set_umask(0o077 if mode else None):
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
@ -5436,9 +5434,6 @@ def manage_file(name,
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
@ -5471,8 +5466,7 @@ def manage_file(name,
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
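salt.utils.files.get_umask reads the current umask without leaving it changed, which is what the inline two-step os.umask dance used to do. A minimal sketch of that helper and of the default mode computed from it above:

    import os

    def get_umask():
        # os.umask both sets and returns the previous value, so set then restore
        mask = os.umask(0)
        os.umask(mask)
        return mask

    # default file mode derived from the umask, as in manage_file above
    mode = oct((0o777 ^ get_umask()) & 0o666)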

Some files were not shown because too many files have changed in this diff.