Merge branch 'develop' into lvresize

Raphael Kissos 2018-03-26 16:20:46 +03:00 committed by GitHub
commit 1179e7c727
108 changed files with 2556 additions and 3690 deletions

.github/stale.yml

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 780 is approximately 2 years and 2 months
daysUntilStale: 780
# 770 is approximately 2 years and 1 month
daysUntilStale: 770
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7


@ -36,7 +36,7 @@ provisioner:
require_chef: false
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: oxygen
branch: 2018.3
repo: git
testingdir: /testing
salt_copy_filter:


@ -29,6 +29,25 @@ load-plugins=saltpylint.pep8,
# Don't bump this value on PyLint 1.4.0 - Known bug that ignores the passed --rcfile
jobs=1
# List of blacklisted functions and suggested replacements
#
# NOTE: This pylint check will infer the full name of the function by walking
# back up from the function name to the parent, to the parent's parent, etc.,
# and this means that functions which come from platform-specific modules need
# to be referenced using name of the module from which the function was
# imported. This happens a lot in the os and os.path modules. Functions from
# os.path should be defined using posixpath.funcname and ntpath.funcname, while
# functions from os should be defined using posix.funcname and nt.funcname.
#
# When defining a blacklisted function, the format is:
#
# <func_name>=<suggested_replacement>
#
# The replacement text will be included in the alert message.
#
blacklisted-functions=posix.umask=salt.utils.files.set_umask or get_umask,
nt.umask=salt.utils.files.set_umask or get_umask
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
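For context, a minimal sketch of the replacement pattern this check points at (not part of the diff), assuming only what the later hunks show: salt.utils.files.set_umask() is a context manager that applies the given umask and restores the previous one on exit.

import salt.utils.files

def write_owner_only_file(path, data):
    # Hypothetical helper, illustration only. Files created inside the block
    # are masked down to owner-only access (umask 0o077); the previous umask
    # is restored automatically, replacing the manual os.umask()/restore
    # pairs removed elsewhere in this commit.
    with salt.utils.files.set_umask(0o077):
        with salt.utils.files.fopen(path, 'w+') as fp_:
            fp_.write(data)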

Binary file not shown (438 KiB before, 240 KiB after).

@ -341,10 +341,15 @@ rst_prolog = """\
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownload| raw:: html
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg"><strong>salt-{release}-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg.md5"><strong>md5</strong></a></p>
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=release)


@ -83,8 +83,8 @@ Glossary
to the system. State module functions should be idempotent. Some
state module functions, such as :mod:`cmd.run <salt.states.cmd.run>`
are not idempotent by default but can be made idempotent with the
proper use of requisites such as :ref:```unless`` <unless-requisite>`
and :ref:```onlyif`` <onlyif-requisite>`. For more information, *see*
proper use of requisites such as :ref:`unless <unless-requisite>`
and :ref:`onlyif <onlyif-requisite>`. For more information, *see*
`wikipedia <https://en.wikipedia.org/wiki/Idempotent>`_.
Jinja


@ -334,6 +334,7 @@ execution modules
publish
puppet
purefa
purefb
pushbullet
pushover_notify
pw_group


@ -0,0 +1,6 @@
===================
salt.modules.purefb
===================
.. automodule:: salt.modules.purefb
:members:


@ -344,7 +344,35 @@ be set in the configuration file to enable interfacing with GoGrid:
OpenStack
---------
.. automodule:: salt.cloud.clouds.openstack
Using Salt for OpenStack relies on the `shade <https://docs.openstack.org/shade/latest/>`_ driver managed by the
openstack-infra team.
This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`_.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
cloud: mycloud
Or by just configuring the same auth block directly in the cloud provider config.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
auth:
username: 'demo'
password: secret
project_name: 'demo'
auth_url: 'http://openstack/identity'
Both of these methods support using the
`vendor <https://docs.openstack.org/os-client-config/latest/user/vendor-support.html>`_
options.
For more information, see the :mod:`OpenStack Cloud Driver docs <salt.cloud.clouds.openstack>`.
DigitalOcean
------------


@ -0,0 +1,5 @@
==============================
Getting Started with Openstack
==============================
.. automodule:: salt.cloud.clouds.openstack


@ -1,188 +0,0 @@
==============================
Getting Started With Rackspace
==============================
Rackspace is a major public cloud platform which may be configured using
the `openstack` driver.
Dependencies
============
* Libcloud >= 0.13.2
Configuration
=============
To use the `openstack` driver (recommended), set up the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/rackspace.conf``:
.. code-block:: yaml
my-rackspace-config:
# Set the location of the salt-master
#
minion:
master: saltmaster.example.com
# Configure Rackspace using the OpenStack plugin
#
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
compute_name: cloudServersOpenStack
protocol: ipv4
# Set the compute region:
#
compute_region: DFW
# Configure Rackspace authentication credentials
#
user: myname
tenant: 123456
apikey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
driver: openstack
.. note::
.. versionchanged:: 2015.8.0
The ``provider`` parameter in cloud provider definitions was renamed to ``driver``. This
change was made to avoid confusion with the ``provider`` parameter that is used in cloud profile
definitions. Cloud provider definitions now use ``driver`` to refer to the Salt cloud module that
provides the underlying functionality to connect to a cloud host, while cloud profiles continue
to use ``provider`` to refer to provider configurations that you define.
Compute Region
==============
Rackspace currently has six compute regions which may be used:
.. code-block:: bash
DFW -> Dallas/Forth Worth
ORD -> Chicago
SYD -> Sydney
LON -> London
IAD -> Northern Virginia
HKG -> Hong Kong
Note: Currently the LON region is only available with a UK account, and UK accounts cannot access other regions
Authentication
==============
The ``user`` is the same user as is used to log into the Rackspace Control
Panel. The ``tenant`` and ``apikey`` can be found in the API Keys area of the
Control Panel. The ``apikey`` will be labeled as API Key (and may need to be
generated), and ``tenant`` will be labeled as Cloud Account Number.
An initial profile can be configured in ``/etc/salt/cloud.profiles`` or
``/etc/salt/cloud.profiles.d/rackspace.conf``:
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
To instantiate a machine based on this profile:
.. code-block:: bash
# salt-cloud -p openstack_512 myinstance
This will create a virtual machine at Rackspace with the name ``myinstance``.
This operation may take several minutes to complete, depending on the current
load at the Rackspace data center.
Once the instance has been created with salt-minion installed, connectivity to
it can be verified with Salt:
.. code-block:: bash
# salt myinstance test.ping
RackConnect Environments
------------------------
Rackspace offers a hybrid hosting configuration option called RackConnect that
allows you to use a physical firewall appliance with your cloud servers. When
this service is in use the public_ip assigned by nova will be replaced by a NAT
ip on the firewall. For salt-cloud to work properly it must use the newly
assigned "access ip" instead of the Nova assigned public ip. You can enable that
capability by adding this to your profiles:
.. code-block:: yaml
openstack_512:
provider: my-openstack-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
rackconnect: True
Managed Cloud Environments
--------------------------
Rackspace offers a managed service level of hosting. As part of the managed
service level you have the ability to choose from base or LAMP installations on
cloud server images. The post-build process for both the base and the LAMP
installations uses Chef to install things such as the cloud monitoring agent and
the cloud backup agent. It also takes care of installing the LAMP stack if
selected. In order to prevent the post-installation process from stomping over
the bootstrapping you can add the below to your profiles.
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
managedcloud: True
First and Next Generation Images
--------------------------------
Rackspace provides two sets of virtual machine images, *first*, and *next*
generation. As of ``0.8.9`` salt-cloud will default to using the *next*
generation images. To force the use of first generation images, on the profile
configuration please add:
.. code-block:: yaml
FreeBSD-9.0-512:
provider: my-rackspace-config
size: 512 MB Standard
image: FreeBSD 9.0
force_first_gen: True
Private Subnets
---------------
By default salt-cloud will not add Rackspace private networks to new servers. To enable
a private network to a server instantiated by salt cloud, add the following section
to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)
.. code-block:: yaml
networks:
- fixed:
# This is the private network
- private-network-id
# This is Rackspace's "PublicNet"
- 00000000-0000-0000-0000-000000000000
# This is Rackspace's "ServiceNet"
- 11111111-1111-1111-1111-111111111111
To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.
The order of the networks in the above code block does not map to the order of the
ethernet devices on newly created servers. Public IP will always be first ( eth0 )
followed by servicenet ( eth1 ) and then private networks.
Enabling the private network per above gives the option of using the private subnet for
all master-minion communication, including the bootstrap install of salt-minion. To
enable the minion to use the private subnet, update the master: line in the minion:
section of the providers file. To configure the master to only listen on the private
subnet IP, update the interface: line in the /etc/salt/master file to be the private
subnet IP of the salt master.


@ -8,7 +8,8 @@ Installation from the Official SaltStack Repository
===================================================
**Latest stable build from the selected branch**:
|osxdownload|
|osxdownloadpy2|
|osxdownloadpy3|
The output of ``md5 <salt pkg>`` should match the contents of the
corresponding md5 file.


@ -409,7 +409,7 @@ module. This module includes several functions, each of them with their own
use. These functions include:
- :py:func:`pillar.item <salt.modules.pillar.item>` - Retrieves the value of
one or more keys from the :ref:`in-memory pillar datj <pillar-in-memory>`.
one or more keys from the :ref:`in-memory pillar data <pillar-in-memory>`.
- :py:func:`pillar.items <salt.modules.pillar.items>` - Compiles a fresh pillar
dictionary and returns it, leaving the :ref:`in-memory pillar data
<pillar-in-memory>` untouched. If pillar keys are passed to this function


@ -665,6 +665,37 @@ The Windows installer will now display command-line help when a help switch
Salt Cloud Features
-------------------
OpenStack Revamp
================
The OpenStack driver has been rewritten mostly from scratch. Salt now uses
the `shade driver <https://docs.openstack.org/shade/latest/>`_.
With this change, the ``nova`` driver is deprecated in favor of the
:mod:`openstack driver <salt.cloud.clouds.openstack>`.
Several new modules and states have also been added for managing OpenStack
setups using shade:
:mod:`keystone <salt.modules.keystoneng>`
:mod:`keystone role grant <salt.states.keystone_role_grant>`
:mod:`keystone group <salt.states.keystone_group>`
:mod:`keystone role <salt.states.keystone_role>`
:mod:`keystone service <salt.states.keystone_service>`
:mod:`keystone user <salt.states.keystone_user>`
:mod:`keystone domain <salt.states.keystone_domain>`
:mod:`keystone project <salt.states.keystone_project>`
:mod:`keystone endpoint <salt.states.keystone_endpoint>`
:mod:`glance <salt.modules.glanceng>`
:mod:`glance_image <salt.states.glance_image>`
:mod:`neutron <salt.modules.neutronng>`
:mod:`neutron subnet <salt.states.neutron_subnet>`
:mod:`neutron secgroup <salt.states.neutron_secgroup>`
:mod:`neutron secgroup rule <salt.states.neutron_secgroup_rule>`
:mod:`neutron network <salt.states.neutron_network>`
Pre-Flight Commands
===================
@ -1557,6 +1588,14 @@ PyCrypto is used as it was in the previous releases. M2Crypto is used in the
same way as PyCrypto so there would be no compatibility issues, different nodes
could use different backends.
NaCL Module and Runner changes
------------------------------
In addition to argument changes in both the NaCL module and runner (in
preparation for deprecation in the Fluorine release), the default ``box_type``
has changed from ``secretbox`` to ``sealedbox``. SecretBox encrypts data using
the private key ``sk``, while SealedBox encrypts using the public key ``pk``.
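As a rough illustration (not part of this diff), using the same libnacl calls that appear in the nacl module hunks further down; the key handling is simplified and the variable names are made up:

import libnacl.public
import libnacl.sealed
import libnacl.secret

keypair = libnacl.public.SecretKey()      # generates both sk and pk

# sealedbox (new default): encrypt with the public key pk; only the holder
# of the matching sk can decrypt.
sealed = libnacl.sealed.SealedBox(keypair.pk).encrypt(b'secret data')
plain = libnacl.sealed.SealedBox(keypair).decrypt(sealed)

# secretbox (old default): the same secret key sk both encrypts and decrypts.
box = libnacl.secret.SecretBox(keypair.sk)
opened = box.decrypt(box.encrypt(b'secret data'))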
Deprecations
------------
@ -1617,6 +1656,15 @@ The ``win_service`` module had the following changes:
- The ``type`` option was removed from the ``create`` function. Please use
``service_type`` instead.
The ``nacl`` module had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
Runner Deprecations
===================
@ -1625,6 +1673,14 @@ The ``manage`` runner had the following changes:
- The ``root_user`` kwarg was removed from the ``bootstrap`` function. Please
use ``salt-ssh`` roster entries for the host instead.
The ``nacl`` runner had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
State Deprecations
==================


@ -27,6 +27,44 @@ syndic respects :conf_minion:`enable_legacy_startup_events` as well.
Deprecations
------------
Module Deprecations
===================
The ``trafficserver`` module had the following changes:
- Support for the ``match_var`` function was removed. Please use the
``match_metric`` function instead.
- Support for the ``read_var`` function was removed. Please use the
``read_config`` function instead.
- Support for the ``set_var`` function was removed. Please use the
``set_config`` function instead.
The ``win_update`` module has been removed. It has been replaced by the
``win_wua`` module.
The ``win_wua`` module had the following changes:
- Support for the ``download_update`` function has been removed. Please use the
``download`` function instead.
- Support for the ``download_updates`` function has been removed. Please use the
``download`` function instead.
- Support for the ``install_update`` function has been removed. Please use the
``install`` function instead.
- Support for the ``install_updates`` function has been removed. Please use the
``install`` function instead.
- Support for the ``list_update`` function has been removed. Please use the
``get`` function instead.
- Support for the ``list_updates`` function has been removed. Please use the
``list`` function instead.
Pillar Deprecations
===================
The ``vault`` pillar had the following changes:
- Support for the ``profile`` argument was removed. Any options passed up until
and following the first ``path=`` are discarded.
Roster Deprecations
===================
@ -37,3 +75,57 @@ The ``cache`` roster had the following changes:
- The ``roster_order`` option now includes IPv6 in addition to IPv4 for the
``private``, ``public``, ``global`` or ``local`` settings. The syntax for these
settings has changed to ``ipv4-*`` or ``ipv6-*``, respectively.
State Deprecations
==================
The ``docker`` state has been removed. The following functions should be used
instead.
- The ``docker.running`` function was removed. Please update applicable SLS files
to use the ``docker_container.running`` function instead.
- The ``docker.stopped`` function was removed. Please update applicable SLS files
to use the ``docker_container.stopped`` function instead.
- The ``docker.absent`` function was removed. Please update applicable SLS files
to use the ``docker_container.absent`` function instead.
- The ``docker.network_present`` function was removed. Please update applicable
SLS files to use the ``docker_network.present`` function instead.
- The ``docker.network_absent`` function was removed. Please update applicable
SLS files to use the ``docker_network.absent`` function instead.
- The ``docker.image_present`` function was removed. Please update applicable SLS
files to use the ``docker_image.present`` function instead.
- The ``docker.image_absent`` function was removed. Please update applicable SLS
files to use the ``docker_image.absent`` function instead.
- The ``docker.volume_present`` function was removed. Please update applicable SLS
files to use the ``docker_volume.present`` function instead.
- The ``docker.volume_absent`` function was removed. Please update applicable SLS
files to use the ``docker_volume.absent`` function instead.
The ``docker_network`` state had the following changes:
- Support for the ``driver`` option has been removed from the ``absent`` function.
This option had no functionality in ``docker_network.absent``.
The ``git`` state had the following changes:
- Support for the ``ref`` option in the ``detached`` state has been removed.
Please use the ``rev`` option instead.
The ``k8s`` state has been removed. The following functions should be used
instead:
- The ``k8s.label_absent`` function was removed. Please update applicable SLS
files to use the ``kubernetes.node_label_absent`` function instead.
- The ``k8s.label_present`` function was removed. Please update applicable SLS
files to use the ``kubernetes.node_label_present`` function instead.
- The ``k8s.label_folder_absent`` function was removed. Please update applicable
SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.
The ``trafficserver`` state had the following changes:
- Support for the ``set_var`` function was removed. Please use the ``config``
function instead.
The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.


@ -79,22 +79,12 @@ from the ``kevinopenstack`` profile above, you would use:
salt-call sdb.get sdb://kevinopenstack/password
Some drivers use slightly more complex URIs. For instance, the ``vault`` driver
requires the full path to where the key is stored, followed by a question mark,
followed by the key to be retrieved. If you were using a profile called
``myvault``, you would use a URI that looks like:
.. code-block:: bash
salt-call sdb.get 'sdb://myvault/secret/salt?saltstack'
Setting a value uses the same URI as would be used to retrieve it, followed
by the value as another argument. For the above ``myvault`` URI, you would set
a new value using a command like:
by the value as another argument.
.. code-block:: bash
salt-call sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-call sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
Deleting values (if supported by the driver) is done pretty much the same way as
getting them. Provided that you have a profile called ``mykvstore`` that uses
@ -109,8 +99,8 @@ the runner system:
.. code-block:: bash
salt-run sdb.get 'sdb://myvault/secret/salt?saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-run sdb.get 'sdb://myvault/secret/salt/saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
salt-run sdb.delete 'sdb://mykvstore/foobar'
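The same URIs can also be used from Python code running under Salt; a rough sketch, assuming an execution-module context where the loader injects __salt__ (the ``myvault`` profile is the hypothetical one from the examples above):

# Sketch only: reading and writing sdb values from a custom execution module.
# __salt__ is provided by Salt's loader at runtime.
def get_deploy_secret():
    return __salt__['sdb.get']('sdb://myvault/secret/salt/saltstack')

def set_deploy_secret(value):
    return __salt__['sdb.set']('sdb://myvault/secret/salt/saltstack', value)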


@ -361,7 +361,7 @@ def groups(username, **kwargs):
[salt.utils.stringutils.to_str(_config('accountattributename')), str('cn')]) # future lint: disable=blacklisted-function
for entry, result in search_results:
for user in result[_config('accountattributename'), _config('groupattribute')]:
for user in result[_config('accountattributename')]:
if username == salt.utils.stringutils.to_unicode(user).split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])


@ -9,11 +9,11 @@
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
# Import Salt libs
import salt.client.netapi
import salt.utils.files
import salt.utils.parsers as parsers
from salt.utils.verify import check_user, verify_files, verify_log
@ -42,9 +42,8 @@ class SaltAPI(parsers.SaltAPIParser):
'udp://',
'file://')):
# Logfile is not using Syslog, verify
current_umask = os.umask(0o027)
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
os.umask(current_umask)
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)


@ -1031,6 +1031,7 @@ class Single(object):
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']
opts_pkg['extension_modules'] = self.opts['extension_modules']
opts_pkg['module_dirs'] = self.opts['module_dirs']
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
opts_pkg['__master_opts__'] = self.context['master_opts']
if '_caller_cachedir' in self.opts:


@ -106,9 +106,11 @@ def need_deployment():
'''
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
try:
os.makedirs(OPTIONS.saltdir)
os.umask(old_umask)
finally:
os.umask(old_umask) # pylint: disable=blacklisted-function
# Verify perms on saltdir
if not is_windows():
euid = os.geteuid()
@ -158,10 +160,10 @@ def unpack_thin(thin_path):
Unpack the Salt thin archive.
'''
tfile = tarfile.TarFile.gzopen(thin_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
except OSError:
@ -189,10 +191,10 @@ def unpack_ext(ext_path):
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
@ -299,7 +301,7 @@ def main(argv): # pylint: disable=W0613
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
@ -313,7 +315,7 @@ def main(argv): # pylint: disable=W0613
else:
subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
if __name__ == '__main__':
sys.exit(main(sys.argv))


@ -491,7 +491,7 @@ def request(mods=None,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -499,9 +499,10 @@ def request(mods=None,
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return ret
@ -557,7 +558,7 @@ def clear_request(name=None):
req.pop(name)
else:
return False
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -565,9 +566,10 @@ def clear_request(name=None):
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return True


@ -264,6 +264,12 @@ def __virtual__():
if get_dependencies() is False:
return False
__utils__['versions.warn_until'](
'Neon',
'This driver has been deprecated and will be removed in the '
'{version} release of Salt. Please use the openstack driver instead.'
)
return __virtualname__


@ -72,6 +72,7 @@ Or if you need to use a profile to setup some extra stuff, it can be passed as a
username: rackusername
api_key: myapikey
region_name: ORD
auth_type: rackspace_apikey
And this will pull in the profile for rackspace and set up all the correct
options for the auth_url and the different API versions for services.
@ -101,6 +102,23 @@ The salt specific ones are:
This is the minimum setup required.
If metadata needs to be checked to make sure that the host has finished
setting up, `wait_for_metadata` can be set.
.. code-block:: yaml
centos:
provider: myopenstack
image: CentOS 7
size: ds1G
ssh_key_name: mykey
ssh_key_file: /root/.ssh/id_rsa
meta:
build_config: rack_user_only
wait_for_metadata:
rax_service_level_automation: Complete
rackconnect_automation_status: DEPLOYED
Anything else from the create_server_ docs can be passed through here.
- **image**: Image dict, name or ID to boot with. image is required
@ -678,12 +696,18 @@ def create(vm_):
data = request_instance(conn=conn, call='action', vm_=vm_)
log.debug('VM is now running')
def __query_node_ip(vm_):
def __query_node(vm_):
data = show_instance(vm_['name'], conn=conn, call='action')
if 'wait_for_metadata' in vm_:
for key, value in six.iteritems(vm_.get('wait_for_metadata', {})):
log.debug('Waiting for metadata: {0}={1}'.format(key, value))
if data['metadata'].get(key, None) != value:
log.debug('Metadata is not ready: {0}={1}'.format(key, data['metadata'].get(key, None)))
return False
return preferred_ip(vm_, data[ssh_interface(vm_)])
try:
ip_address = __utils__['cloud.wait_for_ip'](
__query_node_ip,
ip_address = __utils__['cloud.wait_for_fun'](
__query_node,
update_args=(vm_,)
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:


@ -2684,14 +2684,15 @@ def create(vm_):
non_hostname_chars = compile(r'[^\w-]')
if search(non_hostname_chars, vm_name):
hostName = split(non_hostname_chars, vm_name, maxsplit=1)[0]
domainName = split(non_hostname_chars, vm_name, maxsplit=1)[-1]
else:
hostName = vm_name
domainName = hostName.split('.', 1)[-1]
domainName = domain
if 'Windows' not in object_ref.config.guestFullName:
identity = vim.vm.customization.LinuxPrep()
identity.hostName = vim.vm.customization.FixedName(name=hostName)
identity.domain = domainName if hostName != domainName else domain
identity.domain = domainName
else:
identity = vim.vm.customization.Sysprep()
identity.guiUnattended = vim.vm.customization.GuiUnattended()


@ -653,10 +653,11 @@ VALID_OPTS = {
's3fs_update_interval': int,
'svnfs_update_interval': int,
'git_pillar_base': six.string_types,
'git_pillar_branch': six.string_types,
'git_pillar_env': six.string_types,
'git_pillar_root': six.string_types,
# NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
# git_pillar_root omitted here because their values could conceivably be
# loaded as non-string types, which is OK because git_pillar will normalize
# them to strings. But rather than include all the possible types they
# could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': six.string_types,
@ -668,12 +669,11 @@ VALID_OPTS = {
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
# NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
# their values could conceivably be loaded as non-string types, which is OK
# because gitfs will normalize them to strings. But rather than include all
# the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
'gitfs_mountpoint': six.string_types,
'gitfs_root': six.string_types,
'gitfs_base': six.string_types,
'gitfs_user': six.string_types,
'gitfs_password': six.string_types,
'gitfs_insecure_auth': bool,
'gitfs_privkey': six.string_types,
'gitfs_pubkey': six.string_types,
@ -888,11 +888,14 @@ VALID_OPTS = {
'winrepo_dir': six.string_types,
'winrepo_dir_ng': six.string_types,
'winrepo_cachefile': six.string_types,
# NOTE: winrepo_branch omitted here because its value could conceivably be
# loaded as a non-string type, which is OK because winrepo will normalize
# them to strings. But rather than include all the possible types it could
# be, we'll just skip type-checking.
'winrepo_cache_expire_max': int,
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
'winrepo_branch': six.string_types,
'winrepo_ssl_verify': bool,
'winrepo_user': six.string_types,
'winrepo_password': six.string_types,
@ -1639,6 +1642,7 @@ DEFAULT_MASTER_OPTS = {
'eauth_acl_module': '',
'eauth_tokens': 'localfs',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,


@ -84,8 +84,7 @@ def dropfile(cachedir, user=None):
'''
dfn = os.path.join(cachedir, '.dfn')
# set a mask (to avoid a race condition on file creation) and store original.
mask = os.umask(191)
try:
with salt.utils.files.set_umask(0o277):
log.info('Rotating AES key')
if os.path.isfile(dfn):
log.info('AES key rotation already requested')
@ -103,8 +102,6 @@ def dropfile(cachedir, user=None):
os.chown(dfn, uid, -1)
except (KeyError, ImportError, OSError, IOError):
pass
finally:
os.umask(mask) # restore original umask
def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
@ -138,17 +135,19 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
if not os.access(keydir, os.W_OK):
raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser()))
cumask = os.umask(0o277)
with salt.utils.files.set_umask(0o277):
if HAS_M2:
# if passphrase is empty or None use no cipher
if not passphrase:
gen.save_pem(priv, cipher=None)
else:
gen.save_pem(priv, cipher='des_ede3_cbc', callback=lambda x: six.b(passphrase))
gen.save_pem(
priv,
cipher='des_ede3_cbc',
callback=lambda x: salt.utils.stringutils.to_bytes(passphrase))
else:
with salt.utils.files.fopen(priv, 'wb+') as f:
f.write(gen.exportKey('PEM', passphrase))
os.umask(cumask)
if HAS_M2:
gen.save_pub_key(pub)
else:


@ -202,10 +202,9 @@ def mk_key(opts, user):
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.files.set_umask(0o277):
with salt.utils.files.fopen(keyfile, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(key))
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.


@ -69,11 +69,11 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
Execute the given engine in a new process
'''
def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
def __init__(self, opts, fun, config, funcs, runners, proxy, **kwargs):
'''
Set up the process executor
'''
super(Engine, self).__init__(log_queue=log_queue)
super(Engine, self).__init__(**kwargs)
self.opts = opts
self.config = config
self.fun = fun
@ -93,17 +93,21 @@ class Engine(SignalHandlingMultiprocessingProcess):
state['funcs'],
state['runners'],
state['proxy'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'fun': self.fun,
'config': self.config,
'funcs': self.funcs,
'runners': self.runners,
'proxy': self.proxy,
'log_queue': self.log_queue}
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''


@ -143,8 +143,7 @@ class Client(object):
saltenv,
path)
destdir = os.path.dirname(dest)
cumask = os.umask(63)
with salt.utils.files.set_umask(0o077):
# remove destdir if it is a regular file to avoid an OSError when
# running os.makedirs below
if os.path.isfile(destdir):
@ -158,7 +157,6 @@ class Client(object):
raise
yield dest
os.umask(cumask)
def get_cachedir(self, cachedir=None):
if cachedir is None:


@ -1927,9 +1927,9 @@ def fqdns():
fqdns.add(socket.gethostbyaddr(ip)[0])
except (socket.error, socket.herror,
socket.gaierror, socket.timeout) as e:
log.error("Exception during resolving address: " + str(e))
log.info("Exception during resolving address: " + str(e))
grains['fqdns'] = list(fqdns)
grains['fqdns'] = sorted(list(fqdns))
return grains


@ -1044,7 +1044,7 @@ class RaetKey(Key):
'''
Use libnacl to generate and safely save a private key
'''
import libnacl.dual # pylint: disable=3rd-party-module-not-gated
import libnacl.dual # pylint: disable=import-error,3rd-party-module-not-gated
d_key = libnacl.dual.DualSecret()
keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname,
keysize, user)
@ -1440,14 +1440,13 @@ class RaetKey(Key):
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
c_umask = os.umask(191)
with salt.utils.files.set_umask(0o277):
if os.path.exists(path):
#mode = os.stat(path).st_mode
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
with salt.utils.files.fopen(path, 'w+b') as fp_:
with salt.utils.files.fopen(path, 'w+') as fp_:
fp_.write(self.serial.dumps(keydata))
os.chmod(path, stat.S_IRUSR)
os.umask(c_umask)
def delete_local(self):
'''


@ -22,6 +22,7 @@ from zipimport import zipimporter
import salt.config
import salt.syspaths
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
@ -651,7 +652,7 @@ def _load_cached_grains(opts, cfn):
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
cached_grains = serial.load(fp_)
cached_grains = salt.utils.data.decode(serial.load(fp_))
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
@ -791,7 +792,7 @@ def grains(opts, force_refresh=False, proxy=None):
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
@ -813,13 +814,12 @@ def grains(opts, force_refresh=False, proxy=None):
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
os.umask(cumask)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
return grains_data
return salt.utils.data.decode(grains_data)
# TODO: get rid of? Does anyone use this? You should use raw() instead


@ -117,6 +117,7 @@ __EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
__MP_LOGGING_LEVEL = GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
@ -820,6 +821,37 @@ def set_multiprocessing_logging_queue(queue):
__MP_LOGGING_QUEUE = queue
def get_multiprocessing_logging_level():
return __MP_LOGGING_LEVEL
def set_multiprocessing_logging_level(log_level):
global __MP_LOGGING_LEVEL
__MP_LOGGING_LEVEL = log_level
def set_multiprocessing_logging_level_by_opts(opts):
'''
This will set the multiprocessing logging level to the lowest
logging level of all the types of logging that are configured.
'''
global __MP_LOGGING_LEVEL
log_levels = []
log_levels.append(
LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR)
)
log_levels.append(
LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
)
for level in six.itervalues(opts.get('log_granular_levels', {})):
log_levels.append(
LOG_LEVELS.get(level.lower(), logging.ERROR)
)
__MP_LOGGING_LEVEL = min(log_levels)
def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
@ -883,11 +915,13 @@ def setup_multiprocessing_logging(queue=None):
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest to get all messages
logging.root.setLevel(logging.GARBAGE)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
'under PID: %s', os.getpid()
'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal


@ -139,13 +139,13 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(log_queue=log_queue)
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
@ -159,11 +159,18 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
@ -578,9 +585,8 @@ class Master(SMaster):
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
prev_umask = os.umask(0o077)
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
os.umask(prev_umask)
except OSError:
pass
@ -709,6 +715,7 @@ class Master(SMaster):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
@ -758,13 +765,13 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, log_queue=None):
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(log_queue=log_queue)
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
@ -772,11 +779,18 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['hopts'], log_queue=state['log_queue'])
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'hopts': self.hopts,
'log_queue': self.log_queue}
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
@ -791,7 +805,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
@ -802,7 +816,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(log_queue=log_queue)
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
@ -814,15 +828,24 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], state['key'], state['mkey'],
log_queue=state['log_queue'], secrets=state['secrets'])
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'secrets': self.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
@ -834,6 +857,8 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
@ -864,6 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
@ -945,7 +971,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(log_queue=state['log_queue'])
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
@ -954,13 +983,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'secrets': SMaster.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):


@ -2604,7 +2604,7 @@ class Minion(MinionBase):
def ping_master():
try:
def ping_timeout_handler(*_):
if not self.opts.get('auth_safemode', True):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)


@ -1077,8 +1077,7 @@ def unzip(zip_file,
if not salt.utils.platform.is_windows():
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
umask_ = os.umask(0)
os.umask(umask_)
umask_ = salt.utils.files.get_umask()
if target.endswith('/'):
perm = 0o777 & ~umask_
else:


@ -829,6 +829,7 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
while True:
try:
asgs = conn.get_all_groups(names=[name])
break
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
log.debug('Throttled by AWS API, retrying in 5 seconds...')


@ -657,31 +657,25 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
'''
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
filter_parameters = {'filters': {}}
if image_ids:
filter_parameters['image_ids'] = [image_ids]
if executable_by:
filter_parameters['executable_by'] = [executable_by]
if owners:
filter_parameters['owners'] = [owners]
if ami_name:
filter_parameters['filters']['name'] = ami_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria %s matched the following '
'images:%s', filter_parameters, images)
if images:
if return_objs:
return images
@ -689,7 +683,13 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
else:
return False
except boto.exception.BotoServerError as exc:
log.error(exc)
if exc.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False
return False


@ -12,6 +12,7 @@ import functools
import glob
import logging
import os
import platform
import shutil
import subprocess
import sys
@ -3334,6 +3335,11 @@ def powershell(cmd,
python_shell = True
# Append PowerShell Object formatting
# ConvertTo-JSON is only available on Versions of Windows greater than
# `7.1.7600`. We have to use `platform.version` instead of `__grains__` here
# because this function is called by `salt/grains/core.py` before
# `__grains__` is populated
if salt.utils.versions.version_cmp(platform.version(), '7.1.7600') == 1:
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
@ -3353,7 +3359,7 @@ def powershell(cmd,
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = 'try {' + cmd + '} catch { "{}" | ConvertTo-JSON}'
cmd = 'try {' + cmd + '} catch { "{}" }'
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,


@ -73,7 +73,8 @@ def _run_composer(action,
no_dev=None,
quiet=False,
composer_home='/root',
extra_flags=None):
extra_flags=None,
env=None):
'''
Run PHP's composer with a specific action.
@ -126,6 +127,9 @@ def _run_composer(action,
extra_flags
None, or a string containing extra flags to pass to composer.
env
A list of environment variables to be set prior to execution.
'''
if composer is not None:
if php is None:
@ -185,9 +189,15 @@ def _run_composer(action,
if optimize is True:
cmd.append('--optimize-autoloader')
if env is not None:
env = salt.utils.data.repack_dictlist(env)
env['COMPOSER_HOME'] = composer_home
else:
env = {'COMPOSER_HOME': composer_home}
result = __salt__['cmd.run_all'](cmd,
runas=runas,
env={'COMPOSER_HOME': composer_home},
env=env,
python_shell=False)
if result['retcode'] != 0:
@ -210,7 +220,8 @@ def install(directory,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
composer_home='/root',
env=None):
'''
Install composer dependencies for a directory.
@ -257,6 +268,9 @@ def install(directory,
composer_home
$COMPOSER_HOME environment variable
env
A list of environment variables to be set prior to execution.
CLI Example:
.. code-block:: bash
@ -278,7 +292,8 @@ def install(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home)
composer_home=composer_home,
env=env)
return result
@ -293,7 +308,8 @@ def update(directory,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
composer_home='/root',
env=None):
'''
Update composer dependencies for a directory.
@ -343,6 +359,9 @@ def update(directory,
composer_home
$COMPOSER_HOME environment variable
env
A list of environment variables to be set prior to execution.
CLI Example:
.. code-block:: bash
@ -365,7 +384,8 @@ def update(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home)
composer_home=composer_home,
env=env)
return result


@ -5414,9 +5414,7 @@ def manage_file(name,
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
with salt.utils.files.set_umask(0o077 if mode else None):
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
@ -5436,9 +5434,6 @@ def manage_file(name,
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
@ -5471,8 +5466,7 @@ def manage_file(name,
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
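A worked example of that calculation (illustrative only, not part of the diff), using the get_umask helper this hunk switches to:

import salt.utils.files

mask = salt.utils.files.get_umask()   # e.g. 0o022 on many systems
mode = oct((0o777 ^ mask) & 0o666)    # 0o755 & 0o666 -> '0o644'
# With a stricter umask of 0o077 the result would be (0o700 & 0o666) -> '0o600'.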


@ -103,6 +103,13 @@ def _auth(profile=None, api_version=2, **connection_args):
Only intended to be used within glance-enabled modules
'''
__utils__['versions.warn_until'](
'Neon',
(
'The glance module has been deprecated and will be removed in {version}. '
'Please update to using the glanceng module'
),
)
if profile:
prefix = profile + ":keystone."


@ -513,7 +513,7 @@ if __name__ == '__main__':
sys.exit(1)
os.setsid()
os.umask(0)
os.umask(0o000) # pylint: disable=blacklisted-function
try:
pid = os.fork()


@ -163,6 +163,13 @@ def auth(profile=None, **connection_args):
salt '*' keystone.auth
'''
__utils__['versions.warn_until'](
'Neon',
(
'The keystone module has been deprecated and will be removed in {version}. '
'Please update to using the keystoneng module',
),
)
kwargs = _get_kwargs(profile=profile, **connection_args)
disc = discover.Discover(auth_url=kwargs['auth_url'])


@ -60,7 +60,9 @@ from salt.exceptions import CommandExecutionError
from salt.ext.six import iteritems
from salt.ext import six
import salt.utils.files
import salt.utils.platform
import salt.utils.templates
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import TimeoutError
from salt.ext.six.moves import range # pylint: disable=import-error
@ -223,11 +225,11 @@ def _cleanup_old(**kwargs):
cert = kubernetes.client.configuration.cert_file
key = kubernetes.client.configuration.key_file
if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'):
salt.utils.safe_rm(cert)
salt.utils.files.safe_rm(cert)
if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'):
salt.utils.safe_rm(key)
salt.utils.files.safe_rm(key)
if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'):
salt.utils.safe_rm(ca)
salt.utils.files.safe_rm(ca)
except Exception:
pass


@ -164,7 +164,6 @@ import salt.utils.stringutils
import salt.utils.win_functions
import salt.utils.win_dacl
REQ_ERROR = None
try:
import libnacl.secret
@ -186,9 +185,9 @@ def _get_config(**kwargs):
config = {
'box_type': 'sealedbox',
'sk': None,
'sk_file': '/etc/salt/pki/master/nacl',
'sk_file': os.path.join(__opts__['pki_dir'], 'master/nacl'),
'pk': None,
'pk_file': '/etc/salt/pki/master/nacl.pub',
'pk_file': os.path.join(__opts__['pki_dir'], 'master/nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
@ -233,7 +232,7 @@ def _get_pk(**kwargs):
return base64.b64decode(pubkey)
def keygen(sk_file=None, pk_file=None):
def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
@ -253,6 +252,14 @@ def keygen(sk_file=None, pk_file=None):
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
@ -313,6 +320,25 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
@ -360,6 +386,31 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
@ -414,6 +465,9 @@ def sealedbox_encrypt(data, **kwargs):
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
@ -433,6 +487,10 @@ def sealedbox_decrypt(data, **kwargs):
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
@ -452,6 +510,9 @@ def secretbox_encrypt(data, **kwargs):
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
@ -472,6 +533,10 @@ def secretbox_decrypt(data, **kwargs):
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))
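Each of the sealedbox/secretbox encrypt and decrypt helpers above now coerces its input with salt.utils.stringutils.to_bytes before handing it to libnacl. A simplified, standalone sketch of that coercion (the authoritative helper lives in salt/utils/stringutils.py):

def to_bytes(data, encoding='utf-8'):
    # Leave bytes untouched, encode text; mirrors the coercion added above.
    if isinstance(data, bytes):
        return data
    return data.encode(encoding)

print(to_bytes('secret'))   # b'secret'
print(to_bytes(b'secret'))  # b'secret'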

View File

@ -1618,167 +1618,3 @@ def list_agents(profile=None):
'''
conn = _auth(profile)
return conn.list_agents()
# The following is a list of functions that need to be incorporated in the
# neutron module. This list should be updated as functions are added.
#
# update_ipsec_site_connection
# Updates an IPsecSiteConnection.
# update_ikepolicy Updates an IKEPolicy
# update_ipsecpolicy Updates an IPsecPolicy
# list_vips Fetches a list of all load balancer vips for a tenant.
# show_vip Fetches information of a certain load balancer vip.
# create_vip Creates a new load balancer vip.
# update_vip Updates a load balancer vip.
# delete_vip Deletes the specified load balancer vip.
# list_pools Fetches a list of all load balancer pools for a tenant.
# show_pool Fetches information of a certain load balancer pool.
# create_pool Creates a new load balancer pool.
# update_pool Updates a load balancer pool.
# delete_pool Deletes the specified load balancer pool.
# retrieve_pool_stats Retrieves stats for a certain load balancer pool.
# list_members Fetches a list of all load balancer members for
# a tenant.
# show_member Fetches information of a certain load balancer member.
# create_member Creates a new load balancer member.
# update_member Updates a load balancer member.
# delete_member Deletes the specified load balancer member.
# list_health_monitors Fetches a list of all load balancer health monitors for
# a tenant.
# show_health_monitor Fetches information of a certain load balancer
# health monitor.
# create_health_monitor
# Creates a new load balancer health monitor.
# update_health_monitor
# Updates a load balancer health monitor.
# delete_health_monitor
# Deletes the specified load balancer health monitor.
# associate_health_monitor
# Associate specified load balancer health monitor
# and pool.
# disassociate_health_monitor
# Disassociate specified load balancer health monitor
# and pool.
# create_qos_queue Creates a new queue.
# list_qos_queues Fetches a list of all queues for a tenant.
# show_qos_queue Fetches information of a certain queue.
# delete_qos_queue Deletes the specified queue.
# list_agents Fetches agents.
# show_agent Fetches information of a certain agent.
# update_agent Updates an agent.
# delete_agent Deletes the specified agent.
# list_network_gateways
# Retrieve network gateways.
# show_network_gateway Fetch a network gateway.
# create_network_gateway
# Create a new network gateway.
# update_network_gateway
# Update a network gateway.
# delete_network_gateway
# Delete the specified network gateway.
# connect_network_gateway
# Connect a network gateway to the specified network.
# disconnect_network_gateway
# Disconnect a network from the specified gateway.
# list_gateway_devices Retrieve gateway devices.
# show_gateway_device Fetch a gateway device.
# create_gateway_device
# Create a new gateway device.
# update_gateway_device
# Updates a new gateway device.
# delete_gateway_device
# Delete the specified gateway device.
# list_dhcp_agent_hosting_networks
# Fetches a list of dhcp agents hosting a network.
# list_networks_on_dhcp_agent
# Fetches a list of dhcp agents hosting a network.
# add_network_to_dhcp_agent
# Adds a network to dhcp agent.
# remove_network_from_dhcp_agent
# Remove a network from dhcp agent.
# list_l3_agent_hosting_routers
# Fetches a list of L3 agents hosting a router.
# list_routers_on_l3_agent
# Fetches a list of L3 agents hosting a router.
# add_router_to_l3_agent
# Adds a router to L3 agent.
# list_firewall_rules Fetches a list of all firewall rules for a tenant.
# show_firewall_rule Fetches information of a certain firewall rule.
# create_firewall_rule Creates a new firewall rule.
# update_firewall_rule Updates a firewall rule.
# delete_firewall_rule Deletes the specified firewall rule.
# list_firewall_policies
# Fetches a list of all firewall policies for a tenant.
# show_firewall_policy Fetches information of a certain firewall policy.
# create_firewall_policy
# Creates a new firewall policy.
# update_firewall_policy
# Updates a firewall policy.
# delete_firewall_policy
# Deletes the specified firewall policy.
# firewall_policy_insert_rule
# Inserts specified rule into firewall policy.
# firewall_policy_remove_rule
# Removes specified rule from firewall policy.
# list_firewalls Fetches a list of all firewalls for a tenant.
# show_firewall Fetches information of a certain firewall.
# create_firewall Creates a new firewall.
# update_firewall Updates a firewall.
# delete_firewall Deletes the specified firewall.
# remove_router_from_l3_agent
# Remove a router from l3 agent.
# get_lbaas_agent_hosting_pool
# Fetches a loadbalancer agent hosting a pool.
# list_pools_on_lbaas_agent
# Fetches a list of pools hosted by
# the loadbalancer agent.
# list_service_providers
# Fetches service providers.
# list_credentials Fetch a list of all credentials for a tenant.
# show_credential Fetch a credential.
# create_credential Create a new credential.
# update_credential Update a credential.
# delete_credential Delete the specified credential.
# list_network_profile_bindings
# Fetch a list of all tenants associated for
# a network profile.
# list_network_profiles
# Fetch a list of all network profiles for a tenant.
# show_network_profile Fetch a network profile.
# create_network_profile
# Create a network profile.
# update_network_profile
# Update a network profile.
# delete_network_profile
# Delete the network profile.
# list_policy_profile_bindings
# Fetch a list of all tenants associated for
# a policy profile.
# list_policy_profiles Fetch a list of all network profiles for a tenant.
# show_policy_profile Fetch a network profile.
# update_policy_profile
# Update a policy profile.
# create_metering_label
# Creates a metering label.
# delete_metering_label
# Deletes the specified metering label.
# list_metering_labels Fetches a list of all metering labels for a tenant.
# show_metering_label Fetches information of a certain metering label.
# create_metering_label_rule
# Creates a metering label rule.
# delete_metering_label_rule
# Deletes the specified metering label rule.
# list_metering_label_rules
# Fetches a list of all metering label rules for a label.
# show_metering_label_rule
# Fetches information of a certain metering label rule.
# list_net_partitions Fetch a list of all network partitions for a tenant.
# show_net_partition Fetch a network partition.
# create_net_partition Create a network partition.
# delete_net_partition Delete the network partition.
# create_packet_filter Create a new packet filter.
# update_packet_filter Update a packet filter.
# list_packet_filters Fetch a list of all packet filters for a tenant.
# show_packet_filter Fetch information of a certain packet filter.
# delete_packet_filter Delete the specified packet filter.

View File

@ -42,8 +42,8 @@ def get(key,
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string
except __opts__['pillar_raise_on_missing'] is set to True, in which case a
KeyError will be raised.
unless ``__opts__['pillar_raise_on_missing']`` is set to True, in which
case a ``KeyError`` exception will be raised.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
@ -53,11 +53,18 @@ def get(key,
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
To retrieve the value associated with the ``apache`` key in the ``pkg``
dict, this key can be passed as::
pkg:apache
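A standalone sketch (not Salt's implementation) of the colon-delimited lookup described above:

def pillar_get(pillar, key, default='', delimiter=':'):
    # Walk the nested dict one colon-separated component at a time.
    ptr = pillar
    for part in key.split(delimiter):
        if not isinstance(ptr, dict) or part not in ptr:
            return default
        ptr = ptr[part]
    return ptr

print(pillar_get({'pkg': {'apache': 'httpd'}}, 'pkg:apache'))  # httpd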
key
The pillar key to get value from
default
If specified, return this value when the named pillar value does
not exist.
merge : ``False``
If ``True``, the retrieved values will be merged into the passed
default. When the default and the retrieved value are both

View File

@ -338,6 +338,22 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
return cleanup_requirements, None
def _format_env_vars(env_vars):
ret = {}
if env_vars:
if isinstance(env_vars, dict):
for key, val in six.iteritems(env_vars):
if not isinstance(key, six.string_types):
key = str(key) # future lint: disable=blacklisted-function
if not isinstance(val, six.string_types):
val = str(val) # future lint: disable=blacklisted-function
ret[key] = val
else:
raise CommandExecutionError(
'env_vars {0} is not a dictionary'.format(env_vars))
return ret
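For illustration, a simplified Python 3 rendering of what the new _format_env_vars helper does (the real code iterates via six and raises CommandExecutionError for non-dict input):

def format_env_vars(env_vars):
    # Coerce every key and value to a plain string.
    if not isinstance(env_vars, dict):
        # The real helper raises CommandExecutionError here.
        raise TypeError('env_vars {0} is not a dictionary'.format(env_vars))
    return {str(key): str(val) for key, val in env_vars.items()}

print(format_env_vars({'PATH': '/usr/bin', 42: 7}))
# {'PATH': '/usr/bin', '42': '7'}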
def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
requirements=None,
bin_env=None,
@ -811,16 +827,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if env_vars:
if isinstance(env_vars, dict):
for key, val in six.iteritems(env_vars):
if not isinstance(key, six.string_types):
key = str(key) # future lint: disable=blacklisted-function
if not isinstance(val, six.string_types):
val = str(val) # future lint: disable=blacklisted-function
cmd_kwargs.setdefault('env', {})[key] = val
else:
raise CommandExecutionError(
'env_vars {0} is not a dictionary'.format(env_vars))
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
try:
if cwd:
@ -974,7 +981,8 @@ def uninstall(pkgs=None,
def freeze(bin_env=None,
user=None,
cwd=None,
use_vt=False):
use_vt=False,
env_vars=None):
'''
Return a list of installed packages either globally or in the specified
virtualenv
@ -1027,6 +1035,8 @@ def freeze(bin_env=None,
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
@ -1038,7 +1048,8 @@ def freeze(bin_env=None,
def list_(prefix=None,
bin_env=None,
user=None,
cwd=None):
cwd=None,
env_vars=None):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@ -1067,7 +1078,7 @@ def list_(prefix=None,
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines

View File

@ -83,6 +83,9 @@ def _to_unicode(vdata):
Converts from current users character encoding to unicode. Use this for
parameters being pass to reg functions
'''
# None does not convert to Unicode
if vdata is None:
return None
return salt.utils.stringutils.to_unicode(vdata, 'utf-8')
@ -526,13 +529,13 @@ def set_value(hive,
# https://www.python.org/dev/peps/pep-0237/
# String Types to Unicode
if vtype_value in [1, 2]:
if vtype_value in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]:
local_vdata = _to_unicode(vdata)
# Don't touch binary...
elif vtype_value == 3:
elif vtype_value == win32con.REG_BINARY:
local_vdata = vdata
# Make sure REG_MULTI_SZ is a list of strings
elif vtype_value == 7:
elif vtype_value == win32con.REG_MULTI_SZ:
local_vdata = [_to_unicode(i) for i in vdata]
# Everything else is int
else:
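The named win32con constants substituted above stand for the numeric registry value types the old code compared against, as this check illustrates (requires pywin32 on Windows):

import win32con

# Numeric values behind the named registry type constants used above.
assert win32con.REG_SZ == 1
assert win32con.REG_EXPAND_SZ == 2
assert win32con.REG_BINARY == 3
assert win32con.REG_MULTI_SZ == 7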
@ -686,7 +689,6 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)

View File

@ -58,7 +58,7 @@ SCHEDULE_CONF = [
'after',
'return_config',
'return_kwargs',
'run_on_start'
'run_on_start',
'skip_during_range',
'run_after_skip_range',
]
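The added trailing comma matters: without it Python concatenates the adjacent string literals, so the old SCHEDULE_CONF silently contained one bogus entry instead of two valid ones. Trimmed illustration:

OLD = ['return_kwargs', 'run_on_start' 'skip_during_range', 'run_after_skip_range']
NEW = ['return_kwargs', 'run_on_start', 'skip_during_range', 'run_after_skip_range']
print(OLD[1])              # run_on_startskip_during_range  (one merged, bogus entry)
print(len(OLD), len(NEW))  # 3 4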

View File

@ -778,7 +778,7 @@ def request(mods=None,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -790,7 +790,6 @@ def request(mods=None,
'Unable to write state request file %s. Check permission.',
notify_path
)
os.umask(cumask)
return ret
@ -844,7 +843,7 @@ def clear_request(name=None):
req.pop(name)
else:
return False
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -856,7 +855,6 @@ def clear_request(name=None):
'Unable to write state request file %s. Check permission.',
notify_path
)
os.umask(cumask)
return True
@ -1249,13 +1247,12 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
umask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
if kwargs.get('cache'):
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, 'rb') as fp_:
high_ = serial.load(fp_)
return st_.state.call_high(high_, orchestration_jid)
os.umask(umask)
mods = salt.utils.args.split_input(mods)
@ -1280,7 +1277,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__['cachedir'], 'sls.p')
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -1309,7 +1306,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
'Unable to write to highstate cache file %s. Do you have permissions?',
cfn
)
os.umask(cumask)
_snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre)
return ret
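These hunks replace the manual save/restore of the process umask with the salt.utils.files.set_umask context manager. A minimal sketch of such a context manager, assuming it simply applies the mask and restores the previous one on exit (the authoritative implementation lives in salt/utils/files.py):

import contextlib
import os

@contextlib.contextmanager
def set_umask(mask):
    # Apply the new umask and remember the previous one.
    orig = os.umask(mask)
    try:
        yield
    finally:
        # Restore the previous umask even if the block raises.
        os.umask(orig)

with set_umask(0o077):
    with open('/tmp/statereq.p', 'w') as fh_:  # hypothetical path
        fh_.write('...')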

View File

@ -16,7 +16,6 @@ import subprocess
# Import salt libs
import salt.utils.path
import salt.utils.stringutils
import salt.utils.versions
__virtualname__ = 'trafficserver'
@ -203,28 +202,6 @@ def restart_local(drain=False):
return _subprocess(cmd)
def match_var(regex):
'''
Display the current values of all performance statistics or configuration
variables whose names match the given regular expression.
.. deprecated:: Fluorine
Use ``match_metric`` or ``match_config`` instead.
.. code-block:: bash
salt '*' trafficserver.match_var regex
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'match_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'match_metric\' or \'match_config\' instead.'
)
cmd = _traffic_line('-m', regex)
log.debug('Running: %s', cmd)
return _subprocess(cmd)
def match_metric(regex):
'''
Display the current values of all metrics whose names match the
@ -345,55 +322,6 @@ def set_config(variable, value):
return _subprocess(cmd)
def read_var(*args):
'''
Read variable definitions from the traffic_line command.
.. deprecated:: Fluorine
Use ``read_metric`` or ``read_config`` instead. Note that this
function does not work for Traffic Server versions >= 7.0.
.. code-block:: bash
salt '*' trafficserver.read_var proxy.process.http.tcp_hit_count_stat
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'read_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'read_metric\' or \'read_config\' instead.'
)
ret = {}
try:
for arg in args:
log.debug('Querying: %s', arg)
cmd = '{0} {1} {2}'.format(_TRAFFICLINE, '-r', arg)
ret[arg] = _subprocess(cmd)
except KeyError:
pass
return ret
def set_var(variable, value):
'''
.. code-block:: bash
.. deprecated:: Fluorine
Use ``set_config`` instead. Note that this function does
not work for Traffic Server versions >= 7.0.
salt '*' trafficserver.set_var proxy.config.http.server_ports
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'set_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'set_config\' instead.'
)
return set_config(variable, value)
def shutdown():
'''
Shut down Traffic Server on the local node.

View File

@ -98,7 +98,7 @@ def _find_utmp():
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
if result > 0:
if len(result):
return result[sorted(result).pop()]
else:
return False
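The old test compared a dict against an integer, which raises TypeError on Python 3 and, on Python 2, does not depend on the dict's contents at all; checking the length is what was intended:

result = {}
print(bool(len(result)))          # False -> fall through to `return False`
result[1521234567.0] = '/var/run/utmp'
print(bool(len(result)))          # True  -> return the newest utmp path
# Under Python 3 the old expression `result > 0` raises:
# TypeError: '>' not supported between instances of 'dict' and 'int'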

View File

@ -410,8 +410,7 @@ def _qemu_image_create(vm_name,
log.debug('Copying %s to %s', sfn, img_dest)
salt.utils.files.copyfile(sfn, img_dest)
mask = os.umask(0)
os.umask(mask)
mask = salt.utils.files.get_umask()
if disk_size and qcow2:
log.debug('Resize qcow2 image to %sM', disk_size)
@ -433,8 +432,7 @@ def _qemu_image_create(vm_name,
else:
# Create empty disk
try:
mask = os.umask(0)
os.umask(mask)
mask = salt.utils.files.get_umask()
if disk_size:
log.debug('Create empty image with size %sM', disk_size)
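Both hunks in this file drop the set-and-restore idiom for reading the current umask in favour of a helper; a minimal sketch, assuming salt.utils.files.get_umask wraps exactly that idiom:

import os

def get_umask():
    # The only portable way to read the umask is to set it and put it back.
    mask = os.umask(0)
    os.umask(mask)
    return mask

print(oct(get_umask()))  # e.g. 0o022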

View File

@ -1,750 +0,0 @@
# -*- coding: utf-8 -*-
'''
Module for running windows updates.
This module is being deprecated and will be removed in Salt Fluorine. Please use
the ``win_wua`` module instead.
:depends: - win32com
- win32con
- win32api
- pywintypes
.. versionadded:: 2014.7.0
Set windows updates to run by category. Default behavior is to install
all updates that do not require user interaction to complete.
Optionally set ``categories`` to a category of your choice to only
install certain updates. Default is to set to install all available but driver updates.
The following example will install all Security and Critical Updates,
and download but not install standard updates.
.. code-block:: bash
salt '*' win_update.install_updates categories="['Critical Updates', 'Security Updates']"
You can also specify a number of features about the update to have a
fine grain approach to specific types of updates. These are the following
features/states of updates available for configuring:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, included by default
'present' - Present on computer, included by default
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - Skip hidden updates, skipped by default
'software' - Software updates, included by default
'driver' - Driver updates, included by default
The following example installs all updates that don't require a reboot:
.. code-block:: bash
salt '*' win_update.install_updates skips="[{'reboot':True}]"
Once installed Salt will return a similar output:
.. code-block:: bash
2 : Windows Server 2012 Update (KB123456)
4 : Internet Explorer Security Update (KB098765)
2 : Malware Definition Update (KB321456)
...
The number at the beginning of the line is an OperationResultCode from the Windows Update Agent,
its enumeration is described here: https://msdn.microsoft.com/en-us/library/windows/desktop/aa387095(v=vs.85).aspx.
The result code is then followed by the update name and its KB identifier.
'''
# pylint: disable=invalid-name,missing-docstring
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=no-name-in-module,redefined-builtin
try:
import win32com.client
import pythoncom
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# pylint: enable=import-error
# Import Salt libs
import salt.utils.platform
import salt.utils.locales
import salt.utils.versions
log = logging.getLogger(__name__)
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
salt.utils.versions.warn_until(
'Fluorine',
'The \'win_update\' module is being deprecated and will be removed '
'in Salt {version}. Please use the \'win_wua\' module instead.'
)
return True
return (False, "Module win_update: module has failed dependencies or is not on Windows client")
def _gather_update_categories(updateCollection):
'''
this is a convenience method to gather what categories of updates are available in any update
collection it is passed. Typically though, the download_collection.
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
'''
categories = []
for i in range(updateCollection.Count):
update = updateCollection.Item(i)
for j in range(update.Categories.Count):
name = update.Categories.Item(j).Name
if name not in categories:
log.debug('found category: %s', name)
categories.append(name)
return categories
class PyWinUpdater(object):
def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
skipInstalled=True, skipReboot=False, skipPresent=False,
skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
log.debug('CoInitializing the pycom system')
pythoncom.CoInitialize()
self.skipUI = skipUI
self.skipDownloaded = skipDownloaded
self.skipInstalled = skipInstalled
self.skipReboot = skipReboot
self.skipPresent = skipPresent
self.skipHidden = skipHidden
self.skipSoftwareUpdates = skipSoftwareUpdates
self.skipDriverUpdates = skipDriverUpdates
# the list of categories that the user wants to be searched for.
self.categories = categories
# the list of categories that are present in the updates found.
self.foundCategories = []
# careful not to get those two confused.
log.debug('dispatching update_session to keep the session object.')
self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
log.debug('update_session got. Now creating a win_searcher to seek out the updates')
self.win_searcher = self.update_session.CreateUpdateSearcher()
# list of updates that are applicable by current settings.
self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# list of updates to be installed.
self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# the object responsible for fetching the actual downloads.
self.win_downloader = self.update_session.CreateUpdateDownloader()
self.win_downloader.Updates = self.download_collection
# the object responsible for the installing of the updates.
self.win_installer = self.update_session.CreateUpdateInstaller()
self.win_installer.Updates = self.install_collection
# the results of the download process
self.download_results = None
# the results of the installation process
self.install_results = None
# search results from CreateUpdateSearcher()
self.search_results = None
def Search(self, searchString):
try:
log.debug('beginning search of the passed string: %s', searchString)
self.search_results = self.win_searcher.Search(searchString)
log.debug('search completed successfully.')
except Exception as exc:
log.info('search for updates failed. %s', exc)
return exc
log.debug('parsing results. %s updates were found.',
self.search_results.Updates.Count)
try:
# step through the list of the updates to ensure that the updates match the
# features desired.
for update in self.search_results.Updates:
# this skips an update if UI updates are not desired.
if update.InstallationBehavior.CanRequestUserInput:
log.debug(U'Skipped update {0} - requests user input'.format(update.title))
continue
# if this update is already downloaded, it doesn't need to be in
# the download_collection. so skipping it unless the user mandates re-download.
if self.skipDownloaded and update.IsDownloaded:
log.debug(
'Skipped update %s - already downloaded',
update.title
)
continue
# check this update's categories against the ones desired.
for category in update.Categories:
# this is a zero guard. these tests have to be in this order
# or it will error out when the user tries to search for
# updates with out specifying categories.
if self.categories is None or category.Name in self.categories:
# adds it to the list to be downloaded.
self.download_collection.Add(update)
log.debug('added update %s', update.title)
# every update has 2 categories. this prevents the update
# from being added twice.
break
log.debug('download_collection made. gathering found categories.')
# gets the categories of the updates available in this collection of updates
self.foundCategories = _gather_update_categories(self.download_collection)
log.debug('found categories: %s',
six.text_type(self.foundCategories))
return True
except Exception as exc:
log.info('parsing updates failed. %s', exc)
return exc
def AutoSearch(self):
'''
this function generates a search string. simplifying the search function while
still providing as many features as possible.
'''
search_string = ''
searchParams = []
if self.skipInstalled:
searchParams.append('IsInstalled=0')
else:
searchParams.append('IsInstalled=1')
if self.skipHidden:
searchParams.append('IsHidden=0')
else:
searchParams.append('IsHidden=1')
if self.skipReboot:
searchParams.append('RebootRequired=0')
else:
searchParams.append('RebootRequired=1')
if self.skipPresent:
searchParams.append('IsPresent=0')
else:
searchParams.append('IsPresent=1')
for i in searchParams:
search_string += '{0} and '.format(i)
if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
search_string += 'Type=\'Software\' or Type=\'Driver\''
elif not self.skipSoftwareUpdates:
search_string += 'Type=\'Software\''
elif not self.skipDriverUpdates:
search_string += 'Type=\'Driver\''
else:
return False
# if there is no type, there is nothing to search.
log.debug('generated search string: %s', search_string)
return self.Search(search_string)
def Download(self):
# chase the download_collection! do the actual download process.
try:
# if the download_collection is empty. no need to download things.
if self.download_collection.Count != 0:
self.download_results = self.win_downloader.Download()
else:
log.debug('Skipped downloading, all updates were already cached.')
return True
except Exception as exc:
log.debug('failed in the downloading %s.', exc)
return exc
def Install(self):
# beat those updates into place!
try:
# this does not draw from the download_collection. important thing to know.
# the blugger is created regardless of what the download_collection has done. but it
# will only download those updates which have been downloaded and are ready.
for update in self.search_results.Updates:
if update.IsDownloaded:
self.install_collection.Add(update)
log.debug('Updates prepared. beginning installation')
except Exception as exc:
log.info('Preparing install list failed: %s', exc)
return exc
# accept eula if not accepted
try:
for update in self.search_results.Updates:
if not update.EulaAccepted:
log.debug('Accepting EULA: %s', update.Title)
update.AcceptEula()
except Exception as exc:
log.info('Accepting Eula failed: %s', exc)
return exc
# if the blugger is empty. no point it starting the install process.
if self.install_collection.Count != 0:
log.debug('Install list created, about to install')
try:
# the call to install.
self.install_results = self.win_installer.Install()
log.info('Installation of updates complete')
return True
except Exception as exc:
log.info('Installation failed: %s', exc)
return exc
else:
log.info('no new updates.')
return True
def GetInstallationResults(self):
'''
this gets results of installation process.
'''
# if the blugger is empty, the results are nil.
log.debug('blugger has {0} updates in it'.format(self.install_collection.Count))
if self.install_collection.Count == 0:
return {}
updates = []
log.debug('repairing update list')
for i in range(self.install_collection.Count):
# this gets the result from install_results, but the title comes from the update
# collection install_collection.
updates.append('{0}: {1}'.format(
self.install_results.GetUpdateResult(i).ResultCode,
self.install_collection.Item(i).Title))
log.debug('Update results enumerated, now making a library to pass back')
results = {}
# translates the list of update results into a library that salt expects.
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
log.debug('Update information complied. returning')
return results
def GetInstallationResultsPretty(self):
'''
converts the installation results into a pretty print.
'''
updates = self.GetInstallationResults()
ret = 'The following are the updates and their return codes.\n'
for i in updates:
ret += '\t{0}\n'.format(updates[i])
return ret
def GetDownloadResults(self):
updates = []
for i in range(self.download_collection.Count):
updates.append('{0}: {1}'.format(
six.text_type(self.download_results.GetUpdateResult(i).ResultCode),
six.text_type(self.download_collection.Item(i).Title)))
results = {}
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
return results
def GetSearchResultsVerbose(self):
updates = []
log.debug('parsing results. %s updates were found.',
self.download_collection.count)
for update in self.download_collection:
if update.InstallationBehavior.CanRequestUserInput:
log.debug('Skipped update %s', update.title)
continue
# More fields can be added from https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
update_com_fields = ['Categories', 'Deadline', 'Description',
'Identity', 'IsMandatory',
'KBArticleIDs', 'MaxDownloadSize', 'MinDownloadSize',
'MoreInfoUrls', 'MsrcSeverity', 'ReleaseNotes',
'SecurityBulletinIDs', 'SupportUrl', 'Title']
simple_enums = ['KBArticleIDs', 'MoreInfoUrls', 'SecurityBulletinIDs']
# update_dict = {k: getattr(update, k) for k in update_com_fields}
update_dict = {}
for f in update_com_fields:
v = getattr(update, f)
if not any([isinstance(v, bool), isinstance(v, six.string_types)]):
# Fields that require special evaluation.
if f in simple_enums:
v = [x for x in v]
elif f == 'Categories':
v = [{'Name': cat.Name, 'Description': cat.Description} for cat in v]
elif f == 'Deadline':
# Deadline will be useful and should be added.
# However, until it can be tested with a date object
# as returned by the COM, it is unclear how to
# handle this field.
continue
elif f == 'Identity':
v = {'RevisionNumber': v.RevisionNumber,
'UpdateID': v.UpdateID}
update_dict[f] = v
updates.append(update_dict)
log.debug('added update %s', update.title)
return updates
def GetSearchResults(self, fields=None):
"""Reduce full updates information to the most important information."""
updates_verbose = self.GetSearchResultsVerbose()
if fields is not None:
updates = [dict((k, v) for k, v in update.items() if k in fields)
for update in updates_verbose]
return updates
# Return list of titles.
return [update['Title'] for update in updates_verbose]
def SetCategories(self, categories):
self.categories = categories
def GetCategories(self):
return self.categories
def GetAvailableCategories(self):
return self.foundCategories
def SetSkips(self, skips):
if skips:
for i in skips:
value = i[next(six.iterkeys(i))]
skip = next(six.iterkeys(i))
self.SetSkip(skip, value)
log.debug('was asked to set %s to %s', skip, value)
def SetSkip(self, skip, state):
if skip == 'UI':
self.skipUI = state
elif skip == 'downloaded':
self.skipDownloaded = state
elif skip == 'installed':
self.skipInstalled = state
elif skip == 'reboot':
self.skipReboot = state
elif skip == 'present':
self.skipPresent = state
elif skip == 'hidden':
self.skipHidden = state
elif skip == 'software':
self.skipSoftwareUpdates = state
elif skip == 'driver':
self.skipDriverUpdates = state
log.debug('new search state: \n\tUI: %s\n\tDownload: %s\n\tInstalled: %s\n\treboot :%s\n\tPresent: %s\n\thidden: %s\n\tsoftware: %s\n\tdriver: %s',
self.skipUI, self.skipDownloaded, self.skipInstalled, self.skipReboot,
self.skipPresent, self.skipHidden, self.skipSoftwareUpdates, self.skipDriverUpdates)
def __str__(self):
results = 'There are {0} updates, by category there are:\n'.format(
self.download_collection.count)
for category in self.foundCategories:
count = 0
for update in self.download_collection:
for cat in update.Categories:
if category == cat.Name:
count += 1
results += '\t{0}: {1}\n'.format(category, count)
return results
def _search(quidditch, retries=5):
'''
a wrapper method for the pywinupdater class. I might move this into the class, but right now,
that is too much for one class I think.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Searching. tries left: %s', retries)
# let the updater make its own search string. MORE POWER this way.
passed = quidditch.AutoSearch()
log.debug('Done searching: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
else:
comment += 'out of retries. this update round failed.\n'
return (comment, True, retries)
passed = False
if clean:
# bragging rights.
comment += 'Search was done without error.\n'
return (comment, True, retries)
def _download(quidditch, retries=5):
'''
another wrapper method.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Downloading. tries left: %s', retries)
passed = quidditch.Download()
log.debug('Done downloading: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Download was done without error.\n'
return (comment, True, retries)
def _install(quidditch, retries=5):
'''
and the last wrapper method. keeping things simple.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('download_collection is this long: %s',
quidditch.install_collection.Count)
log.debug('Installing. tries left: %s', retries)
passed = quidditch.Install()
log.info('Done installing: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Install was done without error.\n'
return (comment, True, retries)
# this is where the actual functions available to salt begin.
def list_updates(verbose=False, fields=None, skips=None, retries=5, categories=None):
'''
Returns a summary of available updates, grouped into their non-mutually
exclusive categories.
verbose
Return full set of results, including several fields from the COM.
fields
Return a list of specific fields for each update. The optional
values here are those at the root level of the verbose list. This
is superseded by the verbose option.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to list. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.list_updates categories="['Updates']"
Categories include, but are not limited to, the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.list_updates
# Specific Fields
salt '*' win_update.list_updates fields="['Title', 'Description']"
# List all critical updates list in verbose detail
salt '*' win_update.list_updates categories="['Critical Updates']" verbose=True
'''
log.debug('categories to search for are: %s', categories)
updates = PyWinUpdater()
if categories:
updates.SetCategories(categories)
updates.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(updates, retries)
if not passed:
return (comment, str(passed))
log.debug('verbose: %s', verbose)
if verbose:
return updates.GetSearchResultsVerbose()
return updates.GetSearchResults(fields=fields)
def download_updates(skips=None, retries=5, categories=None):
'''
Downloads all available updates, skipping those that require user
interaction.
Various aspects of the updates can be included or excluded. this feature is
still in development.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to update. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.download_updates categories="['Updates']"
Categories include the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.download_updates
# Download critical updates only
salt '*' win_update.download_updates categories="['Critical Updates']"
'''
log.debug('categories to search for are: %s', categories)
quidditch = PyWinUpdater(skipDownloaded=True)
quidditch.SetCategories(categories)
quidditch.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetDownloadResults()
except Exception as exc:
comment = 'could not get results, but updates were installed. {0}'.format(exc)
return 'Windows is up to date. \n{0}'.format(comment)
def install_updates(skips=None, retries=5, categories=None):
'''
Downloads and installs all available updates, skipping those that require
user interaction.
Add ``cached`` to only install those updates which have already been downloaded.
you can set the maximum number of retries to ``n`` in the search process by
adding: ``retries=n``
various aspects of the updates can be included or excluded. This function is
still under development.
retries
Number of retries to make before giving up. This is total, not per
step.
categories
Specify the categories to install. Must be passed as a list.
.. code-block:: bash
salt '*' win_update.install_updates categories="['Updates']"
Categories include the following:
* Updates
* Windows 7
* Critical Updates
* Security Updates
* Update Rollups
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_update.install_updates
# Install all critical updates
salt '*' win_update.install_updates categories="['Critical Updates']"
'''
log.debug('categories to search for are: %s', categories)
quidditch = PyWinUpdater()
quidditch.SetCategories(categories)
quidditch.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
# this is where we put things in their place!
comment, passed, retries = _install(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetInstallationResultsPretty()
except Exception as exc:
comment = 'Could not get results, but updates were installed. {0}'.format(exc)
return 'Windows is up to date. \n{0}'.format(comment)

View File

@ -56,7 +56,6 @@ import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.versions
import salt.utils.win_update
from salt.exceptions import CommandExecutionError
@ -228,87 +227,6 @@ def available(software=True,
return updates.summary() if summary else updates.list()
def list_update(name, download=False, install=False):
'''
.. deprecated:: 2017.7.0
Use :func:`get` instead
Returns details for all updates that match the search criteria
Args:
name (str):
The name of the update you're searching for. This can be the GUID, a
KB number, or any part of the name of the update. GUIDs and KBs are
preferred. Run ``list_updates`` to get the GUID for the update
you're looking for.
download (bool):
Download the update returned by this function. Run this function
first to see if the update exists, then set ``download=True`` to
download the update.
install (bool):
Install the update returned by this function. Run this function
first to see if the update exists, then set ``install=True`` to
install the update.
Returns:
dict: Returns a dict containing a list of updates that match the name if
download and install are both set to False. Should usually be a single
update, but can return multiple if a partial name is given.
If download or install is set to true it will return the results of the
operation.
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
CLI Examples:
.. code-block:: bash
# Recommended Usage using GUID without braces
# Use this to find the status of a specific update
salt '*' win_wua.list_update 12345678-abcd-1234-abcd-1234567890ab
# Use the following if you don't know the GUID:
# Using a KB number (could possibly return multiple results)
# Not all updates have an associated KB
salt '*' win_wua.list_update KB3030298
# Using part or all of the name of the update
# Could possibly return multiple results
# Not all updates have an associated KB
salt '*' win_wua.list_update 'Microsoft Camera Codec Pack'
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'get\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return get(name, download, install)
def get(name, download=False, install=False):
'''
.. versionadded:: 2017.7.0
@ -401,142 +319,6 @@ def get(name, download=False, install=False):
return ret if ret else updates.list()
def list_updates(software=True,
drivers=False,
summary=False,
skip_installed=True,
categories=None,
severities=None,
download=False,
install=False):
'''
.. deprecated:: 2017.7.0
Use :func:`list` instead
Returns a detailed list of available updates or a summary. If download or
install is True the same list will be downloaded and/or installed.
Args:
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
summary (bool):
- True: Return a summary of updates available for each category.
- False (default): Return a detailed list of available updates.
skip_installed (bool):
Skip installed updates in the results (default is False)
download (bool):
(Overrides reporting functionality) Download the list of updates
returned by this function. Run this function first with
``download=False`` to see what will be downloaded, then set
``download=True`` to download the updates.
install (bool):
(Overrides reporting functionality) Install the list of updates
returned by this function. Run this function first with
``install=False`` to see what will be installed, then set
``install=True`` to install the updates.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: Returns a dict containing either a summary or a list of updates:
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
CLI Examples:
.. code-block:: bash
# Normal Usage (list all software updates)
salt '*' win_wua.list_updates
# List all updates with categories of Critical Updates and Drivers
salt '*' win_wua.list_updates categories=['Critical Updates','Drivers']
# List all Critical Security Updates
salt '*' win_wua.list_updates categories=['Security Updates'] severities=['Critical']
# List all updates with a severity of Critical
salt '*' win_wua.list_updates severities=['Critical']
# A summary of all available updates
salt '*' win_wua.list_updates summary=True
# A summary of all Feature Packs and Windows 8.1 Updates
salt '*' win_wua.list_updates categories=['Feature Packs','Windows 8.1'] summary=True
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'list\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return list(software, drivers, summary, skip_installed, categories,
severities, download, install)
def list(software=True,
drivers=False,
summary=False,
@ -688,74 +470,6 @@ def list(software=True,
return ret
def download_update(name):
'''
.. deprecated:: 2017.7.0
Use :func:`download` instead
Downloads a single update.
Args:
name (str):
The name of the update to download. This can be a GUID, a KB number,
or any part of the name. To ensure a single item is matched the GUID
is preferred.
.. note::
If more than one result is returned an error will be raised.
Returns:
dict: A dictionary containing the results of the download
CLI Examples:
.. code-block:: bash
salt '*' win_wua.download_update 12345678-abcd-1234-abcd-1234567890ab
salt '*' win_wua.download_update KB12312321
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'download\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return download(name)
def download_updates(names):
'''
.. deprecated:: 2017.7.0
Use :func:`download` instead
Downloads updates that match the list of passed identifiers. It's easier to
use this function by using list_updates and setting install=True.
Args:
names (list):
A list of updates to download. This can be any combination of GUIDs,
KB numbers, or names. GUIDs or KBs are preferred.
Returns:
dict: A dictionary containing the details about the downloaded updates
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_wua.download_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233']
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'download\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return download(names)
def download(names):
'''
.. versionadded:: 2017.7.0
@ -808,73 +522,6 @@ def download(names):
return wua.download(updates)
def install_update(name):
'''
.. deprecated:: 2017.7.0
Use :func:`install` instead
Installs a single update
Args:
name (str): The name of the update to install. This can be a GUID, a KB
number, or any part of the name. To ensure a single item is matched the
GUID is preferred.
.. note::
If no results or more than one result is returned an error will be
raised.
Returns:
dict: A dictionary containing the results of the install
CLI Examples:
.. code-block:: bash
salt '*' win_wua.install_update 12345678-abcd-1234-abcd-1234567890ab
salt '*' win_wua.install_update KB12312231
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'install\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return install(name)
def install_updates(names):
'''
.. deprecated:: 2017.7.0
Use :func:`install` instead
Installs updates that match the list of identifiers. It may be easier to use
the list_updates function and set install=True.
Args:
names (list): A list of updates to install. This can be any combination
of GUIDs, KB numbers, or names. GUIDs or KBs are preferred.
Returns:
dict: A dictionary containing the details about the installed updates
CLI Examples:
.. code-block:: bash
# Normal Usage
salt '*' win_wua.install_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB12323211']
'''
salt.utils.versions.warn_until(
'Fluorine',
'This function is replaced by \'install\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return install(names)
def install(names):
'''
.. versionadded:: 2017.7.0

View File

@ -756,7 +756,7 @@ def write_pem(text, path, overwrite=True, pem_type=None):
"-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." \\
path=/etc/pki/mycert.crt
'''
old_umask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
text = get_pem_entry(text, pem_type=pem_type)
_dhparams = ''
_private_key = ''
@ -777,7 +777,6 @@ def write_pem(text, path, overwrite=True, pem_type=None):
_fp.write(text)
if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
_fp.write(salt.utils.stringutils.to_str(_dhparams))
os.umask(old_umask)
return 'PEM written to {0}'.format(path)

View File

@ -307,9 +307,9 @@ class EventListener(object):
'''
if request not in self.request_map:
return
for tag, future in self.request_map[request]:
for tag, matcher, future in self.request_map[request]:
# timeout the future
self._timeout_future(tag, future)
self._timeout_future(tag, matcher, future)
# remove the timeout
if future in self.timeout_map:
tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
@ -317,9 +317,22 @@ class EventListener(object):
del self.request_map[request]
@staticmethod
def prefix_matcher(mtag, tag):
if mtag is None or tag is None:
raise TypeError('mtag or tag can not be None')
return mtag.startswith(tag)
@staticmethod
def exact_matcher(mtag, tag):
if mtag is None or tag is None:
raise TypeError('mtag or tag can not be None')
return mtag == tag
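A standalone illustration (not part of the changeset) of how the two matchers above differ for a typical job return tag:

def prefix_matcher(mtag, tag):
    return mtag.startswith(tag)

def exact_matcher(mtag, tag):
    return mtag == tag

mtag = 'salt/job/20180326000000000000/ret/minion1'
print(prefix_matcher(mtag, 'salt/job/20180326000000000000/ret'))  # True
print(exact_matcher(mtag, 'salt/job/20180326000000000000/ret'))   # False
print(exact_matcher(mtag, mtag))                                  # True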
def get_event(self,
request,
tag='',
matcher=prefix_matcher.__func__,
callback=None,
timeout=None
):
@ -339,40 +352,49 @@ class EventListener(object):
tornado.ioloop.IOLoop.current().add_callback(callback, future)
future.add_done_callback(handle_future)
# add this tag and future to the callbacks
self.tag_map[tag].append(future)
self.request_map[request].append((tag, future))
self.tag_map[(tag, matcher)].append(future)
self.request_map[request].append((tag, matcher, future))
if timeout:
timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future)
timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future)
self.timeout_map[future] = timeout_future
return future
def _timeout_future(self, tag, future):
def _timeout_future(self, tag, matcher, future):
'''
Timeout a specific future
'''
if tag not in self.tag_map:
if (tag, matcher) not in self.tag_map:
return
if not future.done():
future.set_exception(TimeoutException())
self.tag_map[tag].remove(future)
if len(self.tag_map[tag]) == 0:
del self.tag_map[tag]
self.tag_map[(tag, matcher)].remove(future)
if len(self.tag_map[(tag, matcher)]) == 0:
del self.tag_map[(tag, matcher)]
def _handle_event_socket_recv(self, raw):
'''
Callback for events on the event sub socket
'''
mtag, data = self.event.unpack(raw, self.event.serial)
# see if we have any futures that need this info:
for tag_prefix, futures in six.iteritems(self.tag_map):
if mtag.startswith(tag_prefix):
for (tag, matcher), futures in six.iteritems(self.tag_map):
try:
is_matched = matcher(mtag, tag)
except Exception as e:
log.error('Failed to run a matcher.', exc_info=True)
is_matched = False
if not is_matched:
continue
for future in futures:
if future.done():
continue
future.set_result({'data': data, 'tag': mtag})
self.tag_map[tag_prefix].remove(future)
self.tag_map[(tag, matcher)].remove(future)
if future in self.timeout_map:
tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
del self.timeout_map[future]
@ -924,55 +946,73 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
if self.application.opts['order_masters']:
syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])
job_not_running = self.job_not_running(pub_data['jid'],
# To ensure job_not_running and all_returns can terminate each other, they communicate through a shared future
is_finished = Future()
job_not_running_future = self.job_not_running(pub_data['jid'],
chunk['tgt'],
f_call['kwargs']['tgt_type'],
minions_remaining=minions_remaining
is_finished,
minions_remaining=list(minions_remaining),
)
# if we have a min_wait, do that
if syndic_min_wait is not None:
yield syndic_min_wait
# we are completed when either all minions return or the job isn't running anywhere
chunk_ret = yield self.all_returns(pub_data['jid'],
finish_futures=[job_not_running],
minions_remaining=minions_remaining,
)
raise tornado.gen.Return(chunk_ret)
all_return_future = self.all_returns(pub_data['jid'],
is_finished,
minions_remaining=list(minions_remaining),
)
yield job_not_running_future
raise tornado.gen.Return((yield all_return_future))
@tornado.gen.coroutine
def all_returns(self,
jid,
finish_futures=None,
is_finished,
minions_remaining=None,
):
'''
Return a future which will complete once all returns are completed
(according to minions_remaining), or one of the passed in "finish_futures" completes
(according to minions_remaining), or one of the passed in "is_finished" completes
'''
if finish_futures is None:
finish_futures = []
if minions_remaining is None:
minions_remaining = []
ret_tag = tagify([jid, 'ret'], 'job')
chunk_ret = {}
minion_events = {}
for minion in minions_remaining:
tag = tagify([jid, 'ret', minion], 'job')
minion_event = self.application.event_listener.get_event(self,
tag=tag,
matcher=EventListener.exact_matcher,
timeout=self.application.opts['timeout'])
minion_events[minion_event] = minion
while True:
ret_event = self.application.event_listener.get_event(self,
tag=ret_tag,
)
f = yield Any([ret_event] + finish_futures)
if f in finish_futures:
raise tornado.gen.Return(chunk_ret)
event = f.result()
chunk_ret[event['data']['id']] = event['data']['return']
# it's possible to get a return that wasn't in the minions_remaining list
f = yield Any(minion_events.keys() + [is_finished])
try:
minions_remaining.remove(event['data']['id'])
if f is is_finished:
for event in minion_events:
if not event.done():
event.set_result(None)
raise tornado.gen.Return(chunk_ret)
f_result = f.result()
chunk_ret[f_result['data']['id']] = f_result['data']['return']
except TimeoutException:
pass
# clear finished event future
try:
minions_remaining.remove(minion_events[f])
del minion_events[f]
except ValueError:
pass
if len(minions_remaining) == 0:
if not is_finished.done():
is_finished.set_result(True)
raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
@ -980,6 +1020,7 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
jid,
tgt,
tgt_type,
is_finished,
minions_remaining=None,
):
'''
@ -998,12 +1039,21 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
minion_running = False
while True:
try:
event = yield self.application.event_listener.get_event(self,
event = self.application.event_listener.get_event(self,
tag=ping_tag,
timeout=self.application.opts['gather_job_timeout'],
)
f = yield Any([event, is_finished])
# When finished entire routine, cleanup other futures and return result
if f is is_finished:
if not event.done():
event.set_result(None)
raise tornado.gen.Return(True)
event = f.result()
except TimeoutException:
if not minion_running:
if not is_finished.done():
is_finished.set_result(True)
raise tornado.gen.Return(True)
else:
ping_pub_data = yield self.saltclients['local'](tgt,
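The refactor above has job_not_running and all_returns cooperate through a shared is_finished future instead of passing lists of futures around. A minimal standalone sketch of that coordination pattern (hypothetical coroutine names; requires the tornado package):

import tornado.gen
import tornado.ioloop
from tornado.concurrent import Future

@tornado.gen.coroutine
def collector(is_finished):
    # Stand-in for all_returns: wait until someone signals completion.
    yield is_finished
    raise tornado.gen.Return({'minion1': True})

@tornado.gen.coroutine
def watcher(is_finished):
    # Stand-in for job_not_running: decide the job is over and signal it.
    yield tornado.gen.sleep(0.1)
    if not is_finished.done():
        is_finished.set_result(True)

@tornado.gen.coroutine
def main():
    is_finished = Future()
    results = yield [collector(is_finished), watcher(is_finished)]
    raise tornado.gen.Return(results[0])

print(tornado.ioloop.IOLoop.current().run_sync(main))  # {'minion1': True}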

salt/pillar/netbox.py (new file)
View File

@ -0,0 +1,129 @@
# -*- coding: utf-8 -*-
'''
A module that adds data to the Pillar structure from a NetBox API.
Configuring the NetBox ext_pillar
====================================
.. code-block:: yaml
ext_pillar:
- netbox:
api_url: http://netbox_url.com/api/
The following are optional, and determine whether or not the module will
attempt to configure the ``proxy`` pillar data for use with the napalm
proxy-minion:
.. code-block:: yaml
proxy_return: True
proxy_username: admin
api_token: 123abc
Create a token in your NetBox instance at
http://netbox_url.com/user/api-tokens/
By default, this module will query the NetBox API for the platform associated
with the device, and use the 'NAPALM driver' field to set the napalm
proxy-minion driver. (Currently only 'napalm' is supported for drivertype.)
This module assumes you will use SSH keys to authenticate to the network device.
If password authentication is desired, it is recommended to create another
``proxy`` key in pillar_roots (or git_pillar) with just the ``passwd`` key and
use :py:func:`salt.renderers.gpg <salt.renderers.gpg>` to encrypt the value.
If any additional options for the proxy setup are needed they should also be
configured in pillar_roots.
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
try:
import requests
import ipaddress
_HAS_DEPENDENCIES = True
except ImportError:
_HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
def __virtual__():
return _HAS_DEPENDENCIES
def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Query NetBox API for minion data
'''
# Pull settings from kwargs
api_url = kwargs['api_url'].rstrip('/')
api_token = kwargs.get('api_token', None)
proxy_username = kwargs.get('proxy_username', None)
proxy_return = kwargs.get('proxy_return', True)
ret = {}
headers = {}
if api_token:
headers['Authorization'] = 'Token ' + api_token
# Fetch device from API
device_results = requests.get(
api_url + '/dcim/devices/',
params={'name': minion_id, },
headers=headers,
)
# Check status code for API call
if device_results.status_code != requests.codes.ok:
log.warn('API query failed for "%s", status code: %d',
minion_id, device_results.status_code)
# Assign results from API call to "netbox" key
try:
devices = device_results.json()['results']
if len(devices) == 1:
ret['netbox'] = devices[0]
elif len(devices) > 1:
log.error('More than one device found for "%s"', minion_id)
except Exception:
log.error('Device not found for "%s"', minion_id)
if proxy_return:
# Attempt to add "proxy" key, based on platform API call
try:
# Fetch device from API
platform_results = requests.get(
ret['netbox']['platform']['url'],
headers=headers,
)
# Check status code for API call
if platform_results.status_code != requests.codes.ok:
log.info('API query failed for "%s", status code: %d',
minion_id, platform_results.status_code)
# Assign results from API call to "proxy" key if the platform has a
# napalm_driver defined.
napalm_driver = platform_results.json().get('napalm_driver')
if napalm_driver:
ret['proxy'] = {
'host': str(ipaddress.IPv4Interface(
ret['netbox']['primary_ip4']['address']).ip),
'driver': napalm_driver,
'proxytype': 'napalm',
}
if proxy_username:
ret['proxy']['username'] = proxy_username
except Exception:
log.debug(
'Could not create proxy config data for "%s"', minion_id)
return ret
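# For illustration only (hypothetical data, not produced verbatim by the API):
# for a minion named "edge01" whose NetBox platform defines a NAPALM driver of
# "junos", the dictionary returned above might look roughly like:
#
#     {
#         'netbox': {
#             'name': 'edge01',
#             'primary_ip4': {'address': '192.0.2.10/24'},
#             'platform': {'url': 'http://netbox_url.com/api/dcim/platforms/1/'},
#             # ... remaining fields exactly as returned by /dcim/devices/
#         },
#         'proxy': {
#             'host': '192.0.2.10',   # mask stripped via ipaddress.IPv4Interface
#             'driver': 'junos',
#             'proxytype': 'napalm',
#             'username': 'admin',    # only present when proxy_username is set
#         },
#     }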

View File

@ -58,8 +58,9 @@ The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- filtering for:
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users (``mode: map``), where each user is a dictionary
containing the configured string or list attributes.
- returning the data of users (``mode: map``) as a list of dictionaries, where
each user is a dictionary containing the configured string or list attributes,
and the user dictionaries are combined into a list.
**Configuration:**
@ -106,6 +107,118 @@ The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- cn=team02,ou=groups,dc=company
Dict Mode
---------
The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- filtering for:
- members of the group ``it-admins``
- objects with ``objectclass=user``
- returning the data of users (``mode: dict``), where each user is a dictionary
containing the configured string or list attributes, and the user dictionaries
are combined into a dictionary using the value of the LDAP attribute defined in the
``dict_key_attr`` configuration option (defaults to ``dn`` or ``distinguishedName``)
as the key.
**Configuration:**
.. code-block:: yaml
salt-users:
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: dict
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- cn
- displayName
- givenName
- sn
lists:
- memberOf
**Result:**
.. code-block:: yaml
salt-users:
cn=johndoe,ou=users,dc=company,dc=tld:
- cn: cn=johndoe,ou=users,dc=company,dc=tld
displayName: John Doe
givenName: John
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team01,ou=groups,dc=company
cn=janedoe,ou=users,dc=company,dc=tld:
- cn: cn=janedoe,ou=users,dc=company,dc=tld
displayName: Jane Doe
givenName: Jane
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team02,ou=groups,dc=company
**Configuration:**
.. code-block:: yaml
salt-users:
server: ldap.company.tld
port: 389
tls: true
dn: 'dc=company,dc=tld'
binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
bindpw: bi7ieBai5Ano
referrals: false
anonymous: false
mode: dict
dict_key_attr: displayName
dn: 'ou=users,dc=company,dc=tld'
filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
attrs:
- dn
- cn
- givenName
- sn
lists:
- memberOf
**Result:**
.. code-block:: yaml
salt-users:
John Doe:
- dn: cn=johndoe,ou=users,dc=company,dc=tld
cn: cn=johndoe,ou=users,dc=company,dc=tld
givenName: John
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team01,ou=groups,dc=company
Jane Doe:
- dn: cn=janedoe,ou=users,dc=company,dc=tld
cn: cn=janedoe,ou=users,dc=company,dc=tld
givenName: Jane
sn: Doe
memberOf:
- cn=it-admins,ou=groups,dc=company,dc=tld
- cn=team02,ou=groups,dc=company
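For illustration only, the grouping performed by ``dict`` mode can be sketched in
plain Python (the records below are hypothetical, in the ``(dn, attrs)`` tuple form
returned by an LDAP search):

.. code-block:: python

    results = [
        ('cn=johndoe,ou=users,dc=company,dc=tld',
         {'displayName': ['John Doe'], 'givenName': ['John'], 'sn': ['Doe']}),
        ('cn=janedoe,ou=users,dc=company,dc=tld',
         {'displayName': ['Jane Doe'], 'givenName': ['Jane'], 'sn': ['Doe']}),
    ]

    dict_key_attr = 'displayName'
    pillar = {}
    for dn, attrs in results:
        entry = {key: values[0] for key, values in attrs.items()}
        key = dn if dict_key_attr in ('dn', 'distinguishedName') else entry[dict_key_attr]
        pillar.setdefault(key, []).append(entry)

    # pillar == {'John Doe': [{...}], 'Jane Doe': [{...}]}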
List Mode
---------
@ -193,6 +306,7 @@ def _result_to_dict(data, result, conf, source):
'''
attrs = _config('attrs', conf) or []
lists = _config('lists', conf) or []
dict_key_attr = _config('dict_key_attr', conf) or 'dn'
# TODO:
# deprecate the default 'mode: split' and make the more
# straightforward 'mode: dict' the new default
@ -213,6 +327,30 @@ def _result_to_dict(data, result, conf, source):
if key in lists:
ret[key] = record.get(key)
data[source].append(ret)
elif mode == 'dict':
data[source] = {}
for record in result:
ret = {}
distinguished_name = record[0]
log.debug('dn: %s', distinguished_name)
if 'dn' in attrs or 'distinguishedName' in attrs:
ret['dn'] = distinguished_name
record = record[1]
log.debug('record: %s', record)
for key in record:
if key in attrs:
for item in record.get(key):
ret[key] = item
if key in lists:
ret[key] = record.get(key)
if dict_key_attr in ['dn', 'distinguishedName']:
dict_key = distinguished_name
else:
dict_key = ','.join(sorted(record.get(dict_key_attr, [])))
try:
data[source][dict_key].append(ret)
except KeyError:
data[source][dict_key] = [ret]
elif mode == 'split':
for key in result[0][1]:
if key in attrs:
@ -250,7 +388,8 @@ def _do_search(conf):
scope = _config('scope', conf)
_lists = _config('lists', conf) or []
_attrs = _config('attrs', conf) or []
attrs = _lists + _attrs
_dict_key_attr = _config('dict_key_attr', conf) or 'dn'
attrs = _lists + _attrs + [_dict_key_attr]
if not attrs:
attrs = None
# Perform the search

View File

@ -52,9 +52,6 @@ Multiple Vault sources may also be used:
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.versions
log = logging.getLogger(__name__)
__func_alias__ = {
@ -77,12 +74,6 @@ def ext_pillar(minion_id, # pylint: disable=W0613
'''
comps = conf.split()
if not comps[0].startswith('path='):
salt.utils.versions.warn_until(
'Fluorine',
'The \'profile\' argument has been deprecated. Any parts up until '
'and following the first "path=" are discarded'
)
paths = [comp for comp in comps if comp.startswith('path=')]
if not paths:
log.error('"%s" is not a valid Vault ext_pillar config', conf)

View File

@ -8,9 +8,6 @@ This is often useful if you wish to store your pillars in source control or
share your pillar data with others that you trust. I don't advise making your pillars public,
regardless of whether they are encrypted.
When generating keys and encrypting passwords use --local when using salt-call for extra
security. Also consider using just the salt runner nacl when encrypting pillar passwords.
:configuration: The following configuration defaults can be
defined (in pillar or config files). Avoid storing private keys in pillars! Ensure the master does not have `pillar_opts=True`:
@ -30,7 +27,7 @@ security. Also consider using just the salt runner nacl when encrypting pillar p
.. code-block:: bash
salt-call nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-run nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
The nacl lib uses 32-byte keys; these keys are base64 encoded to make your life simpler.
@ -38,9 +35,9 @@ To generate your `sk_file` and `pk_file` use:
.. code-block:: bash
salt-call --local nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl
# or if you want to work without files.
salt-call --local nacl.keygen
salt-run nacl.keygen
local:
----------
pk:
@ -59,14 +56,14 @@ Sealedbox only has one key that is for both encryption and decryption.
.. code-block:: bash
salt-call --local nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=
salt-run nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=
tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=
To decrypt the data:
.. code-block:: bash
salt-call --local nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \
salt-run nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \
sk='SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow='
When the keys are defined in the master config you can use them from the nacl runner
@ -94,7 +91,7 @@ The developer can then use a less-secure system to encrypt data.
.. code-block:: bash
salt-call --local nacl.enc apassword
salt-run nacl.enc apassword
Pillar files can include protected data that the salt master decrypts:
@ -111,42 +108,7 @@ Larger files like certificates can be encrypted with:
.. code-block:: bash
salt-call nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl
# or more advanced
cert=$(cat /tmp/cert.crt)
salt-call --out=newline_values_only nacl.enc_pub data="$cert" > /tmp/cert.nacl
In pillars rendered with jinja, be sure to include `|json` so line breaks are encoded:
.. code-block:: jinja
cert: "{{salt.nacl.dec('S2uogToXkgENz9...085KYt')|json}}"
In states rendered with jinja it is also good practice to include `|json`:
.. code-block:: jinja
{{sls}} private key:
file.managed:
- name: /etc/ssl/private/cert.key
- mode: 700
- contents: "{{pillar['pillarexample']['cert_key']|json}}"
Optional small program to encrypt data without needing salt modules.
.. code-block:: python
#!/bin/python3
import sys, base64, libnacl.sealed
pk = base64.b64decode('YOURPUBKEY')
b = libnacl.sealed.SealedBox(pk)
data = sys.stdin.buffer.read()
print(base64.b64encode(b.encrypt(data)).decode())
.. code-block:: bash
echo 'apassword' | nacl_enc.py
salt-run nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl
'''
@ -158,6 +120,7 @@ import os
# Import Salt libs
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.win_functions
import salt.utils.win_dacl
import salt.syspaths
@ -186,9 +149,9 @@ def _get_config(**kwargs):
config = {
'box_type': 'sealedbox',
'sk': None,
'sk_file': '/etc/salt/pki/master/nacl',
'sk_file': os.path.join(__opts__['pki_dir'], 'nacl'),
'pk': None,
'pk_file': '/etc/salt/pki/master/nacl.pub',
'pk_file': os.path.join(__opts__['pki_dir'], 'nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
@ -233,7 +196,7 @@ def _get_pk(**kwargs):
return base64.b64decode(pubkey)
def keygen(sk_file=None, pk_file=None):
def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
@ -248,11 +211,20 @@ def keygen(sk_file=None, pk_file=None):
.. code-block:: bash
salt-call nacl.keygen
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
salt-run nacl.keygen
salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-run nacl.keygen
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
@ -313,6 +285,26 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
@ -334,7 +326,6 @@ def enc_file(name, out=None, **kwargs):
.. code-block:: bash
salt-run nacl.enc_file name=/tmp/id_rsa
salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert
salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
@ -360,6 +351,31 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
@ -381,7 +397,6 @@ def dec_file(name, out=None, **kwargs):
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
@ -411,9 +426,10 @@ def sealedbox_encrypt(data, **kwargs):
.. code-block:: bash
salt-run nacl.sealedbox_encrypt datatoenc
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
@ -427,12 +443,16 @@ def sealedbox_decrypt(data, **kwargs):
.. code-block:: bash
salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A=
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
salt-run nacl.sealedbox_decrypt pEXHQM6cuaF7A=
salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
@ -449,9 +469,12 @@ def secretbox_encrypt(data, **kwargs):
.. code-block:: bash
salt-run nacl.secretbox_encrypt datatoenc
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
salt-run nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-run nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
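# A standalone round-trip sketch (assumes only that libnacl is installed, not
# part of this module) mirroring the logic above:
#
#     import base64
#     import libnacl.secret
#
#     box = libnacl.secret.SecretBox()  # generates a random 32-byte key
#     token = base64.b64encode(box.encrypt(b'apassword'))
#     assert box.decrypt(base64.b64decode(token)) == b'apassword'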
@ -466,12 +489,16 @@ def secretbox_decrypt(data, **kwargs):
.. code-block:: bash
salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A=
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
salt-run nacl.secretbox_decrypt pEXHQM6cuaF7A=
salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is bytes
data = salt.utils.stringutils.to_bytes(data)
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))

View File

@ -27,7 +27,7 @@ Once configured you can access data using a URL such as:
.. code-block:: yaml
password: sdb://myvault/secret/passwords?mypassword
password: sdb://myvault/secret/passwords/mypassword
In this URL, ``myvault`` refers to the configuration profile,
``secret/passwords`` is the path where the data resides, and ``mypassword`` is
@ -56,9 +56,17 @@ def set_(key, value, profile=None):
'''
Set a key/value pair in the vault service
'''
comps = key.split('?')
path = comps[0]
key = comps[1]
if '?' in key:
__utils__['versions.warn_until'](
'Neon',
(
'Using ? to separate the path and key for vault has been deprecated '
'and will be removed in {version}. Please just use a /.'
),
)
path, key = key.split('?')
else:
path, key = key.rsplit('/', 1)
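# Illustrative only (not part of this function): with the new separator the
# last path component becomes the key, while the deprecated form split on '?':
#
#     'secret/passwords/mypassword'.rsplit('/', 1)  # -> ['secret/passwords', 'mypassword']
#     'secret/passwords?mypassword'.split('?')      # -> ['secret/passwords', 'mypassword']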
try:
url = 'v1/{0}'.format(path)
@ -81,9 +89,17 @@ def get(key, profile=None):
'''
Get a value from the vault service
'''
comps = key.split('?')
path = comps[0]
key = comps[1]
if '?' in key:
__utils__['versions.warn_until'](
'Neon',
(
'Using ? to separate the path and key for vault has been deprecated '
'and will be removed in {version}. Please just use a /.'
),
)
path, key = key.split('?')
else:
path, key = key.rsplit('/', 1)
try:
url = 'v1/{0}'.format(path)

View File

@ -720,8 +720,12 @@ class State(object):
except AttributeError:
pillar_enc = six.text_type(pillar_enc).lower()
self._pillar_enc = pillar_enc
if initial_pillar:
if initial_pillar and not self._pillar_override:
self.opts['pillar'] = initial_pillar
else:
# Compile pillar data
self.opts['pillar'] = self._gather_pillar()
# Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts['pillar'] = salt.utils.dictupdate.merge(
self.opts['pillar'],
@ -729,8 +733,6 @@ class State(object):
self.opts.get('pillar_source_merging_strategy', 'smart'),
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
else:
self.opts['pillar'] = self._gather_pillar()
self.state_con = context or {}
self.load_modules()
self.active = set()
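# For illustration only (hypothetical values): with the default 'smart'
# strategy, salt.utils.dictupdate.merge overlays the override recursively on
# top of the compiled pillar, e.g.
#
#     compiled = {'app': {'port': 80, 'debug': False}}
#     override = {'app': {'debug': True}}
#     # merge(compiled, override, 'smart', 'yaml') -> {'app': {'port': 80, 'debug': True}}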
@ -3919,7 +3921,7 @@ class BaseHighState(object):
return err
if not high:
return ret
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -3936,7 +3938,6 @@ class BaseHighState(object):
except (IOError, OSError):
log.error('Unable to write to "state.highstate" cache file %s', cfn)
os.umask(cumask)
return self.state.call_high(high, orchestration_jid)
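# A rough sketch of the context-manager pattern adopted above (the real helper
# lives in salt.utils.files; this is only an approximation):
#
#     import contextlib
#     import os
#
#     @contextlib.contextmanager
#     def set_umask(mask):
#         old_umask = os.umask(mask)  # apply the restrictive mask
#         try:
#             yield
#         finally:
#             os.umask(old_umask)     # always restore the previous mask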
def compile_highstate(self):

View File

@ -487,9 +487,10 @@ def present(
iargs = {'ami_name': image_name, 'region': region, 'key': key,
'keyid': keyid, 'profile': profile}
image_ids = __salt__['boto_ec2.find_images'](**iargs)
if len(image_ids):
if image_ids: # find_images() returns False on failure
launch_config[index]['image_id'] = image_ids[0]
else:
log.warning("Couldn't find AMI named `%s`, passing literally.", image_name)
launch_config[index]['image_id'] = image_name
del launch_config[index]['image_name']
break

View File

@ -199,8 +199,7 @@ executed when the state it is watching changes. Example:
``cmd.wait`` itself does not do anything; all functionality is inside its ``mod_watch``
function, which is called by ``watch`` on changes.
``cmd.wait`` will be deprecated in future due to the confusion it causes. The
preferred format is using the :ref:`onchanges Requisite <requisites-onchanges>`, which
The preferred format is using the :ref:`onchanges Requisite <requisites-onchanges>`, which
works on ``cmd.run`` as well as on any other state. The example would then look as follows:
.. code-block:: yaml

View File

@ -62,7 +62,8 @@ def installed(name,
no_dev=None,
quiet=False,
composer_home='/root',
always_check=True):
always_check=True,
env=None):
'''
Verify that the correct versions of composer dependencies are present.
@ -111,6 +112,9 @@ def installed(name,
If ``True``, *always* run ``composer install`` in the directory. This is the
default behavior. If ``False``, only run ``composer install`` if there is no
vendor directory present.
env
A list of environment variables to be set prior to execution.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
@ -153,7 +157,8 @@ def installed(name,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home
composer_home=composer_home,
env=env
)
except (SaltException) as err:
ret['result'] = False
@ -188,7 +193,8 @@ def update(name,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
composer_home='/root',
env=None):
'''
Composer update the directory to ensure we have the latest versions
of all project dependencies.
@ -233,6 +239,9 @@ def update(name,
composer_home
``$COMPOSER_HOME`` environment variable
env
A list of environment variables to be set prior to execution.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
@ -267,7 +276,8 @@ def update(name,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home
composer_home=composer_home,
env=env
)
except (SaltException) as err:
ret['result'] = False

View File

@ -1,278 +0,0 @@
# -*- coding: utf-8 -*-
'''
States to manage Docker containers, images, volumes, and networks
.. versionchanged:: 2017.7.0
The legacy Docker state and execution module have been removed, and the
new modules (formerly called ``dockerng``) have taken their place.
.. important::
As of the 2017.7.0 release, the states in this module have been separated
into the following four state modules:
- :mod:`docker_container <salt.states.docker_container>` - States to manage
Docker containers
- :mod:`docker_image <salt.states.docker_image>` - States to manage Docker
images
- :mod:`docker_volume <salt.states.docker_volume>` - States to manage
Docker volumes
- :mod:`docker_network <salt.states.docker_network>` - States to manage
Docker networks
The reason for this change was to make states and requisites more clear.
For example, imagine this SLS:
.. code-block:: yaml
myuser/appimage:
docker.image_present:
- sls: docker.images.appimage
myapp:
docker.running:
- image: myuser/appimage
- require:
- docker: myuser/appimage
The new syntax would be:
.. code-block:: yaml
myuser/appimage:
docker_image.present:
- sls: docker.images.appimage
myapp:
docker_container.running:
- image: myuser/appimage
- require:
- docker_image: myuser/appimage
This is similar to how Salt handles MySQL, MongoDB, Zabbix, and other cases
where the same execution module is used to manage several different kinds
of objects (users, databases, roles, etc.).
The old syntax will continue to work until the **Fluorine** release of
Salt.
'''
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
# Import salt libs
import salt.utils.args
import salt.utils.versions
# Enable proper logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Define the module's virtual name
__virtualname__ = 'docker'
__virtual_aliases__ = ('dockerng', 'moby')
def __virtual__():
'''
Only load if the docker execution module is available
'''
if 'docker.version' in __salt__:
return __virtualname__
return (False, __salt__.missing_fun_string('docker.version'))
def running(name, **kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_container.running
<salt.states.docker_container.running>`.
'''
ret = __states__['docker_container.running'](
name,
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.running state has been renamed to '
'docker_container.running. To get rid of this warning, update your '
'SLS to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def stopped(**kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_container.stopped
<salt.states.docker_container.stopped>`.
'''
ret = __states__['docker_container.stopped'](
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.stopped state has been renamed to '
'docker_container.stopped. To get rid of this warning, update your '
'SLS to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def absent(name, **kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_container.absent
<salt.states.docker_container.absent>`.
'''
ret = __states__['docker_container.absent'](
name,
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.absent state has been renamed to '
'docker_container.absent. To get rid of this warning, update your '
'SLS to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def network_present(name, **kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_network.present
<salt.states.docker_network.present>`.
'''
ret = __states__['docker_network.present'](
name,
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.network_present state has been renamed to '
'docker_network.present. To get rid of this warning, update your SLS '
'to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def network_absent(name, **kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_network.absent
<salt.states.docker_network.absent>`.
'''
ret = __states__['docker_network.absent'](
name,
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.network_absent state has been renamed to '
'docker_network.absent. To get rid of this warning, update your SLS '
'to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def image_present(name, **kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_image.present
<salt.states.docker_image.present>`.
'''
ret = __states__['docker_image.present'](
name,
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.image_present state has been renamed to '
'docker_image.present. To get rid of this warning, update your SLS '
'to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def image_absent(**kwargs):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_image.absent
<salt.states.docker_image.absent>`.
'''
ret = __states__['docker_image.absent'](
**salt.utils.args.clean_kwargs(**kwargs)
)
msg = (
'The docker.image_absent state has been renamed to '
'docker_image.absent. To get rid of this warning, update your SLS to '
'use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def volume_present(name, driver=None, driver_opts=None, force=False):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_volume.present
<salt.states.docker_volume.present>`.
'''
ret = __states__['docker_volume.present'](name,
driver=driver,
driver_opts=driver_opts,
force=force)
msg = (
'The docker.volume_present state has been renamed to '
'docker_volume.present. To get rid of this warning, update your SLS '
'to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def volume_absent(name, driver=None):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`docker_volume.absent
<salt.states.docker_volume.absent>`.
'''
ret = __states__['docker_volume.absent'](name, driver=driver)
msg = (
'The docker.volume_absent state has been renamed to '
'docker_volume.absent. To get rid of this warning, update your SLS '
'to use the new name.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
# Handle requisites
def mod_watch(name, sfun=None, **kwargs):
if sfun == 'running':
watch_kwargs = copy.deepcopy(kwargs)
if watch_kwargs.get('watch_action', 'force') == 'force':
watch_kwargs['force'] = True
else:
watch_kwargs['send_signal'] = True
watch_kwargs['force'] = False
return running(name, **watch_kwargs)
if sfun == 'image_present':
# Force image to be updated
kwargs['force'] = True
return image_present(name, **kwargs)
return {'name': name,
'changes': {},
'result': False,
'comment': ('watch requisite is not'
' implemented for {0}'.format(sfun))}

View File

@ -890,7 +890,7 @@ def present(name,
return ret
def absent(name, driver=None):
def absent(name):
'''
Ensure that a network is absent.
@ -909,12 +909,6 @@ def absent(name, driver=None):
'result': False,
'comment': ''}
if driver is not None:
ret.setdefault('warnings', []).append(
'The \'driver\' argument has no function and will be removed in '
'the Fluorine release.'
)
try:
network = __salt__['docker.inspect_network'](name)
except CommandExecutionError as exc:

View File

@ -2177,10 +2177,6 @@ def detached(name,
If a branch or tag is specified it will be resolved to a commit ID
and checked out.
ref
.. deprecated:: 2017.7.0
Use ``rev`` instead.
target
Name of the target directory where the repository is about to be cloned.
@ -2271,7 +2267,6 @@ def detached(name,
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
ref = kwargs.pop('ref', None)
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
return _fail(
@ -2279,15 +2274,6 @@ def detached(name,
salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
)
if ref is not None:
rev = ref
deprecation_msg = (
'The \'ref\' argument has been renamed to \'rev\' for '
'consistency. Please update your SLS to reflect this.'
)
ret.setdefault('warnings', []).append(deprecation_msg)
salt.utils.versions.warn_until('Fluorine', deprecation_msg)
if not rev:
return _fail(
ret,

View File

@ -63,7 +63,7 @@ def send_message(name,
- api_url: https://hipchat.myteam.com
- api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
- api_version: v1
- color: green
- message_color: green
- notify: True
The following parameters are required:
@ -96,7 +96,7 @@ def send_message(name,
The api version for Hipchat to use,
if not specified in the configuration options of master or minion.
color
message_color
The color the Hipchat message should be displayed in. One of the following, default: yellow
"yellow", "red", "green", "purple", "gray", or "random".

View File

@ -1,151 +0,0 @@
# -*- coding: utf-8 -*-
'''
Manage Kubernetes
.. versionadded:: 2016.3.0
.. code-block:: yaml
kube_label_1:
k8s.label_present:
- name: mylabel
- value: myvalue
- node: myothernodename
- apiserver: http://mykubeapiserer:8080
kube_label_2:
k8s.label_absent:
- name: mylabel
- node: myothernodename
- apiserver: http://mykubeapiserer:8080
kube_label_3:
k8s.label_folder_present:
- name: mylabel
- node: myothernodename
- apiserver: http://mykubeapiserer:8080
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import salt libs
import salt.utils.versions
__virtualname__ = 'k8s'
def __virtual__():
'''Load only if kubernetes module is available.'''
if 'k8s.get_labels' not in __salt__:
return False
return True
def label_present(
name,
value,
node=None,
apiserver=None):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`kubernetes.node_label_present
<salt.states.kubernetes.node_label_present`.
Ensure the label exists on the kube node.
name
Name of the label.
value
Value of the label.
node
Override node ID.
apiserver
K8S apiserver URL.
'''
# Use salt k8s module to set label
ret = __salt__['k8s.label_present'](name, value, node, apiserver)
msg = (
'The k8s.label_present state has been replaced by '
'kubernetes.node_label_present. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def label_absent(
name,
node=None,
apiserver=None):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`kubernetes.node_label_absent
<salt.states.kubernetes.node_label_absent`.
Ensure the label doesn't exist on the kube node.
name
Name of the label.
node
Override node ID.
apiserver
K8S apiserver URL.
'''
# Use salt k8s module to set label
ret = __salt__['k8s.label_absent'](name, node, apiserver)
msg = (
'The k8s.label_absent state has been replaced by '
'kubernetes.node_label_absent. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret
def label_folder_absent(
name,
node=None,
apiserver=None):
'''
.. deprecated:: 2017.7.0
This state has been moved to :py:func:`kubernetes.node_label_folder_absent
<salt.states.kubernetes.node_label_folder_absent`.
Ensure the label folder doesn't exist on the kube node.
name
Name of the label folder.
node
Override node ID.
apiserver
K8S apiserver URL.
'''
# Use salt k8s module to set label
ret = __salt__['k8s.folder_absent'](name, node, apiserver)
msg = (
'The k8s.label_folder_absent state has been replaced by '
'kubernetes.node_label_folder_absent. Update your SLS to use the new '
'function name to get rid of this warning.'
)
salt.utils.versions.warn_until('Fluorine', msg)
ret.setdefault('warnings', []).append(msg)
return ret

View File

@ -341,10 +341,12 @@ def mounted(name,
mount_invisible_keys = [
'actimeo',
'comment',
'credentials',
'direct-io-mode',
'password',
'retry',
'port',
'retry',
'secretfile',
]
if extra_mount_invisible_keys:

View File

@ -180,8 +180,8 @@ def _check_pkg_version_format(pkg):
def _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, index_url):
upgrade, user, cwd, bin_env, env_vars,
index_url):
# result: None means the command failed to run
# result: True means the package is installed
# result: False means the package is not installed
@ -190,7 +190,8 @@ def _check_if_installed(prefix, state_pkg_name, version_spec,
# Check if the requested package is already installed.
try:
pip_list = __salt__['pip.list'](prefix, bin_env=bin_env,
user=user, cwd=cwd)
user=user, cwd=cwd,
env_vars=env_vars)
prefix_realname = _find_key(prefix, pip_list)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
@ -682,7 +683,8 @@ def installed(name,
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, index_url)
upgrade, user, cwd, bin_env, env_vars,
index_url)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
@ -823,7 +825,8 @@ def installed(name,
# Case for packages that are not an URL
if prefix:
pipsearch = __salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd)
user=user, cwd=cwd,
env_vars=env_vars)
# If we didn't find the package in the system after
# installing it, report it

View File

@ -9,9 +9,6 @@ Control Apache Traffic Server
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.versions
def __virtual__():
'''
@ -239,35 +236,6 @@ def config(name, value):
return ret
def set_var(name, value):
'''
Set Traffic Server configuration variable values.
.. deprecated:: Fluorine
Use ``trafficserver.config`` instead.
.. code-block:: yaml
proxy.config.proxy_name:
trafficserver.set_var:
- value: cdn.site.domain.tld
OR
traffic_server_setting:
trafficserver.set_var:
- name: proxy.config.proxy_name
- value: cdn.site.domain.tld
'''
salt.utils.versions.warn_until(
'Fluorine',
'The \'set_var\' function has been deprecated and will be removed in Salt '
'{version}. Please use \'trafficserver.config\' instead.'
)
return config(name, value)
def shutdown(name):
'''
Shut down Traffic Server on the local node.

View File

@ -33,6 +33,7 @@ import salt.utils.dateutils
import salt.utils.platform
import salt.utils.user
from salt.utils.locales import sdecode, sdecode_if_string
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext.six import string_types, iteritems
@ -77,7 +78,9 @@ def _changes(name,
win_homedrive=None,
win_profile=None,
win_logonscript=None,
win_description=None):
win_description=None,
allow_uid_change=False,
allow_gid_change=False):
'''
Return a dict of the changes required for a user if the user is present,
otherwise return False.
@ -191,6 +194,25 @@ def _changes(name,
if __salt__['user.get_loginclass'](name) != loginclass:
change['loginclass'] = loginclass
errors = []
if not allow_uid_change and 'uid' in change:
errors.append(
'Changing uid ({0} -> {1}) not permitted, set allow_uid_change to '
'True to force this change. Note that this will not change file '
'ownership.'.format(lusr['uid'], uid)
)
if not allow_gid_change and 'gid' in change:
errors.append(
'Changing gid ({0} -> {1}) not permitted, set allow_gid_change to '
'True to force this change. Note that this will not change file '
'ownership.'.format(lusr['gid'], gid)
)
if errors:
raise CommandExecutionError(
'Encountered error checking for needed changes',
info=errors
)
return change
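# For illustration only (arguments elided): present() below catches this
# exception and surfaces the collected messages.
#
#     try:
#         changes = _changes(name, uid, gid, ...)  # with allow_uid_change=False
#     except CommandExecutionError as exc:
#         ret['result'] = False
#         ret['comment'] = exc.strerror   # summary line
#         # exc.info holds the per-field error strings built above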
@ -225,7 +247,9 @@ def present(name,
win_profile=None,
win_logonscript=None,
win_description=None,
nologinit=False):
nologinit=False,
allow_uid_change=False,
allow_gid_change=False):
'''
Ensure that the named user is present with the specified properties
@ -233,16 +257,28 @@ def present(name,
The name of the user to manage
uid
The user id to assign, if left empty then the next available user id
will be assigned
The user id to assign. If not specified, and the user does not exist,
then the next available uid will be assigned.
gid
The default group id. Also accepts group name.
The id of the default group to assign to the user. Either a group name
or gid can be used. If not specified, and the user does not exist, then
the next available gid will be assigned.
gid_from_name
If True, the default group id will be set to the id of the group with
the same name as the user. If the group does not exist the state will
fail. Default is ``False``.
gid_from_name : False
If ``True``, the default group id will be set to the id of the group
with the same name as the user. If the group does not exist the state
will fail.
allow_uid_change : False
Set to ``True`` to allow the state to update the uid.
.. versionadded:: 2018.3.1
allow_gid_change : False
Set to ``True`` to allow the state to update the gid.
.. versionadded:: 2018.3.1
groups
A list of groups to assign the user to, pass a list object. If a group
@ -466,6 +502,7 @@ def present(name,
ret['result'] = False
return ret
try:
changes = _changes(name,
uid,
gid,
@ -492,7 +529,13 @@ def present(name,
win_homedrive,
win_profile,
win_logonscript,
win_description)
win_description,
allow_uid_change,
allow_gid_change)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
if changes:
if __opts__['test']:
@ -621,7 +664,13 @@ def present(name,
win_homedrive,
win_profile,
win_logonscript,
win_description)
win_description,
allow_uid_change=True,
allow_gid_change=True)
# allow_uid_change and allow_gid_change passed as True to avoid race
# conditions where a uid/gid is modified outside of Salt. If an
# unauthorized change was requested, it would have been caught the
# first time we ran _changes().
if changes:
ret['comment'] = 'These values could not be changed: {0}'.format(

View File

@ -1,587 +0,0 @@
# -*- coding: utf-8 -*-
'''
Management of the windows update agent
======================================
This module is being deprecated and will be removed in Salt Fluorine. Please use
the ``win_wua`` state module instead.
.. versionadded:: 2014.7.0
Set windows updates to run by category. Default behavior is to install
all updates that do not require user interaction to complete.
Optionally set ``category`` to a category of your choice to only
install certain updates. Default is to set to install all available updates.
The following example will install all Security and Critical Updates,
and download but not install standard updates.
.. code-block:: yaml
updates:
win_update.installed:
- categories:
- 'Critical Updates'
- 'Security Updates'
- skips:
- downloaded
win_update.downloaded:
- categories:
- 'Updates'
- skips:
- downloaded
You can also specify a number of features about the update to have a
fine grain approach to specific types of updates. These are the following
features/states of updates available for configuring:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, included by default
'present' - Present on computer, skipped by default
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - Skip updates that have been hidden, skipped by default
'software' - Software updates, included by default
'driver' - driver updates, included by default
The following example installs all driver updates that don't require a reboot:
.. code-block:: yaml
gryffindor:
win_update.installed:
- skips:
- driver: True
- software: False
- reboot: False
To just update your windows machine, add this your sls:
.. code-block:: yaml
updates:
win_update.installed
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
try:
import win32com.client
import pythoncom
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# pylint: enable=import-error
# Import Salt libs
import salt.utils.platform
import salt.utils.versions
log = logging.getLogger(__name__)
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return True
return False
def _gather_update_categories(updateCollection):
'''
this is a convenience method to gather which categories of updates are available in any update
collection it is passed. Typically, though, this is the download_collection.
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
'''
categories = []
for i in range(updateCollection.Count):
update = updateCollection.Item(i)
for j in range(update.Categories.Count):
name = update.Categories.Item(j).Name
if name not in categories:
log.debug('found category: {0}'.format(name))
categories.append(name)
return categories
class PyWinUpdater(object):
def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
skipInstalled=True, skipReboot=False, skipPresent=False,
skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
log.debug('CoInitializing the pycom system')
pythoncom.CoInitialize()
# pylint: disable=invalid-name
self.skipUI = skipUI
self.skipDownloaded = skipDownloaded
self.skipInstalled = skipInstalled
self.skipReboot = skipReboot
self.skipPresent = skipPresent
self.skipHidden = skipHidden
self.skipSoftwareUpdates = skipSoftwareUpdates
self.skipDriverUpdates = skipDriverUpdates
self.categories = categories
self.foundCategories = None
# pylint: enable=invalid-name
log.debug('dispatching update_session to keep the session object.')
self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
log.debug('update_session got. Now creating a win_searcher to seek out the updates')
self.win_searcher = self.update_session.CreateUpdateSearcher()
# list of updates that are applicable by current settings.
self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# list of updates to be installed.
self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
# the object responsible for fetching the actual downloads.
self.win_downloader = self.update_session.CreateUpdateDownloader()
self.win_downloader.Updates = self.download_collection
# the object responsible for the installing of the updates.
self.win_installer = self.update_session.CreateUpdateInstaller()
self.win_installer.Updates = self.install_collection
# the results of the download process
self.download_results = None
# the results of the installation process
self.install_results = None
def Search(self, searchString):
try:
log.debug('beginning search of the passed string: %s',
searchString)
self.search_results = self.win_searcher.Search(searchString)
log.debug('search completed successfully.')
except Exception as exc:
log.info('search for updates failed. %s', exc)
return exc
log.debug('parsing results. %s updates were found.',
self.search_results.Updates.Count)
try:
for update in self.search_results.Updates:
if update.InstallationBehavior.CanRequestUserInput:
log.debug('Skipped update %s', update.title)
continue
for category in update.Categories:
if self.skipDownloaded and update.IsDownloaded:
continue
if self.categories is None or category.Name in self.categories:
self.download_collection.Add(update)
log.debug('added update %s', update.title)
self.foundCategories = _gather_update_categories(self.download_collection)
return True
except Exception as exc:
log.info('parsing updates failed. %s', exc)
return exc
def AutoSearch(self):
search_string = ''
searchParams = []
if self.skipInstalled:
searchParams.append('IsInstalled=0')
else:
searchParams.append('IsInstalled=1')
if self.skipHidden:
searchParams.append('IsHidden=0')
else:
searchParams.append('IsHidden=1')
if self.skipReboot:
searchParams.append('RebootRequired=0')
else:
searchParams.append('RebootRequired=1')
if self.skipPresent:
searchParams.append('IsPresent=0')
else:
searchParams.append('IsPresent=1')
if len(searchParams) > 1:
for i in searchParams:
search_string += '{0} and '.format(i)
else:
search_string += '{0} and '.format(searchParams[1])
if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
search_string += 'Type=\'Software\' or Type=\'Driver\''
elif not self.skipSoftwareUpdates:
search_string += 'Type=\'Software\''
elif not self.skipDriverUpdates:
search_string += 'Type=\'Driver\''
else:
return False
# if there is no type, there is nothing to search.
log.debug('generated search string: %s', search_string)
return self.Search(search_string)
def Download(self):
try:
if self.download_collection.Count != 0:
self.download_results = self.win_downloader.Download()
else:
log.debug('Skipped downloading, all updates were already cached.')
return True
except Exception as exc:
log.debug('failed in the downloading %s.', exc)
return exc
def Install(self):
try:
for update in self.search_results.Updates:
if update.IsDownloaded:
self.install_collection.Add(update)
log.debug('Updates prepared. beginning installation')
except Exception as exc:
log.info('Preparing install list failed: %s', exc)
return exc
# accept eula if not accepted
try:
for update in self.search_results.Updates:
if not update.EulaAccepted:
log.debug('Accepting EULA: %s', update.Title)
update.AcceptEula()
except Exception as exc:
log.info('Accepting Eula failed: %s', exc)
return exc
if self.install_collection.Count != 0:
log.debug('Install list created, about to install')
updates = []
try:
self.install_results = self.win_installer.Install()
log.info('Installation of updates complete')
return True
except Exception as exc:
log.info('Installation failed: %s', exc)
return exc
else:
log.info('no new updates.')
return True
def GetInstallationResults(self):
log.debug('bluger has %s updates in it', self.install_collection.Count)
updates = []
if self.install_collection.Count == 0:
return {}
for i in range(self.install_collection.Count):
updates.append('{0}: {1}'.format(
self.install_results.GetUpdateResult(i).ResultCode,
self.install_collection.Item(i).Title))
log.debug('Update results enumerated, now making a list to pass back')
results = {}
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
log.debug('Update information compiled. Returning')
return results
def GetDownloadResults(self):
updates = []
for i in range(self.download_collection.Count):
updates.append('{0}: {1}'.format(
self.download_results.GetUpdateResult(i).ResultCode,
self.download_collection.Item(i).Title))
results = {}
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
return results
def SetCategories(self, categories):
self.categories = categories
def GetCategories(self):
return self.categories
def GetAvailableCategories(self):
return self.foundCategories
def SetSkips(self, skips):
if skips:
for i in skips:
value = i[next(six.iterkeys(i))]
skip = next(six.iterkeys(i))
self.SetSkip(skip, value)
log.debug('was asked to set %s to %s', skip, value)
def SetSkip(self, skip, state):
if skip == 'UI':
self.skipUI = state
elif skip == 'downloaded':
self.skipDownloaded = state
elif skip == 'installed':
self.skipInstalled = state
elif skip == 'reboot':
self.skipReboot = state
elif skip == 'present':
self.skipPresent = state
elif skip == 'hidden':
self.skipHidden = state
elif skip == 'software':
self.skipSoftwareUpdates = state
elif skip == 'driver':
self.skipDriverUpdates = state
log.debug('new search state: \n\tUI: %s\n\tDownload: %s\n'
'\tInstalled: %s\n\treboot :%s\n\tPresent: %s\n'
'\thidden: %s\n\tsoftware: %s\n\tdriver: %s',
self.skipUI, self.skipDownloaded, self.skipInstalled,
self.skipReboot, self.skipPresent, self.skipHidden,
self.skipSoftwareUpdates, self.skipDriverUpdates)
def _search(win_updater, retries=5):
passed = False
clean = True
comment = ''
while not passed:
log.debug('Searching. tries left: %s', retries)
passed = win_updater.AutoSearch()
log.debug('Done searching: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(retries)
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, True, retries)
passed = False
if clean:
comment += 'Search was done without error.\n'
return (comment, True, retries)
def _download(win_updater, retries=5):
passed = False
clean = True
comment = ''
while not passed:
log.debug('Downloading. tries left: %s', retries)
passed = win_updater.Download()
log.debug('Done downloading: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(passed)
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(retries)
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Download was done without error.\n'
return (comment, True, retries)
def _install(win_updater, retries=5):
passed = False
clean = True
comment = ''
while not passed:
log.debug('download_collection is this long: %s',
win_updater.install_collection.Count)
log.debug('Installing. tries left: %s', retries)
passed = win_updater.Install()
log.info('Done installing: %s', passed)
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(passed)
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(retries)
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Install was done without error.\n'
return (comment, True, retries)
def installed(name, categories=None, skips=None, retries=10):
'''
Install specified windows updates.
name:
if categories is left empty, it will be assumed that you are passing the category option
through the name. These are separate because you can only have one name, but can have
multiple categories.
categories:
the list of categories to be downloaded. These are simply strings in the update's
information, so there is no enumeration of the categories available. Some known categories:
.. code-block:: text
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
skips:
a list of features of the updates to cull by. Available features:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, skipped by default (downloading)
'present' - Present on computer, included by default (installing)
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - skip those updates that have been hidden.
'software' - Software updates, included by default
'driver' - driver updates, skipped by default
retries
Number of retries to make before giving up. This is total, not per
step.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \
'removed in Salt Fluorine. Please use the \'win_wua\' ' \
'module instead.'
salt.utils.versions.warn_until('Fluorine', deprecation_msg)
ret.setdefault('warnings', []).append(deprecation_msg)
if not categories:
categories = [name]
log.debug('categories to search for are: %s', categories)
win_updater = PyWinUpdater()
win_updater.SetCategories(categories)
win_updater.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(win_updater, retries)
ret['comment'] += comment
if not passed:
ret['result'] = False
return ret
# this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(win_updater, retries)
ret['comment'] += comment
if not passed:
ret['result'] = False
return ret
# this is where we put things in their place!
comment, passed, retries = _install(win_updater, retries)
ret['comment'] += comment
if not passed:
ret['result'] = False
return ret
try:
ret['changes'] = win_updater.GetInstallationResults()
except Exception:
ret['comment'] += 'could not get results, but updates were installed.'
return ret
def downloaded(name, categories=None, skips=None, retries=10):
'''
Cache updates for later install.
name:
if categories is left empty, it will be assumed that you are passing the category option
through the name. These are separate because you can only have one name, but can have
multiple categories.
categories:
the list of categories to be downloaded. These are simply strings in the update's
information, so there is no enumeration of the categories available. Some known categories:
.. code-block:: text
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
skips:
a list of features of the updates to cull by. Available features:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, skipped by default (downloading)
'present' - Present on computer, included by default (installing)
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - skip those updates that have been hidden.
'software' - Software updates, included by default
'driver' - driver updates, skipped by default
retries
Number of retries to make before giving up. This is total, not per
step.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \
'removed in Salt Fluorine. Please use the \'win_wua\' ' \
'module instead.'
salt.utils.versions.warn_until('Fluorine', deprecation_msg)
ret.setdefault('warnings', []).append(deprecation_msg)
if not categories:
categories = [name]
log.debug('categories to search for are: %s', categories)
win_updater = PyWinUpdater()
win_updater.SetCategories(categories)
win_updater.SetSkips(skips)
# this is where we be seeking the things! yar!
comment, passed, retries = _search(win_updater, retries)
ret['comment'] += comment
if not passed:
ret['result'] = False
return ret
# Download the updates that were found
comment, passed, retries = _download(win_updater, retries)
ret['comment'] += comment
if not passed:
ret['result'] = False
return ret
try:
ret['changes'] = win_updater.GetDownloadResults()
except Exception:
ret['comment'] += 'could not get results, but updates were downloaded.'
return ret
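Both state functions above follow the same search/download(/install) flow. For reference, a minimal sketch of exercising the documented interface directly (the name, categories, skips and retry count are illustrative; in normal use these functions are invoked by the state compiler rather than called by hand):

    # Hypothetical direct call of the deprecated win_update.downloaded state
    # function; mirrors the parameters described in the docstring above.
    ret = downloaded(
        'cache-security-updates',
        categories=['Critical Updates', 'Security Updates'],
        skips=['driver'],
        retries=5,
    )
    # ret['changes'] holds the download results, ret['comment'] the progress log.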

View File

@ -193,7 +193,7 @@ def present(host, groups, interfaces, **kwargs):
host_exists = __salt__['zabbix.host_exists'](host, **connection_args)
if host_exists:
host = __salt__['zabbix.host_get'](name=host, **connection_args)[0]
host = __salt__['zabbix.host_get'](host=host, **connection_args)[0]
hostid = host['hostid']
update_proxy = False
@ -457,7 +457,7 @@ def assign_templates(host, templates, **kwargs):
ret['comment'] = comment_host_templ_notupdated
return ret
host_info = __salt__['zabbix.host_get'](name=host, **connection_args)[0]
host_info = __salt__['zabbix.host_get'](host=host, **connection_args)[0]
hostid = host_info['hostid']
if not templates:

View File

@ -21,6 +21,7 @@ import errno
import salt.crypt
import salt.utils.async
import salt.utils.event
import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
@ -144,8 +145,8 @@ if USE_LOAD_BALANCER:
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, log_queue=None):
super(LoadBalancerServer, self).__init__(log_queue=log_queue)
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
@ -159,13 +160,17 @@ if USE_LOAD_BALANCER:
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue}
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def close(self):
if self._socket is not None:
@ -1347,14 +1352,18 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
def _publish_daemon(self, log_queue=None):
def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get('log_queue_level')
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
@ -1386,11 +1395,8 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
old_umask = os.umask(0o177)
try:
with salt.utils.files.set_umask(0o177):
pull_sock.start()
finally:
os.umask(old_umask)
# run forever
try:
@ -1409,6 +1415,9 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue()
)
kwargs['log_queue_level'] = (
salt.log.setup.get_multiprocessing_logging_level()
)
process_manager.add_process(self._publish_daemon, kwargs=kwargs)

View File

@ -19,6 +19,7 @@ from random import randint
import salt.auth
import salt.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
@ -806,11 +807,8 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
old_umask = os.umask(0o177)
try:
with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:

View File

@ -142,7 +142,8 @@ def yamlify_arg(arg):
return arg
if arg.strip() == '':
# Because YAML loads empty strings as None, we return the original string
# Because YAML loads empty (or all whitespace) strings as None, we
# return the original string
# >>> import yaml
# >>> yaml.load('') is None
# True
@ -151,6 +152,9 @@ def yamlify_arg(arg):
return arg
elif '_' in arg and all([x in '0123456789_' for x in arg.strip()]):
# When the stripped string includes just digits and underscores, the
# underscores are ignored and the digits are combined together and
# loaded as an int. We don't want that, so return the original value.
return arg
try:
@ -177,6 +181,14 @@ def yamlify_arg(arg):
else:
return arg
elif isinstance(arg, list):
# lists must be wrapped in brackets
if (isinstance(original_arg, six.string_types) and
not original_arg.startswith('[')):
return original_arg
else:
return arg
elif arg is None \
or isinstance(arg, (list, float, six.integer_types, six.string_types)):
# yaml.safe_load will load '|' as '', don't let it do that.
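A quick illustration of the YAML behaviors the guards above work around; this is a sketch using PyYAML directly and is not part of the change itself:

    import yaml

    assert yaml.safe_load('') is None                      # empty strings load as None
    assert yaml.safe_load('   ') is None                   # so do all-whitespace strings
    assert yaml.safe_load('123_456') == 123456             # underscores dropped, digits merged
    assert yaml.safe_load('[foo, bar]') == ['foo', 'bar']  # brackets make a list
    assert yaml.safe_load('foo, bar') == 'foo, bar'        # without brackets it stays a string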

View File

@ -10,7 +10,6 @@ input as formatted by states.
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import os
# Import Salt libs
import salt.utils.args

View File

@ -75,6 +75,7 @@ import salt.payload
import salt.utils.async
import salt.utils.cache
import salt.utils.dicttrim
import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
@ -1018,12 +1019,9 @@ class AsyncEventPublisher(object):
)
log.info('Starting pull socket on {0}'.format(epull_uri))
old_umask = os.umask(0o177)
try:
with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
finally:
os.umask(old_umask)
def handle_publish(self, package, _):
'''
@ -1056,8 +1054,8 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
The interface that takes master events and republishes them out to anyone
who wants to listen
'''
def __init__(self, opts, log_queue=None):
super(EventPublisher, self).__init__(log_queue=log_queue)
def __init__(self, opts, **kwargs):
super(EventPublisher, self).__init__(**kwargs)
self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
self.opts.update(opts)
self._closing = False
@ -1067,11 +1065,18 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
@ -1106,8 +1111,7 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
)
# Start the master event publisher
old_umask = os.umask(0o177)
try:
with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
if (self.opts['ipc_mode'] != 'tcp' and (
@ -1115,8 +1119,6 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
self.opts['external_auth'])):
os.chmod(os.path.join(
self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
finally:
os.umask(old_umask)
# Make sure the IO loop and respective sockets are closed and
# destroyed
@ -1171,13 +1173,13 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
instance = super(EventReturn, cls).__new__(cls, *args, **kwargs)
return instance
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Initialize the EventReturn system
Return an EventReturn instance
'''
super(EventReturn, self).__init__(log_queue=log_queue)
super(EventReturn, self).__init__(**kwargs)
self.opts = opts
self.event_return_queue = self.opts['event_return_queue']
@ -1192,11 +1194,18 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
# Flush and terminate

View File

@ -11,6 +11,7 @@ import shutil
# Import salt libs
import salt.fileclient
import salt.utils.files
import salt.utils.hashutils
import salt.utils.path
import salt.utils.url
@ -71,8 +72,8 @@ def sync(opts,
remote = set()
source = salt.utils.url.create('_' + form)
mod_dir = os.path.join(opts['extension_modules'], '{0}'.format(form))
cumask = os.umask(0o77)
touched = False
with salt.utils.files.set_umask(0o077):
try:
if not os.path.isdir(mod_dir):
log.info('Creating module dir \'%s\'', mod_dir)
@ -150,6 +151,4 @@ def sync(opts,
shutil.rmtree(emptydir, ignore_errors=True)
except Exception as exc:
log.error('Failed to sync %s module: %s', form, exc)
finally:
os.umask(cumask)
return ret, touched
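For context, a rough sketch of calling the helper whose umask handling changes here; the minion config path is illustrative and error handling is omitted:

    import salt.config
    import salt.utils.extmods

    opts = salt.config.minion_config('/etc/salt/minion')
    # Pull custom execution modules from the fileserver into extension_modules,
    # creating the target directory under the 0o077 umask set above.
    ret, touched = salt.utils.extmods.sync(opts, 'modules')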

View File

@ -299,20 +299,29 @@ def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
log.trace('Write lock for %s (%s) released', path, lock_fn)
def get_umask():
'''
Returns the current umask
'''
ret = os.umask(0) # pylint: disable=blacklisted-function
os.umask(ret) # pylint: disable=blacklisted-function
return ret
@contextlib.contextmanager
def set_umask(mask):
'''
Temporarily set the umask and restore once the contextmanager exits
'''
if salt.utils.platform.is_windows():
# Don't attempt on Windows
if mask is None or salt.utils.platform.is_windows():
# Don't attempt on Windows, or if no mask was passed
yield
else:
try:
orig_mask = os.umask(mask)
orig_mask = os.umask(mask) # pylint: disable=blacklisted-function
yield
finally:
os.umask(orig_mask)
os.umask(orig_mask) # pylint: disable=blacklisted-function
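A minimal usage sketch of the helpers added here (the file path is illustrative); the previous umask is restored when the context manager exits, and passing None or running on Windows makes it a no-op:

    import salt.utils.files

    print('current umask: {0:o}'.format(salt.utils.files.get_umask()))

    with salt.utils.files.set_umask(0o077):
        # Files created in this block are not group- or world-readable.
        with salt.utils.files.fopen('/tmp/private.conf', 'w') as fh_:
            fh_.write('secret\n')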
def fopen(*args, **kwargs):

View File

@ -438,11 +438,11 @@ class CacheWorker(MultiprocessingProcess):
main-loop when refreshing minion-list
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Sets up the zmq-connection to the ConCache
'''
super(CacheWorker, self).__init__(log_queue=log_queue)
super(CacheWorker, self).__init__(**kwargs)
self.opts = opts
# __setstate__ and __getstate__ are only used on Windows.
@ -450,11 +450,18 @@ class CacheWorker(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
@ -475,11 +482,11 @@ class ConnectedCache(MultiprocessingProcess):
the master publisher port.
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
starts the timer and inits the cache itself
'''
super(ConnectedCache, self).__init__(log_queue=log_queue)
super(ConnectedCache, self).__init__(**kwargs)
log.debug('ConCache initializing...')
# the possible settings for the cache
@ -506,11 +513,18 @@ class ConnectedCache(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def signal_handler(self, sig, frame):
'''

View File

@ -81,6 +81,13 @@ class SaltNeutron(NeutronShell):
'''
Set up neutron credentials
'''
__utils__['versions.warn_until'](
'Neon',
(
'The neutron module has been deprecated and will be removed in {version}. '
'Please update to using the neutronng module'
),
)
if not HAS_NEUTRON:
return None
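A sketch of the deprecation helper used here and in the win_update states above: warn_until() emits a deprecation warning until the named release ships, after which it raises so the deprecated code cannot linger. The message below simply mirrors the one added in this change:

    import salt.utils.versions

    salt.utils.versions.warn_until(
        'Neon',
        'The neutron module has been deprecated and will be removed in {version}. '
        'Please update to using the neutronng module'
    )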

View File

@ -715,9 +715,8 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# verify the default
if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
# Logfile is not using Syslog, verify
current_umask = os.umask(0o027)
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
os.umask(current_umask)
if logfile is None:
# Use the default setting if the logfile wasn't explicity set
@ -862,7 +861,15 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
)
def _setup_mp_logging_client(self, *args): # pylint: disable=unused-argument
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
if self._setup_mp_logging_listener_:
# Set multiprocessing logging level even in non-Windows
# environments. In non-Windows environments, this setting will
# propagate from process to process via fork behavior and will be
# used by child processes if they invoke the multiprocessing
# logging client.
log.set_multiprocessing_logging_level_by_opts(self.config)
if salt.utils.platform.is_windows():
# On Windows, all logging including console and
# log file logging will go through the multiprocessing
# logging listener if it exists.
@ -872,8 +879,9 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
log.setup_multiprocessing_logging(
self._get_mp_logging_listener_queue()
)
# Remove the temp logger and any other configured loggers since all of
# our logging is going through the multiprocessing logging listener.
# Remove the temp logger and any other configured loggers since
# all of our logging is going through the multiprocessing
# logging listener.
log.shutdown_temp_logging()
log.shutdown_console_logging()
log.shutdown_logfile_logging()
@ -1411,7 +1419,7 @@ class ExecutionOptionsMixIn(six.with_metaclass(MixInMeta, object)):
nargs=2,
default=None,
metavar='<FUNC-NAME> <PROVIDER>',
help='Perform an function that may be specific to this cloud '
help='Perform a function that may be specific to this cloud '
'provider, that does not apply to an instance. This '
'argument requires a provider to be specified (i.e.: nova).'
)

View File

@ -80,7 +80,7 @@ def daemonize(redirect_out=True):
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
os.umask(18)
os.umask(0o022) # pylint: disable=blacklisted-function
# do second fork
try:
@ -376,20 +376,30 @@ class ProcessManager(object):
kwargs = {}
if salt.utils.platform.is_windows():
# Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
# Need to ensure that 'log_queue' and 'log_queue_level' is
# correctly transferred to processes that inherit from
# 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if need_log_queue:
if 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
salt.log.setup.get_multiprocessing_logging_queue()
)
if 'log_queue_level' not in kwargs:
if hasattr(self, 'log_queue_level'):
kwargs['log_queue_level'] = self.log_queue_level
else:
kwargs['log_queue_level'] = (
salt.log.setup.get_multiprocessing_logging_level()
)
# create a nicer name for the debug log
if name is None:
@ -686,8 +696,14 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
self.log_queue_level = kwargs.pop('log_queue_level', None)
if self.log_queue_level is None:
self.log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
else:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
# 'log_queue' and 'log_queue_level' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.platform.is_windows():
@ -732,6 +748,8 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
if 'log_queue_level' not in kwargs:
kwargs['log_queue_level'] = self.log_queue_level
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
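The same **kwargs / __getstate__ / __setstate__ pattern is applied to every MultiprocessingProcess subclass touched in this commit (LoadBalancerServer, EventPublisher, EventReturn, CacheWorker, ConnectedCache, Reactor). A stripped-down sketch of what a conforming subclass looks like; the class name and its opts argument are illustrative only:

    import salt.utils.process

    class ExampleWorker(salt.utils.process.MultiprocessingProcess):
        def __init__(self, opts, **kwargs):
            # Forward kwargs so log_queue and log_queue_level reach the base class.
            super(ExampleWorker, self).__init__(**kwargs)
            self.opts = opts

        # __setstate__ and __getstate__ are only used on Windows, where the
        # child process is re-created rather than forked.
        def __setstate__(self, state):
            self._is_child = True
            self.__init__(
                state['opts'],
                log_queue=state['log_queue'],
                log_queue_level=state['log_queue_level']
            )

        def __getstate__(self):
            return {
                'opts': self.opts,
                'log_queue': self.log_queue,
                'log_queue_level': self.log_queue_level
            }

        def run(self):
            pass  # real work goes here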

View File

@ -50,8 +50,8 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
'cmd': 'local',
}
def __init__(self, opts, log_queue=None):
super(Reactor, self).__init__(log_queue=log_queue)
def __init__(self, opts, **kwargs):
super(Reactor, self).__init__(**kwargs)
local_minion_opts = opts.copy()
local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts)
@ -66,11 +66,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
self._is_child = True
Reactor.__init__(
self, state['opts'],
log_queue=state['log_queue'])
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def render_reaction(self, glob_ref, tag, data):
'''

View File

@ -426,7 +426,9 @@ class Schedule(object):
# Grab run, assume True
run = data.get('run', True)
run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
if run:
if run_schedule_jobs_in_background:
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
@ -443,6 +445,8 @@ class Schedule(object):
else:
proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
proc.start()
else:
func(data)
def enable_schedule(self):
'''
@ -907,40 +911,30 @@ class Schedule(object):
'must be a dict. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
__when = self.opts['pillar']['whens'][i]
try:
when__ = dateutil_parser.parse(__when)
except ValueError:
data['_error'] = ('Invalid date string. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be dict.'
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
__when = self.opts['grains']['whens'][i]
try:
when__ = dateutil_parser.parse(__when)
except ValueError:
data['_error'] = ('Invalid date string. '
data['_error'] = ('Grain "whens" must be a dict.'
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when__ = dateutil_parser.parse(i)
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, job))
log.error(data['_error'])
return data
_when.append(when__)
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
@ -988,32 +982,21 @@ class Schedule(object):
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
_when = self.opts['pillar']['whens'][data['when']]
try:
when = dateutil_parser.parse(_when)
except ValueError:
data['_error'] = ('Invalid date string. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
when = self.opts['pillar']['whens'][data['when']]
elif ('whens' in self.opts['grains'] and
data['when'] in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'], dict):
data['_error'] = ('Grain "whens" must be dict. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
_when = self.opts['grains']['whens'][data['when']]
try:
when = dateutil_parser.parse(_when)
except ValueError:
data['_error'] = ('Invalid date string. '
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
when = self.opts['grains']['whens'][data['when']]
else:
when = data['when']
if not isinstance(when, datetime.datetime):
try:
when = dateutil_parser.parse(data['when'])
when = dateutil_parser.parse(when)
except ValueError:
data['_error'] = ('Invalid date string. '
'Ignoring job {0}.'.format(job))
@ -1142,16 +1125,20 @@ class Schedule(object):
return data
else:
if isinstance(data['skip_during_range'], dict):
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(data['skip_during_range']['start'])
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(job))
log.error(data['_error'])
return data
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(data['skip_during_range']['end'])
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
@ -1192,7 +1179,7 @@ class Schedule(object):
return data
else:
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
@ -1209,15 +1196,19 @@ class Schedule(object):
return data
else:
if isinstance(data['range'], dict):
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(data['range']['start'])
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(data['range']['end'])
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(job))
@ -1262,7 +1253,9 @@ class Schedule(object):
'Ignoring job {0}'.format(job))
log.error(data['_error'])
else:
after = dateutil_parser.parse(data['after'])
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
@ -1286,7 +1279,9 @@ class Schedule(object):
'Ignoring job {0}'.format(job))
log.error(data['_error'])
else:
until = dateutil_parser.parse(data['until'])
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
@ -1474,7 +1469,7 @@ class Schedule(object):
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data:
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:

View File

@ -254,7 +254,7 @@ def chugid_and_umask(runas, umask, group=None):
if set_runas or set_grp:
chugid(runas_user, runas_grp)
if umask is not None:
os.umask(umask)
os.umask(umask) # pylint: disable=blacklisted-function
def get_default_group(user):

View File

@ -230,12 +230,11 @@ def verify_env(
continue
if not os.path.isdir(dir_):
try:
cumask = os.umask(18) # 077
with salt.utils.files.set_umask(0o022):
os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
os.umask(cumask)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))

View File

@ -1,6 +1,6 @@
profitbricks-config:
username: ''
password: ''
username: 'foo'
password: 'bar'
datacenter_id: 74d65326-d9b7-41c3-9f51-73ffe0fcd16d
driver: profitbricks
ssh_public_key: ~/.ssh/id_rsa.pub

View File

@ -0,0 +1,38 @@
{%- set virtualenv_base = salt['runtests_helpers.get_salt_temp_dir_for_path']('virtualenv-12-base-1') -%}
{%- set virtualenv_test = salt['runtests_helpers.get_salt_temp_dir_for_path']('issue-46127-pip-env-vars') -%}
{{ virtualenv_base }}:
virtualenv.managed:
- system_site_packages: False
- distribute: True
install_older_venv_1:
pip.installed:
- name: 'virtualenv < 13.0'
- bin_env: {{ virtualenv_base }}
- require:
- virtualenv: {{ virtualenv_base }}
# For this test we need to make sure that the virtualenv used in the
# 'issue-46127-setup' pip.installed state below was created using
# virtualenv < 13.0. virtualenvs created using later versions make
# packages with custom setuptools prefixes relative to the virtualenv
# itself, which makes the use of env_vars obsolete.
# Thus, the two states above ensure that the 'base' venv has
# a version old enough to exhibit the behavior we want to test.
setup_test_virtualenv_1:
cmd.run:
- name: {{ virtualenv_base }}/bin/virtualenv {{ virtualenv_test }}
- onchanges:
- pip: install_older_venv_1
issue-46127-setup:
pip.installed:
- name: 'carbon < 1.3'
- no_deps: True
- env_vars:
PYTHONPATH: "/opt/graphite/lib/:/opt/graphite/webapp/"
- bin_env: {{ virtualenv_test }}
- require:
- cmd: setup_test_virtualenv_1

View File

@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
'''
Tests for the salt-run command
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.stringutils
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
try:
import libnacl # pylint: disable=unused-import
HAS_LIBNACL = True
except ImportError:
HAS_LIBNACL = False
@skipIf(not HAS_LIBNACL, 'skipping test_nacl, libnacl is unavailable')
class NaclTest(ModuleCase):
'''
Test the nacl runner
'''
def test_keygen(self):
'''
Test keygen
'''
# Store the data
ret = self.run_function(
'nacl.keygen',
)
self.assertIn('pk', ret)
self.assertIn('sk', ret)
def test_enc_dec(self):
'''
Generate keys, encrypt, then decrypt.
'''
# Store the data
ret = self.run_function(
'nacl.keygen',
)
self.assertIn('pk', ret)
self.assertIn('sk', ret)
pk = ret['pk']
sk = ret['sk']
unencrypted_data = salt.utils.stringutils.to_bytes('hello')
# Encrypt with pk
ret = self.run_function(
'nacl.enc',
data=unencrypted_data,
pk=pk,
)
encrypted_data = ret
# Decrypt with sk
ret = self.run_function(
'nacl.dec',
data=encrypted_data,
sk=sk,
)
self.assertEqual(unencrypted_data, ret)

View File

@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
'''
Tests for the salt-run command
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
try:
import libnacl # pylint: disable=unused-import
HAS_LIBNACL = True
except ImportError:
HAS_LIBNACL = False
@skipIf(not HAS_LIBNACL, 'skipping test_nacl, libnacl is unavailable')
class NaclTest(ShellCase):
'''
Test the nacl runner
'''
def test_keygen(self):
'''
Test keygen
'''
# Store the data
ret = self.run_run_plus(
'nacl.keygen',
)
self.assertIn('pk', ret['return'])
self.assertIn('sk', ret['return'])
def test_enc(self):
'''
Generate keys, then encrypt data with the public key.
'''
# Store the data
ret = self.run_run_plus(
'nacl.keygen',
)
self.assertIn('pk', ret['return'])
self.assertIn('sk', ret['return'])
pk = ret['return']['pk']
sk = ret['return']['sk']
unencrypted_data = 'hello'
# Encrypt with pk
ret = self.run_run_plus(
'nacl.enc',
data=unencrypted_data,
pk=pk,
)
self.assertIn('return', ret)
def test_enc_dec(self):
'''
Generate keys, encrypt, then decrypt.
'''
# Store the data
ret = self.run_run_plus(
'nacl.keygen',
)
self.assertIn('pk', ret['return'])
self.assertIn('sk', ret['return'])
pk = ret['return']['pk']
sk = ret['return']['sk']
unencrypted_data = 'hello'
# Encrypt with pk
ret = self.run_run_plus(
'nacl.enc',
data=unencrypted_data,
pk=pk,
)
self.assertIn('return', ret)
encrypted_data = ret['return']
# Decrypt with sk
ret = self.run_run_plus(
'nacl.dec',
data=encrypted_data,
sk=sk,
)
self.assertIn('return', ret)
self.assertEqual(unencrypted_data, ret['return'])
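For reference, a rough sketch of driving the same runner functions outside the test suite via RunnerClient; the master config path is illustrative and error handling is omitted:

    import salt.config
    import salt.runner

    opts = salt.config.master_config('/etc/salt/master')
    client = salt.runner.RunnerClient(opts)

    # Generate a keypair, then round-trip a message through enc/dec.
    keys = client.cmd('nacl.keygen', [])
    ciphertext = client.cmd('nacl.enc', ['hello'], kwarg={'pk': keys['pk']})
    plaintext = client.cmd('nacl.dec', [ciphertext], kwarg={'sk': keys['sk']})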

View File

@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import copy
import datetime
import logging
import os
import random
import dateutil.parser as dateutil_parser
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt Testing Libs
from tests.support.mock import MagicMock, patch
from tests.support.unit import skipIf
import tests.integration as integration
# Import Salt libs
import salt.config
import salt.utils.schedule
from salt.modules.test import ping as ping
try:
import croniter # pylint: disable=W0611
HAS_CRONITER = True
except ImportError:
HAS_CRONITER = False
log = logging.getLogger(__name__)
ROOT_DIR = os.path.join(integration.TMP, 'schedule-unit-tests')
SOCK_DIR = os.path.join(ROOT_DIR, 'test-socks')
DEFAULT_CONFIG = salt.config.minion_config(None)
DEFAULT_CONFIG['conf_dir'] = ROOT_DIR
DEFAULT_CONFIG['root_dir'] = ROOT_DIR
DEFAULT_CONFIG['sock_dir'] = SOCK_DIR
DEFAULT_CONFIG['pki_dir'] = os.path.join(ROOT_DIR, 'pki')
DEFAULT_CONFIG['cachedir'] = os.path.join(ROOT_DIR, 'cache')
class SchedulerErrorTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate error handling in the scheduler
'''
def setUp(self):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
functions = {'test.ping': ping}
self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
self.schedule.opts['loop_interval'] = 1
self.schedule.opts['grains']['whens'] = {'tea time': '11/29/2017 12:00pm'}
def tearDown(self):
self.schedule.reset()
@skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
def test_eval_cron_invalid(self):
'''
verify that a scheduled job with an invalid cron string does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'cron': '0 16 29 13 *'
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Invalid cron string. Ignoring job job1.')
def test_eval_when_invalid_date(self):
'''
verify that scheduled job does not run
and returns the right error
'''
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'when': '13/29/2017 1:00pm',
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second before the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Invalid date string. Ignoring job job1.')
def test_eval_whens_grain_not_dict(self):
'''
verify that scheduled job does not run
and returns the right error
'''
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'when': 'tea time',
}
}
}
self.schedule.opts['grains']['whens'] = ['tea time']
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second before the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Grain "whens" must be a dict. Ignoring job job1.')
def test_eval_once_invalid_datestring(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'once': '2017-13-13T13:00:00',
}
}
}
run_time = dateutil_parser.parse('12/13/2017 1:00pm')
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second at the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('Date string could not be parsed: '
'2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
'Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_invalid_date(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': {'start': '1:00pm', 'end': '25:00pm'}
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_end_before_start(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': {'start': '1:00pm', 'end': '12:00pm'}
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_not_dict(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': ['start', '1:00pm', 'end', '12:00pm']
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('schedule.handle_func: Invalid, '
'range must be specified as a '
'dictionary. Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
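For contrast with the malformed values exercised above, a well-formed job definition after this change may supply either parseable strings or datetime objects for the time-based options; the jobs below are illustrative only:

    import datetime

    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                # Accepted as either a dateutil-parseable string or a datetime object.
                'when': datetime.datetime(2017, 11, 29, 16, 0),
            },
            'job2': {
                'function': 'test.ping',
                'hours': 1,
                # A valid range: both ends parse and end is later than start.
                'skip_during_range': {'start': '1:00pm', 'end': '3:00pm'},
            }
        }
    }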

View File

@ -274,30 +274,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)
@skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
def test_eval_cron_invalid(self):
'''
verify that scheduled job runs
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'cron': '0 16 29 13 *'
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'], 'Invalid cron string. Ignoring job job1.')
@skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
def test_eval_cron_loop_interval(self):
'''
@ -325,56 +301,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)
def test_eval_when_invalid_date(self):
'''
verify that scheduled job does not run
and returns the right error
'''
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'when': '13/29/2017 1:00pm',
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second before the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'], 'Invalid date string. Ignoring job job1.')
def test_eval_once_invalid_datestring(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'once': '2017-13-13T13:00:00',
}
}
}
run_time = dateutil_parser.parse('12/13/2017 1:00pm')
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second at the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('Date string could not be parsed: '
'2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
'Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_until(self):
'''
verify that scheduled job is skipped once the current
@ -515,3 +441,32 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertNotIn('_last_run', ret)
self.assertEqual(ret['_skip_reason'], 'disabled')
def test_eval_run_on_start(self):
'''
verify that scheduled job is run when minion starts
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': '1',
'run_on_start': True
}
}
}
# Add job to schedule
self.schedule.opts.update(job)
# eval at 2:00pm, will run.
run_time = dateutil_parser.parse('11/29/2017 2:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)
# eval at 3:00pm, will run.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)

Some files were not shown because too many files have changed in this diff.