Merge branch 'lvresize' of https://github.com/Sygnia/salt-1 into lvresize

This commit is contained in:
rkissos 2018-04-09 11:14:51 +03:00
commit ac5c174096
225 changed files with 7606 additions and 4861 deletions

4
.github/stale.yml vendored
View File

@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
# 790 is approximately 2 years and 2 months
daysUntilStale: 790
# 770 is approximately 2 years and 1 month
daysUntilStale: 770
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7

View File

@ -36,7 +36,7 @@ provisioner:
require_chef: false
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: oxygen
branch: 2018.3
repo: git
testingdir: /testing
salt_copy_filter:

View File

@ -29,6 +29,25 @@ load-plugins=saltpylint.pep8,
# Don't bump this values on PyLint 1.4.0 - Know bug that ignores the passed --rcfile
jobs=1
# List of blacklisted functions and suggested replacements
#
# NOTE: This pylint check will infer the full name of the function by walking
# back up from the function name to the parent, to the parent's parent, etc.,
# and this means that functions which come from platform-specific modules need
# to be referenced using name of the module from which the function was
# imported. This happens a lot in the os and os.path modules. Functions from
# os.path should be defined using posixpath.funcname and ntpath.funcname, while
# functions from os should be defined using posix.funcname and nt.funcname.
#
# When defining a blacklisted function, the format is:
#
# <func_name>=<suggested_replacement>
#
# The replacement text will be included in the alert message.
#
blacklisted-functions=posix.umask=salt.utils.files.set_umask or get_umask,
nt.umask=salt.utils.files.set_umask or get_umask
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

View File

@ -26,7 +26,7 @@
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']

View File

@ -1157,7 +1157,7 @@
#log_level: warning
# The level of messages to send to the log file.
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
# If using 'log_granular_levels' this must be set to the highest desired level.
#log_level_logfile: warning

View File

@ -389,6 +389,16 @@
# minion event bus. The value is expressed in bytes.
#max_event_size: 1048576
# When a minion starts up it sends a notification on the event bus with a tag
# that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
# the minion also sends a similar event with an event tag like this:
# `minion_start`. This duplication can cause a lot of clutter on the event bus
# when there are many minions. Set `enable_legacy_startup_events: False` in the
# minion config to ensure only the `salt/minion/<minion_id>/start` events are
# sent. Beginning with the `Neon` Salt release this option will default to
# `False`
#enable_legacy_startup_events: True
# To detect failed master(s) and fire events on connect/disconnect, set
# master_alive_interval to the number of seconds to poll the masters for
# connection events.
@ -720,7 +730,7 @@
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']

View File

@ -543,7 +543,7 @@
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']

View File

@ -1100,7 +1100,7 @@ syndic_user: salt
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']

Binary file not shown.

Before

Width:  |  Height:  |  Size: 438 KiB

After

Width:  |  Height:  |  Size: 240 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 739 KiB

After

Width:  |  Height:  |  Size: 109 KiB

View File

@ -341,10 +341,15 @@ rst_prolog = """\
| <a href="https://repo.saltstack.com/windows/Salt-Minion-{release}-Py3-AMD64-Setup.exe.md5"><strong>md5</strong></a></p>
.. |osxdownload| raw:: html
.. |osxdownloadpy2| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg"><strong>salt-{release}-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-x86_64.pkg.md5"><strong>md5</strong></a></p>
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg"><strong>salt-{release}-py2-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py2-x86_64.pkg.md5"><strong>md5</strong></a></p>
.. |osxdownloadpy3| raw:: html
<p>x86_64: <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg"><strong>salt-{release}-py3-x86_64.pkg</strong></a>
| <a href="https://repo.saltstack.com/osx/salt-{release}-py3-x86_64.pkg.md5"><strong>md5</strong></a></p>
""".format(release=release)

View File

@ -83,8 +83,8 @@ Glossary
to the system. State module functions should be idempotent. Some
state module functions, such as :mod:`cmd.run <salt.states.cmd.run>`
are not idempotent by default but can be made idempotent with the
proper use of requisites such as :ref:```unless`` <unless-requisite>`
and :ref:```onlyif`` <onlyif-requisite>`. For more information, *see*
proper use of requisites such as :ref:`unless <unless-requisite>`
and :ref:`onlyif <onlyif-requisite>`. For more information, *see*
`wikipedia <https://en.wikipedia.org/wiki/Idempotent>`_.
Jinja

View File

@ -1438,7 +1438,7 @@ This should still be considered a less than secure option, due to the fact
that trust is based on just the requesting minion.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more infomation.
documentation for more information.
.. code-block:: yaml
@ -2212,7 +2212,7 @@ This allows the following more convenient syntax to be used:
# (this comment remains in the rendered template)
## ensure all the formula services are running
% for service in formula_services:
enable_service_{{ serivce }}:
enable_service_{{ service }}:
service.running:
name: {{ service }}
% endfor
@ -5339,8 +5339,7 @@ branch/tag.
winrepo_branch: winrepo
ext_pillar:
- git:
winrepo_remotes:
- https://mygitserver/winrepo1.git
- https://mygitserver/winrepo2.git:
- foo https://mygitserver/winrepo3.git

View File

@ -208,6 +208,28 @@ minion event bus. The value is expressed in bytes.
max_event_size: 1048576
.. conf_minion:: enable_legacy_startup_events
``enable_legacy_startup_events``
--------------------------------
.. versionadded:: Fluorine
Default: ``True``
When a minion starts up it sends a notification on the event bus with a tag
that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
the minion also sends a similar event with an event tag like this:
`minion_start`. This duplication can cause a lot of clutter on the event bus
when there are many minions. Set `enable_legacy_startup_events: False` in the
minion config to ensure only the `salt/minion/<minion_id>/start` events are
sent. Beginning with the `Neon` Salt release this option will default to
`False`
.. code-block:: yaml
enable_legacy_startup_events: True
.. conf_minion:: master_failback
``master_failback``
@ -2497,7 +2519,7 @@ The grains that should be sent to the master on authentication to decide if
the minion's key should be accepted automatically.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more infomation.
documentation for more information.
.. code-block:: yaml

View File

@ -334,6 +334,7 @@ execution modules
publish
puppet
purefa
purefb
pushbullet
pushover_notify
pw_group

View File

@ -0,0 +1,6 @@
===================
salt.modules.purefb
===================
.. automodule:: salt.modules.purefb
:members:

View File

@ -296,7 +296,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``:
SecurityGroupId:
- sg-750af413
del_root_vol_on_destroy: True
del_all_vol_on_destroy: True
del_all_vols_on_destroy: True
volumes:
- { size: 10, device: /dev/sdf }
- { size: 10, device: /dev/sdg, type: io1, iops: 1000 }

View File

@ -322,6 +322,18 @@ Optional. The path to a file to be read and submitted to Azure as user data.
How this is used depends on the operating system that is being deployed. If
used, any ``userdata`` setting will be ignored.
userdata_sendkeys
-------------
Optional. Set to ``True`` in order to generate salt minion keys and provide
them as variables to the userdata script when running it through the template
renderer. The keys can be referenced as ``{{opts['priv_key']}}`` and
``{{opts['pub_key']}}``.
userdata_template
-------------
Optional. Enter the renderer, such as ``jinja``, to be used for the userdata
script template.
wait_for_ip_timeout
-------------------
Optional. Default is ``600``. When waiting for a VM to be created, Salt Cloud

View File

@ -35,7 +35,7 @@ https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/msazure.py
The get_configured_provider() Function
--------------------------------------
This function uses ``config.is_provider_configured()`` to determine wither
This function uses ``config.is_provider_configured()`` to determine whether
all required information for this driver has been configured. The last value
in the list of required settings should be followed by a comma.

View File

@ -71,7 +71,7 @@ The generated grain information will appear similar to:
provider: my_ec2:ec2
profile: ec2-web
The generation of the salt-cloud grain can be surpressed by the
The generation of the salt-cloud grain can be suppressed by the
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
Cloud Configuration Syntax
@ -344,7 +344,35 @@ be set in the configuration file to enable interfacing with GoGrid:
OpenStack
---------
.. automodule:: salt.cloud.clouds.openstack
Using Salt for OpenStack uses the `shade <https://docs.openstack.org/shade/latest/>` driver managed by the
openstack-infra team.
This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
`os-client-config <https://docs.openstack.org/os-client-config/latest/>`
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
cloud: mycloud
Or by just configuring the same auth block directly in the cloud provider config.
.. code-block:: yaml
myopenstack:
driver: openstack
region_name: RegionOne
auth:
username: 'demo'
password: secret
project_name: 'demo'
auth_url: 'http://openstack/identity'
Both of these methods support using the
`vendor <https://docs.openstack.org/os-client-config/latest/user/vendor-support.html>`
options.
For more information, look at :mod:`Openstack Cloud Driver Docs <salt.cloud.clouds.openstack>`
DigitalOcean
------------

View File

@ -0,0 +1,5 @@
==============================
Getting Started with Openstack
==============================
.. automodule:: salt.cloud.clouds.openstack

View File

@ -1,188 +0,0 @@
==============================
Getting Started With Rackspace
==============================
Rackspace is a major public cloud platform which may be configured using either
the `openstack` driver.
Dependencies
============
* Libcloud >= 0.13.2
Configuration
=============
To use the `openstack` driver (recommended), set up the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/rackspace.conf``:
.. code-block:: yaml
my-rackspace-config:
# Set the location of the salt-master
#
minion:
master: saltmaster.example.com
# Configure Rackspace using the OpenStack plugin
#
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
compute_name: cloudServersOpenStack
protocol: ipv4
# Set the compute region:
#
compute_region: DFW
# Configure Rackspace authentication credentials
#
user: myname
tenant: 123456
apikey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
driver: openstack
.. note::
.. versionchanged:: 2015.8.0
The ``provider`` parameter in cloud provider definitions was renamed to ``driver``. This
change was made to avoid confusion with the ``provider`` parameter that is used in cloud profile
definitions. Cloud provider definitions now use ``driver`` to refer to the Salt cloud module that
provides the underlying functionality to connect to a cloud host, while cloud profiles continue
to use ``provider`` to refer to provider configurations that you define.
Compute Region
==============
Rackspace currently has six compute regions which may be used:
.. code-block:: bash
DFW -> Dallas/Forth Worth
ORD -> Chicago
SYD -> Sydney
LON -> London
IAD -> Northern Virginia
HKG -> Hong Kong
Note: Currently the LON region is only available with a UK account, and UK accounts cannot access other regions
Authentication
==============
The ``user`` is the same user as is used to log into the Rackspace Control
Panel. The ``tenant`` and ``apikey`` can be found in the API Keys area of the
Control Panel. The ``apikey`` will be labeled as API Key (and may need to be
generated), and ``tenant`` will be labeled as Cloud Account Number.
An initial profile can be configured in ``/etc/salt/cloud.profiles`` or
``/etc/salt/cloud.profiles.d/rackspace.conf``:
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
To instantiate a machine based on this profile:
.. code-block:: bash
# salt-cloud -p openstack_512 myinstance
This will create a virtual machine at Rackspace with the name ``myinstance``.
This operation may take several minutes to complete, depending on the current
load at the Rackspace data center.
Once the instance has been created with salt-minion installed, connectivity to
it can be verified with Salt:
.. code-block:: bash
# salt myinstance test.ping
RackConnect Environments
------------------------
Rackspace offers a hybrid hosting configuration option called RackConnect that
allows you to use a physical firewall appliance with your cloud servers. When
this service is in use the public_ip assigned by nova will be replaced by a NAT
ip on the firewall. For salt-cloud to work properly it must use the newly
assigned "access ip" instead of the Nova assigned public ip. You can enable that
capability by adding this to your profiles:
.. code-block:: yaml
openstack_512:
provider: my-openstack-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
rackconnect: True
Managed Cloud Environments
--------------------------
Rackspace offers a managed service level of hosting. As part of the managed
service level you have the ability to choose from base of lamp installations on
cloud server images. The post build process for both the base and the lamp
installations used Chef to install things such as the cloud monitoring agent and
the cloud backup agent. It also takes care of installing the lamp stack if
selected. In order to prevent the post installation process from stomping over
the bootstrapping you can add the below to your profiles.
.. code-block:: yaml
openstack_512:
provider: my-rackspace-config
size: 512 MB Standard
image: Ubuntu 12.04 LTS (Precise Pangolin)
managedcloud: True
First and Next Generation Images
--------------------------------
Rackspace provides two sets of virtual machine images, *first*, and *next*
generation. As of ``0.8.9`` salt-cloud will default to using the *next*
generation images. To force the use of first generation images, on the profile
configuration please add:
.. code-block:: yaml
FreeBSD-9.0-512:
provider: my-rackspace-config
size: 512 MB Standard
image: FreeBSD 9.0
force_first_gen: True
Private Subnets
---------------
By default salt-cloud will not add Rackspace private networks to new servers. To enable
a private network to a server instantiated by salt cloud, add the following section
to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)
.. code-block:: yaml
networks:
- fixed:
# This is the private network
- private-network-id
# This is Rackspace's "PublicNet"
- 00000000-0000-0000-0000-000000000000
# This is Rackspace's "ServiceNet"
- 11111111-1111-1111-1111-111111111111
To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.
The order of the networks in the above code block does not map to the order of the
ethernet devices on newly created servers. Public IP will always be first ( eth0 )
followed by servicenet ( eth1 ) and then private networks.
Enabling the private network per above gives the option of using the private subnet for
all master-minion communication, including the bootstrap install of salt-minion. To
enable the minion to use the private subnet, update the master: line in the minion:
section of the providers file. To configure the master to only listen on the private
subnet IP, update the interface: line in the /etc/salt/master file to be the private
subnet IP of the salt master.

View File

@ -125,7 +125,7 @@ to start that machine running.
The "magic packet" must be sent by an existing salt minion which is on
the same network segment as the target machine. (Or your router
must be set up especially to route WoL packets.) Your target machine
must be set up to listen for WoL and to respond appropriatly.
must be set up to listen for WoL and to respond appropriately.
You must provide the Salt node id of the machine which will send
the WoL packet \(parameter ``wol_sender_node``\), and

View File

@ -282,9 +282,9 @@ The Salt repository follows a "Merge Forward" policy. The merge-forward
behavior means that changes submitted to older main release branches will
automatically be "merged-forward" into the newer branches.
For example, a pull request is merged into ``2016.11``. Then, the entire
``2016.11`` branch is merged-forward into the ``2017.7`` branch, and the
``2017.7`` branch is merged-forward into the ``develop`` branch.
For example, a pull request is merged into ``2017.7``. Then, the entire
``2017.7`` branch is merged-forward into the ``2018.3`` branch, and the
``2018.3`` branch is merged-forward into the ``develop`` branch.
This process makes is easy for contributors to make only one pull-request
against an older branch, but allows the change to propagate to all **main**

View File

@ -8,7 +8,8 @@ Installation from the Official SaltStack Repository
===================================================
**Latest stable build from the selected branch**:
|osxdownload|
|osxdownloadpy2|
|osxdownloadpy3|
The output of ``md5 <salt pkg>`` should match the contents of the
corresponding md5 file.

View File

@ -153,7 +153,7 @@ starts at the root of the state tree or pillar.
Errors
======
Saltstack allows to raise custom errors using the ``raise`` jinja function.
Saltstack allows raising custom errors using the ``raise`` jinja function.
.. code-block:: jinja
@ -1122,7 +1122,7 @@ Returns:
'body': '{
"userId": 1,
"id": 1,
"title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
"title": "sunt aut facere repellat provident occaecati excepturi option reprehenderit",
"body": "quia et suscipit\\nsuscipit recusandae consequuntur expedita et cum\\nreprehenderit molestiae ut ut quas totam\\nnostrum rerum est autem sunt rem eveniet architecto"
}'
}

View File

@ -79,7 +79,8 @@ Mine Interval
The Salt Mine functions are executed when the Minion starts and at a given
interval by the scheduler. The default interval is every 60 minutes and can
be adjusted for the Minion via the ``mine_interval`` option:
be adjusted for the Minion via the ``mine_interval`` option in the minion
config:
.. code-block:: yaml

View File

@ -338,8 +338,8 @@ Given the above setup, the orchestration will be carried out as follows:
.. _orchestrate-runner-parsing-results-programatically:
Parsing Results Programatically
-------------------------------
Parsing Results Programmatically
--------------------------------
Orchestration jobs return output in a specific data structure. That data
structure is represented differently depending on the outputter used. With the

View File

@ -409,7 +409,7 @@ module. This module includes several functions, each of them with their own
use. These functions include:
- :py:func:`pillar.item <salt.modules.pillar.item>` - Retrieves the value of
one or more keys from the :ref:`in-memory pillar datj <pillar-in-memory>`.
one or more keys from the :ref:`in-memory pillar data <pillar-in-memory>`.
- :py:func:`pillar.items <salt.modules.pillar.items>` - Compiles a fresh pillar
dictionary and returns it, leaving the :ref:`in-memory pillar data
<pillar-in-memory>` untouched. If pillar keys are passed to this function

View File

@ -706,8 +706,8 @@ Salt will sync all custom types (by running a :mod:`saltutil.sync_all
<running-highstate>`. However, there is a chicken-and-egg issue where, on the
initial :ref:`highstate <running-highstate>`, a minion will not yet have these
custom types synced when the top file is first compiled. This can be worked
around with a simple reactor which watches for ``minion_start`` events, which
each minion fires when it first starts up and connects to the master.
around with a simple reactor which watches for ``salt/minion/*/start`` events,
which each minion fires when it first starts up and connects to the master.
On the master, create **/srv/reactor/sync_grains.sls** with the following
contents:

View File

@ -146,7 +146,7 @@ Changes:
- **PR** `#36690`_: (*rallytime*) [2016.3] Merge forward from 2015.8 to 2016.3
- **PR** `#36680`_: (*rallytime*) [2016.3] Merge forward from 2015.8 to 2016.3
- **PR** `#36659`_: (*terminalmage*) Support dynamic env in new-style git_pillar
- **PR** `#36538`_: (*clinta*) daemon-reload on call to service.avaliable
- **PR** `#36538`_: (*clinta*) daemon-reload on call to service.available
- **PR** `#36616`_: (*cro*) Zypper fix test
- **PR** `#36621`_: (*terminalmage*) Fix shadowed builtins
- **PR** `#36636`_: (*rallytime*) Back-port `#36618`_ to 2016.3

View File

@ -503,7 +503,7 @@ setting a ``port`` option under the Master's ``discovery`` configuration:
.. note::
When using a port number other than the default, the Minion's ``discovery``
configuraton must *also* have a port specified, otherwise the Minion will
configuration must *also* have a port specified, otherwise the Minion will
still attempt to contact the Master on port ``4520``.
**Minion configuration**
@ -528,7 +528,7 @@ Connection to a type instead of DNS
By now each Minion was connecting to a Master by DNS or IP address. From now on it is possible
also to connect to a _type_ of a Master. For example, in a network there are three different
Masters, each corresponds for a particular niche or environment or specific role etc. The Minion
is supposed to connect only to one of those Masters that is described approriately.
is supposed to connect only to one of those Masters that is described appropriately.
To achieve such an effect, each `/etc/salt/master` configuration should have a `discovery` option,
which should have a `mapping` element with arbitrary key/value pairs. The same configuration should
@ -665,6 +665,37 @@ The Windows installer will now display command-line help when a help switch
Salt Cloud Features
-------------------
OpenStack Revamp
================
The OpenStack Driver has been rewritten mostly from scratch. Salt is now using
the `shade driver <https://docs.openstack.org/shade/latest/>`.
With this, the ``nova`` driver is being deprecated.
:mod:`openstack driver <salt.cloud.clouds.openstack>`
There have also been several new modules and states added for managing OpenStack
setups using shade as well.
:mod:`keystone <salt.modules.keystoneng>`
:mod:`keystone role grant <salt.states.keystone_role_grant>`
:mod:`keystone group <salt.states.keystone_group>`
:mod:`keystone role <salt.states.keystone_role>`
:mod:`keystone service <salt.states.keystone_service>`
:mod:`keystone user <salt.states.keystone_user>`
:mod:`keystone domain <salt.states.keystone_domain>`
:mod:`keystone project <salt.states.keystone_project>`
:mod:`keystone endpoint <salt.states.keystone_endpoint>`
:mod:`glance <salt.modules.glanceng>`
:mod:`glance_image <salt.states.glance_image>`
:mod:`neutron <salt.modules.neutronng>`
:mod:`neutron subnet <salt.states.neutron_subnet>`
:mod:`neutron secgroup <salt.states.neutron_secgroup>`
:mod:`neutron secgroup rule <salt.states.neutron_secgroup_rule>`
:mod:`neutron network <salt.states.neutron_network>`
Pre-Flight Commands
===================
@ -701,7 +732,7 @@ The generated grain information will appear similar to:
provider: my_ec2:ec2
profile: ec2-web
The generation of salt-cloud grains can be surpressed by the
The generation of salt-cloud grains can be suppressed by the
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
Upgraded Saltify Driver
@ -766,7 +797,7 @@ Terms usable in yaml files Description
========================== ===========
classes A list of classes that will be processed in order
states A list of states that will be returned by master_tops function
pillars A yaml dictionnary that will be returned by the ext_pillar function
pillars A yaml dictionary that will be returned by the ext_pillar function
environment Node saltenv that will be used by master_tops
========================== ===========
@ -1557,6 +1588,14 @@ PyCrypto is used as it was in the previous releases. M2Crypto is used in the
same way as PyCrypto so there would be no compatibility issues, different nodes
could use different backends.
NaCL Module and Runner changes
------------------------------
In addition to argument changes in both the NaCL module and runner for future
deprecation in the Fluorine release, the default box_type has changed from
`secretbox` to `sealedbox`. SecretBox is data encrypted using private key
`sk` and Sealedbox is encrypted using public key `pk`
Deprecations
------------
@ -1617,6 +1656,15 @@ The ``win_service`` module had the following changes:
- The ``type`` option was removed from the ``create`` function. Please use
``service_type`` instead.
The ``nacl`` module had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
Runner Deprecations
===================
@ -1625,6 +1673,14 @@ The ``manage`` runner had the following changes:
- The ``root_user`` kwarg was removed from the ``bootstrap`` function. Please
use ``salt-ssh`` roster entries for the host instead.
The ``nacl`` runner had the following changes:
- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk_file`` option instead.
- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
functions. Please use the ``sk`` option instead.
State Deprecations
==================

View File

@ -3,3 +3,129 @@
======================================
Salt Release Notes - Codename Fluorine
======================================
Minion Startup Events
---------------------
When a minion starts up it sends a notification on the event bus with a tag
that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
the minion also sends a similar event with an event tag like this:
`minion_start`. This duplication can cause a lot of clutter on the event bus
when there are many minions. Set `enable_legacy_startup_events: False` in the
minion config to ensure only the `salt/minion/<minion_id>/start` events are
sent.
The new :conf_minion:`enable_legacy_startup_events` minion config option
defaults to ``True``, but will be set to default to ``False`` beginning with
the Neon release of Salt.
The Salt Syndic currently sends an old style `syndic_start` event as well. The
syndic respects :conf_minion:`enable_legacy_startup_events` as well.
Deprecations
------------
Module Deprecations
===================
The ``trafficserver`` module had the following changes:
- Support for the ``match_var`` function was removed. Please use the
``match_metric`` function instead.
- Support for the ``read_var`` function was removed. Please use the
``read_config`` function instead.
- Support for the ``set_var`` function was removed. Please use the
``set_config`` function instead.
The ``win_update`` module has been removed. It has been replaced by ``win_wua``
module.
The ``win_wua`` module had the following changes:
- Support for the ``download_update`` function has been removed. Please use the
``download`` function instead.
- Support for the ``download_updates`` function has been removed. Please use the
``download`` function instead.
- Support for the ``install_update`` function has been removed. Please use the
``install`` function instead.
- Support for the ``install_updates`` function has been removed. Please use the
``install`` function instead.
- Support for the ``list_update`` function has been removed. Please use the
``get`` function instead.
- Support for the ``list_updates`` function has been removed. Please use the
``list`` function instead.
Pillar Deprecations
===================
The ``vault`` pillar had the following changes:
- Support for the ``profile`` argument was removed. Any options passed up until
and following the first ``path=`` are discarded.
Roster Deprecations
===================
The ``cache`` roster had the following changes:
- Support for ``roster_order`` as a list or tuple has been removed. As of the
``Fluorine`` release, ``roster_order`` must be a dictionary.
- The ``roster_order`` option now includes IPv6 in addition to IPv4 for the
``private``, ``public``, ``global`` or ``local`` settings. The syntax for these
settings has changed to ``ipv4-*`` or ``ipv6-*``, respectively.
State Deprecations
==================
The ``docker`` state has been removed. The following functions should be used
instead.
- The ``docker.running`` function was removed. Please update applicable SLS files
to use the ``docker_container.running`` function instead.
- The ``docker.stopped`` function was removed. Please update applicable SLS files
to use the ``docker_container.stopped`` function instead.
- The ``docker.absent`` function was removed. Please update applicable SLS files
to use the ``docker_container.absent`` function instead.
- The ``docker.absent`` function was removed. Please update applicable SLS files
to use the ``docker_container.absent`` function instead.
- The ``docker.network_present`` function was removed. Please update applicable
SLS files to use the ``docker_network.present`` function instead.
- The ``docker.network_absent`` function was removed. Please update applicable
SLS files to use the ``docker_network.absent`` function instead.
- The ``docker.image_present`` function was removed. Please update applicable SLS
files to use the ``docker_image.present`` function instead.
- The ``docker.image_absent`` function was removed. Please update applicable SLS
files to use the ``docker_image.absent`` function instead.
- The ``docker.volume_present`` function was removed. Please update applicable SLS
files to use the ``docker_volume.present`` function instead.
- The ``docker.volume_absent`` function was removed. Please update applicable SLS
files to use the ``docker_volume.absent`` function instead.
The ``docker_network`` state had the following changes:
- Support for the ``driver`` option has been removed from the ``absent`` function.
This option had no functionality in ``docker_network.absent``.
The ``git`` state had the following changes:
- Support for the ``ref`` option in the ``detached`` state has been removed.
Please use the ``rev`` option instead.
The ``k8s`` state has been removed. The following functions should be used
instead:
- The ``k8s.label_absent`` function was removed. Please update applicable SLS
files to use the ``kubernetes.node_label_absent`` function instead.
- The ``k8s.label_present`` function was removed. Please updated applicable SLS
files to use the ``kubernetes.node_label_present`` function instead.
- The ``k8s.label_folder_absent`` function was removed. Please update applicable
SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.
The ``trafficserver`` state had the following changes:
- Support for the ``set_var`` function was removed. Please use the ``config``
function instead.
The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.

View File

@ -79,22 +79,12 @@ from the ``kevinopenstack`` profile above, you would use:
salt-call sdb.get sdb://kevinopenstack/password
Some drivers use slightly more complex URIs. For instance, the ``vault`` driver
requires the full path to where the key is stored, followed by a question mark,
followed by the key to be retrieved. If you were using a profile called
``myvault``, you would use a URI that looks like:
.. code-block:: bash
salt-call sdb.get 'sdb://myvault/secret/salt?saltstack'
Setting a value uses the same URI as would be used to retrieve it, followed
by the value as another argument. For the above ``myvault`` URI, you would set
a new value using a command like:
by the value as another argument.
.. code-block:: bash
salt-call sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-call sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
Deleting values (if supported by the driver) is done pretty much the same way as
getting them. Provided that you have a profile called ``mykvstore`` that uses
@ -109,8 +99,8 @@ the runner system:
.. code-block:: bash
salt-run sdb.get 'sdb://myvault/secret/salt?saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
salt-run sdb.get 'sdb://myvault/secret/salt/saltstack'
salt-run sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
salt-run sdb.delete 'sdb://mykvstore/foobar'

View File

@ -10,7 +10,7 @@ Slots
future releases
Many times it is useful to store the results of a command during the course of
an execution. Salt Slots are designed to allow to store this information and
an execution. Salt Slots are designed to allow you to store this information and
use it later during the :ref:`highstate <running-highstate>` or other job
execution.

View File

@ -327,14 +327,14 @@ Here's a summary of the command line options:
-U If set, fully upgrade the system prior to bootstrapping Salt
-I If set, allow insecure connections while downloading any files. For
example, pass '--no-check-certificate' to 'wget' or '--insecure' to
'curl'. On Debian and Ubuntu, using this option with -U allows to obtain
'curl'. On Debian and Ubuntu, using this option with -U allows one to obtain
GnuPG archive keys insecurely if distro has changed release signatures.
-F Allow copied files to overwrite existing (config, init.d, etc)
-K If set, keep the temporary files in the temporary directories specified
with -c and -k
-C Only run the configuration function. Implies -F (forced overwrite).
To overwrite Master or Syndic configs, -M or -S, respectively, must
also be specified. Salt installation will be ommitted, but some of the
also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-A Pass the salt-master DNS name or IP. This will be stored under
${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf

View File

@ -70,7 +70,7 @@ Mon Oct 12 08:48:25 UTC 2015 - dmacvicar@suse.de
-------------------------------------------------------------------
Mon Oct 12 08:19:45 UTC 2015 - dmacvicar@suse.de
- allow to disable docs in preparation for building
- allow one to disable docs in preparation for building
on other platforms without all dependencies.
-------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
Jinja2
msgpack-python>0.3
msgpack-python>0.3,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0

View File

@ -361,7 +361,7 @@ def groups(username, **kwargs):
[salt.utils.stringutils.to_str(_config('accountattributename')), str('cn')]) # future lint: disable=blacklisted-function
for entry, result in search_results:
for user in result[_config('accountattributename'), _config('groupattribute')]:
for user in result[_config('accountattributename')]:
if username == salt.utils.stringutils.to_unicode(user).split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])

View File

@ -9,11 +9,11 @@
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
# Import Salt libs
import salt.client.netapi
import salt.utils.files
import salt.utils.parsers as parsers
from salt.utils.verify import check_user, verify_files, verify_log
@ -42,9 +42,8 @@ class SaltAPI(parsers.SaltAPIParser):
'udp://',
'file://')):
# Logfile is not using Syslog, verify
current_umask = os.umask(0o027)
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
os.umask(current_umask)
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)

View File

@ -659,6 +659,8 @@ class SSH(object):
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(six.iteritems(ret))
if isinstance(data, six.text_type):
data = {'return': data}
if 'id' not in data:
data['id'] = id_
data['jid'] = jid # make the jid in the payload the same as the jid in the tag
@ -772,6 +774,8 @@ class SSH(object):
self.opts)
if self.event:
id_, data = next(six.iteritems(ret))
if isinstance(data, six.text_type):
data = {'return': data}
if 'id' not in data:
data['id'] = id_
data['jid'] = jid # make the jid in the payload the same as the jid in the tag
@ -1027,6 +1031,7 @@ class Single(object):
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']
opts_pkg['extension_modules'] = self.opts['extension_modules']
opts_pkg['module_dirs'] = self.opts['module_dirs']
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
opts_pkg['__master_opts__'] = self.context['master_opts']
if '_caller_cachedir' in self.opts:

View File

@ -106,9 +106,11 @@ def need_deployment():
'''
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
try:
os.makedirs(OPTIONS.saltdir)
os.umask(old_umask)
finally:
os.umask(old_umask) # pylint: disable=blacklisted-function
# Verify perms on saltdir
if not is_windows():
euid = os.geteuid()
@ -158,10 +160,10 @@ def unpack_thin(thin_path):
Unpack the Salt thin archive.
'''
tfile = tarfile.TarFile.gzopen(thin_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
except OSError:
@ -189,10 +191,10 @@ def unpack_ext(ext_path):
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
@ -299,7 +301,7 @@ def main(argv): # pylint: disable=W0613
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
@ -313,7 +315,7 @@ def main(argv): # pylint: disable=W0613
else:
subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
os.umask(old_umask) # pylint: disable=blacklisted-function
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -491,7 +491,7 @@ def request(mods=None,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -499,9 +499,10 @@ def request(mods=None,
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return ret
@ -557,7 +558,7 @@ def clear_request(name=None):
req.pop(name)
else:
return False
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
@ -565,9 +566,10 @@ def clear_request(name=None):
with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
log.error(
'Unable to write state request file %s. Check permission.',
notify_path
)
return True

View File

@ -103,8 +103,8 @@ import time
import salt.cache
import salt.config as config
import salt.loader
import salt.utils
import salt.utils.cloud
import salt.utils.files
import salt.utils.yaml
import salt.ext.six as six
import salt.version
@ -1003,7 +1003,7 @@ def request_instance(vm_):
)
if ssh_publickeyfile is not None:
try:
with salt.utils.fopen(ssh_publickeyfile, 'r') as spkc_:
with salt.utils.files.fopen(ssh_publickeyfile, 'r') as spkc_:
ssh_publickeyfile_contents = spkc_.read()
except Exception as exc:
raise SaltCloudConfigError(
@ -1219,10 +1219,30 @@ def request_instance(vm_):
if userdata_file:
if os.path.exists(userdata_file):
with salt.utils.fopen(userdata_file, 'r') as fh_:
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata and userdata_template:
userdata_sendkeys = config.get_cloud_config_value(
'userdata_sendkeys', vm_, __opts__, search_global=False, default=None
)
if userdata_sendkeys:
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
key_id = vm_.get('name')
if 'append_domain' in vm_:
key_id = '.'.join([key_id, vm_['append_domain']])
salt.utils.cloud.accept_key(
__opts__['pki_dir'], vm_['pub_key'], key_id
)
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
custom_extension = None

View File

@ -1500,6 +1500,24 @@ def _query(action=None,
hide_fields=['api_key', 'rootPass'],
opts=__opts__,
)
if 'ERRORARRAY' in result['dict']:
if len(result['dict']['ERRORARRAY']):
error_list = []
for error in result['dict']['ERRORARRAY']:
msg = error['ERRORMESSAGE']
if msg == "Authentication failed":
raise SaltCloudSystemExit(
'Linode API Key is expired or invalid'
)
else:
error_list.append(msg)
raise SaltCloudException(
'Linode API reported error(s): {}'.format(", ".join(error_list))
)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug('Linode Response Status Code: %s', result['status'])

View File

@ -264,6 +264,12 @@ def __virtual__():
if get_dependencies() is False:
return False
__utils__['versions.warn_until'](
'Neon',
'This driver has been deprecated and will be removed in the '
'{version} release of Salt. Please use the openstack driver instead.'
)
return __virtualname__

View File

@ -72,6 +72,7 @@ Or if you need to use a profile to setup some extra stuff, it can be passed as a
username: rackusername
api_key: myapikey
region_name: ORD
auth_type: rackspace_apikey
And this will pull in the profile for rackspace and setup all the correct
options for the auth_url and different api versions for services.
@ -101,6 +102,23 @@ The salt specific ones are:
This is the minimum setup required.
If metadata is set to make sure that the host has finished setting up the
`wait_for_metadata` can be set.
.. code-block:: yaml
centos:
provider: myopenstack
image: CentOS 7
size: ds1G
ssh_key_name: mykey
ssh_key_file: /root/.ssh/id_rsa
meta:
build_config: rack_user_only
wait_for_metadata:
rax_service_level_automation: Complete
rackconnect_automation_status: DEPLOYED
Anything else from the create_server_ docs can be passed through here.
- **image**: Image dict, name or ID to boot with. image is required
@ -678,12 +696,18 @@ def create(vm_):
data = request_instance(conn=conn, call='action', vm_=vm_)
log.debug('VM is now running')
def __query_node_ip(vm_):
def __query_node(vm_):
data = show_instance(vm_['name'], conn=conn, call='action')
if 'wait_for_metadata' in vm_:
for key, value in six.iteritems(vm_.get('wait_for_metadata', {})):
log.debug('Waiting for metadata: {0}={1}'.format(key, value))
if data['metadata'].get(key, None) != value:
log.debug('Metadata is not ready: {0}={1}'.format(key, data['metadata'].get(key, None)))
return False
return preferred_ip(vm_, data[ssh_interface(vm_)])
try:
ip_address = __utils__['cloud.wait_for_ip'](
__query_node_ip,
ip_address = __utils__['cloud.wait_for_fun'](
__query_node,
update_args=(vm_,)
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:

View File

@ -648,7 +648,7 @@ def _get_properties(path="", method="GET", forced_params=None):
# Browse all path elements but last
for elem in path_levels[:-1]:
search_path += '/' + elem
# Lookup for a dictionnary with path = "requested path" in list" and return its children
# Lookup for a dictionary with path = "requested path" in list" and return its children
sub = (item for item in sub if item["path"] == search_path).next()['children']
# Get leaf element in path
search_path += '/' + path_levels[-1]

View File

@ -2684,14 +2684,15 @@ def create(vm_):
non_hostname_chars = compile(r'[^\w-]')
if search(non_hostname_chars, vm_name):
hostName = split(non_hostname_chars, vm_name, maxsplit=1)[0]
domainName = split(non_hostname_chars, vm_name, maxsplit=1)[-1]
else:
hostName = vm_name
domainName = hostName.split('.', 1)[-1]
domainName = domain
if 'Windows' not in object_ref.config.guestFullName:
identity = vim.vm.customization.LinuxPrep()
identity.hostName = vim.vm.customization.FixedName(name=hostName)
identity.domain = domainName if hostName != domainName else domain
identity.domain = domainName
else:
identity = vim.vm.customization.Sysprep()
identity.guiUnattended = vim.vm.customization.GuiUnattended()

View File

@ -345,7 +345,7 @@ __usage() {
with -c and -k
-C Only run the configuration function. Implies -F (forced overwrite).
To overwrite Master or Syndic configs, -M or -S, respectively, must
also be specified. Salt installation will be ommitted, but some of the
also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-A Pass the salt-master DNS name or IP. This will be stored under
\${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf

View File

@ -252,7 +252,7 @@ VALID_OPTS = {
# Force the minion into a single environment when it fetches files from the master
'saltenv': (type(None), six.string_types),
# Prevent saltenv from being overriden on the command line
# Prevent saltenv from being overridden on the command line
'lock_saltenv': bool,
# Force the minion into a single pillar root when it fetches pillar data from the master
@ -433,6 +433,9 @@ VALID_OPTS = {
# If an event is above this size, it will be trimmed before putting it on the event bus
'max_event_size': int,
# Enable old style events to be sent on minion_startup. Change default to False in Neon release
'enable_legacy_startup_events': bool,
# Always execute states with test=True if this flag is set
'test': bool,
@ -650,10 +653,11 @@ VALID_OPTS = {
's3fs_update_interval': int,
'svnfs_update_interval': int,
'git_pillar_base': six.string_types,
'git_pillar_branch': six.string_types,
'git_pillar_env': six.string_types,
'git_pillar_root': six.string_types,
# NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
# git_pillar_root omitted here because their values could conceivably be
# loaded as non-string types, which is OK because git_pillar will normalize
# them to strings. But rather than include all the possible types they
# could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': six.string_types,
@ -665,12 +669,11 @@ VALID_OPTS = {
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
# NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
# their values could conceivably be loaded as non-string types, which is OK
# because gitfs will normalize them to strings. But rather than include all
# the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
'gitfs_mountpoint': six.string_types,
'gitfs_root': six.string_types,
'gitfs_base': six.string_types,
'gitfs_user': six.string_types,
'gitfs_password': six.string_types,
'gitfs_insecure_auth': bool,
'gitfs_privkey': six.string_types,
'gitfs_pubkey': six.string_types,
@ -885,11 +888,14 @@ VALID_OPTS = {
'winrepo_dir': six.string_types,
'winrepo_dir_ng': six.string_types,
'winrepo_cachefile': six.string_types,
# NOTE: winrepo_branch omitted here because its value could conceivably be
# loaded as a non-string type, which is OK because winrepo will normalize
# them to strings. But rather than include all the possible types it could
# be, we'll just skip type-checking.
'winrepo_cache_expire_max': int,
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
'winrepo_branch': six.string_types,
'winrepo_ssl_verify': bool,
'winrepo_user': six.string_types,
'winrepo_password': six.string_types,
@ -1360,6 +1366,7 @@ DEFAULT_MINION_OPTS = {
'log_rotate_max_bytes': 0,
'log_rotate_backup_count': 0,
'max_event_size': 1048576,
'enable_legacy_startup_events': True,
'test': False,
'ext_job_cache': '',
'cython_enable': False,
@ -1635,6 +1642,7 @@ DEFAULT_MASTER_OPTS = {
'eauth_acl_module': '',
'eauth_tokens': 'localfs',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
@ -2132,10 +2140,6 @@ def _read_conf_file(path):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = sdecode(conf_opts['id'])
for key, value in six.iteritems(conf_opts.copy()):
if isinstance(value, six.text_type) and six.PY2:
# We do not want unicode settings
conf_opts[key] = value.encode('utf-8')
return conf_opts
@ -2228,7 +2232,6 @@ def include_config(include, orig_path, verbose, exit_on_config_errors=False):
main config file.
'''
# Protect against empty option
if not include:
return {}
@ -2321,7 +2324,7 @@ def prepend_root_dir(opts, path_options):
# drive is not prefixed on a config option
pass
elif os.path.isabs(path):
# Absolute path (not default or overriden root_dir)
# Absolute path (not default or overridden root_dir)
# No prepending required
continue
# Prepending the root dir
@ -3587,7 +3590,7 @@ def get_id(opts, cache_minion_id=False):
if opts.get('minion_id_caching', True):
try:
with salt.utils.files.fopen(id_cache) as idf:
name = idf.readline().strip()
name = salt.utils.stringutils.to_unicode(idf.readline().strip())
bname = salt.utils.stringutils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1))
@ -3709,7 +3712,9 @@ def apply_minion_config(overrides=None,
)
opts['fileserver_backend'][idx] = new_val
opts['__cli'] = os.path.basename(sys.argv[0])
opts['__cli'] = salt.utils.stringutils.to_unicode(
os.path.basename(sys.argv[0])
)
# No ID provided. Will getfqdn save us?
using_ip_for_id = False
@ -3843,10 +3848,10 @@ def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_con
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False),
exit_on_config_errors=exit_on_config_errors)
overrides.update(include_config(include, path, verbose=True),
exit_on_config_errors=exit_on_config_errors)
overrides.update(include_config(default_include, path, verbose=False,
exit_on_config_errors=exit_on_config_errors))
overrides.update(include_config(include, path, verbose=True,
exit_on_config_errors=exit_on_config_errors))
opts = apply_master_config(overrides, defaults)
_validate_ssh_minion_opts(opts)
_validate_opts(opts)
@ -3895,6 +3900,10 @@ def apply_master_config(overrides=None, defaults=None):
)
opts['saltenv'] = opts['environment']
if six.PY2 and 'rest_cherrypy' in opts:
# CherryPy is not unicode-compatible
opts['rest_cherrypy'] = salt.utils.data.encode(opts['rest_cherrypy'])
for idx, val in enumerate(opts['fileserver_backend']):
if val in ('git', 'hg', 'svn', 'minion'):
new_val = val + 'fs'

View File

@ -84,8 +84,7 @@ def dropfile(cachedir, user=None):
'''
dfn = os.path.join(cachedir, '.dfn')
# set a mask (to avoid a race condition on file creation) and store original.
mask = os.umask(191)
try:
with salt.utils.files.set_umask(0o277):
log.info('Rotating AES key')
if os.path.isfile(dfn):
log.info('AES key rotation already requested')
@ -103,8 +102,6 @@ def dropfile(cachedir, user=None):
os.chown(dfn, uid, -1)
except (KeyError, ImportError, OSError, IOError):
pass
finally:
os.umask(mask) # restore original umask
def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
@ -138,17 +135,19 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
if not os.access(keydir, os.W_OK):
raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser()))
cumask = os.umask(0o277)
with salt.utils.files.set_umask(0o277):
if HAS_M2:
# if passphrase is empty or None use no cipher
if not passphrase:
gen.save_pem(priv, cipher=None)
else:
gen.save_pem(priv, cipher='des_ede3_cbc', callback=lambda x: six.b(passphrase))
gen.save_pem(
priv,
cipher='des_ede3_cbc',
callback=lambda x: salt.utils.stringutils.to_bytes(passphrase))
else:
with salt.utils.files.fopen(priv, 'wb+') as f:
f.write(gen.exportKey('PEM', passphrase))
os.umask(cumask)
if HAS_M2:
gen.save_pub_key(pub)
else:

View File

@ -76,7 +76,8 @@ def init_git_pillar(opts):
opts,
opts_dict['git'],
per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=git_pillar.PER_REMOTE_ONLY)
per_remote_only=git_pillar.PER_REMOTE_ONLY,
global_only=git_pillar.GLOBAL_ONLY)
ret.append(pillar)
except salt.exceptions.FileserverConfigError:
if opts.get('git_pillar_verify_config', True):
@ -201,10 +202,9 @@ def mk_key(opts, user):
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.files.set_umask(0o277):
with salt.utils.files.fopen(keyfile, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(key))
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.

View File

@ -69,11 +69,11 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
Execute the given engine in a new process
'''
def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
def __init__(self, opts, fun, config, funcs, runners, proxy, **kwargs):
'''
Set up the process executor
'''
super(Engine, self).__init__(log_queue=log_queue)
super(Engine, self).__init__(**kwargs)
self.opts = opts
self.config = config
self.fun = fun
@ -93,17 +93,21 @@ class Engine(SignalHandlingMultiprocessingProcess):
state['funcs'],
state['runners'],
state['proxy'],
log_queue=state['log_queue']
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'fun': self.fun,
'config': self.config,
'funcs': self.funcs,
'runners': self.runners,
'proxy': self.proxy,
'log_queue': self.log_queue}
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''

View File

@ -46,7 +46,7 @@ Example of usage
08:33:57 gtmanbot > gtmanfred: pong
08:34:02 @gtmanfred > !echo ping
08:34:02 gtmanbot > ping
08:34:17 @gtmanfred > !event test/tag/ircbot irc is usefull
08:34:17 @gtmanfred > !event test/tag/ircbot irc is useful
08:34:17 gtmanbot > gtmanfred: TaDa!
.. code-block:: text

View File

@ -267,7 +267,7 @@ class SlackClient(object):
def can_user_run(self, user, command, groups):
'''
Break out the permissions into the folowing:
Break out the permissions into the following:
Check whether a user is in any group, including whether a group has the '*' membership
@ -282,7 +282,7 @@ class SlackClient(object):
:rtype: tuple
:returns: On a successful permitting match, returns 2-element tuple that contains
the name of the group that successfuly matched, and a dictionary containing
the name of the group that successfully matched, and a dictionary containing
the configuration of the group so it can be referenced.
On failure it returns an empty tuple
@ -400,7 +400,7 @@ class SlackClient(object):
When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message
When the websocket being read from has given up all its messages, yields {'done': True} to
indicate that the caller has read all of the relevent data for now, and should continue
indicate that the caller has read all of the relevant data for now, and should continue
its own processing and check back for more data later.
This relies on the caller sleeping between checks, otherwise this could flood

View File

@ -143,8 +143,7 @@ class Client(object):
saltenv,
path)
destdir = os.path.dirname(dest)
cumask = os.umask(63)
with salt.utils.files.set_umask(0o077):
# remove destdir if it is a regular file to avoid an OSError when
# running os.makedirs below
if os.path.isfile(destdir):
@ -158,7 +157,6 @@ class Client(object):
raise
yield dest
os.umask(cumask)
def get_cachedir(self, cachedir=None):
if cachedir is None:
@ -856,12 +854,10 @@ class LocalClient(Client):
fnd = {'path': '',
'rel': ''}
if saltenv not in self.opts['file_roots']:
return fnd
if salt.utils.url.is_escaped(path):
# The path arguments are escaped
path = salt.utils.url.unescape(path)
for root in self.opts['file_roots'][saltenv]:
for root in self.opts['file_roots'].get(saltenv, []):
full = os.path.join(root, path)
if os.path.isfile(full):
fnd['path'] = full
@ -894,10 +890,8 @@ class LocalClient(Client):
with optional relative prefix path to limit directory traversal
'''
ret = []
if saltenv not in self.opts['file_roots']:
return ret
prefix = prefix.strip('/')
for path in self.opts['file_roots'][saltenv]:
for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@ -915,9 +909,7 @@ class LocalClient(Client):
'''
ret = []
prefix = prefix.strip('/')
if saltenv not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][saltenv]:
for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@ -933,10 +925,8 @@ class LocalClient(Client):
with optional relative prefix path to limit directory traversal
'''
ret = []
if saltenv not in self.opts['file_roots']:
return ret
prefix = prefix.strip('/')
for path in self.opts['file_roots'][saltenv]:
for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@ -1031,10 +1021,7 @@ class LocalClient(Client):
'''
Return the available environments
'''
ret = []
for saltenv in self.opts['file_roots']:
ret.append(saltenv)
return ret
return list(self.opts['file_roots'])
def master_tops(self):
'''

View File

@ -1927,9 +1927,9 @@ def fqdns():
fqdns.add(socket.gethostbyaddr(ip)[0])
except (socket.error, socket.herror,
socket.gaierror, socket.timeout) as e:
log.error("Exception during resolving address: " + str(e))
log.info("Exception during resolving address: " + str(e))
grains['fqdns'] = list(fqdns)
grains['fqdns'] = sorted(list(fqdns))
return grains
@ -2212,7 +2212,7 @@ def _hw_data(osdata):
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, 'r') as ifile:
grains[key] = ifile.read()
grains[key] = ifile.read().strip()
if key == 'uuid':
grains['uuid'] = grains['uuid'].lower()
except (IOError, OSError) as err:

View File

@ -1044,7 +1044,7 @@ class RaetKey(Key):
'''
Use libnacl to generate and safely save a private key
'''
import libnacl.dual # pylint: disable=3rd-party-module-not-gated
import libnacl.dual # pylint: disable=import-error,3rd-party-module-not-gated
d_key = libnacl.dual.DualSecret()
keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname,
keysize, user)
@ -1440,14 +1440,13 @@ class RaetKey(Key):
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
c_umask = os.umask(191)
with salt.utils.files.set_umask(0o277):
if os.path.exists(path):
#mode = os.stat(path).st_mode
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
with salt.utils.files.fopen(path, 'w+b') as fp_:
with salt.utils.files.fopen(path, 'w+') as fp_:
fp_.write(self.serial.dumps(keydata))
os.chmod(path, stat.S_IRUSR)
os.umask(c_umask)
def delete_local(self):
'''

View File

@ -22,6 +22,7 @@ from zipimport import zipimporter
import salt.config
import salt.syspaths
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
@ -651,7 +652,7 @@ def _load_cached_grains(opts, cfn):
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
cached_grains = serial.load(fp_)
cached_grains = salt.utils.data.decode(serial.load(fp_))
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
@ -791,7 +792,7 @@ def grains(opts, force_refresh=False, proxy=None):
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
cumask = os.umask(0o77)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
@ -813,13 +814,12 @@ def grains(opts, force_refresh=False, proxy=None):
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
os.umask(cumask)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
return grains_data
return salt.utils.data.decode(grains_data)
# TODO: get rid of? Does anyone use this? You should use raw() instead

View File

@ -117,6 +117,7 @@ __EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
__MP_LOGGING_LEVEL = GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
@ -820,6 +821,37 @@ def set_multiprocessing_logging_queue(queue):
__MP_LOGGING_QUEUE = queue
def get_multiprocessing_logging_level():
return __MP_LOGGING_LEVEL
def set_multiprocessing_logging_level(log_level):
global __MP_LOGGING_LEVEL
__MP_LOGGING_LEVEL = log_level
def set_multiprocessing_logging_level_by_opts(opts):
'''
This will set the multiprocessing logging level to the lowest
logging level of all the types of logging that are configured.
'''
global __MP_LOGGING_LEVEL
log_levels = []
log_levels.append(
LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR)
)
log_levels.append(
LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
)
for level in six.itervalues(opts.get('log_granular_levels', {})):
log_levels.append(
LOG_LEVELS.get(level.lower(), logging.ERROR)
)
__MP_LOGGING_LEVEL = min(log_levels)
def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
@ -883,11 +915,13 @@ def setup_multiprocessing_logging(queue=None):
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
# Set the logging root level to the lowest to get all messages
logging.root.setLevel(logging.GARBAGE)
# Set the logging root level to the lowest needed level to get all
# desired messages.
log_level = get_multiprocessing_logging_level()
logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
'under PID: %s', os.getpid()
'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal

View File

@ -139,13 +139,13 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, log_queue=None):
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(log_queue=log_queue)
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
@ -159,11 +159,18 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], log_queue=state['log_queue'])
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue}
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
@ -578,9 +585,8 @@ class Master(SMaster):
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
prev_umask = os.umask(0o077)
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
os.umask(prev_umask)
except OSError:
pass
@ -601,7 +607,8 @@ class Master(SMaster):
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
@ -708,6 +715,7 @@ class Master(SMaster):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
@ -757,13 +765,13 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, log_queue=None):
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(log_queue=log_queue)
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
@ -771,11 +779,18 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['hopts'], log_queue=state['log_queue'])
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'hopts': self.hopts,
'log_queue': self.log_queue}
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
@ -790,7 +805,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
@ -801,7 +816,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(log_queue=log_queue)
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
@ -813,15 +828,24 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(state['opts'], state['key'], state['mkey'],
log_queue=state['log_queue'], secrets=state['secrets'])
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'secrets': self.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
@ -833,6 +857,8 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
@ -863,6 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
@ -944,7 +971,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(log_queue=state['log_queue'])
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
@ -953,13 +983,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'secrets': SMaster.secrets}
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):

View File

@ -2054,6 +2054,8 @@ class Minion(MinionBase):
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
@ -2061,7 +2063,7 @@ class Minion(MinionBase):
),
'minion_start'
)
# dup name spaced event
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
@ -2602,7 +2604,7 @@ class Minion(MinionBase):
def ping_master():
try:
def ping_timeout_handler(*_):
if not self.opts.get('auth_safemode', True):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
@ -2756,6 +2758,8 @@ class Syndic(Minion):
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],

View File

@ -1084,7 +1084,7 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
<value> seconds
download_only
Only donwload the packages, don't unpack or install them
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0

View File

@ -1077,8 +1077,7 @@ def unzip(zip_file,
if not salt.utils.platform.is_windows():
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
umask_ = os.umask(0)
os.umask(umask_)
umask_ = salt.utils.files.get_umask()
if target.endswith('/'):
perm = 0o777 & ~umask_
else:

View File

@ -829,6 +829,7 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
while True:
try:
asgs = conn.get_all_groups(names=[name])
break
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
log.debug('Throttled by AWS API, retrying in 5 seconds...')

View File

@ -657,31 +657,25 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
'''
retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
while retries:
try:
filter_parameters = {'filters': {}}
if image_ids:
filter_parameters['image_ids'] = [image_ids]
if executable_by:
filter_parameters['executable_by'] = [executable_by]
if owners:
filter_parameters['owners'] = [owners]
if ami_name:
filter_parameters['filters']['name'] = ami_name
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria %s matched the following '
'images:%s', filter_parameters, images)
if images:
if return_objs:
return images
@ -689,7 +683,13 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
else:
return False
except boto.exception.BotoServerError as exc:
log.error(exc)
if exc.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False
return False

View File

@ -12,6 +12,7 @@ import functools
import glob
import logging
import os
import platform
import shutil
import subprocess
import sys
@ -3334,6 +3335,11 @@ def powershell(cmd,
python_shell = True
# Append PowerShell Object formatting
# ConvertTo-JSON is only available on Versions of Windows greater than
# `7.1.7600`. We have to use `platform.version` instead of `__grains__` here
# because this function is called by `salt/grains/core.py` before
# `__grains__` is populated
if salt.utils.versions.version_cmp(platform.version(), '7.1.7600') == 1:
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
@ -3353,7 +3359,7 @@ def powershell(cmd,
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = 'try {' + cmd + '} catch { "{}" | ConvertTo-JSON}'
cmd = 'try {' + cmd + '} catch { "{}" }'
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
@ -3425,10 +3431,10 @@ def powershell_all(cmd,
empty Powershell output (which would result in an exception). Instead we
treat this as a special case and one of two things will happen:
- If the value of the ``force_list`` paramater is ``True``, then the
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field of the return dictionary will be an empty list.
- If the value of the ``force_list`` paramater is ``False``, then the
- If the value of the ``force_list`` parameter is ``False``, then the
return dictionary **will not have a result key added to it**. We aren't
setting ``result`` to ``None`` in this case, because ``None`` is the
Python representation of "null" in JSON. (We likewise can't use ``False``
@ -3441,20 +3447,20 @@ def powershell_all(cmd,
content, and the type of the resulting Python object is other than ``list``
then one of two things will happen:
- If the value of the ``force_list`` paramater is ``True``, then the
- If the value of the ``force_list`` parameter is ``True``, then the
``result`` field will be a singleton list with the Python object as its
sole member.
- If the value of the ``force_list`` paramater is ``False``, then the value
- If the value of the ``force_list`` parameter is ``False``, then the value
of ``result`` will be the unmodified Python object.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is ``list``, then the
value of ``result`` will be the unmodified Python object. The
``force_list`` paramater has no effect in this case.
``force_list`` parameter has no effect in this case.
.. note::
An example of why the ``force_list`` paramater is useful is as
An example of why the ``force_list`` parameter is useful is as
follows: The Powershell command ``dir x | Convert-ToJson`` results in
- no output when x is an empty directory.
@ -3602,7 +3608,7 @@ def powershell_all(cmd,
where characters may be dropped or incorrectly converted when executed.
Default is False.
:param bool force_list: The purpose of this paramater is described in the
:param bool force_list: The purpose of this parameter is described in the
preamble of this function's documentation. Default value is False.
:param list success_retcodes: This parameter will be allow a list of

View File

@ -73,7 +73,8 @@ def _run_composer(action,
no_dev=None,
quiet=False,
composer_home='/root',
extra_flags=None):
extra_flags=None,
env=None):
'''
Run PHP's composer with a specific action.
@ -126,6 +127,9 @@ def _run_composer(action,
extra_flags
None, or a string containing extra flags to pass to composer.
env
A list of environment variables to be set prior to execution.
'''
if composer is not None:
if php is None:
@ -185,9 +189,15 @@ def _run_composer(action,
if optimize is True:
cmd.append('--optimize-autoloader')
if env is not None:
env = salt.utils.data.repack_dictlist(env)
env['COMPOSER_HOME'] = composer_home
else:
env = {'COMPOSER_HOME': composer_home}
result = __salt__['cmd.run_all'](cmd,
runas=runas,
env={'COMPOSER_HOME': composer_home},
env=env,
python_shell=False)
if result['retcode'] != 0:
@ -210,7 +220,8 @@ def install(directory,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
composer_home='/root',
env=None):
'''
Install composer dependencies for a directory.
@ -257,6 +268,9 @@ def install(directory,
composer_home
$COMPOSER_HOME environment variable
env
A list of environment variables to be set prior to execution.
CLI Example:
.. code-block:: bash
@ -278,7 +292,8 @@ def install(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home)
composer_home=composer_home,
env=env)
return result
@ -293,7 +308,8 @@ def update(directory,
optimize=None,
no_dev=None,
quiet=False,
composer_home='/root'):
composer_home='/root',
env=None):
'''
Update composer dependencies for a directory.
@ -343,6 +359,9 @@ def update(directory,
composer_home
$COMPOSER_HOME environment variable
env
A list of environment variables to be set prior to execution.
CLI Example:
.. code-block:: bash
@ -365,7 +384,8 @@ def update(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
composer_home=composer_home)
composer_home=composer_home,
env=env)
return result

View File

@ -589,6 +589,15 @@ def _scrub_links(links, name):
return ret
def _ulimit_sort(ulimit_val):
if isinstance(ulimit_val, list):
return sorted(ulimit_val,
key=lambda x: (x.get('Name'),
x.get('Hard', 0),
x.get('Soft', 0)))
return ulimit_val
def _size_fmt(num):
'''
Format bytes as human-readable file sizes
@ -932,6 +941,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if item == 'Ulimits':
val1 = _ulimit_sort(val1)
val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container
@ -955,6 +967,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if item == 'Ulimits':
val1 = _ulimit_sort(val1)
val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret

View File

@ -2433,8 +2433,7 @@ def blockreplace(path,
backup='.bak',
dry_run=False,
show_changes=True,
append_newline=False,
):
append_newline=False):
'''
.. versionadded:: 2014.1.0
@ -2481,18 +2480,30 @@ def blockreplace(path,
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run
Don't make any edits to the file.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes
Output a unified diff of the old file and the new file. If ``False``,
return a boolean if any changes were made.
show_changes : True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline:
Append a newline to the content block. For more information see:
https://github.com/saltstack/salt/issues/33686
append_newline : False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: Fluorine
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
@ -2502,87 +2513,137 @@ def blockreplace(path,
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
'Only one of append and prepend_if_not_found is permitted'
)
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
if not __utils__['files.is_text'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
# Search the file; track if any changes have been made for the return val
if append_newline is None and not content.endswith((os.linesep, '\n')):
append_newline = True
# Split the content into a list of lines, removing newline characters. To
# ensure that we handle both Windows and POSIX newlines, first split on
# Windows newlines, and then split on POSIX newlines.
split_content = []
for win_line in content.split('\r\n'):
for content_line in win_line.split('\n'):
split_content.append(content_line)
line_count = len(split_content)
has_changes = False
orig_file = []
new_file = []
in_block = False
old_content = ''
done = False
# we do not use in_place editing to avoid file attrs modifications when
block_found = False
linesep = None
def _add_content(linesep, lines=None, include_marker_start=True,
end_line=None):
if lines is None:
lines = []
include_marker_start = True
if end_line is None:
end_line = marker_end
end_line = end_line.rstrip('\r\n') + linesep
if include_marker_start:
lines.append(marker_start + linesep)
if split_content:
for index, content_line in enumerate(split_content, 1):
if index != line_count:
lines.append(content_line + linesep)
else:
# We're on the last line of the content block
if append_newline:
lines.append(content_line + linesep)
lines.append(end_line)
else:
lines.append(content_line + end_line)
else:
lines.append(end_line)
return lines
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
# we could also use salt.utils.filebuffer.BufferedReader
#
# We could also use salt.utils.filebuffer.BufferedReader
try:
fi_file = fileinput.input(path,
inplace=False, backup=False,
bufsize=1, mode='rb')
for line in fi_file:
fi_file = fileinput.input(
path,
inplace=False,
backup=False,
bufsize=1,
mode='rb')
for line in fi_file:
line = salt.utils.stringutils.to_unicode(line)
result = line
write_line_to_new_file = True
if linesep is None:
# Auto-detect line separator
if line.endswith('\r\n'):
linesep = '\r\n'
elif line.endswith('\n'):
linesep = '\n'
else:
# No newline(s) in file, fall back to system's linesep
linesep = os.linesep
if marker_start in line:
# managed block start found, start recording
# We've entered the content block
in_block = True
else:
if in_block:
if marker_end in line:
# end of block detected
# We're not going to write the lines from the old file to
# the new file until we have exited the block.
write_line_to_new_file = False
marker_end_pos = line.find(marker_end)
if marker_end_pos != -1:
# End of block detected
in_block = False
# We've found and exited the block
block_found = True
# Handle situations where there may be multiple types
# of line endings in the same file. Separate the content
# into lines. Account for Windows-style line endings
# using os.linesep, then by linux-style line endings
# using '\n'
split_content = []
for linesep_line in content.split(os.linesep):
for content_line in linesep_line.split('\n'):
split_content.append(content_line)
# Trim any trailing new lines to avoid unwanted
# additional new lines
while not split_content[-1]:
split_content.pop()
# push new block content in file
for content_line in split_content:
new_file.append(content_line + os.linesep)
done = True
else:
# remove old content, but keep a trace
old_content += line
result = None
# else: we are not in the marked block, keep saving things
_add_content(linesep, lines=new_file,
include_marker_start=False,
end_line=line[marker_end_pos:])
# Save the line from the original file
orig_file.append(line)
if result is not None:
new_file.append(result)
# end for. If we are here without block management we maybe have some problems,
# or we need to initialise the marked block
if write_line_to_new_file:
new_file.append(line)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read from {0}: {1}'.format(path, exc)
)
finally:
if linesep is None:
# If the file was empty, we will not have set linesep yet. Assume
# the system's line separator. This is needed for when we
# prepend/append later on.
linesep = os.linesep
try:
fi_file.close()
except Exception:
pass
if in_block:
# unterminated block => bad, always fail
@ -2590,35 +2651,27 @@ def blockreplace(path,
'Unterminated marked block. End of file reached before marker_end.'
)
if not done:
if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
new_file.insert(0, marker_end + os.linesep)
if append_newline is True:
new_file.insert(0, content + os.linesep)
else:
new_file.insert(0, content)
new_file.insert(0, marker_start + os.linesep)
done = True
prepended_content = _add_content(linesep)
prepended_content.extend(new_file)
new_file = prepended_content
block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith(os.linesep):
new_file[-1] += os.linesep
if not new_file[-1].endswith(linesep):
new_file[-1] += linesep
# add the markers and content at the end of file
new_file.append(marker_start + os.linesep)
if append_newline is True:
new_file.append(content + os.linesep)
else:
new_file.append(content)
new_file.append(marker_end + os.linesep)
done = True
_add_content(linesep, lines=new_file)
block_found = True
else:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
if done:
if block_found:
diff = ''.join(difflib.unified_diff(orig_file, new_file))
has_changes = diff is not ''
if has_changes and not dry_run:
@ -5361,9 +5414,7 @@ def manage_file(name,
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
with salt.utils.files.set_umask(0o077 if mode else None):
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
@ -5383,9 +5434,6 @@ def manage_file(name,
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
@ -5418,8 +5466,7 @@ def manage_file(name,
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)

View File

@ -39,9 +39,9 @@ def __virtual__():
'only available on Gentoo/Open-RC systems.')
def _ret_code(cmd):
def _ret_code(cmd, ignore_retcode=False):
log.debug('executing [{0}]'.format(cmd))
sts = __salt__['cmd.retcode'](cmd, python_shell=False)
sts = __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=ignore_retcode)
return sts
@ -270,7 +270,7 @@ def status(name, sig=None):
results = {}
for service in services:
cmd = _service_cmd(service, 'status')
results[service] = not _ret_code(cmd)
results[service] = not _ret_code(cmd, ignore_retcode=True)
if contains_globbing:
return results
return results[name]

File diff suppressed because it is too large Load Diff

View File

@ -103,6 +103,13 @@ def _auth(profile=None, api_version=2, **connection_args):
Only intended to be used within glance-enabled modules
'''
__utils__['versions.warn_until'](
'Neon',
(
'The glance module has been deprecated and will be removed in {version}. '
'Please update to using the glanceng module'
),
)
if profile:
prefix = profile + ":keystone."

View File

@ -88,6 +88,30 @@ def dfs(command=None, *args):
return 'Error: command must be provided'
def dfsadmin_report(arg=None):
    '''
    .. versionadded:: Fluorine

    Reports basic filesystem information and statistics. Optional flags may
    be used to filter the list of displayed DataNodes.

    arg
        [live] [dead] [decommissioning]

    CLI Example:

    .. code-block:: bash

        salt '*' hadoop.dfsadmin_report
    '''
    # No filter requested: report on all DataNodes
    if arg is None:
        return _hadoop_cmd('dfsadmin', 'report')
    if arg in ('live', 'dead', 'decommissioning'):
        return _hadoop_cmd('dfsadmin', 'report', arg)
    return "Error: the arg is wrong, it must be in ['live', 'dead', 'decommissioning']"
def dfs_present(path):
'''
Check if a file or directory is present on the distributed FS.

View File

@ -178,7 +178,7 @@ If you wish to customize the document format:
- mode: '0640'
Some `replace_text_regex` values that might be helpfull.
Some `replace_text_regex` values that might be helpful.
## CERTS
'-----BEGIN RSA PRIVATE KEY-----[\r\n\t\f\S]{0,2200}': 'XXXXXXX'

View File

@ -18,7 +18,10 @@ def query(url, **kwargs):
'''
Query a resource, and decode the return data
.. versionadded:: 2015.5.0
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:

View File

@ -367,7 +367,7 @@ def get_host_mac(name=None, allow_array=False, **api_opts):
'''
Get mac address from host record.
Use `allow_array` to return possible mutiple values.
Use `allow_array` to return possible multiple values.
CLI Example:
@ -390,7 +390,7 @@ def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts):
'''
Get ipv4 address from host record.
Use `allow_array` to return possible mutiple values.
Use `allow_array` to return possible multiple values.
CLI Example:
@ -446,7 +446,7 @@ def get_host_ipv6addr_info(ipv6addr=None, mac=None,
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks.
This is helpfull when looking up subnets to
This is helpful when looking up subnets to
use with func:nextavailableip
    This call is often slow and not cached!

View File

@ -513,7 +513,7 @@ if __name__ == '__main__':
sys.exit(1)
os.setsid()
os.umask(0)
os.umask(0o000) # pylint: disable=blacklisted-function
try:
pid = os.fork()

View File

@ -1093,6 +1093,8 @@ def _parser():
add_arg('--ahres', dest='ahres', action='append')
## bpf
add_arg('--bytecode', dest='bytecode', action='append')
## cgroup
add_arg('--cgroup', dest='cgroup', action='append')
## cluster
add_arg('--cluster-total-nodes',
dest='cluster-total-nodes',

View File

@ -6,6 +6,8 @@ Kapacitor execution module.
parameters or as configuration settings in /etc/salt/minion on the relevant
minions::
kapacitor.unsafe_ssl: 'false'
kapacitor.protocol: 'http'
kapacitor.host: 'localhost'
kapacitor.port: 9092
@ -40,6 +42,17 @@ def version():
return version
def _get_url():
    '''
    Get the kapacitor URL.

    Built from the ``kapacitor.protocol``, ``kapacitor.host`` and
    ``kapacitor.port`` config options (defaults: http, localhost, 9092).
    '''
    option = __salt__['config.option']
    return '{0}://{1}:{2}'.format(
        option('kapacitor.protocol', 'http'),
        option('kapacitor.host', 'localhost'),
        option('kapacitor.port', 9092),
    )
def get_task(name):
'''
Get a dict of data on a task.
@ -53,15 +66,14 @@ def get_task(name):
salt '*' kapacitor.get_task cpu
'''
host = __salt__['config.option']('kapacitor.host', 'localhost')
port = __salt__['config.option']('kapacitor.port', 9092)
url = _get_url()
if version() < '0.13':
url = 'http://{0}:{1}/task?name={2}'.format(host, port, name)
task_url = '{0}/task?name={1}'.format(url, name)
else:
url = 'http://{0}:{1}/kapacitor/v1/tasks/{2}?skip-format=true'.format(host, port, name)
task_url = '{0}/kapacitor/v1/tasks/{1}?skip-format=true'.format(url, name)
response = salt.utils.http.query(url, status=True)
response = salt.utils.http.query(task_url, status=True)
if response['status'] == 404:
return None
@ -89,7 +101,11 @@ def _run_cmd(cmd):
Run a Kapacitor task and return a dictionary of info.
'''
ret = {}
result = __salt__['cmd.run_all'](cmd)
env_vars = {
'KAPACITOR_URL': _get_url(),
'KAPACITOR_UNSAFE_SSL': __salt__['config.option']('kapacitor.unsafe_ssl', 'false'),
}
result = __salt__['cmd.run_all'](cmd, env=env_vars)
if result.get('stdout'):
ret['stdout'] = result['stdout']
@ -104,7 +120,8 @@ def define_task(name,
tick_script,
task_type='stream',
database=None,
retention_policy='default'):
retention_policy='default',
dbrps=None):
'''
Define a task. Serves as both create/update.
@ -117,6 +134,13 @@ def define_task(name,
task_type
Task type. Defaults to 'stream'
dbrps
A list of databases and retention policies in "dbname"."rpname" format
to fetch data from. For backward compatibility, the value of
'database' and 'retention_policy' will be merged as part of dbrps.
.. versionadded:: Fluorine
database
Which database to fetch data from. Defaults to None, which will use the
default database in InfluxDB.
@ -143,8 +167,16 @@ def define_task(name,
if task_type:
cmd += ' -type {0}'.format(task_type)
if not dbrps:
dbrps = []
if database and retention_policy:
cmd += ' -dbrp {0}.{1}'.format(database, retention_policy)
dbrp = '{0}.{1}'.format(database, retention_policy)
dbrps.append(dbrp)
if dbrps:
for dbrp in dbrps:
cmd += ' -dbrp {0}'.format(dbrp)
return _run_cmd(cmd)

View File

@ -189,7 +189,7 @@ def upgrade(reboot=False, at_time=None):
def upgrade_available():
'''
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is avaliable, False otherwise.
Returns True if a new kernel is available, False otherwise.
CLI Example:

View File

@ -182,7 +182,7 @@ def upgrade(reboot=False, at_time=None):
def upgrade_available():
'''
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is avaliable, False otherwise.
Returns True if a new kernel is available, False otherwise.
CLI Example:

View File

@ -163,6 +163,13 @@ def auth(profile=None, **connection_args):
salt '*' keystone.auth
'''
__utils__['versions.warn_until'](
'Neon',
(
'The keystone module has been deprecated and will be removed in {version}. '
'Please update to using the keystoneng module',
),
)
kwargs = _get_kwargs(profile=profile, **connection_args)
disc = discover.Discover(auth_url=kwargs['auth_url'])

View File

@ -6,32 +6,42 @@ Module for handling kubernetes calls.
:configuration: The k8s API settings are provided either in a pillar, in
the minion's config file, or in master's config file::
kubernetes.user: admin
kubernetes.password: verybadpass
kubernetes.api_url: 'http://127.0.0.1:8080'
kubernetes.certificate-authority-data: '...'
kubernetes.client-certificate-data: '...'
kubernetes.client-key-data: '...'
kubernetes.certificate-authority-file: '/path/to/ca.crt'
kubernetes.client-certificate-file: '/path/to/client.crt'
kubernetes.client-key-file: '/path/to/client.key'
kubernetes.kubeconfig: '/path/to/kubeconfig'
kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content>'
kubernetes.context: 'context'
These settings can be overridden by adding `context` and `kubeconfig` or
`kubeconfig_data` parameters when calling a function.
These settings can be also overrided by adding `api_url`, `api_user`,
`api_password`, `api_certificate_authority_file`, `api_client_certificate_file`
or `api_client_key_file` parameters when calling a function:
The data format for `kubernetes.kubeconfig-data` value is the content of
`kubeconfig` base64 encoded in one line.
The data format for `kubernetes.*-data` values is the same as provided in `kubeconfig`.
It's base64 encoded certificates/keys in one line.
For an item only one field should be provided. Either a `data` or a `file` entry.
In case both are provided the `file` entry is preferred.
Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are
provided `kubeconfig` entry is preferred.
.. code-block:: bash
salt '*' kubernetes.nodes api_url=http://k8s-api-server:port api_user=myuser api_password=pass
salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
.. versionadded: 2017.7.0
.. versionchanged:: Fluorine
.. warning::
Configuration options changed in Fluorine. The following configuration options have been removed:
- kubernetes.user
- kubernetes.password
- kubernetes.api_url
- kubernetes.certificate-authority-data/file
- kubernetes.client-certificate-data/file
- kubernetes.client-key-data/file
Please use now:
- kubernetes.kubeconfig or kubernetes.kubeconfig-data
- kubernetes.context
'''
# Import Python Futures
@ -39,6 +49,7 @@ from __future__ import absolute_import, unicode_literals, print_function
import sys
import os.path
import base64
import errno
import logging
import tempfile
import signal
@ -49,7 +60,9 @@ from salt.exceptions import CommandExecutionError
from salt.ext.six import iteritems
from salt.ext import six
import salt.utils.files
import salt.utils.platform
import salt.utils.templates
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import TimeoutError
from salt.ext.six.moves import range # pylint: disable=import-error
@ -101,10 +114,9 @@ if not salt.utils.platform.is_windows():
POLLING_TIME_LIMIT = 30
# pylint: disable=no-member
def _setup_conn(**kwargs):
def _setup_conn_old(**kwargs):
'''
Setup kubernetes API connection singleton
Setup kubernetes API connection singleton the old way
'''
host = __salt__['config.option']('kubernetes.api_url',
'http://localhost:8080')
@ -146,9 +158,6 @@ def _setup_conn(**kwargs):
kubernetes.client.configuration.host = host
kubernetes.client.configuration.user = username
kubernetes.client.configuration.passwd = password
if __salt__['config.option']('kubernetes.api_key'):
kubernetes.client.configuration.api_key = {'authorization': __salt__['config.option']('kubernetes.api_key')}
kubernetes.client.configuration.api_key_prefix = {'authorization': __salt__['config.option']('kubernetes.api_key_prefix')}
if ca_cert_file:
kubernetes.client.configuration.ssl_ca_cert = ca_cert_file
@ -176,12 +185,42 @@ def _setup_conn(**kwargs):
kubernetes.client.configuration.key_file = k.name
else:
kubernetes.client.configuration.key_file = None
return {}
# pylint: disable=no-member
def _setup_conn(**kwargs):
'''
Setup kubernetes API connection singleton
'''
kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')
kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')
context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')
if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):
with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:
kcfg.write(base64.b64decode(kubeconfig_data))
kubeconfig = kcfg.name
if not (kubeconfig and context):
if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):
salt.utils.versions.warn_until('Sodium',
'Kubernetes configuration via url, certificate, username and password will be removed in Sodium. '
'Use \'kubeconfig\' and \'context\' instead.')
try:
return _setup_conn_old(**kwargs)
except Exception:
raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')
else:
raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.')
kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)
# The return makes unit testing easier
return vars(kubernetes.client.configuration)
return {'kubeconfig': kubeconfig, 'context': context}
def _cleanup(**kwargs):
def _cleanup_old(**kwargs):
try:
ca = kubernetes.client.configuration.ssl_ca_cert
cert = kubernetes.client.configuration.cert_file
key = kubernetes.client.configuration.key_file
@ -191,6 +230,22 @@ def _cleanup(**kwargs):
salt.utils.files.safe_rm(key)
if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'):
salt.utils.files.safe_rm(ca)
except Exception:
pass
def _cleanup(**kwargs):
    '''
    Remove any temporary kubeconfig file created by _setup_conn.

    kwargs is expected to be the dict returned by _setup_conn(). When it is
    empty, the connection was configured the legacy way, so defer to
    _cleanup_old() to remove the temporary cert/key files instead.
    '''
    if not kwargs:
        # Legacy (url/cert/user) configuration path — see _cleanup_old.
        return _cleanup_old(**kwargs)

    if 'kubeconfig' in kwargs:
        kubeconfig = kwargs.get('kubeconfig')
        # Only delete files we created ourselves: _setup_conn writes the
        # decoded kubeconfig-data to a NamedTemporaryFile with the
        # 'salt-kubeconfig-' prefix. A user-supplied kubeconfig path must
        # never be removed.
        if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):
            try:
                os.unlink(kubeconfig)
            except (IOError, OSError) as err:
                # Already gone is fine; anything else is logged (not raised)
                # because cleanup runs in finally blocks.
                if err.errno != errno.ENOENT:
                    log.exception(err)
def ping(**kwargs):
@ -217,9 +272,9 @@ def nodes(**kwargs):
CLI Examples::
salt '*' kubernetes.nodes
salt '*' kubernetes.nodes api_url=http://myhost:port api_user=my-user
salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_node()
@ -232,7 +287,7 @@ def nodes(**kwargs):
log.exception('Exception when calling CoreV1Api->list_node')
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def node(name, **kwargs):
@ -243,7 +298,7 @@ def node(name, **kwargs):
salt '*' kubernetes.node name='minikube'
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_node()
@ -254,7 +309,7 @@ def node(name, **kwargs):
log.exception('Exception when calling CoreV1Api->list_node')
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
for k8s_node in api_response.items:
if k8s_node.metadata.name == name:
@ -290,7 +345,7 @@ def node_add_label(node_name, label_name, label_value, **kwargs):
salt '*' kubernetes.node_add_label node_name="minikube" \
label_name="foo" label_value="bar"
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
body = {
@ -308,7 +363,7 @@ def node_add_label(node_name, label_name, label_value, **kwargs):
log.exception('Exception when calling CoreV1Api->patch_node')
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
return None
@ -323,7 +378,7 @@ def node_remove_label(node_name, label_name, **kwargs):
salt '*' kubernetes.node_remove_label node_name="minikube" \
label_name="foo"
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
body = {
@ -341,7 +396,7 @@ def node_remove_label(node_name, label_name, **kwargs):
log.exception('Exception when calling CoreV1Api->patch_node')
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
return None
@ -353,9 +408,9 @@ def namespaces(**kwargs):
CLI Examples::
salt '*' kubernetes.namespaces
salt '*' kubernetes.namespaces api_url=http://myhost:port api_user=my-user
salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespace()
@ -368,7 +423,7 @@ def namespaces(**kwargs):
log.exception('Exception when calling CoreV1Api->list_namespace')
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def deployments(namespace='default', **kwargs):
@ -380,7 +435,7 @@ def deployments(namespace='default', **kwargs):
salt '*' kubernetes.deployments
salt '*' kubernetes.deployments namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.list_namespaced_deployment(namespace)
@ -396,7 +451,7 @@ def deployments(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def services(namespace='default', **kwargs):
@ -408,7 +463,7 @@ def services(namespace='default', **kwargs):
salt '*' kubernetes.services
salt '*' kubernetes.services namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_service(namespace)
@ -424,7 +479,7 @@ def services(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def pods(namespace='default', **kwargs):
@ -436,7 +491,7 @@ def pods(namespace='default', **kwargs):
salt '*' kubernetes.pods
salt '*' kubernetes.pods namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_pod(namespace)
@ -452,7 +507,7 @@ def pods(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def secrets(namespace='default', **kwargs):
@ -464,7 +519,7 @@ def secrets(namespace='default', **kwargs):
salt '*' kubernetes.secrets
salt '*' kubernetes.secrets namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_secret(namespace)
@ -480,7 +535,7 @@ def secrets(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def configmaps(namespace='default', **kwargs):
@ -492,7 +547,7 @@ def configmaps(namespace='default', **kwargs):
salt '*' kubernetes.configmaps
salt '*' kubernetes.configmaps namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_config_map(namespace)
@ -508,7 +563,7 @@ def configmaps(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_deployment(name, namespace='default', **kwargs):
@ -520,7 +575,7 @@ def show_deployment(name, namespace='default', **kwargs):
salt '*' kubernetes.show_deployment my-nginx default
salt '*' kubernetes.show_deployment name=my-nginx namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.read_namespaced_deployment(name, namespace)
@ -536,7 +591,7 @@ def show_deployment(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_service(name, namespace='default', **kwargs):
@ -548,7 +603,7 @@ def show_service(name, namespace='default', **kwargs):
salt '*' kubernetes.show_service my-nginx default
salt '*' kubernetes.show_service name=my-nginx namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_service(name, namespace)
@ -564,7 +619,7 @@ def show_service(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_pod(name, namespace='default', **kwargs):
@ -576,7 +631,7 @@ def show_pod(name, namespace='default', **kwargs):
salt '*' kubernetes.show_pod guestbook-708336848-fqr2x
salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_pod(name, namespace)
@ -592,7 +647,7 @@ def show_pod(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_namespace(name, **kwargs):
@ -603,7 +658,7 @@ def show_namespace(name, **kwargs):
salt '*' kubernetes.show_namespace kube-system
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespace(name)
@ -619,7 +674,7 @@ def show_namespace(name, **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_secret(name, namespace='default', decode=False, **kwargs):
@ -634,7 +689,7 @@ def show_secret(name, namespace='default', decode=False, **kwargs):
salt '*' kubernetes.show_secret name=confidential namespace=default
salt '*' kubernetes.show_secret name=confidential decode=True
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_secret(name, namespace)
@ -655,7 +710,7 @@ def show_secret(name, namespace='default', decode=False, **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def show_configmap(name, namespace='default', **kwargs):
@ -667,7 +722,7 @@ def show_configmap(name, namespace='default', **kwargs):
salt '*' kubernetes.show_configmap game-config default
salt '*' kubernetes.show_configmap name=game-config namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_config_map(
@ -685,7 +740,7 @@ def show_configmap(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_deployment(name, namespace='default', **kwargs):
@ -697,7 +752,7 @@ def delete_deployment(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_deployment my-nginx
salt '*' kubernetes.delete_deployment name=my-nginx namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@ -740,7 +795,7 @@ def delete_deployment(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_service(name, namespace='default', **kwargs):
@ -752,7 +807,7 @@ def delete_service(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_service my-nginx default
salt '*' kubernetes.delete_service name=my-nginx namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -770,7 +825,7 @@ def delete_service(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_pod(name, namespace='default', **kwargs):
@ -782,7 +837,7 @@ def delete_pod(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default
salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@ -803,7 +858,7 @@ def delete_pod(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_namespace(name, **kwargs):
@ -815,7 +870,7 @@ def delete_namespace(name, **kwargs):
salt '*' kubernetes.delete_namespace salt
salt '*' kubernetes.delete_namespace name=salt
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@ -832,7 +887,7 @@ def delete_namespace(name, **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_secret(name, namespace='default', **kwargs):
@ -844,7 +899,7 @@ def delete_secret(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_secret confidential default
salt '*' kubernetes.delete_secret name=confidential namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@ -864,7 +919,7 @@ def delete_secret(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def delete_configmap(name, namespace='default', **kwargs):
@ -876,7 +931,7 @@ def delete_configmap(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_configmap settings default
salt '*' kubernetes.delete_configmap name=settings namespace=default
'''
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@ -897,7 +952,7 @@ def delete_configmap(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_deployment(
@ -924,7 +979,7 @@ def create_deployment(
template=template,
saltenv=saltenv)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
@ -942,7 +997,7 @@ def create_deployment(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_pod(
@ -969,7 +1024,7 @@ def create_pod(
template=template,
saltenv=saltenv)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -987,7 +1042,7 @@ def create_pod(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_service(
@ -1014,7 +1069,7 @@ def create_service(
template=template,
saltenv=saltenv)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1032,7 +1087,7 @@ def create_service(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_secret(
@ -1069,7 +1124,7 @@ def create_secret(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1087,7 +1142,7 @@ def create_secret(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_configmap(
@ -1120,7 +1175,7 @@ def create_configmap(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1138,7 +1193,7 @@ def create_configmap(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def create_namespace(
@ -1156,7 +1211,7 @@ def create_namespace(
body = kubernetes.client.V1Namespace(metadata=meta_obj)
body.metadata.name = name
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1173,7 +1228,7 @@ def create_namespace(
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def replace_deployment(name,
@ -1200,7 +1255,7 @@ def replace_deployment(name,
template=template,
saltenv=saltenv)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
@ -1218,7 +1273,7 @@ def replace_deployment(name,
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def replace_service(name,
@ -1251,7 +1306,7 @@ def replace_service(name,
body.spec.cluster_ip = old_service['spec']['cluster_ip']
body.metadata.resource_version = old_service['metadata']['resource_version']
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1269,7 +1324,7 @@ def replace_service(name,
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def replace_secret(name,
@ -1306,7 +1361,7 @@ def replace_secret(name,
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1324,7 +1379,7 @@ def replace_secret(name,
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def replace_configmap(name,
@ -1355,7 +1410,7 @@ def replace_configmap(name,
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
_setup_conn(**kwargs)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@ -1373,7 +1428,7 @@ def replace_configmap(name,
)
raise CommandExecutionError(exc)
finally:
_cleanup()
_cleanup(**cfg)
def __create_object_body(kind,
@ -1485,7 +1540,7 @@ def __dict_to_deployment_spec(spec):
'''
Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.
'''
spec_obj = AppsV1beta1DeploymentSpec()
spec_obj = AppsV1beta1DeploymentSpec(template="")
for key, value in iteritems(spec):
if hasattr(spec_obj, key):
setattr(spec_obj, key, value)

View File

@ -206,7 +206,7 @@ def destroy_node(node_id, profile, **libcloud_kwargs):
'''
Destroy a node in the cloud
:param node_id: Unique ID of the node to destory
:param node_id: Unique ID of the node to destroy
:type node_id: ``str``
:param profile: The profile key

View File

@ -129,7 +129,10 @@ def assign(name, value):
tran_tab = name.translate(''.maketrans('./', '/.'))
else:
if isinstance(name, unicode): # pylint: disable=incompatible-py3-code
trans_args = ({ord(x): None for x in ''.join(['./', '/.'])},)
trans_args = ({
ord('/'): '.',
ord('.'): '/'
},)
else:
trans_args = string.maketrans('./', '/.')
tran_tab = name.translate(*trans_args)

View File

@ -256,7 +256,7 @@ def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
``name`` and ``pattern`` were kept for backwards compatibility reasons.
``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias
for ``log_file``. These aliasses wil only be used if the ``entryname`` and
for ``log_file``. These aliases will only be used if the ``entryname`` and
``log_file`` arguments are not passed.
For a full list of arguments see ```logadm.show_args```.

View File

@ -150,6 +150,37 @@ def _get_service(name):
raise CommandExecutionError('Service not found: {0}'.format(name))
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.

    :rtype: bool

    .. versionadded:: Fluorine
    '''
    # get all the info from the launchctl service
    service_info = show(name)

    # get the value for the KeepAlive key in service plist
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all -> service is not expected to stay running.
        return False

    # check if KeepAlive is True and not just set. (KeepAlive may hold a
    # non-boolean value in some plists — presumably a conditions mapping;
    # anything other than a literal True is treated as not-always-running.)
    if keep_alive is True:
        return True
    return False
def show(name):
'''
Show properties of a launchctl service
@ -403,7 +434,9 @@ def status(name, sig=None, runas=None):
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, otherwise an empty string
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
@ -416,6 +449,12 @@ def status(name, sig=None, runas=None):
if sig:
return __salt__['status.pid'](sig)
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name):
return 'loaded'
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version

View File

@ -164,7 +164,6 @@ import salt.utils.stringutils
import salt.utils.win_functions
import salt.utils.win_dacl
REQ_ERROR = None
try:
import libnacl.secret
@ -186,9 +185,9 @@ def _get_config(**kwargs):
config = {
'box_type': 'sealedbox',
'sk': None,
'sk_file': '/etc/salt/pki/master/nacl',
'sk_file': os.path.join(__opts__['pki_dir'], 'master/nacl'),
'pk': None,
'pk_file': '/etc/salt/pki/master/nacl.pub',
'pk_file': os.path.join(__opts__['pki_dir'], 'master/nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
@ -233,7 +232,7 @@ def _get_pk(**kwargs):
return base64.b64decode(pubkey)
def keygen(sk_file=None, pk_file=None):
def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
@ -253,6 +252,14 @@ def keygen(sk_file=None, pk_file=None):
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
@ -313,6 +320,25 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
@ -360,6 +386,31 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Fluorine',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
@ -414,6 +465,9 @@ def sealedbox_encrypt(data, **kwargs):
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
@ -433,6 +487,10 @@ def sealedbox_decrypt(data, **kwargs):
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
@ -452,6 +510,9 @@ def secretbox_encrypt(data, **kwargs):
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
@ -472,6 +533,10 @@ def secretbox_decrypt(data, **kwargs):
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))

View File

@ -1618,167 +1618,3 @@ def list_agents(profile=None):
'''
conn = _auth(profile)
return conn.list_agents()
# The following is a list of functions that need to be incorporated in the
# neutron module. This list should be updated as functions are added.
#
# update_ipsec_site_connection
# Updates an IPsecSiteConnection.
# update_ikepolicy Updates an IKEPolicy
# update_ipsecpolicy Updates an IPsecPolicy
# list_vips Fetches a list of all load balancer vips for a tenant.
# show_vip Fetches information of a certain load balancer vip.
# create_vip Creates a new load balancer vip.
# update_vip Updates a load balancer vip.
# delete_vip Deletes the specified load balancer vip.
# list_pools Fetches a list of all load balancer pools for a tenant.
# show_pool Fetches information of a certain load balancer pool.
# create_pool Creates a new load balancer pool.
# update_pool Updates a load balancer pool.
# delete_pool Deletes the specified load balancer pool.
# retrieve_pool_stats Retrieves stats for a certain load balancer pool.
# list_members Fetches a list of all load balancer members for
# a tenant.
# show_member Fetches information of a certain load balancer member.
# create_member Creates a new load balancer member.
# update_member Updates a load balancer member.
# delete_member Deletes the specified load balancer member.
# list_health_monitors Fetches a list of all load balancer health monitors for
# a tenant.
# show_health_monitor Fetches information of a certain load balancer
# health monitor.
# create_health_monitor
# Creates a new load balancer health monitor.
# update_health_monitor
# Updates a load balancer health monitor.
# delete_health_monitor
# Deletes the specified load balancer health monitor.
# associate_health_monitor
# Associate specified load balancer health monitor
# and pool.
# disassociate_health_monitor
# Disassociate specified load balancer health monitor
# and pool.
# create_qos_queue Creates a new queue.
# list_qos_queues Fetches a list of all queues for a tenant.
# show_qos_queue Fetches information of a certain queue.
# delete_qos_queue Deletes the specified queue.
# list_agents Fetches agents.
# show_agent Fetches information of a certain agent.
# update_agent Updates an agent.
# delete_agent Deletes the specified agent.
# list_network_gateways
# Retrieve network gateways.
# show_network_gateway Fetch a network gateway.
# create_network_gateway
# Create a new network gateway.
# update_network_gateway
# Update a network gateway.
# delete_network_gateway
# Delete the specified network gateway.
# connect_network_gateway
# Connect a network gateway to the specified network.
# disconnect_network_gateway
# Disconnect a network from the specified gateway.
# list_gateway_devices Retrieve gateway devices.
# show_gateway_device Fetch a gateway device.
# create_gateway_device
# Create a new gateway device.
# update_gateway_device
# Updates a new gateway device.
# delete_gateway_device
# Delete the specified gateway device.
# list_dhcp_agent_hosting_networks
# Fetches a list of dhcp agents hosting a network.
# list_networks_on_dhcp_agent
# Fetches a list of dhcp agents hosting a network.
# add_network_to_dhcp_agent
# Adds a network to dhcp agent.
# remove_network_from_dhcp_agent
# Remove a network from dhcp agent.
# list_l3_agent_hosting_routers
# Fetches a list of L3 agents hosting a router.
# list_routers_on_l3_agent
# Fetches a list of L3 agents hosting a router.
# add_router_to_l3_agent
# Adds a router to L3 agent.
# list_firewall_rules Fetches a list of all firewall rules for a tenant.
# show_firewall_rule Fetches information of a certain firewall rule.
# create_firewall_rule Creates a new firewall rule.
# update_firewall_rule Updates a firewall rule.
# delete_firewall_rule Deletes the specified firewall rule.
# list_firewall_policies
# Fetches a list of all firewall policies for a tenant.
# show_firewall_policy Fetches information of a certain firewall policy.
# create_firewall_policy
# Creates a new firewall policy.
# update_firewall_policy
# Updates a firewall policy.
# delete_firewall_policy
# Deletes the specified firewall policy.
# firewall_policy_insert_rule
# Inserts specified rule into firewall policy.
# firewall_policy_remove_rule
# Removes specified rule from firewall policy.
# list_firewalls Fetches a list of all firewals for a tenant.
# show_firewall Fetches information of a certain firewall.
# create_firewall Creates a new firewall.
# update_firewall Updates a firewall.
# delete_firewall Deletes the specified firewall.
# remove_router_from_l3_agent
# Remove a router from l3 agent.
# get_lbaas_agent_hosting_pool
# Fetches a loadbalancer agent hosting a pool.
# list_pools_on_lbaas_agent
# Fetches a list of pools hosted by
# the loadbalancer agent.
# list_service_providers
# Fetches service providers.
# list_credentials Fetch a list of all credentials for a tenant.
# show_credential Fetch a credential.
# create_credential Create a new credential.
# update_credential Update a credential.
# delete_credential Delete the specified credential.
# list_network_profile_bindings
# Fetch a list of all tenants associated for
# a network profile.
# list_network_profiles
# Fetch a list of all network profiles for a tenant.
# show_network_profile Fetch a network profile.
# create_network_profile
# Create a network profile.
# update_network_profile
# Update a network profile.
# delete_network_profile
# Delete the network profile.
# list_policy_profile_bindings
# Fetch a list of all tenants associated for
# a policy profile.
# list_policy_profiles Fetch a list of all network profiles for a tenant.
# show_policy_profile Fetch a network profile.
# update_policy_profile
# Update a policy profile.
# create_metering_label
# Creates a metering label.
# delete_metering_label
# Deletes the specified metering label.
# list_metering_labels Fetches a list of all metering labels for a tenant.
# show_metering_label Fetches information of a certain metering label.
# create_metering_label_rule
# Creates a metering label rule.
# delete_metering_label_rule
# Deletes the specified metering label rule.
# list_metering_label_rules
# Fetches a list of all metering label rules for a label.
# show_metering_label_rule
# Fetches information of a certain metering label rule.
# list_net_partitions Fetch a list of all network partitions for a tenant.
# show_net_partition etch a network partition.
# create_net_partition Create a network partition.
# delete_net_partition Delete the network partition.
# create_packet_filter Create a new packet filter.
# update_packet_filter Update a packet filter.
# list_packet_filters Fetch a list of all packet filters for a tenant.
# show_packet_filter Fetch information of a certain packet filter.
# delete_packet_filter Delete the specified packet filter.

View File

@ -153,7 +153,8 @@ def latest_version(*names, **kwargs):
continue
cur = pkgs.get(pkgname, '')
if not cur or salt.utils.compare_versions(ver1=cur,
if not cur or salt.utils.versions.compare(
ver1=cur,
oper='<',
ver2=pkgver):
ret[pkgname] = pkgver

View File

@ -13,7 +13,6 @@ from subprocess import Popen, PIPE
# Import Salt libs
from salt.ext import six
from salt.client import Caller
ArgumentParser = object
@ -105,8 +104,7 @@ def xccdf(params):
success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
returncode = proc.returncode
if success:
caller = Caller()
caller.cmd('cp.push_dir', tempdir)
__salt__['cp.push_dir'](tempdir)
shutil.rmtree(tempdir, ignore_errors=True)
upload_dir = tempdir

View File

@ -42,8 +42,8 @@ def get(key,
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string
except __opts__['pillar_raise_on_missing'] is set to True, in which case a
KeyError will be raised.
except ``__opts__['pillar_raise_on_missing']`` is set to True, in which
case a ``KeyError`` exception will be raised.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
@ -53,11 +53,18 @@ def get(key,
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
To retrieve the value associated with the ``apache`` key in the ``pkg``
dict this key can be passed as::
pkg:apache
key
The pillar key to get value from
default
If specified, return this value in case when named pillar value does
not exist.
merge : ``False``
If ``True``, the retrieved values will be merged into the passed
default. When the default and the retrieved value are both

View File

@ -338,6 +338,22 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
return cleanup_requirements, None
def _format_env_vars(env_vars):
ret = {}
if env_vars:
if isinstance(env_vars, dict):
for key, val in six.iteritems(env_vars):
if not isinstance(key, six.string_types):
key = str(key) # future lint: disable=blacklisted-function
if not isinstance(val, six.string_types):
val = str(val) # future lint: disable=blacklisted-function
ret[key] = val
else:
raise CommandExecutionError(
'env_vars {0} is not a dictionary'.format(env_vars))
return ret
def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
requirements=None,
bin_env=None,
@ -811,16 +827,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if env_vars:
if isinstance(env_vars, dict):
for key, val in six.iteritems(env_vars):
if not isinstance(key, six.string_types):
key = str(key) # future lint: disable=blacklisted-function
if not isinstance(val, six.string_types):
val = str(val) # future lint: disable=blacklisted-function
cmd_kwargs.setdefault('env', {})[key] = val
else:
raise CommandExecutionError(
'env_vars {0} is not a dictionary'.format(env_vars))
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
try:
if cwd:
@ -974,7 +981,8 @@ def uninstall(pkgs=None,
def freeze(bin_env=None,
user=None,
cwd=None,
use_vt=False):
use_vt=False,
env_vars=None):
'''
Return a list of installed packages either globally or in the specified
virtualenv
@ -1027,6 +1035,8 @@ def freeze(bin_env=None,
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
if env_vars:
cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
@ -1038,7 +1048,8 @@ def freeze(bin_env=None,
def list_(prefix=None,
bin_env=None,
user=None,
cwd=None):
cwd=None,
env_vars=None):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@ -1067,7 +1078,7 @@ def list_(prefix=None,
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines

View File

@ -302,8 +302,7 @@ def _parsed_version(user=None, host=None, port=None, maintenance_db=None,
return None
def _connection_defaults(user=None, host=None, port=None, maintenance_db=None,
password=None):
def _connection_defaults(user=None, host=None, port=None, maintenance_db=None):
'''
Returns a tuple of (user, host, port, db) with config, pillar, or default
values assigned to missing values.
@ -316,31 +315,29 @@ def _connection_defaults(user=None, host=None, port=None, maintenance_db=None,
port = __salt__['config.option']('postgres.port')
if not maintenance_db:
maintenance_db = __salt__['config.option']('postgres.maintenance_db')
if password is None:
password = __salt__['config.option']('postgres.pass')
return (user, host, port, maintenance_db, password)
return (user, host, port, maintenance_db)
def _psql_cmd(*args, **kwargs):
'''
Return string with fully composed psql command.
Accept optional keyword arguments: user, host and port as well as any
number or positional arguments to be added to the end of command.
Accepts optional keyword arguments: user, host, port and maintenance_db,
as well as any number of positional arguments to be added to the end of
the command.
'''
(user, host, port, maintenance_db, password) = _connection_defaults(
(user, host, port, maintenance_db) = _connection_defaults(
kwargs.get('user'),
kwargs.get('host'),
kwargs.get('port'),
kwargs.get('maintenance_db'),
kwargs.get('password'))
kwargs.get('maintenance_db'))
_PSQL_BIN = _find_pg_binary('psql')
cmd = [_PSQL_BIN,
'--no-align',
'--no-readline',
'--no-psqlrc',
'--no-password'] # It is never acceptable to issue a password prompt.
'--no-password'] # Never prompt, handled in _run_psql.
if user:
cmd += ['--username', user]
if host:
@ -363,7 +360,7 @@ def _psql_prepare_and_run(cmd,
user=None):
rcmd = _psql_cmd(
host=host, user=user, port=port,
maintenance_db=maintenance_db, password=password,
maintenance_db=maintenance_db,
*cmd)
cmdret = _run_psql(
rcmd, runas=runas, password=password, host=host, port=port, user=user)

View File

@ -83,6 +83,9 @@ def _to_unicode(vdata):
Converts from current users character encoding to unicode. Use this for
parameters being pass to reg functions
'''
# None does not convert to Unicode
if vdata is None:
return None
return salt.utils.stringutils.to_unicode(vdata, 'utf-8')
@ -526,13 +529,13 @@ def set_value(hive,
# https://www.python.org/dev/peps/pep-0237/
# String Types to Unicode
if vtype_value in [1, 2]:
if vtype_value in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]:
local_vdata = _to_unicode(vdata)
# Don't touch binary...
elif vtype_value == 3:
elif vtype_value == win32con.REG_BINARY:
local_vdata = vdata
# Make sure REG_MULTI_SZ is a list of strings
elif vtype_value == 7:
elif vtype_value == win32con.REG_MULTI_SZ:
local_vdata = [_to_unicode(i) for i in vdata]
# Everything else is int
else:
@ -686,7 +689,6 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
@ -728,7 +730,7 @@ def import_file(source, use_32bit_registry=False):
can be either a local file path or a URL type supported by salt
(e.g. ``salt://salt_master_path``).
:param bool use_32bit_registry: If the value of this paramater is ``True``
:param bool use_32bit_registry: If the value of this parameter is ``True``
then the ``REG`` file will be imported into the Windows 32 bit registry.
Otherwise the Windows 64 bit registry will be used.

View File

@ -737,6 +737,45 @@ def sync_utils(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackli
return ret
def sync_serializers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: Fluorine
Sync serializers from ``salt://_serializers`` to the minion
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for serializer modules to sync. If no top
files are found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new serializer modules are
synced. Set to ``False`` to prevent this refresh.
extmod_whitelist : None
comma-seperated list of modules to sync
extmod_blacklist : None
comma-seperated list of modules to blacklist based on type
CLI Examples:
.. code-block:: bash
salt '*' saltutil.sync_serializers
salt '*' saltutil.sync_serializers saltenv=dev
salt '*' saltutil.sync_serializers saltenv=base,dev
'''
ret = _sync('serializers', saltenv, extmod_whitelist, extmod_blacklist)
if refresh:
refresh_modules()
return ret
def list_extmods():
'''
.. versionadded:: 2017.7.0
@ -904,6 +943,7 @@ def sync_all(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist
ret['proxymodules'] = sync_proxymodules(saltenv, False, extmod_whitelist, extmod_blacklist)
ret['engines'] = sync_engines(saltenv, False, extmod_whitelist, extmod_blacklist)
ret['thorium'] = sync_thorium(saltenv, False, extmod_whitelist, extmod_blacklist)
ret['serializers'] = sync_serializers(saltenv, False, extmod_whitelist, extmod_blacklist)
if __opts__['file_client'] == 'local':
ret['pillar'] = sync_pillar(saltenv, False, extmod_whitelist, extmod_blacklist)
if refresh:

View File

@ -58,7 +58,7 @@ SCHEDULE_CONF = [
'after',
'return_config',
'return_kwargs',
'run_on_start'
'run_on_start',
'skip_during_range',
'run_after_skip_range',
]

View File

@ -11,7 +11,6 @@ import logging
import salt.utils.json
import salt.utils.path
import salt.utils.platform
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
@ -26,14 +25,6 @@ __func_alias__ = {
__virtualname__ = 'imgadm'
@decorators.memoize
def _check_imgadm():
'''
Looks to see if imgadm is present on the system
'''
return salt.utils.path.which('imgadm')
def _exit_status(retcode):
'''
Translate exit status of imgadm
@ -70,11 +61,12 @@ def __virtual__():
'''
Provides imgadm only on SmartOS
'''
if salt.utils.platform.is_smartos_globalzone() and _check_imgadm():
if salt.utils.platform.is_smartos_globalzone() and \
salt.utils.path.which('imgadm'):
return __virtualname__
return (
False,
'{0} module can only be loaded on SmartOS computed nodes'.format(
'{0} module can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
@ -91,8 +83,7 @@ def version():
salt '*' imgadm.version
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} --version'.format(imgadm)
cmd = 'imgadm --version'
res = __salt__['cmd.run'](cmd).splitlines()
ret = res[0].split()
return ret[-1]
@ -111,9 +102,7 @@ def update_installed(uuid=''):
salt '*' imgadm.update [uuid]
'''
imgadm = _check_imgadm()
if imgadm:
cmd = '{0} update {1}'.format(imgadm, uuid).rstrip()
cmd = 'imgadm update {0}'.format(uuid).rstrip()
__salt__['cmd.run'](cmd)
return {}
@ -135,8 +124,7 @@ def avail(search=None, verbose=False):
salt '*' imgadm.avail verbose=True
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} avail -j'.format(imgadm)
cmd = 'imgadm avail -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
@ -169,8 +157,7 @@ def list_installed(verbose=False):
salt '*' imgadm.list [verbose=True]
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} list -j'.format(imgadm)
cmd = 'imgadm list -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
@ -198,8 +185,7 @@ def show(uuid):
salt '*' imgadm.show e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} show {1}'.format(imgadm, uuid)
cmd = 'imgadm show {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@ -223,8 +209,7 @@ def get(uuid):
salt '*' imgadm.get e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} get {1}'.format(imgadm, uuid)
cmd = 'imgadm get {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@ -250,8 +235,7 @@ def import_image(uuid, verbose=False):
salt '*' imgadm.import e42f8c84-bbea-11e2-b920-078fab2aab1f [verbose=True]
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} import {1}'.format(imgadm, uuid)
cmd = 'imgadm import {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@ -275,8 +259,7 @@ def delete(uuid):
salt '*' imgadm.delete e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} delete {1}'.format(imgadm, uuid)
cmd = 'imgadm delete {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@ -305,8 +288,7 @@ def vacuum(verbose=False):
salt '*' imgadm.vacuum [verbose=True]
'''
ret = {}
imgadm = _check_imgadm()
cmd = '{0} vacuum -f'.format(imgadm)
cmd = 'imgadm vacuum -f'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:

Some files were not shown because too many files have changed in this diff Show More