diff --git a/.github/stale.yml b/.github/stale.yml
index 513b52aca7..8aed31616e 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -1,8 +1,8 @@
# Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
-# 790 is approximately 2 years and 2 months
-daysUntilStale: 790
+# 770 is approximately 2 years and 1 month
+daysUntilStale: 770
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
diff --git a/.kitchen.yml b/.kitchen.yml
index ba2266c8b3..cf74731163 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -36,7 +36,7 @@ provisioner:
require_chef: false
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
- branch: oxygen
+ branch: 2018.3
repo: git
testingdir: /testing
salt_copy_filter:
diff --git a/.testing.pylintrc b/.testing.pylintrc
index 832ff814e3..ccb20da64d 100644
--- a/.testing.pylintrc
+++ b/.testing.pylintrc
@@ -29,6 +29,25 @@ load-plugins=saltpylint.pep8,
# Don't bump this values on PyLint 1.4.0 - Know bug that ignores the passed --rcfile
jobs=1
+# List of blacklisted functions and suggested replacements
+#
+# NOTE: This pylint check will infer the full name of the function by walking
+# back up from the function name to the parent, to the parent's parent, etc.,
+# and this means that functions which come from platform-specific modules need
+# to be referenced using name of the module from which the function was
+# imported. This happens a lot in the os and os.path modules. Functions from
+# os.path should be defined using posixpath.funcname and ntpath.funcname, while
+# functions from os should be defined using posix.funcname and nt.funcname.
+#
+# When defining a blacklisted function, the format is:
+#
+# =
+#
+# The replacement text will be included in the alert message.
+#
+blacklisted-functions=posix.umask=salt.utils.files.set_umask or get_umask,
+ nt.umask=salt.utils.files.set_umask or get_umask
+
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
diff --git a/conf/cloud b/conf/cloud
index 035cfea101..5a379e6745 100644
--- a/conf/cloud
+++ b/conf/cloud
@@ -26,7 +26,7 @@
# The level of messages to send to the console.
-# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
+# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
diff --git a/conf/master b/conf/master
index 986898436a..f6eccc9a16 100644
--- a/conf/master
+++ b/conf/master
@@ -1157,7 +1157,7 @@
#log_level: warning
# The level of messages to send to the log file.
-# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
+# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
# If using 'log_granular_levels' this must be set to the highest desired level.
#log_level_logfile: warning
diff --git a/conf/minion b/conf/minion
index 41b507bef4..e768839318 100644
--- a/conf/minion
+++ b/conf/minion
@@ -389,6 +389,16 @@
# minion event bus. The value is expressed in bytes.
#max_event_size: 1048576
+# When a minion starts up it sends a notification on the event bus with a tag
+# that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
+# the minion also sends a similar event with an event tag like this:
+# `minion_start`. This duplication can cause a lot of clutter on the event bus
+# when there are many minions. Set `enable_legacy_startup_events: False` in the
+# minion config to ensure only the `salt/minion/<minion_id>/start` events are
+# sent. Beginning with the `Neon` Salt release this option will default to
+# `False`
+#enable_legacy_startup_events: True
+
# To detect failed master(s) and fire events on connect/disconnect, set
# master_alive_interval to the number of seconds to poll the masters for
# connection events.
@@ -720,7 +730,7 @@
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
-# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
+# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
diff --git a/conf/proxy b/conf/proxy
index 908dd25ba8..cc40641786 100644
--- a/conf/proxy
+++ b/conf/proxy
@@ -543,7 +543,7 @@
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
-# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
+# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
diff --git a/conf/suse/master b/conf/suse/master
index 3125f76308..22bad39031 100644
--- a/conf/suse/master
+++ b/conf/suse/master
@@ -1100,7 +1100,7 @@ syndic_user: salt
#key_logfile: /var/log/salt/key
# The level of messages to send to the console.
-# One of 'garbage', 'trace', 'debug', info', 'warning', 'error', 'critical'.
+# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
#
# The following log levels are considered INSECURE and may log sensitive data:
# ['garbage', 'trace', 'debug']
diff --git a/doc/_themes/saltstack2/static/images/DOCBANNER.jpg b/doc/_themes/saltstack2/static/images/DOCBANNER.jpg
index 886e3a9b7c..f68f0787bb 100644
Binary files a/doc/_themes/saltstack2/static/images/DOCBANNER.jpg and b/doc/_themes/saltstack2/static/images/DOCBANNER.jpg differ
diff --git a/doc/_themes/saltstack2/static/images/enterprise_ad.jpg b/doc/_themes/saltstack2/static/images/enterprise_ad.jpg
index 18fe72e961..185539326d 100644
Binary files a/doc/_themes/saltstack2/static/images/enterprise_ad.jpg and b/doc/_themes/saltstack2/static/images/enterprise_ad.jpg differ
diff --git a/doc/conf.py b/doc/conf.py
index 17c8d25d4d..0b02718ffa 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -341,10 +341,15 @@ rst_prolog = """\
| md5
-.. |osxdownload| raw:: html
+.. |osxdownloadpy2| raw:: html
- x86_64: salt-{release}-x86_64.pkg
- | md5
+ x86_64: salt-{release}-py2-x86_64.pkg
+ | md5
+
+.. |osxdownloadpy3| raw:: html
+
+ x86_64: salt-{release}-py3-x86_64.pkg
+ | md5
""".format(release=release)
diff --git a/doc/glossary.rst b/doc/glossary.rst
index 75df13fca5..e779a0e063 100644
--- a/doc/glossary.rst
+++ b/doc/glossary.rst
@@ -83,8 +83,8 @@ Glossary
to the system. State module functions should be idempotent. Some
state module functions, such as :mod:`cmd.run `
are not idempotent by default but can be made idempotent with the
- proper use of requisites such as :ref:```unless`` `
- and :ref:```onlyif`` `. For more information, *see*
+ proper use of requisites such as :ref:`unless <unless-requisite>`
+ and :ref:`onlyif <onlyif-requisite>`. For more information, *see*
`wikipedia `_.
Jinja
diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst
index b554496710..35b7655fa3 100644
--- a/doc/ref/configuration/master.rst
+++ b/doc/ref/configuration/master.rst
@@ -1438,7 +1438,7 @@ This should still be considered a less than secure option, due to the fact
that trust is based on just the requesting minion.
Please see the :ref:`Autoaccept Minions from Grains `
-documentation for more infomation.
+documentation for more information.
.. code-block:: yaml
@@ -2212,7 +2212,7 @@ This allows the following more convenient syntax to be used:
# (this comment remains in the rendered template)
## ensure all the formula services are running
% for service in formula_services:
- enable_service_{{ serivce }}:
+ enable_service_{{ service }}:
service.running:
name: {{ service }}
% endfor
@@ -5339,11 +5339,10 @@ branch/tag.
winrepo_branch: winrepo
- ext_pillar:
- - git:
- - https://mygitserver/winrepo1.git
- - https://mygitserver/winrepo2.git:
- - foo https://mygitserver/winrepo3.git
+ winrepo_remotes:
+ - https://mygitserver/winrepo1.git
+ - https://mygitserver/winrepo2.git:
+ - foo https://mygitserver/winrepo3.git
.. conf_master:: winrepo_ssl_verify
diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst
index cdd45a98de..91566a146f 100644
--- a/doc/ref/configuration/minion.rst
+++ b/doc/ref/configuration/minion.rst
@@ -208,6 +208,28 @@ minion event bus. The value is expressed in bytes.
max_event_size: 1048576
+.. conf_minion:: enable_legacy_startup_events
+
+``enable_legacy_startup_events``
+--------------------------------
+
+.. versionadded:: Fluorine
+
+Default: ``True``
+
+When a minion starts up it sends a notification on the event bus with a tag
+that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
+the minion also sends a similar event with an event tag like this:
+`minion_start`. This duplication can cause a lot of clutter on the event bus
+when there are many minions. Set `enable_legacy_startup_events: False` in the
+minion config to ensure only the `salt/minion/<minion_id>/start` events are
+sent. Beginning with the `Neon` Salt release this option will default to
+`False`
+
+.. code-block:: yaml
+
+ enable_legacy_startup_events: True
+
.. conf_minion:: master_failback
``master_failback``
@@ -2497,7 +2519,7 @@ The grains that should be sent to the master on authentication to decide if
the minion's key should be accepted automatically.
Please see the :ref:`Autoaccept Minions from Grains `
-documentation for more infomation.
+documentation for more information.
.. code-block:: yaml
diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst
index b916e9ae53..775d21f358 100644
--- a/doc/ref/modules/all/index.rst
+++ b/doc/ref/modules/all/index.rst
@@ -334,6 +334,7 @@ execution modules
publish
puppet
purefa
+ purefb
pushbullet
pushover_notify
pw_group
diff --git a/doc/ref/modules/all/salt.modules.purefb.rst b/doc/ref/modules/all/salt.modules.purefb.rst
new file mode 100644
index 0000000000..45f731b915
--- /dev/null
+++ b/doc/ref/modules/all/salt.modules.purefb.rst
@@ -0,0 +1,6 @@
+===================
+salt.modules.purefb
+===================
+
+.. automodule:: salt.modules.purefb
+ :members:
diff --git a/doc/topics/cloud/aws.rst b/doc/topics/cloud/aws.rst
index 376edc6115..fda04b2d24 100644
--- a/doc/topics/cloud/aws.rst
+++ b/doc/topics/cloud/aws.rst
@@ -296,7 +296,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``:
SecurityGroupId:
- sg-750af413
del_root_vol_on_destroy: True
- del_all_vol_on_destroy: True
+ del_all_vols_on_destroy: True
volumes:
- { size: 10, device: /dev/sdf }
- { size: 10, device: /dev/sdg, type: io1, iops: 1000 }
diff --git a/doc/topics/cloud/azurearm.rst b/doc/topics/cloud/azurearm.rst
index 4bf0c14acd..8fb9da3d40 100644
--- a/doc/topics/cloud/azurearm.rst
+++ b/doc/topics/cloud/azurearm.rst
@@ -322,6 +322,18 @@ Optional. The path to a file to be read and submitted to Azure as user data.
How this is used depends on the operating system that is being deployed. If
used, any ``userdata`` setting will be ignored.
+userdata_sendkeys
+-----------------
+Optional. Set to ``True`` in order to generate salt minion keys and provide
+them as variables to the userdata script when running it through the template
+renderer. The keys can be referenced as ``{{opts['priv_key']}}`` and
+``{{opts['pub_key']}}``.
+
+userdata_template
+-----------------
+Optional. Enter the renderer, such as ``jinja``, to be used for the userdata
+script template.
+
wait_for_ip_timeout
-------------------
Optional. Default is ``600``. When waiting for a VM to be created, Salt Cloud
diff --git a/doc/topics/cloud/cloud.rst b/doc/topics/cloud/cloud.rst
index 78c9f0d64e..96cf765b53 100644
--- a/doc/topics/cloud/cloud.rst
+++ b/doc/topics/cloud/cloud.rst
@@ -35,7 +35,7 @@ https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/msazure.py
The get_configured_provider() Function
--------------------------------------
-This function uses ``config.is_provider_configured()`` to determine wither
+This function uses ``config.is_provider_configured()`` to determine whether
all required information for this driver has been configured. The last value
in the list of required settings should be followed by a comma.
diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst
index df456e72da..b721f8421f 100644
--- a/doc/topics/cloud/config.rst
+++ b/doc/topics/cloud/config.rst
@@ -71,7 +71,7 @@ The generated grain information will appear similar to:
provider: my_ec2:ec2
profile: ec2-web
-The generation of the salt-cloud grain can be surpressed by the
+The generation of the salt-cloud grain can be suppressed by the
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
Cloud Configuration Syntax
@@ -344,7 +344,35 @@ be set in the configuration file to enable interfacing with GoGrid:
OpenStack
---------
-.. automodule:: salt.cloud.clouds.openstack
+Using Salt for OpenStack uses the `shade <https://docs.openstack.org/shade/latest/>`_ driver managed by the
+openstack-infra team.
+
+This driver can be configured using the ``/etc/openstack/clouds.yml`` file with
+`os-client-config <https://docs.openstack.org/os-client-config/latest/>`_
+
+.. code-block:: yaml
+
+ myopenstack:
+ driver: openstack
+ region_name: RegionOne
+ cloud: mycloud
+
+Or by just configuring the same auth block directly in the cloud provider config.
+
+.. code-block:: yaml
+
+ myopenstack:
+ driver: openstack
+ region_name: RegionOne
+ auth:
+ username: 'demo'
+ password: secret
+ project_name: 'demo'
+ auth_url: 'http://openstack/identity'
+
+Both of these methods support using the
+`vendor <https://docs.openstack.org/os-client-config/latest/user/vendor-support.html>`_
+options.
+
+For more information, look at :mod:`Openstack Cloud Driver Docs <salt.cloud.clouds.openstack>`
DigitalOcean
------------
diff --git a/doc/topics/cloud/openstack.rst b/doc/topics/cloud/openstack.rst
new file mode 100644
index 0000000000..ccb1fefba8
--- /dev/null
+++ b/doc/topics/cloud/openstack.rst
@@ -0,0 +1,5 @@
+==============================
+Getting Started with Openstack
+==============================
+
+.. automodule:: salt.cloud.clouds.openstack
diff --git a/doc/topics/cloud/rackspace.rst b/doc/topics/cloud/rackspace.rst
deleted file mode 100644
index 45df0b8553..0000000000
--- a/doc/topics/cloud/rackspace.rst
+++ /dev/null
@@ -1,188 +0,0 @@
-==============================
-Getting Started With Rackspace
-==============================
-
-Rackspace is a major public cloud platform which may be configured using either
-the `openstack` driver.
-
-
-Dependencies
-============
-* Libcloud >= 0.13.2
-
-
-Configuration
-=============
-To use the `openstack` driver (recommended), set up the cloud configuration at
- ``/etc/salt/cloud.providers`` or
- ``/etc/salt/cloud.providers.d/rackspace.conf``:
-
-.. code-block:: yaml
-
- my-rackspace-config:
- # Set the location of the salt-master
- #
- minion:
- master: saltmaster.example.com
-
- # Configure Rackspace using the OpenStack plugin
- #
- identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
- compute_name: cloudServersOpenStack
- protocol: ipv4
-
- # Set the compute region:
- #
- compute_region: DFW
-
- # Configure Rackspace authentication credentials
- #
- user: myname
- tenant: 123456
- apikey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-
- driver: openstack
-
-
-.. note::
- .. versionchanged:: 2015.8.0
-
- The ``provider`` parameter in cloud provider definitions was renamed to ``driver``. This
- change was made to avoid confusion with the ``provider`` parameter that is used in cloud profile
- definitions. Cloud provider definitions now use ``driver`` to refer to the Salt cloud module that
- provides the underlying functionality to connect to a cloud host, while cloud profiles continue
- to use ``provider`` to refer to provider configurations that you define.
-
-Compute Region
-==============
-
-Rackspace currently has six compute regions which may be used:
-
-.. code-block:: bash
-
- DFW -> Dallas/Forth Worth
- ORD -> Chicago
- SYD -> Sydney
- LON -> London
- IAD -> Northern Virginia
- HKG -> Hong Kong
-
-Note: Currently the LON region is only available with a UK account, and UK accounts cannot access other regions
-
-Authentication
-==============
-
-The ``user`` is the same user as is used to log into the Rackspace Control
-Panel. The ``tenant`` and ``apikey`` can be found in the API Keys area of the
-Control Panel. The ``apikey`` will be labeled as API Key (and may need to be
-generated), and ``tenant`` will be labeled as Cloud Account Number.
-
-An initial profile can be configured in ``/etc/salt/cloud.profiles`` or
-``/etc/salt/cloud.profiles.d/rackspace.conf``:
-
-.. code-block:: yaml
-
- openstack_512:
- provider: my-rackspace-config
- size: 512 MB Standard
- image: Ubuntu 12.04 LTS (Precise Pangolin)
-
-To instantiate a machine based on this profile:
-
-.. code-block:: bash
-
- # salt-cloud -p openstack_512 myinstance
-
-This will create a virtual machine at Rackspace with the name ``myinstance``.
-This operation may take several minutes to complete, depending on the current
-load at the Rackspace data center.
-
-Once the instance has been created with salt-minion installed, connectivity to
-it can be verified with Salt:
-
-.. code-block:: bash
-
- # salt myinstance test.ping
-
-RackConnect Environments
-------------------------
-
-Rackspace offers a hybrid hosting configuration option called RackConnect that
-allows you to use a physical firewall appliance with your cloud servers. When
-this service is in use the public_ip assigned by nova will be replaced by a NAT
-ip on the firewall. For salt-cloud to work properly it must use the newly
-assigned "access ip" instead of the Nova assigned public ip. You can enable that
-capability by adding this to your profiles:
-
-.. code-block:: yaml
-
- openstack_512:
- provider: my-openstack-config
- size: 512 MB Standard
- image: Ubuntu 12.04 LTS (Precise Pangolin)
- rackconnect: True
-
-Managed Cloud Environments
---------------------------
-
-Rackspace offers a managed service level of hosting. As part of the managed
-service level you have the ability to choose from base of lamp installations on
-cloud server images. The post build process for both the base and the lamp
-installations used Chef to install things such as the cloud monitoring agent and
-the cloud backup agent. It also takes care of installing the lamp stack if
-selected. In order to prevent the post installation process from stomping over
-the bootstrapping you can add the below to your profiles.
-
-.. code-block:: yaml
-
- openstack_512:
- provider: my-rackspace-config
- size: 512 MB Standard
- image: Ubuntu 12.04 LTS (Precise Pangolin)
- managedcloud: True
-
-First and Next Generation Images
---------------------------------
-
-Rackspace provides two sets of virtual machine images, *first*, and *next*
-generation. As of ``0.8.9`` salt-cloud will default to using the *next*
-generation images. To force the use of first generation images, on the profile
-configuration please add:
-
-.. code-block:: yaml
-
- FreeBSD-9.0-512:
- provider: my-rackspace-config
- size: 512 MB Standard
- image: FreeBSD 9.0
- force_first_gen: True
-
-Private Subnets
----------------
-By default salt-cloud will not add Rackspace private networks to new servers. To enable
-a private network to a server instantiated by salt cloud, add the following section
-to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)
-
-.. code-block:: yaml
-
- networks:
- - fixed:
- # This is the private network
- - private-network-id
- # This is Rackspace's "PublicNet"
- - 00000000-0000-0000-0000-000000000000
- # This is Rackspace's "ServiceNet"
- - 11111111-1111-1111-1111-111111111111
-
-To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.
-
-The order of the networks in the above code block does not map to the order of the
-ethernet devices on newly created servers. Public IP will always be first ( eth0 )
-followed by servicenet ( eth1 ) and then private networks.
-
-Enabling the private network per above gives the option of using the private subnet for
-all master-minion communication, including the bootstrap install of salt-minion. To
-enable the minion to use the private subnet, update the master: line in the minion:
-section of the providers file. To configure the master to only listen on the private
-subnet IP, update the interface: line in the /etc/salt/master file to be the private
-subnet IP of the salt master.
diff --git a/doc/topics/cloud/saltify.rst b/doc/topics/cloud/saltify.rst
index ac89e374c7..aed77baec8 100644
--- a/doc/topics/cloud/saltify.rst
+++ b/doc/topics/cloud/saltify.rst
@@ -125,7 +125,7 @@ to start that machine running.
The "magic packet" must be sent by an existing salt minion which is on
the same network segment as the target machine. (Or your router
must be set up especially to route WoL packets.) Your target machine
-must be set up to listen for WoL and to respond appropriatly.
+must be set up to listen for WoL and to respond appropriately.
You must provide the Salt node id of the machine which will send
the WoL packet \(parameter ``wol_sender_node``\), and
diff --git a/doc/topics/development/contributing.rst b/doc/topics/development/contributing.rst
index 3c21c9b9c0..81a8fd1ab7 100644
--- a/doc/topics/development/contributing.rst
+++ b/doc/topics/development/contributing.rst
@@ -282,9 +282,9 @@ The Salt repository follows a "Merge Forward" policy. The merge-forward
behavior means that changes submitted to older main release branches will
automatically be "merged-forward" into the newer branches.
-For example, a pull request is merged into ``2016.11``. Then, the entire
-``2016.11`` branch is merged-forward into the ``2017.7`` branch, and the
-``2017.7`` branch is merged-forward into the ``develop`` branch.
+For example, a pull request is merged into ``2017.7``. Then, the entire
+``2017.7`` branch is merged-forward into the ``2018.3`` branch, and the
+``2018.3`` branch is merged-forward into the ``develop`` branch.
This process makes is easy for contributors to make only one pull-request
against an older branch, but allows the change to propagate to all **main**
diff --git a/doc/topics/installation/osx.rst b/doc/topics/installation/osx.rst
index 66ee787ca1..ce78e347d4 100644
--- a/doc/topics/installation/osx.rst
+++ b/doc/topics/installation/osx.rst
@@ -8,7 +8,8 @@ Installation from the Official SaltStack Repository
===================================================
**Latest stable build from the selected branch**:
-|osxdownload|
+|osxdownloadpy2|
+|osxdownloadpy3|
The output of ``md5 `` should match the contents of the
corresponding md5 file.
diff --git a/doc/topics/jinja/index.rst b/doc/topics/jinja/index.rst
index caa7da9d13..a049d0694d 100644
--- a/doc/topics/jinja/index.rst
+++ b/doc/topics/jinja/index.rst
@@ -153,7 +153,7 @@ starts at the root of the state tree or pillar.
Errors
======
-Saltstack allows to raise custom errors using the ``raise`` jinja function.
+Saltstack allows raising custom errors using the ``raise`` jinja function.
.. code-block:: jinja
@@ -1122,7 +1122,7 @@ Returns:
'body': '{
"userId": 1,
"id": 1,
- "title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
+ "title": "sunt aut facere repellat provident occaecati excepturi option reprehenderit",
"body": "quia et suscipit\\nsuscipit recusandae consequuntur expedita et cum\\nreprehenderit molestiae ut ut quas totam\\nnostrum rerum est autem sunt rem eveniet architecto"
}'
}
diff --git a/doc/topics/mine/index.rst b/doc/topics/mine/index.rst
index 62bdd55dbe..de33a54026 100644
--- a/doc/topics/mine/index.rst
+++ b/doc/topics/mine/index.rst
@@ -79,7 +79,8 @@ Mine Interval
The Salt Mine functions are executed when the Minion starts and at a given
interval by the scheduler. The default interval is every 60 minutes and can
-be adjusted for the Minion via the ``mine_interval`` option:
+be adjusted for the Minion via the ``mine_interval`` option in the minion
+config:
.. code-block:: yaml
diff --git a/doc/topics/orchestrate/orchestrate_runner.rst b/doc/topics/orchestrate/orchestrate_runner.rst
index 4771ff1d47..1a735f5e9e 100644
--- a/doc/topics/orchestrate/orchestrate_runner.rst
+++ b/doc/topics/orchestrate/orchestrate_runner.rst
@@ -338,8 +338,8 @@ Given the above setup, the orchestration will be carried out as follows:
.. _orchestrate-runner-parsing-results-programatically:
-Parsing Results Programatically
--------------------------------
+Parsing Results Programmatically
+--------------------------------
Orchestration jobs return output in a specific data structure. That data
structure is represented differently depending on the outputter used. With the
diff --git a/doc/topics/pillar/index.rst b/doc/topics/pillar/index.rst
index b76e698ca5..96513f8215 100644
--- a/doc/topics/pillar/index.rst
+++ b/doc/topics/pillar/index.rst
@@ -409,7 +409,7 @@ module. This module includes several functions, each of them with their own
use. These functions include:
- :py:func:`pillar.item ` - Retrieves the value of
- one or more keys from the :ref:`in-memory pillar datj `.
+ one or more keys from the :ref:`in-memory pillar data `.
- :py:func:`pillar.items ` - Compiles a fresh pillar
dictionary and returns it, leaving the :ref:`in-memory pillar data
` untouched. If pillar keys are passed to this function
diff --git a/doc/topics/reactor/index.rst b/doc/topics/reactor/index.rst
index 5763763f42..425e78ae2f 100644
--- a/doc/topics/reactor/index.rst
+++ b/doc/topics/reactor/index.rst
@@ -706,8 +706,8 @@ Salt will sync all custom types (by running a :mod:`saltutil.sync_all
`. However, there is a chicken-and-egg issue where, on the
initial :ref:`highstate `, a minion will not yet have these
custom types synced when the top file is first compiled. This can be worked
-around with a simple reactor which watches for ``minion_start`` events, which
-each minion fires when it first starts up and connects to the master.
+around with a simple reactor which watches for ``salt/minion/*/start`` events,
+which each minion fires when it first starts up and connects to the master.
On the master, create **/srv/reactor/sync_grains.sls** with the following
contents:
diff --git a/doc/topics/releases/2016.3.4.rst b/doc/topics/releases/2016.3.4.rst
index 4cd6e5a7b3..b6b27e52e1 100644
--- a/doc/topics/releases/2016.3.4.rst
+++ b/doc/topics/releases/2016.3.4.rst
@@ -146,7 +146,7 @@ Changes:
- **PR** `#36690`_: (*rallytime*) [2016.3] Merge forward from 2015.8 to 2016.3
- **PR** `#36680`_: (*rallytime*) [2016.3] Merge forward from 2015.8 to 2016.3
- **PR** `#36659`_: (*terminalmage*) Support dynamic env in new-style git_pillar
-- **PR** `#36538`_: (*clinta*) daemon-reload on call to service.avaliable
+- **PR** `#36538`_: (*clinta*) daemon-reload on call to service.available
- **PR** `#36616`_: (*cro*) Zypper fix test
- **PR** `#36621`_: (*terminalmage*) Fix shadowed builtins
- **PR** `#36636`_: (*rallytime*) Back-port `#36618`_ to 2016.3
diff --git a/doc/topics/releases/2018.3.0.rst b/doc/topics/releases/2018.3.0.rst
index 9ef219621d..3444a0f226 100644
--- a/doc/topics/releases/2018.3.0.rst
+++ b/doc/topics/releases/2018.3.0.rst
@@ -503,7 +503,7 @@ setting a ``port`` option under the Master's ``discovery`` configuration:
.. note::
When using a port number other than the default, the Minion's ``discovery``
- configuraton must *also* have a port specified, otherwise the Minion will
+ configuration must *also* have a port specified, otherwise the Minion will
still attempt to contact the Master on port ``4520``.
**Minion configuration**
@@ -528,7 +528,7 @@ Connection to a type instead of DNS
By now each Minion was connecting to a Master by DNS or IP address. From now on it is possible
also to connect to a _type_ of a Master. For example, in a network there are three different
Masters, each corresponds for a particular niche or environment or specific role etc. The Minion
-is supposed to connect only to one of those Masters that is described approriately.
+is supposed to connect only to one of those Masters that is described appropriately.
To achieve such an effect, each `/etc/salt/master` configuration should have a `discovery` option,
which should have a `mapping` element with arbitrary key/value pairs. The same configuration should
@@ -665,6 +665,37 @@ The Windows installer will now display command-line help when a help switch
Salt Cloud Features
-------------------
+OpenStack Revamp
+================
+
+The OpenStack Driver has been rewritten mostly from scratch. Salt is now using
+the `shade driver `.
+
+With this, the ``nova`` driver is being deprecated.
+
+:mod:`openstack driver <salt.cloud.clouds.openstack>`
+
+There have also been several new modules and states added for managing OpenStack
+setups using shade as well.
+
+:mod:`keystone <salt.modules.keystoneng>`
+:mod:`keystone role grant <salt.states.keystone_role_grant>`
+:mod:`keystone group <salt.states.keystone_group>`
+:mod:`keystone role <salt.states.keystone_role>`
+:mod:`keystone service <salt.states.keystone_service>`
+:mod:`keystone user <salt.states.keystone_user>`
+:mod:`keystone domain <salt.states.keystone_domain>`
+:mod:`keystone project <salt.states.keystone_project>`
+:mod:`keystone endpoint <salt.states.keystone_endpoint>`
+:mod:`glance <salt.modules.glanceng>`
+:mod:`glance_image <salt.states.glance_image>`
+:mod:`neutron <salt.modules.neutronng>`
+:mod:`neutron subnet <salt.states.neutron_subnet>`
+:mod:`neutron secgroup <salt.states.neutron_secgroup>`
+:mod:`neutron secgroup rule <salt.states.neutron_secgroup_rule>`
+:mod:`neutron network <salt.states.neutron_network>`
+
+
Pre-Flight Commands
===================
@@ -701,7 +732,7 @@ The generated grain information will appear similar to:
provider: my_ec2:ec2
profile: ec2-web
-The generation of salt-cloud grains can be surpressed by the
+The generation of salt-cloud grains can be suppressed by the
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
Upgraded Saltify Driver
@@ -766,7 +797,7 @@ Terms usable in yaml files Description
========================== ===========
classes A list of classes that will be processed in order
states A list of states that will be returned by master_tops function
-pillars A yaml dictionnary that will be returned by the ext_pillar function
+pillars A yaml dictionary that will be returned by the ext_pillar function
environment Node saltenv that will be used by master_tops
========================== ===========
@@ -1557,6 +1588,14 @@ PyCrypto is used as it was in the previous releases. M2Crypto is used in the
same way as PyCrypto so there would be no compatibility issues, different nodes
could use different backends.
+NaCL Module and Runner changes
+------------------------------
+
+In addition to argument changes in both the NaCL module and runner for future
+deprecation in the Fluorine release, the default box_type has changed from
+`secretbox` to `sealedbox`. SecretBox is data encrypted using private key
+`sk` and Sealedbox is encrypted using public key `pk`
+
Deprecations
------------
@@ -1617,6 +1656,15 @@ The ``win_service`` module had the following changes:
- The ``type`` option was removed from the ``create`` function. Please use
``service_type`` instead.
+The ``nacl`` module had the following changes:
+
+- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
+  functions. Please use the ``sk_file`` option instead.
+
+- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
+  functions. Please use the ``sk`` option instead.
+
+
Runner Deprecations
===================
@@ -1625,6 +1673,14 @@ The ``manage`` runner had the following changes:
- The ``root_user`` kwarg was removed from the ``bootstrap`` function. Please
use ``salt-ssh`` roster entries for the host instead.
+The ``nacl`` runner had the following changes:
+
+- The ``key_file`` option was replaced in the ``keygen``, ``enc`` and ``dec``
+  functions. Please use the ``sk_file`` option instead.
+
+- The ``key`` option was replaced in the ``keygen``, ``enc`` and ``dec``
+  functions. Please use the ``sk`` option instead.
+
State Deprecations
==================
diff --git a/doc/topics/releases/fluorine.rst b/doc/topics/releases/fluorine.rst
index 83f7d5b08d..54e9820088 100644
--- a/doc/topics/releases/fluorine.rst
+++ b/doc/topics/releases/fluorine.rst
@@ -3,3 +3,129 @@
======================================
Salt Release Notes - Codename Fluorine
======================================
+
+
+Minion Startup Events
+---------------------
+
+When a minion starts up it sends a notification on the event bus with a tag
+that looks like this: `salt/minion/<minion_id>/start`. For historical reasons
+the minion also sends a similar event with an event tag like this:
+`minion_start`. This duplication can cause a lot of clutter on the event bus
+when there are many minions. Set `enable_legacy_startup_events: False` in the
+minion config to ensure only the `salt/minion/<minion_id>/start` events are
+sent.
+
+The new :conf_minion:`enable_legacy_startup_events` minion config option
+defaults to ``True``, but will be set to default to ``False`` beginning with
+the Neon release of Salt.
+
+The Salt Syndic currently sends an old style `syndic_start` event as well. The
+syndic respects :conf_minion:`enable_legacy_startup_events` as well.
+
+
+Deprecations
+------------
+
+Module Deprecations
+===================
+
+The ``trafficserver`` module had the following changes:
+
+- Support for the ``match_var`` function was removed. Please use the
+ ``match_metric`` function instead.
+- Support for the ``read_var`` function was removed. Please use the
+ ``read_config`` function instead.
+- Support for the ``set_var`` function was removed. Please use the
+ ``set_config`` function instead.
+
+The ``win_update`` module has been removed. It has been replaced by ``win_wua``
+module.
+
+The ``win_wua`` module had the following changes:
+
+- Support for the ``download_update`` function has been removed. Please use the
+ ``download`` function instead.
+- Support for the ``download_updates`` function has been removed. Please use the
+ ``download`` function instead.
+- Support for the ``install_update`` function has been removed. Please use the
+ ``install`` function instead.
+- Support for the ``install_updates`` function has been removed. Please use the
+ ``install`` function instead.
+- Support for the ``list_update`` function has been removed. Please use the
+ ``get`` function instead.
+- Support for the ``list_updates`` function has been removed. Please use the
+ ``list`` function instead.
+
+Pillar Deprecations
+===================
+
+The ``vault`` pillar had the following changes:
+
+- Support for the ``profile`` argument was removed. Any options passed up until
+ and following the first ``path=`` are discarded.
+
+Roster Deprecations
+===================
+
+The ``cache`` roster had the following changes:
+
+- Support for ``roster_order`` as a list or tuple has been removed. As of the
+ ``Fluorine`` release, ``roster_order`` must be a dictionary.
+- The ``roster_order`` option now includes IPv6 in addition to IPv4 for the
+ ``private``, ``public``, ``global`` or ``local`` settings. The syntax for these
+ settings has changed to ``ipv4-*`` or ``ipv6-*``, respectively.
+
+State Deprecations
+==================
+
+The ``docker`` state has been removed. The following functions should be used
+instead.
+
+- The ``docker.running`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.running`` function instead.
+- The ``docker.stopped`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.stopped`` function instead.
+- The ``docker.absent`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.absent`` function instead.
+- The ``docker.absent`` function was removed. Please update applicable SLS files
+ to use the ``docker_container.absent`` function instead.
+- The ``docker.network_present`` function was removed. Please update applicable
+ SLS files to use the ``docker_network.present`` function instead.
+- The ``docker.network_absent`` function was removed. Please update applicable
+ SLS files to use the ``docker_network.absent`` function instead.
+- The ``docker.image_present`` function was removed. Please update applicable SLS
+ files to use the ``docker_image.present`` function instead.
+- The ``docker.image_absent`` function was removed. Please update applicable SLS
+ files to use the ``docker_image.absent`` function instead.
+- The ``docker.volume_present`` function was removed. Please update applicable SLS
+ files to use the ``docker_volume.present`` function instead.
+- The ``docker.volume_absent`` function was removed. Please update applicable SLS
+ files to use the ``docker_volume.absent`` function instead.
+
+The ``docker_network`` state had the following changes:
+
+- Support for the ``driver`` option has been removed from the ``absent`` function.
+ This option had no functionality in ``docker_network.absent``.
+
+The ``git`` state had the following changes:
+
+- Support for the ``ref`` option in the ``detached`` state has been removed.
+ Please use the ``rev`` option instead.
+
+The ``k8s`` state has been removed. The following functions should be used
+instead:
+
+- The ``k8s.label_absent`` function was removed. Please update applicable SLS
+ files to use the ``kubernetes.node_label_absent`` function instead.
+- The ``k8s.label_present`` function was removed. Please update applicable SLS
+ files to use the ``kubernetes.node_label_present`` function instead.
+- The ``k8s.label_folder_absent`` function was removed. Please update applicable
+ SLS files to use the ``kubernetes.node_label_folder_absent`` function instead.
+
+The ``trafficserver`` state had the following changes:
+
+- Support for the ``set_var`` function was removed. Please use the ``config``
+ function instead.
+
+The ``win_update`` state has been removed. Please use the ``win_wua`` state instead.
diff --git a/doc/topics/sdb/index.rst b/doc/topics/sdb/index.rst
index c4d94120e2..cdc66b21f6 100644
--- a/doc/topics/sdb/index.rst
+++ b/doc/topics/sdb/index.rst
@@ -79,22 +79,12 @@ from the ``kevinopenstack`` profile above, you would use:
salt-call sdb.get sdb://kevinopenstack/password
-Some drivers use slightly more complex URIs. For instance, the ``vault`` driver
-requires the full path to where the key is stored, followed by a question mark,
-followed by the key to be retrieved. If you were using a profile called
-``myvault``, you would use a URI that looks like:
-
-.. code-block:: bash
-
- salt-call sdb.get 'sdb://myvault/secret/salt?saltstack'
-
Setting a value uses the same URI as would be used to retrieve it, followed
-by the value as another argument. For the above ``myvault`` URI, you would set
-a new value using a command like:
+by the value as another argument.
.. code-block:: bash
- salt-call sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
+ salt-call sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
Deleting values (if supported by the driver) is done pretty much the same way as
getting them. Provided that you have a profile called ``mykvstore`` that uses
@@ -109,8 +99,8 @@ the runner system:
.. code-block:: bash
- salt-run sdb.get 'sdb://myvault/secret/salt?saltstack'
- salt-run sdb.set 'sdb://myvault/secret/salt?saltstack' 'super awesome'
+ salt-run sdb.get 'sdb://myvault/secret/salt/saltstack'
+ salt-run sdb.set 'sdb://myvault/secret/salt/saltstack' 'super awesome'
salt-run sdb.delete 'sdb://mykvstore/foobar'
diff --git a/doc/topics/slots/index.rst b/doc/topics/slots/index.rst
index ebb0dc1f46..42a77cf1bd 100644
--- a/doc/topics/slots/index.rst
+++ b/doc/topics/slots/index.rst
@@ -10,7 +10,7 @@ Slots
future releases
Many times it is useful to store the results of a command during the course of
-an execution. Salt Slots are designed to allow to store this information and
+an execution. Salt Slots are designed to allow you to store this information and
use it later during the :ref:`highstate ` or other job
execution.
diff --git a/doc/topics/tutorials/salt_bootstrap.rst b/doc/topics/tutorials/salt_bootstrap.rst
index 4b11ee91da..605dc66634 100644
--- a/doc/topics/tutorials/salt_bootstrap.rst
+++ b/doc/topics/tutorials/salt_bootstrap.rst
@@ -327,14 +327,14 @@ Here's a summary of the command line options:
-U If set, fully upgrade the system prior to bootstrapping Salt
-I If set, allow insecure connections while downloading any files. For
example, pass '--no-check-certificate' to 'wget' or '--insecure' to
- 'curl'. On Debian and Ubuntu, using this option with -U allows to obtain
+ 'curl'. On Debian and Ubuntu, using this option with -U allows one to obtain
GnuPG archive keys insecurely if distro has changed release signatures.
-F Allow copied files to overwrite existing (config, init.d, etc)
-K If set, keep the temporary files in the temporary directories specified
with -c and -k
-C Only run the configuration function. Implies -F (forced overwrite).
To overwrite Master or Syndic configs, -M or -S, respectively, must
- also be specified. Salt installation will be ommitted, but some of the
+ also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-A Pass the salt-master DNS name or IP. This will be stored under
${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf
diff --git a/pkg/suse/salt.changes b/pkg/suse/salt.changes
index 7e6ac7ee29..d806880d6d 100644
--- a/pkg/suse/salt.changes
+++ b/pkg/suse/salt.changes
@@ -70,7 +70,7 @@ Mon Oct 12 08:48:25 UTC 2015 - dmacvicar@suse.de
-------------------------------------------------------------------
Mon Oct 12 08:19:45 UTC 2015 - dmacvicar@suse.de
-- allow to disable docs in preparation for building
+- allow one to disable docs in preparation for building
on other platforms without all dependencies.
-------------------------------------------------------------------
diff --git a/requirements/base.txt b/requirements/base.txt
index 1a16e368d0..de490ed07f 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -1,5 +1,5 @@
Jinja2
-msgpack-python>0.3
+msgpack-python>0.3,!=0.5.5
PyYAML
MarkupSafe
requests>=1.0.0
diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py
index 84d5f83b1d..cbfb03a2f2 100644
--- a/salt/auth/ldap.py
+++ b/salt/auth/ldap.py
@@ -361,7 +361,7 @@ def groups(username, **kwargs):
[salt.utils.stringutils.to_str(_config('accountattributename')), str('cn')]) # future lint: disable=blacklisted-function
for entry, result in search_results:
- for user in result[_config('accountattributename'), _config('groupattribute')]:
+ for user in result[_config('accountattributename')]:
if username == salt.utils.stringutils.to_unicode(user).split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])
diff --git a/salt/cli/api.py b/salt/cli/api.py
index 08112cf082..ebb72107f7 100644
--- a/salt/cli/api.py
+++ b/salt/cli/api.py
@@ -9,11 +9,11 @@
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
-import os
import logging
# Import Salt libs
import salt.client.netapi
+import salt.utils.files
import salt.utils.parsers as parsers
from salt.utils.verify import check_user, verify_files, verify_log
@@ -42,9 +42,8 @@ class SaltAPI(parsers.SaltAPIParser):
'udp://',
'file://')):
# Logfile is not using Syslog, verify
- current_umask = os.umask(0o027)
- verify_files([logfile], self.config['user'])
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o027):
+ verify_files([logfile], self.config['user'])
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
index c2f8e17fb5..e82b95b0e5 100644
--- a/salt/client/ssh/__init__.py
+++ b/salt/client/ssh/__init__.py
@@ -659,6 +659,8 @@ class SSH(object):
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(six.iteritems(ret))
+ if isinstance(data, six.text_type):
+ data = {'return': data}
if 'id' not in data:
data['id'] = id_
data['jid'] = jid # make the jid in the payload the same as the jid in the tag
@@ -772,6 +774,8 @@ class SSH(object):
self.opts)
if self.event:
id_, data = next(six.iteritems(ret))
+ if isinstance(data, six.text_type):
+ data = {'return': data}
if 'id' not in data:
data['id'] = id_
data['jid'] = jid # make the jid in the payload the same as the jid in the tag
@@ -1027,6 +1031,7 @@ class Single(object):
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']
opts_pkg['extension_modules'] = self.opts['extension_modules']
+ opts_pkg['module_dirs'] = self.opts['module_dirs']
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
opts_pkg['__master_opts__'] = self.context['master_opts']
if '_caller_cachedir' in self.opts:
diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
index e46220fc80..7335c2b0f2 100644
--- a/salt/client/ssh/ssh_py_shim.py
+++ b/salt/client/ssh/ssh_py_shim.py
@@ -106,9 +106,11 @@ def need_deployment():
'''
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
- old_umask = os.umask(0o077)
- os.makedirs(OPTIONS.saltdir)
- os.umask(old_umask)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
+ try:
+ os.makedirs(OPTIONS.saltdir)
+ finally:
+ os.umask(old_umask) # pylint: disable=blacklisted-function
# Verify perms on saltdir
if not is_windows():
euid = os.geteuid()
@@ -158,10 +160,10 @@ def unpack_thin(thin_path):
Unpack the Salt thin archive.
'''
tfile = tarfile.TarFile.gzopen(thin_path)
- old_umask = os.umask(0o077)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
except OSError:
@@ -189,10 +191,10 @@ def unpack_ext(ext_path):
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
- old_umask = os.umask(0o077)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
@@ -299,7 +301,7 @@ def main(argv): # pylint: disable=W0613
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
- old_umask = os.umask(OPTIONS.cmd_umask)
+ old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
@@ -313,7 +315,7 @@ def main(argv): # pylint: disable=W0613
else:
subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
if __name__ == '__main__':
sys.exit(main(sys.argv))
diff --git a/salt/client/ssh/wrapper/state.py b/salt/client/ssh/wrapper/state.py
index 89b444f139..78d0f3d1ea 100644
--- a/salt/client/ssh/wrapper/state.py
+++ b/salt/client/ssh/wrapper/state.py
@@ -491,17 +491,18 @@ def request(mods=None,
'kwargs': kwargs
}
})
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write state request file %s. Check permission.',
+ notify_path
+ )
return ret
@@ -557,17 +558,18 @@ def clear_request(name=None):
req.pop(name)
else:
return False
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write state request file %s. Check permission.',
+ notify_path
+ )
return True
diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py
index 2fa9f8cfe4..6d7454577c 100644
--- a/salt/cloud/clouds/azurearm.py
+++ b/salt/cloud/clouds/azurearm.py
@@ -103,8 +103,8 @@ import time
import salt.cache
import salt.config as config
import salt.loader
-import salt.utils
import salt.utils.cloud
+import salt.utils.files
import salt.utils.yaml
import salt.ext.six as six
import salt.version
@@ -1003,7 +1003,7 @@ def request_instance(vm_):
)
if ssh_publickeyfile is not None:
try:
- with salt.utils.fopen(ssh_publickeyfile, 'r') as spkc_:
+ with salt.utils.files.fopen(ssh_publickeyfile, 'r') as spkc_:
ssh_publickeyfile_contents = spkc_.read()
except Exception as exc:
raise SaltCloudConfigError(
@@ -1219,10 +1219,30 @@ def request_instance(vm_):
if userdata_file:
if os.path.exists(userdata_file):
- with salt.utils.fopen(userdata_file, 'r') as fh_:
+ with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata and userdata_template:
+ userdata_sendkeys = config.get_cloud_config_value(
+ 'userdata_sendkeys', vm_, __opts__, search_global=False, default=None
+ )
+ if userdata_sendkeys:
+ vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
+ config.get_cloud_config_value(
+ 'keysize',
+ vm_,
+ __opts__
+ )
+ )
+
+ key_id = vm_.get('name')
+ if 'append_domain' in vm_:
+ key_id = '.'.join([key_id, vm_['append_domain']])
+
+ salt.utils.cloud.accept_key(
+ __opts__['pki_dir'], vm_['pub_key'], key_id
+ )
+
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
custom_extension = None
diff --git a/salt/cloud/clouds/linode.py b/salt/cloud/clouds/linode.py
index ff1e2d0492..0ba4c3073c 100644
--- a/salt/cloud/clouds/linode.py
+++ b/salt/cloud/clouds/linode.py
@@ -1500,6 +1500,24 @@ def _query(action=None,
hide_fields=['api_key', 'rootPass'],
opts=__opts__,
)
+
+ if 'ERRORARRAY' in result['dict']:
+ if len(result['dict']['ERRORARRAY']):
+ error_list = []
+
+ for error in result['dict']['ERRORARRAY']:
+ msg = error['ERRORMESSAGE']
+
+ if msg == "Authentication failed":
+ raise SaltCloudSystemExit(
+ 'Linode API Key is expired or invalid'
+ )
+ else:
+ error_list.append(msg)
+ raise SaltCloudException(
+ 'Linode API reported error(s): {}'.format(", ".join(error_list))
+ )
+
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug('Linode Response Status Code: %s', result['status'])
diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py
index d520afbcb7..737abd9f8e 100644
--- a/salt/cloud/clouds/nova.py
+++ b/salt/cloud/clouds/nova.py
@@ -264,6 +264,12 @@ def __virtual__():
if get_dependencies() is False:
return False
+ __utils__['versions.warn_until'](
+ 'Neon',
+ 'This driver has been deprecated and will be removed in the '
+ '{version} release of Salt. Please use the openstack driver instead.'
+ )
+
return __virtualname__
diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py
index 62ef280410..5646dc64e2 100644
--- a/salt/cloud/clouds/openstack.py
+++ b/salt/cloud/clouds/openstack.py
@@ -72,6 +72,7 @@ Or if you need to use a profile to setup some extra stuff, it can be passed as a
username: rackusername
api_key: myapikey
region_name: ORD
+ auth_type: rackspace_apikey
And this will pull in the profile for rackspace and setup all the correct
options for the auth_url and different api versions for services.
@@ -101,6 +102,23 @@ The salt specific ones are:
This is the minimum setup required.
+If metadata is set and you need to make sure the host has finished setting
+up, the `wait_for_metadata` option can be set.
+
+.. code-block:: yaml
+
+ centos:
+ provider: myopenstack
+ image: CentOS 7
+ size: ds1G
+ ssh_key_name: mykey
+ ssh_key_file: /root/.ssh/id_rsa
+ meta:
+ build_config: rack_user_only
+ wait_for_metadata:
+ rax_service_level_automation: Complete
+ rackconnect_automation_status: DEPLOYED
+
Anything else from the create_server_ docs can be passed through here.
- **image**: Image dict, name or ID to boot with. image is required
@@ -678,12 +696,18 @@ def create(vm_):
data = request_instance(conn=conn, call='action', vm_=vm_)
log.debug('VM is now running')
- def __query_node_ip(vm_):
+ def __query_node(vm_):
data = show_instance(vm_['name'], conn=conn, call='action')
+ if 'wait_for_metadata' in vm_:
+ for key, value in six.iteritems(vm_.get('wait_for_metadata', {})):
+ log.debug('Waiting for metadata: {0}={1}'.format(key, value))
+ if data['metadata'].get(key, None) != value:
+ log.debug('Metadata is not ready: {0}={1}'.format(key, data['metadata'].get(key, None)))
+ return False
return preferred_ip(vm_, data[ssh_interface(vm_)])
try:
- ip_address = __utils__['cloud.wait_for_ip'](
- __query_node_ip,
+ ip_address = __utils__['cloud.wait_for_fun'](
+ __query_node,
update_args=(vm_,)
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
diff --git a/salt/cloud/clouds/proxmox.py b/salt/cloud/clouds/proxmox.py
index 765217fed6..4246aa409a 100644
--- a/salt/cloud/clouds/proxmox.py
+++ b/salt/cloud/clouds/proxmox.py
@@ -648,7 +648,7 @@ def _get_properties(path="", method="GET", forced_params=None):
# Browse all path elements but last
for elem in path_levels[:-1]:
search_path += '/' + elem
- # Lookup for a dictionnary with path = "requested path" in list" and return its children
+ # Lookup for a dictionary with path = "requested path" in list and return its children
sub = (item for item in sub if item["path"] == search_path).next()['children']
# Get leaf element in path
search_path += '/' + path_levels[-1]
diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py
index 6696f22d39..c04cf52dc9 100644
--- a/salt/cloud/clouds/vmware.py
+++ b/salt/cloud/clouds/vmware.py
@@ -2684,14 +2684,15 @@ def create(vm_):
non_hostname_chars = compile(r'[^\w-]')
if search(non_hostname_chars, vm_name):
hostName = split(non_hostname_chars, vm_name, maxsplit=1)[0]
+ domainName = split(non_hostname_chars, vm_name, maxsplit=1)[-1]
else:
hostName = vm_name
- domainName = hostName.split('.', 1)[-1]
+ domainName = domain
if 'Windows' not in object_ref.config.guestFullName:
identity = vim.vm.customization.LinuxPrep()
identity.hostName = vim.vm.customization.FixedName(name=hostName)
- identity.domain = domainName if hostName != domainName else domain
+ identity.domain = domainName
else:
identity = vim.vm.customization.Sysprep()
identity.guiUnattended = vim.vm.customization.GuiUnattended()
diff --git a/salt/cloud/deploy/bootstrap-salt.sh b/salt/cloud/deploy/bootstrap-salt.sh
index a9baf0680b..519d795a0a 100755
--- a/salt/cloud/deploy/bootstrap-salt.sh
+++ b/salt/cloud/deploy/bootstrap-salt.sh
@@ -345,7 +345,7 @@ __usage() {
with -c and -k
-C Only run the configuration function. Implies -F (forced overwrite).
To overwrite Master or Syndic configs, -M or -S, respectively, must
- also be specified. Salt installation will be ommitted, but some of the
+ also be specified. Salt installation will be omitted, but some of the
dependencies could be installed to write configuration with -j or -J.
-A Pass the salt-master DNS name or IP. This will be stored under
\${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf
diff --git a/salt/config/__init__.py b/salt/config/__init__.py
index 3847ba7ada..6cd1ef2ecf 100644
--- a/salt/config/__init__.py
+++ b/salt/config/__init__.py
@@ -252,7 +252,7 @@ VALID_OPTS = {
# Force the minion into a single environment when it fetches files from the master
'saltenv': (type(None), six.string_types),
- # Prevent saltenv from being overriden on the command line
+ # Prevent saltenv from being overridden on the command line
'lock_saltenv': bool,
# Force the minion into a single pillar root when it fetches pillar data from the master
@@ -433,6 +433,9 @@ VALID_OPTS = {
# If an event is above this size, it will be trimmed before putting it on the event bus
'max_event_size': int,
+ # Enable old style events to be sent on minion_startup. Change default to False in Neon release
+ 'enable_legacy_startup_events': bool,
+
# Always execute states with test=True if this flag is set
'test': bool,
@@ -650,10 +653,11 @@ VALID_OPTS = {
's3fs_update_interval': int,
'svnfs_update_interval': int,
- 'git_pillar_base': six.string_types,
- 'git_pillar_branch': six.string_types,
- 'git_pillar_env': six.string_types,
- 'git_pillar_root': six.string_types,
+ # NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
+ # git_pillar_root omitted here because their values could conceivably be
+ # loaded as non-string types, which is OK because git_pillar will normalize
+ # them to strings. But rather than include all the possible types they
+ # could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': six.string_types,
@@ -665,12 +669,11 @@ VALID_OPTS = {
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
+ # NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
+ # their values could conceivably be loaded as non-string types, which is OK
+ # because gitfs will normalize them to strings. But rather than include all
+ # the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
- 'gitfs_mountpoint': six.string_types,
- 'gitfs_root': six.string_types,
- 'gitfs_base': six.string_types,
- 'gitfs_user': six.string_types,
- 'gitfs_password': six.string_types,
'gitfs_insecure_auth': bool,
'gitfs_privkey': six.string_types,
'gitfs_pubkey': six.string_types,
@@ -885,11 +888,14 @@ VALID_OPTS = {
'winrepo_dir': six.string_types,
'winrepo_dir_ng': six.string_types,
'winrepo_cachefile': six.string_types,
+ # NOTE: winrepo_branch omitted here because its value could conceivably be
+ # loaded as a non-string type, which is OK because winrepo will normalize
+ # them to strings. But rather than include all the possible types it could
+ # be, we'll just skip type-checking.
'winrepo_cache_expire_max': int,
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
- 'winrepo_branch': six.string_types,
'winrepo_ssl_verify': bool,
'winrepo_user': six.string_types,
'winrepo_password': six.string_types,
@@ -1360,6 +1366,7 @@ DEFAULT_MINION_OPTS = {
'log_rotate_max_bytes': 0,
'log_rotate_backup_count': 0,
'max_event_size': 1048576,
+ 'enable_legacy_startup_events': True,
'test': False,
'ext_job_cache': '',
'cython_enable': False,
@@ -1635,6 +1642,7 @@ DEFAULT_MASTER_OPTS = {
'eauth_acl_module': '',
'eauth_tokens': 'localfs',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
+ 'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
@@ -2132,10 +2140,6 @@ def _read_conf_file(path):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = sdecode(conf_opts['id'])
- for key, value in six.iteritems(conf_opts.copy()):
- if isinstance(value, six.text_type) and six.PY2:
- # We do not want unicode settings
- conf_opts[key] = value.encode('utf-8')
return conf_opts
@@ -2228,7 +2232,6 @@ def include_config(include, orig_path, verbose, exit_on_config_errors=False):
main config file.
'''
# Protect against empty option
-
if not include:
return {}
@@ -2321,7 +2324,7 @@ def prepend_root_dir(opts, path_options):
# drive is not prefixed on a config option
pass
elif os.path.isabs(path):
- # Absolute path (not default or overriden root_dir)
+ # Absolute path (not default or overridden root_dir)
# No prepending required
continue
# Prepending the root dir
@@ -3587,7 +3590,7 @@ def get_id(opts, cache_minion_id=False):
if opts.get('minion_id_caching', True):
try:
with salt.utils.files.fopen(id_cache) as idf:
- name = idf.readline().strip()
+ name = salt.utils.stringutils.to_unicode(idf.readline().strip())
bname = salt.utils.stringutils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1))
@@ -3709,7 +3712,9 @@ def apply_minion_config(overrides=None,
)
opts['fileserver_backend'][idx] = new_val
- opts['__cli'] = os.path.basename(sys.argv[0])
+ opts['__cli'] = salt.utils.stringutils.to_unicode(
+ os.path.basename(sys.argv[0])
+ )
# No ID provided. Will getfqdn save us?
using_ip_for_id = False
@@ -3843,10 +3848,10 @@ def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_con
defaults['default_include'])
include = overrides.get('include', [])
- overrides.update(include_config(default_include, path, verbose=False),
- exit_on_config_errors=exit_on_config_errors)
- overrides.update(include_config(include, path, verbose=True),
- exit_on_config_errors=exit_on_config_errors)
+ overrides.update(include_config(default_include, path, verbose=False,
+ exit_on_config_errors=exit_on_config_errors))
+ overrides.update(include_config(include, path, verbose=True,
+ exit_on_config_errors=exit_on_config_errors))
opts = apply_master_config(overrides, defaults)
_validate_ssh_minion_opts(opts)
_validate_opts(opts)
@@ -3895,6 +3900,10 @@ def apply_master_config(overrides=None, defaults=None):
)
opts['saltenv'] = opts['environment']
+ if six.PY2 and 'rest_cherrypy' in opts:
+ # CherryPy is not unicode-compatible
+ opts['rest_cherrypy'] = salt.utils.data.encode(opts['rest_cherrypy'])
+
for idx, val in enumerate(opts['fileserver_backend']):
if val in ('git', 'hg', 'svn', 'minion'):
new_val = val + 'fs'
diff --git a/salt/crypt.py b/salt/crypt.py
index bd82d71ff5..f34cf1d660 100644
--- a/salt/crypt.py
+++ b/salt/crypt.py
@@ -84,8 +84,7 @@ def dropfile(cachedir, user=None):
'''
dfn = os.path.join(cachedir, '.dfn')
# set a mask (to avoid a race condition on file creation) and store original.
- mask = os.umask(191)
- try:
+ with salt.utils.files.set_umask(0o277):
log.info('Rotating AES key')
if os.path.isfile(dfn):
log.info('AES key rotation already requested')
@@ -103,8 +102,6 @@ def dropfile(cachedir, user=None):
os.chown(dfn, uid, -1)
except (KeyError, ImportError, OSError, IOError):
pass
- finally:
- os.umask(mask) # restore original umask
def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
@@ -138,17 +135,19 @@ def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
if not os.access(keydir, os.W_OK):
raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser()))
- cumask = os.umask(0o277)
- if HAS_M2:
- # if passphrase is empty or None use no cipher
- if not passphrase:
- gen.save_pem(priv, cipher=None)
+ with salt.utils.files.set_umask(0o277):
+ if HAS_M2:
+ # if passphrase is empty or None use no cipher
+ if not passphrase:
+ gen.save_pem(priv, cipher=None)
+ else:
+ gen.save_pem(
+ priv,
+ cipher='des_ede3_cbc',
+ callback=lambda x: salt.utils.stringutils.to_bytes(passphrase))
else:
- gen.save_pem(priv, cipher='des_ede3_cbc', callback=lambda x: six.b(passphrase))
- else:
- with salt.utils.files.fopen(priv, 'wb+') as f:
- f.write(gen.exportKey('PEM', passphrase))
- os.umask(cumask)
+ with salt.utils.files.fopen(priv, 'wb+') as f:
+ f.write(gen.exportKey('PEM', passphrase))
if HAS_M2:
gen.save_pub_key(pub)
else:
diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py
index 4fe00934cd..84537fab3b 100644
--- a/salt/daemons/masterapi.py
+++ b/salt/daemons/masterapi.py
@@ -76,7 +76,8 @@ def init_git_pillar(opts):
opts,
opts_dict['git'],
per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES,
- per_remote_only=git_pillar.PER_REMOTE_ONLY)
+ per_remote_only=git_pillar.PER_REMOTE_ONLY,
+ global_only=git_pillar.GLOBAL_ONLY)
ret.append(pillar)
except salt.exceptions.FileserverConfigError:
if opts.get('git_pillar_verify_config', True):
@@ -201,10 +202,9 @@ def mk_key(opts, user):
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
- cumask = os.umask(191)
- with salt.utils.files.fopen(keyfile, 'w+') as fp_:
- fp_.write(salt.utils.stringutils.to_str(key))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o277):
+ with salt.utils.files.fopen(keyfile, 'w+') as fp_:
+ fp_.write(salt.utils.stringutils.to_str(key))
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
diff --git a/salt/engines/__init__.py b/salt/engines/__init__.py
index 4bea047dc9..7b4a08a403 100644
--- a/salt/engines/__init__.py
+++ b/salt/engines/__init__.py
@@ -69,11 +69,11 @@ class Engine(SignalHandlingMultiprocessingProcess):
'''
Execute the given engine in a new process
'''
- def __init__(self, opts, fun, config, funcs, runners, proxy, log_queue=None):
+ def __init__(self, opts, fun, config, funcs, runners, proxy, **kwargs):
'''
Set up the process executor
'''
- super(Engine, self).__init__(log_queue=log_queue)
+ super(Engine, self).__init__(**kwargs)
self.opts = opts
self.config = config
self.fun = fun
@@ -93,17 +93,21 @@ class Engine(SignalHandlingMultiprocessingProcess):
state['funcs'],
state['runners'],
state['proxy'],
- log_queue=state['log_queue']
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
)
def __getstate__(self):
- return {'opts': self.opts,
- 'fun': self.fun,
- 'config': self.config,
- 'funcs': self.funcs,
- 'runners': self.runners,
- 'proxy': self.proxy,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'fun': self.fun,
+ 'config': self.config,
+ 'funcs': self.funcs,
+ 'runners': self.runners,
+ 'proxy': self.proxy,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def run(self):
'''
diff --git a/salt/engines/ircbot.py b/salt/engines/ircbot.py
index e3b8778cd2..7c75037703 100644
--- a/salt/engines/ircbot.py
+++ b/salt/engines/ircbot.py
@@ -46,7 +46,7 @@ Example of usage
08:33:57 gtmanbot > gtmanfred: pong
08:34:02 @gtmanfred > !echo ping
08:34:02 gtmanbot > ping
- 08:34:17 @gtmanfred > !event test/tag/ircbot irc is usefull
+ 08:34:17 @gtmanfred > !event test/tag/ircbot irc is useful
08:34:17 gtmanbot > gtmanfred: TaDa!
.. code-block:: text
diff --git a/salt/engines/slack.py b/salt/engines/slack.py
index 4189f0e2b8..2fccda66be 100644
--- a/salt/engines/slack.py
+++ b/salt/engines/slack.py
@@ -267,7 +267,7 @@ class SlackClient(object):
def can_user_run(self, user, command, groups):
'''
- Break out the permissions into the folowing:
+ Break out the permissions into the following:
Check whether a user is in any group, including whether a group has the '*' membership
@@ -282,7 +282,7 @@ class SlackClient(object):
:rtype: tuple
:returns: On a successful permitting match, returns 2-element tuple that contains
- the name of the group that successfuly matched, and a dictionary containing
+ the name of the group that successfully matched, and a dictionary containing
the configuration of the group so it can be referenced.
On failure it returns an empty tuple
@@ -400,7 +400,7 @@ class SlackClient(object):
When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message
When the websocket being read from has given up all its messages, yields {'done': True} to
- indicate that the caller has read all of the relevent data for now, and should continue
+ indicate that the caller has read all of the relevant data for now, and should continue
its own processing and check back for more data later.
This relies on the caller sleeping between checks, otherwise this could flood
diff --git a/salt/fileclient.py b/salt/fileclient.py
index faf119c9ac..76719f6c99 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -143,22 +143,20 @@ class Client(object):
saltenv,
path)
destdir = os.path.dirname(dest)
- cumask = os.umask(63)
+ with salt.utils.files.set_umask(0o077):
+ # remove destdir if it is a regular file to avoid an OSError when
+ # running os.makedirs below
+ if os.path.isfile(destdir):
+ os.remove(destdir)
- # remove destdir if it is a regular file to avoid an OSError when
- # running os.makedirs below
- if os.path.isfile(destdir):
- os.remove(destdir)
+ # ensure destdir exists
+ try:
+ os.makedirs(destdir)
+ except OSError as exc:
+ if exc.errno != errno.EEXIST: # ignore if it was there already
+ raise
- # ensure destdir exists
- try:
- os.makedirs(destdir)
- except OSError as exc:
- if exc.errno != errno.EEXIST: # ignore if it was there already
- raise
-
- yield dest
- os.umask(cumask)
+ yield dest
def get_cachedir(self, cachedir=None):
if cachedir is None:
@@ -856,12 +854,10 @@ class LocalClient(Client):
fnd = {'path': '',
'rel': ''}
- if saltenv not in self.opts['file_roots']:
- return fnd
if salt.utils.url.is_escaped(path):
# The path arguments are escaped
path = salt.utils.url.unescape(path)
- for root in self.opts['file_roots'][saltenv]:
+ for root in self.opts['file_roots'].get(saltenv, []):
full = os.path.join(root, path)
if os.path.isfile(full):
fnd['path'] = full
@@ -894,10 +890,8 @@ class LocalClient(Client):
with optional relative prefix path to limit directory traversal
'''
ret = []
- if saltenv not in self.opts['file_roots']:
- return ret
prefix = prefix.strip('/')
- for path in self.opts['file_roots'][saltenv]:
+ for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@@ -915,9 +909,7 @@ class LocalClient(Client):
'''
ret = []
prefix = prefix.strip('/')
- if saltenv not in self.opts['file_roots']:
- return ret
- for path in self.opts['file_roots'][saltenv]:
+ for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@@ -933,10 +925,8 @@ class LocalClient(Client):
with optional relative prefix path to limit directory traversal
'''
ret = []
- if saltenv not in self.opts['file_roots']:
- return ret
prefix = prefix.strip('/')
- for path in self.opts['file_roots'][saltenv]:
+ for path in self.opts['file_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):
@@ -1031,10 +1021,7 @@ class LocalClient(Client):
'''
Return the available environments
'''
- ret = []
- for saltenv in self.opts['file_roots']:
- ret.append(saltenv)
- return ret
+ return list(self.opts['file_roots'])
def master_tops(self):
'''
diff --git a/salt/grains/core.py b/salt/grains/core.py
index cc7c92ca73..b6f6b28aea 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1927,9 +1927,9 @@ def fqdns():
fqdns.add(socket.gethostbyaddr(ip)[0])
except (socket.error, socket.herror,
socket.gaierror, socket.timeout) as e:
- log.error("Exception during resolving address: " + str(e))
+ log.info("Exception during resolving address: " + str(e))
- grains['fqdns'] = list(fqdns)
+ grains['fqdns'] = sorted(list(fqdns))
return grains
@@ -2212,7 +2212,7 @@ def _hw_data(osdata):
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, 'r') as ifile:
- grains[key] = ifile.read()
+ grains[key] = ifile.read().strip()
if key == 'uuid':
grains['uuid'] = grains['uuid'].lower()
except (IOError, OSError) as err:
diff --git a/salt/key.py b/salt/key.py
index 3b936d2e42..80dc4d9c7c 100644
--- a/salt/key.py
+++ b/salt/key.py
@@ -1044,7 +1044,7 @@ class RaetKey(Key):
'''
Use libnacl to generate and safely save a private key
'''
- import libnacl.dual # pylint: disable=3rd-party-module-not-gated
+ import libnacl.dual # pylint: disable=import-error,3rd-party-module-not-gated
d_key = libnacl.dual.DualSecret()
keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname,
keysize, user)
@@ -1440,14 +1440,13 @@ class RaetKey(Key):
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
- c_umask = os.umask(191)
- if os.path.exists(path):
- #mode = os.stat(path).st_mode
- os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
- with salt.utils.files.fopen(path, 'w+b') as fp_:
- fp_.write(self.serial.dumps(keydata))
- os.chmod(path, stat.S_IRUSR)
- os.umask(c_umask)
+ with salt.utils.files.set_umask(0o277):
+ if os.path.exists(path):
+ #mode = os.stat(path).st_mode
+ os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
+ with salt.utils.files.fopen(path, 'w+') as fp_:
+ fp_.write(self.serial.dumps(keydata))
+ os.chmod(path, stat.S_IRUSR)
def delete_local(self):
'''
diff --git a/salt/loader.py b/salt/loader.py
index d343ebd7a9..fd547b633a 100644
--- a/salt/loader.py
+++ b/salt/loader.py
@@ -22,6 +22,7 @@ from zipimport import zipimporter
import salt.config
import salt.syspaths
import salt.utils.context
+import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
@@ -651,7 +652,7 @@ def _load_cached_grains(opts, cfn):
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
- cached_grains = serial.load(fp_)
+ cached_grains = salt.utils.data.decode(serial.load(fp_))
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
@@ -791,35 +792,34 @@ def grains(opts, force_refresh=False, proxy=None):
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Late import
- import salt.modules.cmdmod
- # Make sure cache file isn't read-only
- salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
- with salt.utils.files.fopen(cfn, 'w+b') as fp_:
- try:
- serial = salt.payload.Serial(opts)
- serial.dump(grains_data, fp_)
- except TypeError as e:
- log.error('Failed to serialize grains cache: %s', e)
- raise # re-throw for cleanup
- except Exception as e:
- log.error('Unable to write to grains cache file %s: %s', cfn, e)
- # Based on the original exception, the file may or may not have been
- # created. If it was, we will remove it now, as the exception means
- # the serialized data is not to be trusted, no matter what the
- # exception is.
- if os.path.isfile(cfn):
- os.unlink(cfn)
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Late import
+ import salt.modules.cmdmod
+ # Make sure cache file isn't read-only
+ salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
+ with salt.utils.files.fopen(cfn, 'w+b') as fp_:
+ try:
+ serial = salt.payload.Serial(opts)
+ serial.dump(grains_data, fp_)
+ except TypeError as e:
+ log.error('Failed to serialize grains cache: %s', e)
+ raise # re-throw for cleanup
+ except Exception as e:
+ log.error('Unable to write to grains cache file %s: %s', cfn, e)
+ # Based on the original exception, the file may or may not have been
+ # created. If it was, we will remove it now, as the exception means
+ # the serialized data is not to be trusted, no matter what the
+ # exception is.
+ if os.path.isfile(cfn):
+ os.unlink(cfn)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
- return grains_data
+ return salt.utils.data.decode(grains_data)
# TODO: get rid of? Does anyone use this? You should use raw() instead
diff --git a/salt/log/setup.py b/salt/log/setup.py
index c06dffa7bf..cba3fb1a7b 100644
--- a/salt/log/setup.py
+++ b/salt/log/setup.py
@@ -117,6 +117,7 @@ __EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
+__MP_LOGGING_LEVEL = GARBAGE
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
@@ -820,6 +821,37 @@ def set_multiprocessing_logging_queue(queue):
__MP_LOGGING_QUEUE = queue
+def get_multiprocessing_logging_level():
+ return __MP_LOGGING_LEVEL
+
+
+def set_multiprocessing_logging_level(log_level):
+ global __MP_LOGGING_LEVEL
+ __MP_LOGGING_LEVEL = log_level
+
+
+def set_multiprocessing_logging_level_by_opts(opts):
+ '''
+ This will set the multiprocessing logging level to the lowest
+ logging level of all the types of logging that are configured.
+ '''
+ global __MP_LOGGING_LEVEL
+
+ log_levels = []
+ log_levels.append(
+ LOG_LEVELS.get(opts.get('log_level', '').lower(), logging.ERROR)
+ )
+ log_levels.append(
+ LOG_LEVELS.get(opts.get('log_level_logfile', '').lower(), logging.ERROR)
+ )
+ for level in six.itervalues(opts.get('log_granular_levels', {})):
+ log_levels.append(
+ LOG_LEVELS.get(level.lower(), logging.ERROR)
+ )
+
+ __MP_LOGGING_LEVEL = min(log_levels)
+
+
def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
@@ -883,11 +915,13 @@ def setup_multiprocessing_logging(queue=None):
# Let's add a queue handler to the logging root handlers
__MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
- # Set the logging root level to the lowest to get all messages
- logging.root.setLevel(logging.GARBAGE)
+ # Set the logging root level to the lowest needed level to get all
+ # desired messages.
+ log_level = get_multiprocessing_logging_level()
+ logging.root.setLevel(log_level)
logging.getLogger(__name__).debug(
'Multiprocessing queue logging configured for the process running '
- 'under PID: %s', os.getpid()
+ 'under PID: %s at log level %s', os.getpid(), log_level
)
# The above logging call will create, in some situations, a futex wait
# lock condition, probably due to the multiprocessing Queue's internal
diff --git a/salt/master.py b/salt/master.py
index fb704909c8..7eb458959c 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -139,13 +139,13 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
- def __init__(self, opts, log_queue=None):
+ def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
- super(Maintenance, self).__init__(log_queue=log_queue)
+ super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
@@ -159,11 +159,18 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['opts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def _post_fork_init(self):
'''
@@ -578,9 +585,8 @@ class Master(SMaster):
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
- prev_umask = os.umask(0o077)
- os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
- os.umask(prev_umask)
+ with salt.utils.files.set_umask(0o077):
+ os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
@@ -601,7 +607,8 @@ class Master(SMaster):
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
@@ -708,6 +715,7 @@ class Master(SMaster):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
+ kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
@@ -757,13 +765,13 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
- def __init__(self, hopts, log_queue=None):
+ def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
- super(Halite, self).__init__(log_queue=log_queue)
+ super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
@@ -771,11 +779,18 @@ class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['hopts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['hopts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'hopts': self.hopts,
- 'log_queue': self.log_queue}
+ return {
+ 'hopts': self.hopts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def run(self):
'''
@@ -790,7 +805,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
Starts up the master request server, minions send results to this
interface.
'''
- def __init__(self, opts, key, mkey, log_queue=None, secrets=None):
+ def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
@@ -801,7 +816,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
:rtype: ReqServer
:returns: Request server
'''
- super(ReqServer, self).__init__(log_queue=log_queue)
+ super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
@@ -813,15 +828,24 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], state['key'], state['mkey'],
- log_queue=state['log_queue'], secrets=state['secrets'])
+ self.__init__(
+ state['opts'],
+ state['key'],
+ state['mkey'],
+ secrets=state['secrets'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'key': self.key,
- 'mkey': self.master_key,
- 'log_queue': self.log_queue,
- 'secrets': self.secrets}
+ return {
+ 'opts': self.opts,
+ 'key': self.key,
+ 'mkey': self.master_key,
+ 'secrets': self.secrets,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
@@ -833,6 +857,8 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
+ if self.log_queue_level is not None:
+ salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
@@ -863,6 +889,7 @@ class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
+ kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
@@ -944,7 +971,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
- super(MWorker, self).__init__(log_queue=state['log_queue'])
+ super(MWorker, self).__init__(
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
@@ -953,13 +983,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
SMaster.secrets = state['secrets']
def __getstate__(self):
- return {'opts': self.opts,
- 'req_channels': self.req_channels,
- 'mkey': self.mkey,
- 'key': self.key,
- 'k_mtime': self.k_mtime,
- 'log_queue': self.log_queue,
- 'secrets': SMaster.secrets}
+ return {
+ 'opts': self.opts,
+ 'req_channels': self.req_channels,
+ 'mkey': self.mkey,
+ 'key': self.key,
+ 'k_mtime': self.k_mtime,
+ 'secrets': SMaster.secrets,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
diff --git a/salt/minion.py b/salt/minion.py
index 514519f6c1..0ed7b9fc92 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -2054,14 +2054,16 @@ class Minion(MinionBase):
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
- self._fire_master(
- 'Minion {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- 'minion_start'
- )
- # dup name spaced event
+ if self.opts['enable_legacy_startup_events']:
+ # old style event. Defaults to False in Neon Salt release
+ self._fire_master(
+ 'Minion {0} started at {1}'.format(
+ self.opts['id'],
+ time.asctime()
+ ),
+ 'minion_start'
+ )
+ # send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
@@ -2602,7 +2604,7 @@ class Minion(MinionBase):
def ping_master():
try:
def ping_timeout_handler(*_):
- if not self.opts.get('auth_safemode', True):
+ if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
@@ -2756,14 +2758,16 @@ class Syndic(Minion):
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
- self._fire_master(
- 'Syndic {0} started at {1}'.format(
- self.opts['id'],
- time.asctime()
- ),
- 'syndic_start',
- sync=False,
- )
+ if self.opts['enable_legacy_startup_events']:
+ # old style event. Defaults to false in Neon Salt release.
+ self._fire_master(
+ 'Syndic {0} started at {1}'.format(
+ self.opts['id'],
+ time.asctime()
+ ),
+ 'syndic_start',
+ sync=False,
+ )
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index af07a81638..1e30806a7c 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -1084,7 +1084,7 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
seconds
download_only
- Only donwload the packages, don't unpack or install them
+ Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
diff --git a/salt/modules/archive.py b/salt/modules/archive.py
index f9750b938d..48f0efa18e 100644
--- a/salt/modules/archive.py
+++ b/salt/modules/archive.py
@@ -1077,8 +1077,7 @@ def unzip(zip_file,
if not salt.utils.platform.is_windows():
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
- umask_ = os.umask(0)
- os.umask(umask_)
+ umask_ = salt.utils.files.get_umask()
if target.endswith('/'):
perm = 0o777 & ~umask_
else:
diff --git a/salt/modules/boto_asg.py b/salt/modules/boto_asg.py
index 7d28f4abe1..af3f2f4d5b 100644
--- a/salt/modules/boto_asg.py
+++ b/salt/modules/boto_asg.py
@@ -829,6 +829,7 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
while True:
try:
asgs = conn.get_all_groups(names=[name])
+ break
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
log.debug('Throttled by AWS API, retrying in 5 seconds...')
diff --git a/salt/modules/boto_ec2.py b/salt/modules/boto_ec2.py
index 93911fc307..11f85383b1 100644
--- a/salt/modules/boto_ec2.py
+++ b/salt/modules/boto_ec2.py
@@ -657,40 +657,40 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
salt myminion boto_ec2.find_images tags='{"mytag": "value"}'
'''
+ retries = 30
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
-
- try:
- filter_parameters = {'filters': {}}
-
- if image_ids:
- filter_parameters['image_ids'] = [image_ids]
-
- if executable_by:
- filter_parameters['executable_by'] = [executable_by]
-
- if owners:
- filter_parameters['owners'] = [owners]
-
- if ami_name:
- filter_parameters['filters']['name'] = ami_name
-
- if tags:
- for tag_name, tag_value in six.iteritems(tags):
- filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
-
- images = conn.get_all_images(**filter_parameters)
- log.debug('The filters criteria %s matched the following '
- 'images:%s', filter_parameters, images)
-
- if images:
- if return_objs:
- return images
- return [image.id for image in images]
- else:
+ while retries:
+ try:
+ filter_parameters = {'filters': {}}
+ if image_ids:
+ filter_parameters['image_ids'] = [image_ids]
+ if executable_by:
+ filter_parameters['executable_by'] = [executable_by]
+ if owners:
+ filter_parameters['owners'] = [owners]
+ if ami_name:
+ filter_parameters['filters']['name'] = ami_name
+ if tags:
+ for tag_name, tag_value in six.iteritems(tags):
+ filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+ images = conn.get_all_images(**filter_parameters)
+ log.debug('The filters criteria %s matched the following '
+ 'images:%s', filter_parameters, images)
+ if images:
+ if return_objs:
+ return images
+ return [image.id for image in images]
+ else:
+ return False
+ except boto.exception.BotoServerError as exc:
+ if exc.error_code == 'Throttling':
+ log.debug("Throttled by AWS API, will retry in 5 seconds...")
+ time.sleep(5)
+ retries -= 1
+ continue
+ log.error('Failed to convert AMI name `%s` to an AMI ID: %s', ami_name, exc)
return False
- except boto.exception.BotoServerError as exc:
- log.error(exc)
- return False
+ return False
def terminate(instance_id=None, name=None, region=None,
diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
index 4c62deb955..4a1870c270 100644
--- a/salt/modules/cmdmod.py
+++ b/salt/modules/cmdmod.py
@@ -12,6 +12,7 @@ import functools
import glob
import logging
import os
+import platform
import shutil
import subprocess
import sys
@@ -3334,9 +3335,14 @@ def powershell(cmd,
python_shell = True
# Append PowerShell Object formatting
- cmd += ' | ConvertTo-JSON'
- if depth is not None:
- cmd += ' -Depth {0}'.format(depth)
+ # ConvertTo-JSON is only available on Versions of Windows greater than
+ # `7.1.7600`. We have to use `platform.version` instead of `__grains__` here
+ # because this function is called by `salt/grains/core.py` before
+ # `__grains__` is populated
+ if salt.utils.versions.version_cmp(platform.version(), '7.1.7600') == 1:
+ cmd += ' | ConvertTo-JSON'
+ if depth is not None:
+ cmd += ' -Depth {0}'.format(depth)
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.
@@ -3353,7 +3359,7 @@ def powershell(cmd,
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
- cmd = 'try {' + cmd + '} catch { "{}" | ConvertTo-JSON}'
+ cmd = 'try {' + cmd + '} catch { "{}" }'
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
@@ -3425,10 +3431,10 @@ def powershell_all(cmd,
empty Powershell output (which would result in an exception). Instead we
treat this as a special case and one of two things will happen:
- - If the value of the ``force_list`` paramater is ``True``, then the
+ - If the value of the ``force_list`` parameter is ``True``, then the
``result`` field of the return dictionary will be an empty list.
- - If the value of the ``force_list`` paramater is ``False``, then the
+ - If the value of the ``force_list`` parameter is ``False``, then the
return dictionary **will not have a result key added to it**. We aren't
setting ``result`` to ``None`` in this case, because ``None`` is the
Python representation of "null" in JSON. (We likewise can't use ``False``
@@ -3441,20 +3447,20 @@ def powershell_all(cmd,
content, and the type of the resulting Python object is other than ``list``
then one of two things will happen:
- - If the value of the ``force_list`` paramater is ``True``, then the
+ - If the value of the ``force_list`` parameter is ``True``, then the
``result`` field will be a singleton list with the Python object as its
sole member.
- - If the value of the ``force_list`` paramater is ``False``, then the value
+ - If the value of the ``force_list`` parameter is ``False``, then the value
of ``result`` will be the unmodified Python object.
If Powershell's output is not an empty string, Python is able to parse its
content, and the type of the resulting Python object is ``list``, then the
value of ``result`` will be the unmodified Python object. The
- ``force_list`` paramater has no effect in this case.
+ ``force_list`` parameter has no effect in this case.
.. note::
- An example of why the ``force_list`` paramater is useful is as
+ An example of why the ``force_list`` parameter is useful is as
follows: The Powershell command ``dir x | Convert-ToJson`` results in
- no output when x is an empty directory.
@@ -3602,7 +3608,7 @@ def powershell_all(cmd,
where characters may be dropped or incorrectly converted when executed.
Default is False.
- :param bool force_list: The purpose of this paramater is described in the
+ :param bool force_list: The purpose of this parameter is described in the
preamble of this function's documentation. Default value is False.
:param list success_retcodes: This parameter will be allow a list of
diff --git a/salt/modules/composer.py b/salt/modules/composer.py
index adb1b26493..0b905778f7 100644
--- a/salt/modules/composer.py
+++ b/salt/modules/composer.py
@@ -73,7 +73,8 @@ def _run_composer(action,
no_dev=None,
quiet=False,
composer_home='/root',
- extra_flags=None):
+ extra_flags=None,
+ env=None):
'''
Run PHP's composer with a specific action.
@@ -126,6 +127,9 @@ def _run_composer(action,
extra_flags
None, or a string containing extra flags to pass to composer.
+
+ env
+ A list of environment variables to be set prior to execution.
'''
if composer is not None:
if php is None:
@@ -185,9 +189,15 @@ def _run_composer(action,
if optimize is True:
cmd.append('--optimize-autoloader')
+ if env is not None:
+ env = salt.utils.data.repack_dictlist(env)
+ env['COMPOSER_HOME'] = composer_home
+ else:
+ env = {'COMPOSER_HOME': composer_home}
+
result = __salt__['cmd.run_all'](cmd,
runas=runas,
- env={'COMPOSER_HOME': composer_home},
+ env=env,
python_shell=False)
if result['retcode'] != 0:
@@ -210,7 +220,8 @@ def install(directory,
optimize=None,
no_dev=None,
quiet=False,
- composer_home='/root'):
+ composer_home='/root',
+ env=None):
'''
Install composer dependencies for a directory.
@@ -257,6 +268,9 @@ def install(directory,
composer_home
$COMPOSER_HOME environment variable
+ env
+ A list of environment variables to be set prior to execution.
+
CLI Example:
.. code-block:: bash
@@ -278,7 +292,8 @@ def install(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
- composer_home=composer_home)
+ composer_home=composer_home,
+ env=env)
return result
@@ -293,7 +308,8 @@ def update(directory,
optimize=None,
no_dev=None,
quiet=False,
- composer_home='/root'):
+ composer_home='/root',
+ env=None):
'''
Update composer dependencies for a directory.
@@ -343,6 +359,9 @@ def update(directory,
composer_home
$COMPOSER_HOME environment variable
+ env
+ A list of environment variables to be set prior to execution.
+
CLI Example:
.. code-block:: bash
@@ -365,7 +384,8 @@ def update(directory,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
- composer_home=composer_home)
+ composer_home=composer_home,
+ env=env)
return result
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
index 6bafb30a60..d950223fc6 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
@@ -589,6 +589,15 @@ def _scrub_links(links, name):
return ret
+def _ulimit_sort(ulimit_val):
+ if isinstance(ulimit_val, list):
+ return sorted(ulimit_val,
+ key=lambda x: (x.get('Name'),
+ x.get('Hard', 0),
+ x.get('Soft', 0)))
+ return ulimit_val
+
+
def _size_fmt(num):
'''
Format bytes as human-readable file sizes
@@ -932,6 +941,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
+ if item == 'Ulimits':
+ val1 = _ulimit_sort(val1)
+ val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container
@@ -955,6 +967,9 @@ def compare_containers(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
+ if item == 'Ulimits':
+ val1 = _ulimit_sort(val1)
+ val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret
diff --git a/salt/modules/file.py b/salt/modules/file.py
index 3bea22ad39..98a0fc1501 100644
--- a/salt/modules/file.py
+++ b/salt/modules/file.py
@@ -2433,8 +2433,7 @@ def blockreplace(path,
backup='.bak',
dry_run=False,
show_changes=True,
- append_newline=False,
- ):
+ append_newline=False):
'''
.. versionadded:: 2014.1.0
@@ -2481,18 +2480,30 @@ def blockreplace(path,
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
- dry_run
- Don't make any edits to the file.
+ dry_run : False
+ If ``True``, do not make any edits to the file and simply return the
+ changes that *would* be made.
- show_changes
- Output a unified diff of the old file and the new file. If ``False``,
- return a boolean if any changes were made.
+ show_changes : True
+ Controls how changes are presented. If ``True``, this function will
+        return a unified diff of the changes made. If ``False``, then it will
+ return a boolean (``True`` if any changes were made, otherwise
+ ``False``).
- append_newline:
- Append a newline to the content block. For more information see:
- https://github.com/saltstack/salt/issues/33686
+ append_newline : False
+ Controls whether or not a newline is appended to the content block. If
+ the value of this argument is ``True`` then a newline will be added to
+ the content block. If it is ``False``, then a newline will *not* be
+ added to the content block. If it is ``None`` then a newline will only
+ be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
+ .. versionchanged:: 2017.7.5,2018.3.1
+ New behavior added when value is ``None``.
+ .. versionchanged:: Fluorine
+ The default value of this argument will change to ``None`` to match
+          the behavior of the :py:func:`file.blockreplace state
+          <salt.states.file.blockreplace>`
CLI Example:
@@ -2502,87 +2513,137 @@ def blockreplace(path,
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
- path = os.path.expanduser(path)
-
- if not os.path.exists(path):
- raise SaltInvocationError('File not found: {0}'.format(path))
-
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
'Only one of append and prepend_if_not_found is permitted'
)
+ path = os.path.expanduser(path)
+
+ if not os.path.exists(path):
+ raise SaltInvocationError('File not found: {0}'.format(path))
+
if not __utils__['files.is_text'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
- # Search the file; track if any changes have been made for the return val
+ if append_newline is None and not content.endswith((os.linesep, '\n')):
+ append_newline = True
+
+ # Split the content into a list of lines, removing newline characters. To
+ # ensure that we handle both Windows and POSIX newlines, first split on
+ # Windows newlines, and then split on POSIX newlines.
+ split_content = []
+ for win_line in content.split('\r\n'):
+ for content_line in win_line.split('\n'):
+ split_content.append(content_line)
+
+ line_count = len(split_content)
+
has_changes = False
orig_file = []
new_file = []
in_block = False
- old_content = ''
- done = False
- # we do not use in_place editing to avoid file attrs modifications when
+ block_found = False
+ linesep = None
+
+ def _add_content(linesep, lines=None, include_marker_start=True,
+ end_line=None):
+ if lines is None:
+ lines = []
+ include_marker_start = True
+
+ if end_line is None:
+ end_line = marker_end
+ end_line = end_line.rstrip('\r\n') + linesep
+
+ if include_marker_start:
+ lines.append(marker_start + linesep)
+
+ if split_content:
+ for index, content_line in enumerate(split_content, 1):
+ if index != line_count:
+ lines.append(content_line + linesep)
+ else:
+ # We're on the last line of the content block
+ if append_newline:
+ lines.append(content_line + linesep)
+ lines.append(end_line)
+ else:
+ lines.append(content_line + end_line)
+ else:
+ lines.append(end_line)
+
+ return lines
+
+ # We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
- # we could also use salt.utils.filebuffer.BufferedReader
+ #
+ # We could also use salt.utils.filebuffer.BufferedReader
try:
- fi_file = fileinput.input(path,
- inplace=False, backup=False,
- bufsize=1, mode='rb')
- for line in fi_file:
+ fi_file = fileinput.input(
+ path,
+ inplace=False,
+ backup=False,
+ bufsize=1,
+ mode='rb')
+ for line in fi_file:
line = salt.utils.stringutils.to_unicode(line)
- result = line
+ write_line_to_new_file = True
+
+ if linesep is None:
+ # Auto-detect line separator
+ if line.endswith('\r\n'):
+ linesep = '\r\n'
+ elif line.endswith('\n'):
+ linesep = '\n'
+ else:
+ # No newline(s) in file, fall back to system's linesep
+ linesep = os.linesep
if marker_start in line:
- # managed block start found, start recording
+ # We've entered the content block
in_block = True
-
else:
if in_block:
- if marker_end in line:
- # end of block detected
+ # We're not going to write the lines from the old file to
+ # the new file until we have exited the block.
+ write_line_to_new_file = False
+
+ marker_end_pos = line.find(marker_end)
+ if marker_end_pos != -1:
+ # End of block detected
in_block = False
+ # We've found and exited the block
+ block_found = True
- # Handle situations where there may be multiple types
- # of line endings in the same file. Separate the content
- # into lines. Account for Windows-style line endings
- # using os.linesep, then by linux-style line endings
- # using '\n'
- split_content = []
- for linesep_line in content.split(os.linesep):
- for content_line in linesep_line.split('\n'):
- split_content.append(content_line)
-
- # Trim any trailing new lines to avoid unwanted
- # additional new lines
- while not split_content[-1]:
- split_content.pop()
-
- # push new block content in file
- for content_line in split_content:
- new_file.append(content_line + os.linesep)
-
- done = True
-
- else:
- # remove old content, but keep a trace
- old_content += line
- result = None
- # else: we are not in the marked block, keep saving things
+ _add_content(linesep, lines=new_file,
+ include_marker_start=False,
+ end_line=line[marker_end_pos:])
+ # Save the line from the original file
orig_file.append(line)
- if result is not None:
- new_file.append(result)
- # end for. If we are here without block management we maybe have some problems,
- # or we need to initialise the marked block
+ if write_line_to_new_file:
+ new_file.append(line)
+ except (IOError, OSError) as exc:
+ raise CommandExecutionError(
+ 'Failed to read from {0}: {1}'.format(path, exc)
+ )
finally:
- fi_file.close()
+ if linesep is None:
+ # If the file was empty, we will not have set linesep yet. Assume
+ # the system's line separator. This is needed for when we
+ # prepend/append later on.
+ linesep = os.linesep
+ try:
+ fi_file.close()
+ except Exception:
+ pass
if in_block:
# unterminated block => bad, always fail
@@ -2590,35 +2651,27 @@ def blockreplace(path,
'Unterminated marked block. End of file reached before marker_end.'
)
- if not done:
+ if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
- new_file.insert(0, marker_end + os.linesep)
- if append_newline is True:
- new_file.insert(0, content + os.linesep)
- else:
- new_file.insert(0, content)
- new_file.insert(0, marker_start + os.linesep)
- done = True
+ prepended_content = _add_content(linesep)
+ prepended_content.extend(new_file)
+ new_file = prepended_content
+ block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
- if not new_file[-1].endswith(os.linesep):
- new_file[-1] += os.linesep
+ if not new_file[-1].endswith(linesep):
+ new_file[-1] += linesep
# add the markers and content at the end of file
- new_file.append(marker_start + os.linesep)
- if append_newline is True:
- new_file.append(content + os.linesep)
- else:
- new_file.append(content)
- new_file.append(marker_end + os.linesep)
- done = True
+ _add_content(linesep, lines=new_file)
+ block_found = True
else:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
- if done:
+ if block_found:
diff = ''.join(difflib.unified_diff(orig_file, new_file))
has_changes = diff is not ''
if has_changes and not dry_run:
@@ -5361,30 +5414,25 @@ def manage_file(name,
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
- if mode:
- current_umask = os.umask(0o77)
-
- # Create a new file when test is False and source is None
- if contents is None:
- if not __opts__['test']:
- if touch(name):
- ret['changes']['new'] = 'file {0} created'.format(name)
- ret['comment'] = 'Empty file'
- else:
- return _error(
- ret, 'Empty file {0} not created'.format(name)
- )
- else:
- if not __opts__['test']:
- if touch(name):
- ret['changes']['diff'] = 'New file'
- else:
- return _error(
- ret, 'File {0} not created'.format(name)
- )
-
- if mode:
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o077 if mode else None):
+ # Create a new file when test is False and source is None
+ if contents is None:
+ if not __opts__['test']:
+ if touch(name):
+ ret['changes']['new'] = 'file {0} created'.format(name)
+ ret['comment'] = 'Empty file'
+ else:
+ return _error(
+ ret, 'Empty file {0} not created'.format(name)
+ )
+ else:
+ if not __opts__['test']:
+ if touch(name):
+ ret['changes']['diff'] = 'New file'
+ else:
+ return _error(
+ ret, 'File {0} not created'.format(name)
+ )
if contents is not None:
# Write the static contents to a temporary file
@@ -5418,8 +5466,7 @@ def manage_file(name,
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
- mask = os.umask(0)
- os.umask(mask)
+ mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
diff --git a/salt/modules/gentoo_service.py b/salt/modules/gentoo_service.py
index 545d0772f5..2bbb112433 100644
--- a/salt/modules/gentoo_service.py
+++ b/salt/modules/gentoo_service.py
@@ -39,9 +39,9 @@ def __virtual__():
'only available on Gentoo/Open-RC systems.')
-def _ret_code(cmd):
+def _ret_code(cmd, ignore_retcode=False):
log.debug('executing [{0}]'.format(cmd))
- sts = __salt__['cmd.retcode'](cmd, python_shell=False)
+ sts = __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=ignore_retcode)
return sts
@@ -270,7 +270,7 @@ def status(name, sig=None):
results = {}
for service in services:
cmd = _service_cmd(service, 'status')
- results[service] = not _ret_code(cmd)
+ results[service] = not _ret_code(cmd, ignore_retcode=True)
if contains_globbing:
return results
return results[name]
diff --git a/salt/modules/git.py b/salt/modules/git.py
index dda2868dbe..59a1c64bc9 100644
--- a/salt/modules/git.py
+++ b/salt/modules/git.py
@@ -67,6 +67,7 @@ def _config_getter(get_opt,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
Common code for config.get_* functions, builds and runs the git CLI command
@@ -94,7 +95,8 @@ def _config_getter(get_opt,
value_regex = None
command = ['git', 'config']
- command.extend(_which_git_config(global_, cwd, user, password))
+ command.extend(_which_git_config(global_, cwd, user, password,
+ output_encoding=output_encoding))
command.append(get_opt)
command.append(key)
if value_regex is not None:
@@ -104,7 +106,8 @@ def _config_getter(get_opt,
user=user,
password=password,
ignore_retcode=ignore_retcode,
- failhard=False)
+ failhard=False,
+ output_encoding=output_encoding)
def _expand_path(cwd, user):
@@ -210,7 +213,7 @@ def _find_ssh_exe():
def _git_run(command, cwd=None, user=None, password=None, identity=None,
ignore_retcode=False, failhard=True, redirect_stderr=False,
- saltenv='base', **kwargs):
+ saltenv='base', output_encoding=None, **kwargs):
'''
simple, throw an exception with the error message on an error return code.
@@ -218,6 +221,9 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
'cmd.run_all', and used as an alternative to 'cmd.run_all'. Some
commands don't return proper retcodes, so this can't replace 'cmd.run_all'.
'''
+ if salt.utils.platform.is_windows() and output_encoding is None:
+ output_encoding = 'utf-8'
+
env = {}
if identity:
@@ -312,6 +318,7 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
log_callback=salt.utils.url.redact_http_basic_auth,
ignore_retcode=ignore_retcode,
redirect_stderr=redirect_stderr,
+ output_encoding=output_encoding,
**kwargs)
finally:
if tmp_ssh_wrapper:
@@ -378,6 +385,7 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
log_callback=salt.utils.url.redact_http_basic_auth,
ignore_retcode=ignore_retcode,
redirect_stderr=redirect_stderr,
+ output_encoding=output_encoding,
**kwargs)
if result['retcode'] == 0:
@@ -399,7 +407,7 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None,
return result
-def _get_toplevel(path, user=None, password=None):
+def _get_toplevel(path, user=None, password=None, output_encoding=None):
'''
Use git rev-parse to return the top level of a repo
'''
@@ -407,10 +415,11 @@ def _get_toplevel(path, user=None, password=None):
['git', 'rev-parse', '--show-toplevel'],
cwd=path,
user=user,
- password=password)['stdout']
+ password=password,
+ output_encoding=output_encoding)['stdout']
-def _git_config(cwd, user, password):
+def _git_config(cwd, user, password, output_encoding=None):
'''
Helper to retrieve git config options
'''
@@ -420,7 +429,8 @@ def _git_config(cwd, user, password):
opts=['--git-dir'],
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
if not os.path.isabs(git_dir):
paths = (cwd, git_dir, 'config')
else:
@@ -429,7 +439,7 @@ def _git_config(cwd, user, password):
return __context__[contextkey]
-def _which_git_config(global_, cwd, user, password):
+def _which_git_config(global_, cwd, user, password, output_encoding=None):
'''
Based on whether global or local config is desired, return a list of CLI
args to include in the git config command.
@@ -442,7 +452,8 @@ def _which_git_config(global_, cwd, user, password):
return ['--local']
else:
# For earlier versions, need to specify the path to the git config file
- return ['--file', _git_config(cwd, user, password)]
+ return ['--file', _git_config(cwd, user, password,
+ output_encoding=output_encoding)]
def add(cwd,
@@ -451,7 +462,8 @@ def add(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionchanged:: 2015.8.0
The ``--verbose`` command line argument is now implied
@@ -498,8 +510,31 @@ def add(cwd,
.. versionadded:: 2015.8.0
- .. _`git-add(1)`: http://git-scm.com/docs/git-add
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-add(1)`: http://git-scm.com/docs/git-add
CLI Examples:
@@ -519,7 +554,8 @@ def add(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def archive(cwd,
@@ -530,6 +566,7 @@ def archive(cwd,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
.. versionchanged:: 2015.8.0
@@ -621,8 +658,31 @@ def archive(cwd,
.. versionadded:: 2015.8.0
- .. _`git-archive(1)`: http://git-scm.com/docs/git-archive
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-archive(1)`: http://git-scm.com/docs/git-archive
CLI Example:
@@ -657,7 +717,8 @@ def archive(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
# No output (unless --verbose is used, and we don't want all files listed
# in the output in case there are thousands), so just return True. If there
# was an error in the git command, it will have already raised an exception
@@ -671,7 +732,8 @@ def branch(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-branch(1)`_
@@ -722,8 +784,31 @@ def branch(cwd,
.. versionadded:: 2015.8.0
- .. _`git-branch(1)`: http://git-scm.com/docs/git-branch
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-branch(1)`: http://git-scm.com/docs/git-branch
CLI Examples:
@@ -748,7 +833,8 @@ def branch(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
return True
@@ -759,7 +845,8 @@ def checkout(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-checkout(1)`_
@@ -810,8 +897,31 @@ def checkout(cwd,
.. versionadded:: 2015.8.0
- .. _`git-checkout(1)`: http://git-scm.com/docs/git-checkout
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-checkout(1)`: http://git-scm.com/docs/git-checkout
CLI Examples:
@@ -847,7 +957,8 @@ def checkout(cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
- redirect_stderr=True)['stdout']
+ redirect_stderr=True,
+ output_encoding=output_encoding)['stdout']
def clone(cwd,
@@ -861,7 +972,8 @@ def clone(cwd,
https_user=None,
https_pass=None,
ignore_retcode=False,
- saltenv='base'):
+ saltenv='base',
+ output_encoding=None):
'''
Interface to `git-clone(1)`_
@@ -951,6 +1063,30 @@ def clone(cwd,
.. versionadded:: 2016.3.1
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-clone(1)`: http://git-scm.com/docs/git-clone
CLI Example:
@@ -996,7 +1132,8 @@ def clone(cwd,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)
+ saltenv=saltenv,
+ output_encoding=output_encoding)
return True
@@ -1007,7 +1144,8 @@ def commit(cwd,
user=None,
password=None,
filename=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-commit(1)`_
@@ -1066,8 +1204,31 @@ def commit(cwd,
.. versionadded:: 2015.8.0
- .. _`git-commit(1)`: http://git-scm.com/docs/git-commit
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-commit(1)`: http://git-scm.com/docs/git-commit
CLI Examples:
@@ -1088,7 +1249,8 @@ def commit(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def config_get(key,
@@ -1096,6 +1258,7 @@ def config_get(key,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
Get the value of a key in the git configuration file
@@ -1140,6 +1303,29 @@ def config_get(key,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -1160,6 +1346,7 @@ def config_get(key,
user=user,
password=password,
ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding,
**kwargs)
# git config --get exits with retcode of 1 when key does not exist
@@ -1182,6 +1369,7 @@ def config_get_regexp(key,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
r'''
.. versionadded:: 2015.8.0
@@ -1226,6 +1414,29 @@ def config_get_regexp(key,
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -1245,6 +1456,7 @@ def config_get_regexp(key,
user=user,
password=password,
ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding,
**kwargs)
# git config --get exits with retcode of 1 when key does not exist
@@ -1269,6 +1481,7 @@ def config_set(key,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
.. versionchanged:: 2015.8.0
@@ -1326,7 +1539,31 @@ def config_set(key,
global : False
If ``True``, set a global variable
- CLI Example:
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ CLI Examples:
.. code-block:: bash
@@ -1382,7 +1619,8 @@ def config_set(key,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
else:
for idx, target in enumerate(multivar):
command = copy.copy(command_prefix)
@@ -1395,12 +1633,14 @@ def config_set(key,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
return config_get(key,
user=user,
password=password,
cwd=cwd,
ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding,
**{'all': True, 'global': global_})
@@ -1410,6 +1650,7 @@ def config_unset(key,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2015.8.0
@@ -1449,6 +1690,29 @@ def config_unset(key,
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module `. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Example:
@@ -1476,7 +1740,8 @@ def config_unset(key,
command.append('--unset-all')
else:
command.append('--unset')
- command.extend(_which_git_config(global_, cwd, user, password))
+ command.extend(_which_git_config(global_, cwd, user, password,
+ output_encoding=output_encoding))
command.append(key)
if value_regex is not None:
@@ -1486,7 +1751,8 @@ def config_unset(key,
user=user,
password=password,
ignore_retcode=ignore_retcode,
- failhard=False)
+ failhard=False,
+ output_encoding=output_encoding)
retcode = ret['retcode']
if retcode == 0:
return True
@@ -1497,7 +1763,8 @@ def config_unset(key,
key,
user=user,
password=password,
- ignore_retcode=ignore_retcode) is None:
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding) is None:
raise CommandExecutionError(
'Key \'{0}\' does not exist'.format(key)
)
@@ -1521,7 +1788,8 @@ def config_unset(key,
def current_branch(cwd,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Returns the current branch name of a local checkout. If HEAD is detached,
return the SHA1 of the revision which is currently checked out.
@@ -1545,6 +1813,29 @@ def current_branch(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Example:
@@ -1558,14 +1849,16 @@ def current_branch(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def describe(cwd,
rev='HEAD',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no
tags) for the given revision.
@@ -1592,8 +1885,31 @@ def describe(cwd,
.. versionadded:: 2015.8.0
- .. _`git-describe(1)`: http://git-scm.com/docs/git-describe
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-describe(1)`: http://git-scm.com/docs/git-describe
CLI Examples:
@@ -1611,7 +1927,8 @@ def describe(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def diff(cwd,
@@ -1623,7 +1940,8 @@ def diff(cwd,
password=None,
no_index=False,
cached=False,
- paths=None):
+ paths=None,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.12,2016.3.3,2016.11.0
@@ -1691,8 +2009,31 @@ def diff(cwd,
File paths to pass to the ``git diff`` command. Can be passed as a
comma-separated list or a Python list.
- .. _`git-diff(1)`: http://git-scm.com/docs/git-diff
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-diff(1)`: http://git-scm.com/docs/git-diff
CLI Example:
@@ -1770,7 +2111,8 @@ def diff(cwd,
password=password,
ignore_retcode=ignore_retcode,
failhard=failhard,
- redirect_stderr=True)['stdout']
+ redirect_stderr=True,
+ output_encoding=output_encoding)['stdout']
def fetch(cwd,
@@ -1783,7 +2125,8 @@ def fetch(cwd,
password=None,
identity=None,
ignore_retcode=False,
- saltenv='base'):
+ saltenv='base',
+ output_encoding=None):
'''
.. versionchanged:: 2015.8.2
Return data is now a dictionary containing information on branches and
@@ -1873,8 +2216,31 @@ def fetch(cwd,
.. versionadded:: 2016.3.1
- .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch
CLI Example:
@@ -1908,7 +2274,8 @@ def fetch(cwd,
identity=identity,
ignore_retcode=ignore_retcode,
redirect_stderr=True,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
update_re = re.compile(
r'[\s*]*(?:([0-9a-f]+)\.\.([0-9a-f]+)|'
@@ -1946,7 +2313,8 @@ def init(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-init(1)`_
@@ -2008,6 +2376,30 @@ def init(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-init(1)`: http://git-scm.com/docs/git-init
.. _`template directory`: http://git-scm.com/docs/git-init#_template_directory
@@ -2045,12 +2437,14 @@ def init(cwd,
return _git_run(command,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def is_worktree(cwd,
user=None,
- password=None):
+ password=None,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -2071,6 +2465,29 @@ def is_worktree(cwd,
.. versionadded:: 2016.3.4
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Example:
@@ -2080,7 +2497,8 @@ def is_worktree(cwd,
'''
cwd = _expand_path(cwd, user)
try:
- toplevel = _get_toplevel(cwd, user=user, password=password)
+ toplevel = _get_toplevel(cwd, user=user, password=password,
+ output_encoding=output_encoding)
except CommandExecutionError:
return False
gitdir = os.path.join(toplevel, '.git')
@@ -2111,7 +2529,8 @@ def list_branches(cwd,
remote=False,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -2146,6 +2565,29 @@ def list_branches(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -2161,13 +2603,15 @@ def list_branches(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout'].splitlines()
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout'].splitlines()
def list_tags(cwd,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -2192,6 +2636,29 @@ def list_tags(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -2206,13 +2673,15 @@ def list_tags(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout'].splitlines()
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout'].splitlines()
def list_worktrees(cwd,
stale=False,
user=None,
password=None,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2015.8.0
@@ -2255,8 +2724,31 @@ def list_worktrees(cwd,
.. note::
Only one of ``all`` and ``stale`` can be set to ``True``.
- .. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
CLI Examples:
@@ -2279,14 +2771,16 @@ def list_worktrees(cwd,
'\'all\' and \'stale\' cannot both be set to True'
)
- def _git_tag_points_at(cwd, rev, user=None, password=None):
+ def _git_tag_points_at(cwd, rev, user=None, password=None,
+ output_encoding=None):
'''
Get any tags that point at a
'''
return _git_run(['git', 'tag', '--points-at', rev],
cwd=cwd,
user=user,
- password=password)['stdout'].splitlines()
+ password=password,
+ output_encoding=output_encoding)['stdout'].splitlines()
def _desired(is_stale, all_, stale):
'''
@@ -2323,7 +2817,8 @@ def list_worktrees(cwd,
out = _git_run(['git', 'worktree', 'list', '--porcelain'],
cwd=cwd,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
if out['retcode'] != 0:
msg = 'Failed to list worktrees'
if out['stderr']:
@@ -2393,7 +2888,8 @@ def list_worktrees(cwd,
tags_found = _git_tag_points_at(cwd,
wt_ptr['HEAD'],
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
if tags_found:
wt_ptr['tags'] = tags_found
else:
@@ -2403,12 +2899,14 @@ def list_worktrees(cwd,
return ret
else:
- toplevel = _get_toplevel(cwd, user=user, password=password)
+ toplevel = _get_toplevel(cwd, user=user, password=password,
+ output_encoding=output_encoding)
try:
worktree_root = rev_parse(cwd,
opts=['--git-path', 'worktrees'],
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
msg = 'Failed to find worktree location for ' + cwd
log.error(msg, exc_info_on_loglevel=logging.DEBUG)
@@ -2474,7 +2972,8 @@ def list_worktrees(cwd,
wt_head = rev_parse(cwd,
rev=head_ref,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
wt_detached = False
else:
wt_branch = None
@@ -2492,7 +2991,8 @@ def list_worktrees(cwd,
tags_found = _git_tag_points_at(cwd,
wt_head,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
if tags_found:
wt_ptr['tags'] = tags_found
@@ -2510,6 +3010,7 @@ def ls_remote(cwd=None,
https_user=None,
https_pass=None,
ignore_retcode=False,
+ output_encoding=None,
saltenv='base'):
'''
Interface to `git-ls-remote(1)`_. Returns the upstream hash for a remote
@@ -2609,8 +3110,31 @@ def ls_remote(cwd=None,
.. versionadded:: 2016.3.1
- .. _`git-ls-remote(1)`: http://git-scm.com/docs/git-ls-remote
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-ls-remote(1)`: http://git-scm.com/docs/git-ls-remote
CLI Example:
@@ -2640,7 +3164,8 @@ def ls_remote(cwd=None,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
ret = {}
for line in output.splitlines():
try:
@@ -2658,6 +3183,7 @@ def merge(cwd,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
Interface to `git-merge(1)`_
@@ -2705,8 +3231,31 @@ def merge(cwd,
.. versionadded:: 2015.8.0
- .. _`git-merge(1)`: http://git-scm.com/docs/git-merge
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-merge(1)`: http://git-scm.com/docs/git-merge
CLI Example:
@@ -2733,7 +3282,8 @@ def merge(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def merge_base(cwd,
@@ -2747,6 +3297,7 @@ def merge_base(cwd,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2015.8.0
@@ -2833,10 +3384,33 @@ def merge_base(cwd,
if ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-merge-base(1)`: http://git-scm.com/docs/git-merge-base
.. _here: http://git-scm.com/docs/git-merge-base#_discussion
-
CLI Examples:
.. code-block:: bash
@@ -2898,13 +3472,15 @@ def merge_base(cwd,
opts=['--verify'],
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
return merge_base(cwd,
refs=refs,
is_ancestor=False,
user=user,
password=password,
- ignore_retcode=ignore_retcode) == first_commit
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding) == first_commit
command = ['git'] + _format_git_opts(git_opts)
command.append('merge-base')
@@ -2925,7 +3501,8 @@ def merge_base(cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
- failhard=False if is_ancestor else True)
+ failhard=False if is_ancestor else True,
+ output_encoding=output_encoding)
if is_ancestor:
return result['retcode'] == 0
all_bases = result['stdout'].splitlines()
@@ -2940,7 +3517,8 @@ def merge_tree(cwd,
base=None,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -2975,8 +3553,31 @@ def merge_tree(cwd,
if ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
- .. _`git-merge-tree(1)`: http://git-scm.com/docs/git-merge-tree
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-merge-tree(1)`: http://git-scm.com/docs/git-merge-tree
CLI Examples:
@@ -2989,7 +3590,8 @@ def merge_tree(cwd,
command = ['git', 'merge-tree']
if base is None:
try:
- base = merge_base(cwd, refs=[ref1, ref2])
+ base = merge_base(cwd, refs=[ref1, ref2],
+ output_encoding=output_encoding)
except (SaltInvocationError, CommandExecutionError):
raise CommandExecutionError(
'Unable to determine merge base for {0} and {1}'
@@ -3000,7 +3602,8 @@ def merge_tree(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def pull(cwd,
@@ -3010,7 +3613,8 @@ def pull(cwd,
password=None,
identity=None,
ignore_retcode=False,
- saltenv='base'):
+ saltenv='base',
+ output_encoding=None):
'''
Interface to `git-pull(1)`_
@@ -3079,6 +3683,30 @@ def pull(cwd,
.. versionadded:: 2016.3.1
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-pull(1)`: http://git-scm.com/docs/git-pull
CLI Example:
@@ -3097,7 +3725,8 @@ def pull(cwd,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
def push(cwd,
@@ -3110,6 +3739,7 @@ def push(cwd,
identity=None,
ignore_retcode=False,
saltenv='base',
+ output_encoding=None,
**kwargs):
'''
Interface to `git-push(1)`_
@@ -3191,6 +3821,30 @@ def push(cwd,
.. versionadded:: 2016.3.1
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-push(1)`: http://git-scm.com/docs/git-push
.. _refspec: http://git-scm.com/book/en/v2/Git-Internals-The-Refspec
@@ -3220,7 +3874,8 @@ def push(cwd,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
def rebase(cwd,
@@ -3229,7 +3884,8 @@ def rebase(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-rebase(1)`_
@@ -3273,6 +3929,30 @@ def rebase(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-rebase(1)`: http://git-scm.com/docs/git-rebase
@@ -3298,7 +3978,8 @@ def rebase(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def remote_get(cwd,
@@ -3306,7 +3987,8 @@ def remote_get(cwd,
user=None,
password=None,
redact_auth=True,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Get the fetch and push URL for a specific remote
@@ -3344,6 +4026,29 @@ def remote_get(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -3357,7 +4062,8 @@ def remote_get(cwd,
user=user,
password=password,
redact_auth=redact_auth,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
if remote not in all_remotes:
raise CommandExecutionError(
'Remote \'{0}\' not present in git checkout located at {1}'
@@ -3375,6 +4081,7 @@ def remote_refs(url,
https_user=None,
https_pass=None,
ignore_retcode=False,
+ output_encoding=None,
saltenv='base'):
'''
.. versionadded:: 2015.8.0
@@ -3438,6 +4145,30 @@ def remote_refs(url,
.. versionadded:: 2016.3.1
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
CLI Example:
.. code-block:: bash
@@ -3461,7 +4192,8 @@ def remote_refs(url,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
ret = {}
for line in salt.utils.itertools.split(output, '\n'):
try:
@@ -3482,7 +4214,8 @@ def remote_set(cwd,
push_url=None,
push_https_user=None,
push_https_pass=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
cwd
The path to the git checkout
@@ -3536,6 +4269,29 @@ def remote_set(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -3546,7 +4302,8 @@ def remote_set(cwd,
salt myminion git.remote_set /path/to/repo https://github.com/user/repo.git remote=upstream push_url=git@github.com:user/repo.git
'''
# Check if remote exists
- if remote in remotes(cwd, user=user, password=password):
+ if remote in remotes(cwd, user=user, password=password,
+ output_encoding=output_encoding):
log.debug(
'Remote \'%s\' already exists in git checkout located at %s, '
'removing so it can be re-added', remote, cwd
@@ -3556,7 +4313,8 @@ def remote_set(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
# Add remote
try:
url = salt.utils.url.add_http_basic_auth(url,
@@ -3570,7 +4328,8 @@ def remote_set(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
if push_url:
if not isinstance(push_url, six.string_types):
push_url = six.text_type(push_url)
@@ -3586,19 +4345,22 @@ def remote_set(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
return remote_get(cwd=cwd,
remote=remote,
user=user,
password=password,
- ignore_retcode=ignore_retcode)
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)
def remotes(cwd,
user=None,
password=None,
redact_auth=True,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Get fetch and push URLs for each remote in a git checkout
@@ -3634,6 +4396,29 @@ def remotes(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Example:
@@ -3648,7 +4433,8 @@ def remotes(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
for remote_line in salt.utils.itertools.split(output, '\n'):
try:
remote, remote_info = remote_line.split(None, 1)
@@ -3677,7 +4463,8 @@ def reset(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-reset(1)`_, returns the stdout from the git command
@@ -3718,8 +4505,31 @@ def reset(cwd,
.. versionadded:: 2015.8.0
- .. _`git-reset(1)`: http://git-scm.com/docs/git-reset
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-reset(1)`: http://git-scm.com/docs/git-reset
CLI Examples:
@@ -3738,7 +4548,8 @@ def reset(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def rev_parse(cwd,
@@ -3747,7 +4558,8 @@ def rev_parse(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -3791,11 +4603,34 @@ def rev_parse(cwd,
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-rev-parse(1)`: http://git-scm.com/docs/git-rev-parse
.. _`SPECIFYING REVISIONS`: http://git-scm.com/docs/git-rev-parse#_specifying_revisions
.. _`Options for Files`: http://git-scm.com/docs/git-rev-parse#_options_for_files
-
CLI Examples:
.. code-block:: bash
@@ -3821,7 +4656,8 @@ def rev_parse(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def revision(cwd,
@@ -3829,7 +4665,8 @@ def revision(cwd,
short=False,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Returns the SHA1 hash of a given identifier (hash, branch, tag, HEAD, etc.)
@@ -3858,6 +4695,30 @@ def revision(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
CLI Example:
.. code-block:: bash
@@ -3873,7 +4734,8 @@ def revision(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def rm_(cwd,
@@ -3882,7 +4744,8 @@ def rm_(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-rm(1)`_
@@ -3930,8 +4793,31 @@ def rm_(cwd,
.. versionadded:: 2015.8.0
- .. _`git-rm(1)`: http://git-scm.com/docs/git-rm
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-rm(1)`: http://git-scm.com/docs/git-rm
CLI Examples:
@@ -3950,7 +4836,8 @@ def rm_(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def stash(cwd,
@@ -3959,7 +4846,8 @@ def stash(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
Interface to `git-stash(1)`_, returns the stdout from the git command
@@ -3999,8 +4887,31 @@ def stash(cwd,
.. versionadded:: 2015.8.0
- .. _`git-stash(1)`: http://git-scm.com/docs/git-stash
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-stash(1)`: http://git-scm.com/docs/git-stash
CLI Examples:
@@ -4019,13 +4930,15 @@ def stash(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def status(cwd,
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionchanged:: 2015.8.0
Return data has changed from a list of lists to a dictionary
@@ -4051,6 +4964,29 @@ def status(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Example:
@@ -4071,7 +5007,8 @@ def status(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
for line in output.split('\0'):
try:
state, filename = line.split(None, 1)
@@ -4090,6 +5027,7 @@ def submodule(cwd,
identity=None,
ignore_retcode=False,
saltenv='base',
+ output_encoding=None,
**kwargs):
'''
.. versionchanged:: 2015.8.0
@@ -4181,6 +5119,30 @@ def submodule(cwd,
.. versionadded:: 2016.3.1
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-submodule(1)`: http://git-scm.com/docs/git-submodule
CLI Example:
@@ -4222,7 +5184,8 @@ def submodule(cwd,
password=password,
identity=identity,
ignore_retcode=ignore_retcode,
- saltenv=saltenv)['stdout']
+ saltenv=saltenv,
+ output_encoding=output_encoding)['stdout']
def symbolic_ref(cwd,
@@ -4232,7 +5195,8 @@ def symbolic_ref(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -4282,8 +5246,31 @@ def symbolic_ref(cwd,
.. versionadded:: 2015.8.0
- .. _`git-symbolic-ref(1)`: http://git-scm.com/docs/git-symbolic-ref
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-symbolic-ref(1)`: http://git-scm.com/docs/git-symbolic-ref
CLI Examples:
@@ -4313,7 +5300,8 @@ def symbolic_ref(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
def version(versioninfo=False):
@@ -4326,7 +5314,6 @@ def version(versioninfo=False):
If ``True``, return the version in a versioninfo list (e.g. ``[2, 5,
0]``)
-
CLI Example:
.. code-block:: bash
@@ -4377,6 +5364,7 @@ def worktree_add(cwd,
user=None,
password=None,
ignore_retcode=False,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2015.8.0
@@ -4453,8 +5441,31 @@ def worktree_add(cwd,
.. versionadded:: 2015.8.0
- .. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
+ .. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
CLI Examples:
@@ -4500,7 +5511,8 @@ def worktree_add(cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
- redirect_stderr=True)['stdout']
+ redirect_stderr=True,
+ output_encoding=output_encoding)['stdout']
def worktree_prune(cwd,
@@ -4511,7 +5523,8 @@ def worktree_prune(cwd,
git_opts='',
user=None,
password=None,
- ignore_retcode=False):
+ ignore_retcode=False,
+ output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -4574,10 +5587,33 @@ def worktree_prune(cwd,
.. versionadded:: 2015.8.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
-
CLI Examples:
.. code-block:: bash
@@ -4601,10 +5637,11 @@ def worktree_prune(cwd,
cwd=cwd,
user=user,
password=password,
- ignore_retcode=ignore_retcode)['stdout']
+ ignore_retcode=ignore_retcode,
+ output_encoding=output_encoding)['stdout']
-def worktree_rm(cwd, user=None):
+def worktree_rm(cwd, user=None, output_encoding=None):
'''
.. versionadded:: 2015.8.0
@@ -4629,6 +5666,29 @@ def worktree_rm(cwd, user=None):
running. Setting this option will change the home directory from which
path expansion is performed.
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+            module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
CLI Examples:
@@ -4640,7 +5700,7 @@ def worktree_rm(cwd, user=None):
cwd = _expand_path(cwd, user)
if not os.path.exists(cwd):
raise CommandExecutionError(cwd + ' does not exist')
- elif not is_worktree(cwd):
+ elif not is_worktree(cwd, output_encoding=output_encoding):
raise CommandExecutionError(cwd + ' is not a git worktree')
try:
salt.utils.files.rm_rf(cwd)
diff --git a/salt/modules/glance.py b/salt/modules/glance.py
index 2a7caac7e1..16b90c9072 100644
--- a/salt/modules/glance.py
+++ b/salt/modules/glance.py
@@ -103,6 +103,13 @@ def _auth(profile=None, api_version=2, **connection_args):
Only intended to be used within glance-enabled modules
'''
+ __utils__['versions.warn_until'](
+ 'Neon',
+ (
+ 'The glance module has been deprecated and will be removed in {version}. '
+ 'Please update to using the glanceng module'
+ ),
+ )
if profile:
prefix = profile + ":keystone."
diff --git a/salt/modules/hadoop.py b/salt/modules/hadoop.py
index 07fda21ba9..68c15417b1 100644
--- a/salt/modules/hadoop.py
+++ b/salt/modules/hadoop.py
@@ -88,6 +88,30 @@ def dfs(command=None, *args):
return 'Error: command must be provided'
+def dfsadmin_report(arg=None):
+ '''
+ .. versionadded:: Fluorine
+
+ Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes.
+
+ arg
+ [live] [dead] [decommissioning]
+
+ CLI Example:
+
+ .. code-block:: bash
+
+        salt '*' hadoop.dfsadmin_report
+ '''
+ if arg is not None:
+ if arg in ['live', 'dead', 'decommissioning']:
+ return _hadoop_cmd('dfsadmin', 'report', arg)
+ else:
+ return "Error: the arg is wrong, it must be in ['live', 'dead', 'decommissioning']"
+ else:
+ return _hadoop_cmd('dfsadmin', 'report')
+
+
def dfs_present(path):
'''
Check if a file or directory is present on the distributed FS.
diff --git a/salt/modules/highstate_doc.py b/salt/modules/highstate_doc.py
index d551d760f1..77a1d37848 100644
--- a/salt/modules/highstate_doc.py
+++ b/salt/modules/highstate_doc.py
@@ -178,7 +178,7 @@ If you wish to customize the document format:
- mode: '0640'
-Some `replace_text_regex` values that might be helpfull.
+Some `replace_text_regex` values that might be helpful.
## CERTS
'-----BEGIN RSA PRIVATE KEY-----[\r\n\t\f\S]{0,2200}': 'XXXXXXX'
diff --git a/salt/modules/http.py b/salt/modules/http.py
index 162460bc6e..d11eff2f13 100644
--- a/salt/modules/http.py
+++ b/salt/modules/http.py
@@ -18,7 +18,10 @@ def query(url, **kwargs):
'''
Query a resource, and decode the return data
- .. versionadded:: 2015.5.0
+ Passes through all the parameters described in the
+    :py:func:`utils.http.query function <salt.utils.http.query>`:
+
+ .. autofunction:: salt.utils.http.query
CLI Example:
diff --git a/salt/modules/infoblox.py b/salt/modules/infoblox.py
index f44b45b272..511512d284 100644
--- a/salt/modules/infoblox.py
+++ b/salt/modules/infoblox.py
@@ -367,7 +367,7 @@ def get_host_mac(name=None, allow_array=False, **api_opts):
'''
Get mac address from host record.
- Use `allow_array` to return possible mutiple values.
+ Use `allow_array` to return possible multiple values.
CLI Example:
@@ -390,7 +390,7 @@ def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts):
'''
Get ipv4 address from host record.
- Use `allow_array` to return possible mutiple values.
+ Use `allow_array` to return possible multiple values.
CLI Example:
@@ -446,7 +446,7 @@ def get_host_ipv6addr_info(ipv6addr=None, mac=None,
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks.
- This is helpfull when looking up subnets to
+ This is helpful when looking up subnets to
use with func:nextavailableip
This call is offen slow and not cached!
diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py
index 74951391cb..8d1cf3c9a0 100644
--- a/salt/modules/inspectlib/collector.py
+++ b/salt/modules/inspectlib/collector.py
@@ -513,7 +513,7 @@ if __name__ == '__main__':
sys.exit(1)
os.setsid()
- os.umask(0)
+ os.umask(0o000) # pylint: disable=blacklisted-function
try:
pid = os.fork()
diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py
index 3f526f06e9..04853b80bc 100644
--- a/salt/modules/iptables.py
+++ b/salt/modules/iptables.py
@@ -1093,6 +1093,8 @@ def _parser():
add_arg('--ahres', dest='ahres', action='append')
## bpf
add_arg('--bytecode', dest='bytecode', action='append')
+ ## cgroup
+ add_arg('--cgroup', dest='cgroup', action='append')
## cluster
add_arg('--cluster-total-nodes',
dest='cluster-total-nodes',
diff --git a/salt/modules/kapacitor.py b/salt/modules/kapacitor.py
index 7cccbfb415..fd4811bfb6 100644
--- a/salt/modules/kapacitor.py
+++ b/salt/modules/kapacitor.py
@@ -6,6 +6,8 @@ Kapacitor execution module.
parameters or as configuration settings in /etc/salt/minion on the relevant
minions::
+ kapacitor.unsafe_ssl: 'false'
+ kapacitor.protocol: 'http'
kapacitor.host: 'localhost'
kapacitor.port: 9092
@@ -40,6 +42,17 @@ def version():
return version
+def _get_url():
+ '''
+ Get the kapacitor URL.
+ '''
+ protocol = __salt__['config.option']('kapacitor.protocol', 'http')
+ host = __salt__['config.option']('kapacitor.host', 'localhost')
+ port = __salt__['config.option']('kapacitor.port', 9092)
+
+ return '{0}://{1}:{2}'.format(protocol, host, port)
+
+
def get_task(name):
'''
Get a dict of data on a task.
@@ -53,15 +66,14 @@ def get_task(name):
salt '*' kapacitor.get_task cpu
'''
- host = __salt__['config.option']('kapacitor.host', 'localhost')
- port = __salt__['config.option']('kapacitor.port', 9092)
+ url = _get_url()
if version() < '0.13':
- url = 'http://{0}:{1}/task?name={2}'.format(host, port, name)
+ task_url = '{0}/task?name={1}'.format(url, name)
else:
- url = 'http://{0}:{1}/kapacitor/v1/tasks/{2}?skip-format=true'.format(host, port, name)
+ task_url = '{0}/kapacitor/v1/tasks/{1}?skip-format=true'.format(url, name)
- response = salt.utils.http.query(url, status=True)
+ response = salt.utils.http.query(task_url, status=True)
if response['status'] == 404:
return None
@@ -89,7 +101,11 @@ def _run_cmd(cmd):
Run a Kapacitor task and return a dictionary of info.
'''
ret = {}
- result = __salt__['cmd.run_all'](cmd)
+ env_vars = {
+ 'KAPACITOR_URL': _get_url(),
+ 'KAPACITOR_UNSAFE_SSL': __salt__['config.option']('kapacitor.unsafe_ssl', 'false'),
+ }
+ result = __salt__['cmd.run_all'](cmd, env=env_vars)
if result.get('stdout'):
ret['stdout'] = result['stdout']
@@ -104,7 +120,8 @@ def define_task(name,
tick_script,
task_type='stream',
database=None,
- retention_policy='default'):
+ retention_policy='default',
+ dbrps=None):
'''
Define a task. Serves as both create/update.
@@ -117,6 +134,13 @@ def define_task(name,
task_type
Task type. Defaults to 'stream'
+ dbrps
+ A list of databases and retention policies in "dbname"."rpname" format
+ to fetch data from. For backward compatibility, the value of
+ 'database' and 'retention_policy' will be merged as part of dbrps.
+
+ .. versionadded:: Fluorine
+
database
Which database to fetch data from. Defaults to None, which will use the
default database in InfluxDB.
@@ -143,8 +167,16 @@ def define_task(name,
if task_type:
cmd += ' -type {0}'.format(task_type)
+ if not dbrps:
+ dbrps = []
+
if database and retention_policy:
- cmd += ' -dbrp {0}.{1}'.format(database, retention_policy)
+ dbrp = '{0}.{1}'.format(database, retention_policy)
+ dbrps.append(dbrp)
+
+ if dbrps:
+ for dbrp in dbrps:
+ cmd += ' -dbrp {0}'.format(dbrp)
return _run_cmd(cmd)
diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py
index f807a89f3f..d08388f7e1 100644
--- a/salt/modules/kernelpkg_linux_apt.py
+++ b/salt/modules/kernelpkg_linux_apt.py
@@ -189,7 +189,7 @@ def upgrade(reboot=False, at_time=None):
def upgrade_available():
'''
Detect if a new kernel version is available in the repositories.
- Returns True if a new kernel is avaliable, False otherwise.
+ Returns True if a new kernel is available, False otherwise.
CLI Example:
diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py
index 3cd90a922f..c9da824523 100644
--- a/salt/modules/kernelpkg_linux_yum.py
+++ b/salt/modules/kernelpkg_linux_yum.py
@@ -182,7 +182,7 @@ def upgrade(reboot=False, at_time=None):
def upgrade_available():
'''
Detect if a new kernel version is available in the repositories.
- Returns True if a new kernel is avaliable, False otherwise.
+ Returns True if a new kernel is available, False otherwise.
CLI Example:
diff --git a/salt/modules/keystone.py b/salt/modules/keystone.py
index 2f62d262f4..ba871f1deb 100644
--- a/salt/modules/keystone.py
+++ b/salt/modules/keystone.py
@@ -163,6 +163,13 @@ def auth(profile=None, **connection_args):
salt '*' keystone.auth
'''
+ __utils__['versions.warn_until'](
+ 'Neon',
+ (
+ 'The keystone module has been deprecated and will be removed in {version}. '
+            'Please update to using the keystoneng module'
+ ),
+ )
kwargs = _get_kwargs(profile=profile, **connection_args)
disc = discover.Discover(auth_url=kwargs['auth_url'])
diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py
index ecd5760920..4dcc99b658 100644
--- a/salt/modules/kubernetes.py
+++ b/salt/modules/kubernetes.py
@@ -6,32 +6,42 @@ Module for handling kubernetes calls.
:configuration: The k8s API settings are provided either in a pillar, in
the minion's config file, or in master's config file::
- kubernetes.user: admin
- kubernetes.password: verybadpass
- kubernetes.api_url: 'http://127.0.0.1:8080'
- kubernetes.certificate-authority-data: '...'
- kubernetes.client-certificate-data: '....n
- kubernetes.client-key-data: '...'
- kubernetes.certificate-authority-file: '/path/to/ca.crt'
- kubernetes.client-certificate-file: '/path/to/client.crt'
- kubernetes.client-key-file: '/path/to/client.key'
+ kubernetes.kubeconfig: '/path/to/kubeconfig'
+        kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content>'
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def node(name, **kwargs):
@@ -243,7 +298,7 @@ def node(name, **kwargs):
salt '*' kubernetes.node name='minikube'
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_node()
@@ -254,7 +309,7 @@ def node(name, **kwargs):
log.exception('Exception when calling CoreV1Api->list_node')
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
for k8s_node in api_response.items:
if k8s_node.metadata.name == name:
@@ -290,7 +345,7 @@ def node_add_label(node_name, label_name, label_value, **kwargs):
salt '*' kubernetes.node_add_label node_name="minikube" \
label_name="foo" label_value="bar"
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
body = {
@@ -308,7 +363,7 @@ def node_add_label(node_name, label_name, label_value, **kwargs):
log.exception('Exception when calling CoreV1Api->patch_node')
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
return None
@@ -323,7 +378,7 @@ def node_remove_label(node_name, label_name, **kwargs):
salt '*' kubernetes.node_remove_label node_name="minikube" \
label_name="foo"
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
body = {
@@ -341,7 +396,7 @@ def node_remove_label(node_name, label_name, **kwargs):
log.exception('Exception when calling CoreV1Api->patch_node')
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
return None
@@ -353,9 +408,9 @@ def namespaces(**kwargs):
CLI Examples::
salt '*' kubernetes.namespaces
- salt '*' kubernetes.namespaces api_url=http://myhost:port api_user=my-user
+ salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespace()
@@ -368,7 +423,7 @@ def namespaces(**kwargs):
log.exception('Exception when calling CoreV1Api->list_namespace')
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def deployments(namespace='default', **kwargs):
@@ -380,7 +435,7 @@ def deployments(namespace='default', **kwargs):
salt '*' kubernetes.deployments
salt '*' kubernetes.deployments namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.list_namespaced_deployment(namespace)
@@ -396,7 +451,7 @@ def deployments(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def services(namespace='default', **kwargs):
@@ -408,7 +463,7 @@ def services(namespace='default', **kwargs):
salt '*' kubernetes.services
salt '*' kubernetes.services namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_service(namespace)
@@ -424,7 +479,7 @@ def services(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def pods(namespace='default', **kwargs):
@@ -436,7 +491,7 @@ def pods(namespace='default', **kwargs):
salt '*' kubernetes.pods
salt '*' kubernetes.pods namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_pod(namespace)
@@ -452,7 +507,7 @@ def pods(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def secrets(namespace='default', **kwargs):
@@ -464,7 +519,7 @@ def secrets(namespace='default', **kwargs):
salt '*' kubernetes.secrets
salt '*' kubernetes.secrets namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_secret(namespace)
@@ -480,7 +535,7 @@ def secrets(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def configmaps(namespace='default', **kwargs):
@@ -492,7 +547,7 @@ def configmaps(namespace='default', **kwargs):
salt '*' kubernetes.configmaps
salt '*' kubernetes.configmaps namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.list_namespaced_config_map(namespace)
@@ -508,7 +563,7 @@ def configmaps(namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_deployment(name, namespace='default', **kwargs):
@@ -520,7 +575,7 @@ def show_deployment(name, namespace='default', **kwargs):
salt '*' kubernetes.show_deployment my-nginx default
salt '*' kubernetes.show_deployment name=my-nginx namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.read_namespaced_deployment(name, namespace)
@@ -536,7 +591,7 @@ def show_deployment(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_service(name, namespace='default', **kwargs):
@@ -548,7 +603,7 @@ def show_service(name, namespace='default', **kwargs):
salt '*' kubernetes.show_service my-nginx default
salt '*' kubernetes.show_service name=my-nginx namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_service(name, namespace)
@@ -564,7 +619,7 @@ def show_service(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_pod(name, namespace='default', **kwargs):
@@ -576,7 +631,7 @@ def show_pod(name, namespace='default', **kwargs):
salt '*' kubernetes.show_pod guestbook-708336848-fqr2x
salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_pod(name, namespace)
@@ -592,7 +647,7 @@ def show_pod(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_namespace(name, **kwargs):
@@ -603,7 +658,7 @@ def show_namespace(name, **kwargs):
salt '*' kubernetes.show_namespace kube-system
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespace(name)
@@ -619,7 +674,7 @@ def show_namespace(name, **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_secret(name, namespace='default', decode=False, **kwargs):
@@ -634,7 +689,7 @@ def show_secret(name, namespace='default', decode=False, **kwargs):
salt '*' kubernetes.show_secret name=confidential namespace=default
salt '*' kubernetes.show_secret name=confidential decode=True
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_secret(name, namespace)
@@ -655,7 +710,7 @@ def show_secret(name, namespace='default', decode=False, **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def show_configmap(name, namespace='default', **kwargs):
@@ -667,7 +722,7 @@ def show_configmap(name, namespace='default', **kwargs):
salt '*' kubernetes.show_configmap game-config default
salt '*' kubernetes.show_configmap name=game-config namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.read_namespaced_config_map(
@@ -685,7 +740,7 @@ def show_configmap(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_deployment(name, namespace='default', **kwargs):
@@ -697,7 +752,7 @@ def delete_deployment(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_deployment my-nginx
salt '*' kubernetes.delete_deployment name=my-nginx namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@@ -740,7 +795,7 @@ def delete_deployment(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_service(name, namespace='default', **kwargs):
@@ -752,7 +807,7 @@ def delete_service(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_service my-nginx default
salt '*' kubernetes.delete_service name=my-nginx namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -770,7 +825,7 @@ def delete_service(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_pod(name, namespace='default', **kwargs):
@@ -782,7 +837,7 @@ def delete_pod(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default
salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@@ -803,7 +858,7 @@ def delete_pod(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_namespace(name, **kwargs):
@@ -815,7 +870,7 @@ def delete_namespace(name, **kwargs):
salt '*' kubernetes.delete_namespace salt
salt '*' kubernetes.delete_namespace name=salt
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@@ -832,7 +887,7 @@ def delete_namespace(name, **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_secret(name, namespace='default', **kwargs):
@@ -844,7 +899,7 @@ def delete_secret(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_secret confidential default
salt '*' kubernetes.delete_secret name=confidential namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@@ -864,7 +919,7 @@ def delete_secret(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def delete_configmap(name, namespace='default', **kwargs):
@@ -876,7 +931,7 @@ def delete_configmap(name, namespace='default', **kwargs):
salt '*' kubernetes.delete_configmap settings default
salt '*' kubernetes.delete_configmap name=settings namespace=default
'''
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)
try:
@@ -897,7 +952,7 @@ def delete_configmap(name, namespace='default', **kwargs):
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_deployment(
@@ -924,7 +979,7 @@ def create_deployment(
template=template,
saltenv=saltenv)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
@@ -942,7 +997,7 @@ def create_deployment(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_pod(
@@ -969,7 +1024,7 @@ def create_pod(
template=template,
saltenv=saltenv)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -987,7 +1042,7 @@ def create_pod(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_service(
@@ -1014,7 +1069,7 @@ def create_service(
template=template,
saltenv=saltenv)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1032,7 +1087,7 @@ def create_service(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_secret(
@@ -1069,7 +1124,7 @@ def create_secret(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1087,7 +1142,7 @@ def create_secret(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_configmap(
@@ -1120,7 +1175,7 @@ def create_configmap(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1138,7 +1193,7 @@ def create_configmap(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def create_namespace(
@@ -1156,7 +1211,7 @@ def create_namespace(
body = kubernetes.client.V1Namespace(metadata=meta_obj)
body.metadata.name = name
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1173,7 +1228,7 @@ def create_namespace(
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def replace_deployment(name,
@@ -1200,7 +1255,7 @@ def replace_deployment(name,
template=template,
saltenv=saltenv)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
@@ -1218,7 +1273,7 @@ def replace_deployment(name,
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def replace_service(name,
@@ -1251,7 +1306,7 @@ def replace_service(name,
body.spec.cluster_ip = old_service['spec']['cluster_ip']
body.metadata.resource_version = old_service['metadata']['resource_version']
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1269,7 +1324,7 @@ def replace_service(name,
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def replace_secret(name,
@@ -1306,7 +1361,7 @@ def replace_secret(name,
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1324,7 +1379,7 @@ def replace_secret(name,
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def replace_configmap(name,
@@ -1355,7 +1410,7 @@ def replace_configmap(name,
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
- _setup_conn(**kwargs)
+ cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
@@ -1373,7 +1428,7 @@ def replace_configmap(name,
)
raise CommandExecutionError(exc)
finally:
- _cleanup()
+ _cleanup(**cfg)
def __create_object_body(kind,
@@ -1485,7 +1540,7 @@ def __dict_to_deployment_spec(spec):
'''
Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.
'''
- spec_obj = AppsV1beta1DeploymentSpec()
+ spec_obj = AppsV1beta1DeploymentSpec(template="")
for key, value in iteritems(spec):
if hasattr(spec_obj, key):
setattr(spec_obj, key, value)
diff --git a/salt/modules/libcloud_compute.py b/salt/modules/libcloud_compute.py
index 47725f5668..f1191dea59 100644
--- a/salt/modules/libcloud_compute.py
+++ b/salt/modules/libcloud_compute.py
@@ -206,7 +206,7 @@ def destroy_node(node_id, profile, **libcloud_kwargs):
'''
Destroy a node in the cloud
- :param node_id: Unique ID of the node to destory
+ :param node_id: Unique ID of the node to destroy
:type node_id: ``str``
:param profile: The profile key
diff --git a/salt/modules/linux_sysctl.py b/salt/modules/linux_sysctl.py
index 48c11f71af..9db9f2a20a 100644
--- a/salt/modules/linux_sysctl.py
+++ b/salt/modules/linux_sysctl.py
@@ -129,7 +129,10 @@ def assign(name, value):
tran_tab = name.translate(''.maketrans('./', '/.'))
else:
if isinstance(name, unicode): # pylint: disable=incompatible-py3-code
- trans_args = ({ord(x): None for x in ''.join(['./', '/.'])},)
+ trans_args = ({
+ ord('/'): '.',
+ ord('.'): '/'
+ },)
else:
trans_args = string.maketrans('./', '/.')
tran_tab = name.translate(*trans_args)
diff --git a/salt/modules/logadm.py b/salt/modules/logadm.py
index 98ba62b5c4..869e5b7987 100644
--- a/salt/modules/logadm.py
+++ b/salt/modules/logadm.py
@@ -256,7 +256,7 @@ def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
``name`` and ``pattern`` were kept for backwards compatibility reasons.
``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias
- for ``log_file``. These aliasses wil only be used if the ``entryname`` and
+ for ``log_file``. These aliases will only be used if the ``entryname`` and
``log_file`` arguments are not passed.
For a full list of arguments see ```logadm.show_args```.
diff --git a/salt/modules/mac_service.py b/salt/modules/mac_service.py
index 6d14de6aa0..e4fc05c1ed 100644
--- a/salt/modules/mac_service.py
+++ b/salt/modules/mac_service.py
@@ -150,6 +150,37 @@ def _get_service(name):
raise CommandExecutionError('Service not found: {0}'.format(name))
+def _always_running_service(name):
+ '''
+ Check if the service should always be running based on the KeepAlive Key
+ in the service plist.
+
+ :param str name: Service label, file name, or full path
+
+ :return: True if the KeepAlive key is set to True, False if set to False or
+ not set in the plist at all.
+
+ :rtype: bool
+
+ .. versionadded:: Fluorine
+ '''
+
+ # get all the info from the launchctl service
+ service_info = show(name)
+
+ # get the value for the KeepAlive key in service plist
+ try:
+ keep_alive = service_info['plist']['KeepAlive']
+ except KeyError:
+ return False
+
+ # check if KeepAlive is True and not just set.
+ if keep_alive is True:
+ return True
+
+ return False
+
+
def show(name):
'''
Show properties of a launchctl service
@@ -403,7 +434,9 @@ def status(name, sig=None, runas=None):
:param str runas: User to run launchctl commands
- :return: The PID for the service if it is running, otherwise an empty string
+ :return: The PID for the service if it is running, or 'loaded' if the
+ service should not always have a PID, or otherwise an empty string
+
:rtype: str
CLI Example:
@@ -416,6 +449,12 @@ def status(name, sig=None, runas=None):
if sig:
return __salt__['status.pid'](sig)
+ # mac services are a little different than other platforms as they may be
+ # set to run on intervals and may not always be active with a PID. This will
+ # return a string 'loaded' if it shouldn't always be running and is enabled.
+ if not _always_running_service(name) and enabled(name):
+ return 'loaded'
+
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
diff --git a/salt/modules/nacl.py b/salt/modules/nacl.py
index 96a1afb5c0..1f8e27ff4e 100644
--- a/salt/modules/nacl.py
+++ b/salt/modules/nacl.py
@@ -164,7 +164,6 @@ import salt.utils.stringutils
import salt.utils.win_functions
import salt.utils.win_dacl
-
REQ_ERROR = None
try:
import libnacl.secret
@@ -186,9 +185,9 @@ def _get_config(**kwargs):
config = {
'box_type': 'sealedbox',
'sk': None,
- 'sk_file': '/etc/salt/pki/master/nacl',
+ 'sk_file': os.path.join(__opts__['pki_dir'], 'master/nacl'),
'pk': None,
- 'pk_file': '/etc/salt/pki/master/nacl.pub',
+ 'pk_file': os.path.join(__opts__['pki_dir'], 'master/nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
@@ -233,7 +232,7 @@ def _get_pk(**kwargs):
return base64.b64decode(pubkey)
-def keygen(sk_file=None, pk_file=None):
+def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
@@ -253,6 +252,14 @@ def keygen(sk_file=None, pk_file=None):
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ sk_file = kwargs['keyfile']
+
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
@@ -313,6 +320,25 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ kwargs['sk_file'] = kwargs['keyfile']
+
+ if 'key' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'key\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk\' argument instead.'
+ )
+ kwargs['sk'] = kwargs['key']
+
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
@@ -360,6 +386,31 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ kwargs['sk_file'] = kwargs['keyfile']
+
+ # set boxtype to `secretbox` to maintain backward compatibility
+ kwargs['box_type'] = 'secretbox'
+
+ if 'key' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'key\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk\' argument instead.'
+ )
+ kwargs['sk'] = kwargs['key']
+
+ # set boxtype to `secretbox` to maintain backward compatibility
+ kwargs['box_type'] = 'secretbox'
+
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
@@ -414,6 +465,9 @@ def sealedbox_encrypt(data, **kwargs):
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
@@ -433,6 +487,10 @@ def sealedbox_decrypt(data, **kwargs):
'''
if data is None:
return None
+
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
@@ -452,6 +510,9 @@ def secretbox_encrypt(data, **kwargs):
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
@@ -472,6 +533,10 @@ def secretbox_decrypt(data, **kwargs):
'''
if data is None:
return None
+
+ # ensure data is in bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))
diff --git a/salt/modules/neutron.py b/salt/modules/neutron.py
index 87ca1de7f3..03a5db4699 100644
--- a/salt/modules/neutron.py
+++ b/salt/modules/neutron.py
@@ -1618,167 +1618,3 @@ def list_agents(profile=None):
'''
conn = _auth(profile)
return conn.list_agents()
-
-
-# The following is a list of functions that need to be incorporated in the
-# neutron module. This list should be updated as functions are added.
-#
-# update_ipsec_site_connection
-# Updates an IPsecSiteConnection.
-# update_ikepolicy Updates an IKEPolicy
-# update_ipsecpolicy Updates an IPsecPolicy
-# list_vips Fetches a list of all load balancer vips for a tenant.
-# show_vip Fetches information of a certain load balancer vip.
-# create_vip Creates a new load balancer vip.
-# update_vip Updates a load balancer vip.
-# delete_vip Deletes the specified load balancer vip.
-# list_pools Fetches a list of all load balancer pools for a tenant.
-# show_pool Fetches information of a certain load balancer pool.
-# create_pool Creates a new load balancer pool.
-# update_pool Updates a load balancer pool.
-# delete_pool Deletes the specified load balancer pool.
-# retrieve_pool_stats Retrieves stats for a certain load balancer pool.
-# list_members Fetches a list of all load balancer members for
-# a tenant.
-# show_member Fetches information of a certain load balancer member.
-# create_member Creates a new load balancer member.
-# update_member Updates a load balancer member.
-# delete_member Deletes the specified load balancer member.
-# list_health_monitors Fetches a list of all load balancer health monitors for
-# a tenant.
-# show_health_monitor Fetches information of a certain load balancer
-# health monitor.
-# create_health_monitor
-# Creates a new load balancer health monitor.
-# update_health_monitor
-# Updates a load balancer health monitor.
-# delete_health_monitor
-# Deletes the specified load balancer health monitor.
-# associate_health_monitor
-# Associate specified load balancer health monitor
-# and pool.
-# disassociate_health_monitor
-# Disassociate specified load balancer health monitor
-# and pool.
-# create_qos_queue Creates a new queue.
-# list_qos_queues Fetches a list of all queues for a tenant.
-# show_qos_queue Fetches information of a certain queue.
-# delete_qos_queue Deletes the specified queue.
-# list_agents Fetches agents.
-# show_agent Fetches information of a certain agent.
-# update_agent Updates an agent.
-# delete_agent Deletes the specified agent.
-# list_network_gateways
-# Retrieve network gateways.
-# show_network_gateway Fetch a network gateway.
-# create_network_gateway
-# Create a new network gateway.
-# update_network_gateway
-# Update a network gateway.
-# delete_network_gateway
-# Delete the specified network gateway.
-# connect_network_gateway
-# Connect a network gateway to the specified network.
-# disconnect_network_gateway
-# Disconnect a network from the specified gateway.
-# list_gateway_devices Retrieve gateway devices.
-# show_gateway_device Fetch a gateway device.
-# create_gateway_device
-# Create a new gateway device.
-# update_gateway_device
-# Updates a new gateway device.
-# delete_gateway_device
-# Delete the specified gateway device.
-# list_dhcp_agent_hosting_networks
-# Fetches a list of dhcp agents hosting a network.
-# list_networks_on_dhcp_agent
-# Fetches a list of dhcp agents hosting a network.
-# add_network_to_dhcp_agent
-# Adds a network to dhcp agent.
-# remove_network_from_dhcp_agent
-# Remove a network from dhcp agent.
-# list_l3_agent_hosting_routers
-# Fetches a list of L3 agents hosting a router.
-# list_routers_on_l3_agent
-# Fetches a list of L3 agents hosting a router.
-# add_router_to_l3_agent
-# Adds a router to L3 agent.
-# list_firewall_rules Fetches a list of all firewall rules for a tenant.
-# show_firewall_rule Fetches information of a certain firewall rule.
-# create_firewall_rule Creates a new firewall rule.
-# update_firewall_rule Updates a firewall rule.
-# delete_firewall_rule Deletes the specified firewall rule.
-# list_firewall_policies
-# Fetches a list of all firewall policies for a tenant.
-# show_firewall_policy Fetches information of a certain firewall policy.
-# create_firewall_policy
-# Creates a new firewall policy.
-# update_firewall_policy
-# Updates a firewall policy.
-# delete_firewall_policy
-# Deletes the specified firewall policy.
-# firewall_policy_insert_rule
-# Inserts specified rule into firewall policy.
-# firewall_policy_remove_rule
-# Removes specified rule from firewall policy.
-# list_firewalls Fetches a list of all firewals for a tenant.
-# show_firewall Fetches information of a certain firewall.
-# create_firewall Creates a new firewall.
-# update_firewall Updates a firewall.
-# delete_firewall Deletes the specified firewall.
-# remove_router_from_l3_agent
-# Remove a router from l3 agent.
-# get_lbaas_agent_hosting_pool
-# Fetches a loadbalancer agent hosting a pool.
-# list_pools_on_lbaas_agent
-# Fetches a list of pools hosted by
-# the loadbalancer agent.
-# list_service_providers
-# Fetches service providers.
-# list_credentials Fetch a list of all credentials for a tenant.
-# show_credential Fetch a credential.
-# create_credential Create a new credential.
-# update_credential Update a credential.
-# delete_credential Delete the specified credential.
-# list_network_profile_bindings
-# Fetch a list of all tenants associated for
-# a network profile.
-# list_network_profiles
-# Fetch a list of all network profiles for a tenant.
-# show_network_profile Fetch a network profile.
-# create_network_profile
-# Create a network profile.
-# update_network_profile
-# Update a network profile.
-# delete_network_profile
-# Delete the network profile.
-# list_policy_profile_bindings
-# Fetch a list of all tenants associated for
-# a policy profile.
-# list_policy_profiles Fetch a list of all network profiles for a tenant.
-# show_policy_profile Fetch a network profile.
-# update_policy_profile
-# Update a policy profile.
-# create_metering_label
-# Creates a metering label.
-# delete_metering_label
-# Deletes the specified metering label.
-# list_metering_labels Fetches a list of all metering labels for a tenant.
-# show_metering_label Fetches information of a certain metering label.
-# create_metering_label_rule
-# Creates a metering label rule.
-# delete_metering_label_rule
-# Deletes the specified metering label rule.
-# list_metering_label_rules
-# Fetches a list of all metering label rules for a label.
-# show_metering_label_rule
-# Fetches information of a certain metering label rule.
-# list_net_partitions Fetch a list of all network partitions for a tenant.
-# show_net_partition etch a network partition.
-# create_net_partition Create a network partition.
-# delete_net_partition Delete the network partition.
-# create_packet_filter Create a new packet filter.
-# update_packet_filter Update a packet filter.
-# list_packet_filters Fetch a list of all packet filters for a tenant.
-# show_packet_filter Fetch information of a certain packet filter.
-# delete_packet_filter Delete the specified packet filter.
diff --git a/salt/modules/openbsdpkg.py b/salt/modules/openbsdpkg.py
index add6f1bbd3..decceee1fc 100644
--- a/salt/modules/openbsdpkg.py
+++ b/salt/modules/openbsdpkg.py
@@ -153,9 +153,10 @@ def latest_version(*names, **kwargs):
continue
cur = pkgs.get(pkgname, '')
- if not cur or salt.utils.compare_versions(ver1=cur,
- oper='<',
- ver2=pkgver):
+ if not cur or salt.utils.versions.compare(
+ ver1=cur,
+ oper='<',
+ ver2=pkgver):
ret[pkgname] = pkgver
# Return a string if only one package name passed
diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py
index c5b51a1846..e3190e1e11 100644
--- a/salt/modules/openscap.py
+++ b/salt/modules/openscap.py
@@ -13,7 +13,6 @@ from subprocess import Popen, PIPE
# Import Salt libs
from salt.ext import six
-from salt.client import Caller
ArgumentParser = object
@@ -105,8 +104,7 @@ def xccdf(params):
success = _OSCAP_EXIT_CODES_MAP[proc.returncode]
returncode = proc.returncode
if success:
- caller = Caller()
- caller.cmd('cp.push_dir', tempdir)
+ __salt__['cp.push_dir'](tempdir)
shutil.rmtree(tempdir, ignore_errors=True)
upload_dir = tempdir
diff --git a/salt/modules/pillar.py b/salt/modules/pillar.py
index 5e7a184fba..d320e46754 100644
--- a/salt/modules/pillar.py
+++ b/salt/modules/pillar.py
@@ -42,8 +42,8 @@ def get(key,
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string
- except __opts__['pillar_raise_on_missing'] is set to True, in which case a
- KeyError will be raised.
+ except ``__opts__['pillar_raise_on_missing']`` is set to True, in which
+ case a ``KeyError`` exception will be raised.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
@@ -53,11 +53,18 @@ def get(key,
{'pkg': {'apache': 'httpd'}}
- To retrieve the value associated with the apache key in the pkg dict this
- key can be passed::
+ To retrieve the value associated with the ``apache`` key in the ``pkg``
+ dict this key can be passed as::
pkg:apache
+ key
+ The pillar key to get value from
+
+ default
+ If specified, return this value in case when named pillar value does
+ not exist.
+
merge : ``False``
If ``True``, the retrieved values will be merged into the passed
default. When the default and the retrieved value are both
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
index f9830056b9..b560612b9e 100644
--- a/salt/modules/pip.py
+++ b/salt/modules/pip.py
@@ -338,6 +338,22 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
return cleanup_requirements, None
+def _format_env_vars(env_vars):
+ ret = {}
+ if env_vars:
+ if isinstance(env_vars, dict):
+ for key, val in six.iteritems(env_vars):
+ if not isinstance(key, six.string_types):
+ key = str(key) # future lint: disable=blacklisted-function
+ if not isinstance(val, six.string_types):
+ val = str(val) # future lint: disable=blacklisted-function
+ ret[key] = val
+ else:
+ raise CommandExecutionError(
+ 'env_vars {0} is not a dictionary'.format(env_vars))
+ return ret
+
+
def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
requirements=None,
bin_env=None,
@@ -811,16 +827,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if env_vars:
- if isinstance(env_vars, dict):
- for key, val in six.iteritems(env_vars):
- if not isinstance(key, six.string_types):
- key = str(key) # future lint: disable=blacklisted-function
- if not isinstance(val, six.string_types):
- val = str(val) # future lint: disable=blacklisted-function
- cmd_kwargs.setdefault('env', {})[key] = val
- else:
- raise CommandExecutionError(
- 'env_vars {0} is not a dictionary'.format(env_vars))
+ cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
try:
if cwd:
@@ -974,7 +981,8 @@ def uninstall(pkgs=None,
def freeze(bin_env=None,
user=None,
cwd=None,
- use_vt=False):
+ use_vt=False,
+ env_vars=None):
'''
Return a list of installed packages either globally or in the specified
virtualenv
@@ -1027,6 +1035,8 @@ def freeze(bin_env=None,
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
+ if env_vars:
+ cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
@@ -1038,7 +1048,8 @@ def freeze(bin_env=None,
def list_(prefix=None,
bin_env=None,
user=None,
- cwd=None):
+ cwd=None,
+ env_vars=None):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@@ -1067,7 +1078,7 @@ def list_(prefix=None,
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
- for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
+ for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines
diff --git a/salt/modules/postgres.py b/salt/modules/postgres.py
index 67fc34e88c..36969b5154 100644
--- a/salt/modules/postgres.py
+++ b/salt/modules/postgres.py
@@ -302,8 +302,7 @@ def _parsed_version(user=None, host=None, port=None, maintenance_db=None,
return None
-def _connection_defaults(user=None, host=None, port=None, maintenance_db=None,
- password=None):
+def _connection_defaults(user=None, host=None, port=None, maintenance_db=None):
'''
Returns a tuple of (user, host, port, db) with config, pillar, or default
values assigned to missing values.
@@ -316,31 +315,29 @@ def _connection_defaults(user=None, host=None, port=None, maintenance_db=None,
port = __salt__['config.option']('postgres.port')
if not maintenance_db:
maintenance_db = __salt__['config.option']('postgres.maintenance_db')
- if password is None:
- password = __salt__['config.option']('postgres.pass')
- return (user, host, port, maintenance_db, password)
+ return (user, host, port, maintenance_db)
def _psql_cmd(*args, **kwargs):
'''
Return string with fully composed psql command.
- Accept optional keyword arguments: user, host and port as well as any
- number or positional arguments to be added to the end of command.
+ Accepts optional keyword arguments: user, host, port and maintenance_db,
+ as well as any number of positional arguments to be added to the end of
+ the command.
'''
- (user, host, port, maintenance_db, password) = _connection_defaults(
+ (user, host, port, maintenance_db) = _connection_defaults(
kwargs.get('user'),
kwargs.get('host'),
kwargs.get('port'),
- kwargs.get('maintenance_db'),
- kwargs.get('password'))
+ kwargs.get('maintenance_db'))
_PSQL_BIN = _find_pg_binary('psql')
cmd = [_PSQL_BIN,
'--no-align',
'--no-readline',
'--no-psqlrc',
- '--no-password'] # It is never acceptable to issue a password prompt.
+ '--no-password'] # Never prompt, handled in _run_psql.
if user:
cmd += ['--username', user]
if host:
@@ -363,7 +360,7 @@ def _psql_prepare_and_run(cmd,
user=None):
rcmd = _psql_cmd(
host=host, user=user, port=port,
- maintenance_db=maintenance_db, password=password,
+ maintenance_db=maintenance_db,
*cmd)
cmdret = _run_psql(
rcmd, runas=runas, password=password, host=host, port=port, user=user)
diff --git a/salt/modules/reg.py b/salt/modules/reg.py
index 3e79bf9991..8be6ad04ed 100644
--- a/salt/modules/reg.py
+++ b/salt/modules/reg.py
@@ -83,6 +83,9 @@ def _to_unicode(vdata):
Converts from current users character encoding to unicode. Use this for
parameters being pass to reg functions
'''
+ # None does not convert to Unicode
+ if vdata is None:
+ return None
return salt.utils.stringutils.to_unicode(vdata, 'utf-8')
@@ -526,13 +529,13 @@ def set_value(hive,
# https://www.python.org/dev/peps/pep-0237/
# String Types to Unicode
- if vtype_value in [1, 2]:
+ if vtype_value in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]:
local_vdata = _to_unicode(vdata)
# Don't touch binary...
- elif vtype_value == 3:
+ elif vtype_value == win32con.REG_BINARY:
local_vdata = vdata
# Make sure REG_MULTI_SZ is a list of strings
- elif vtype_value == 7:
+ elif vtype_value == win32con.REG_MULTI_SZ:
local_vdata = [_to_unicode(i) for i in vdata]
# Everything else is int
else:
@@ -686,7 +689,6 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
'''
-
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
@@ -728,7 +730,7 @@ def import_file(source, use_32bit_registry=False):
can be either a local file path or a URL type supported by salt
(e.g. ``salt://salt_master_path``).
- :param bool use_32bit_registry: If the value of this paramater is ``True``
+ :param bool use_32bit_registry: If the value of this parameter is ``True``
then the ``REG`` file will be imported into the Windows 32 bit registry.
Otherwise the Windows 64 bit registry will be used.
diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py
index 50d1f14ed9..880d4a5d4f 100644
--- a/salt/modules/saltutil.py
+++ b/salt/modules/saltutil.py
@@ -737,6 +737,45 @@ def sync_utils(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blackli
return ret
+def sync_serializers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None):
+ '''
+ .. versionadded:: Fluorine
+
+ Sync serializers from ``salt://_serializers`` to the minion
+
+ saltenv
+ The fileserver environment from which to sync. To sync from more than
+ one environment, pass a comma-separated list.
+
+ If not passed, then all environments configured in the :ref:`top files
+ <states-top>` will be checked for serializer modules to sync. If no top
+ files are found, then the ``base`` environment will be synced.
+
+ refresh : True
+ If ``True``, refresh the available execution modules on the minion.
+ This refresh will be performed even if no new serializer modules are
+ synced. Set to ``False`` to prevent this refresh.
+
+ extmod_whitelist : None
+ comma-separated list of modules to sync
+
+ extmod_blacklist : None
+ comma-separated list of modules to blacklist based on type
+
+ CLI Examples:
+
+ .. code-block:: bash
+
+ salt '*' saltutil.sync_serializers
+ salt '*' saltutil.sync_serializers saltenv=dev
+ salt '*' saltutil.sync_serializers saltenv=base,dev
+ '''
+ ret = _sync('serializers', saltenv, extmod_whitelist, extmod_blacklist)
+ if refresh:
+ refresh_modules()
+ return ret
+
+
def list_extmods():
'''
.. versionadded:: 2017.7.0
@@ -904,6 +943,7 @@ def sync_all(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist
ret['proxymodules'] = sync_proxymodules(saltenv, False, extmod_whitelist, extmod_blacklist)
ret['engines'] = sync_engines(saltenv, False, extmod_whitelist, extmod_blacklist)
ret['thorium'] = sync_thorium(saltenv, False, extmod_whitelist, extmod_blacklist)
+ ret['serializers'] = sync_serializers(saltenv, False, extmod_whitelist, extmod_blacklist)
if __opts__['file_client'] == 'local':
ret['pillar'] = sync_pillar(saltenv, False, extmod_whitelist, extmod_blacklist)
if refresh:
diff --git a/salt/modules/schedule.py b/salt/modules/schedule.py
index a77693e66f..0491ee791c 100644
--- a/salt/modules/schedule.py
+++ b/salt/modules/schedule.py
@@ -58,7 +58,7 @@ SCHEDULE_CONF = [
'after',
'return_config',
'return_kwargs',
- 'run_on_start'
+ 'run_on_start',
'skip_during_range',
'run_after_skip_range',
]
diff --git a/salt/modules/smartos_imgadm.py b/salt/modules/smartos_imgadm.py
index 5e5c16b8ae..c11285a4ba 100644
--- a/salt/modules/smartos_imgadm.py
+++ b/salt/modules/smartos_imgadm.py
@@ -11,7 +11,6 @@ import logging
import salt.utils.json
import salt.utils.path
import salt.utils.platform
-import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
@@ -26,14 +25,6 @@ __func_alias__ = {
__virtualname__ = 'imgadm'
-@decorators.memoize
-def _check_imgadm():
- '''
- Looks to see if imgadm is present on the system
- '''
- return salt.utils.path.which('imgadm')
-
-
def _exit_status(retcode):
'''
Translate exit status of imgadm
@@ -70,11 +61,12 @@ def __virtual__():
'''
Provides imgadm only on SmartOS
'''
- if salt.utils.platform.is_smartos_globalzone() and _check_imgadm():
+ if salt.utils.platform.is_smartos_globalzone() and \
+ salt.utils.path.which('imgadm'):
return __virtualname__
return (
False,
- '{0} module can only be loaded on SmartOS computed nodes'.format(
+ '{0} module can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
@@ -91,8 +83,7 @@ def version():
salt '*' imgadm.version
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} --version'.format(imgadm)
+ cmd = 'imgadm --version'
res = __salt__['cmd.run'](cmd).splitlines()
ret = res[0].split()
return ret[-1]
@@ -111,10 +102,8 @@ def update_installed(uuid=''):
salt '*' imgadm.update [uuid]
'''
- imgadm = _check_imgadm()
- if imgadm:
- cmd = '{0} update {1}'.format(imgadm, uuid).rstrip()
- __salt__['cmd.run'](cmd)
+ cmd = 'imgadm update {0}'.format(uuid).rstrip()
+ __salt__['cmd.run'](cmd)
return {}
@@ -135,8 +124,7 @@ def avail(search=None, verbose=False):
salt '*' imgadm.avail verbose=True
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} avail -j'.format(imgadm)
+ cmd = 'imgadm avail -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
@@ -169,8 +157,7 @@ def list_installed(verbose=False):
salt '*' imgadm.list [verbose=True]
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} list -j'.format(imgadm)
+ cmd = 'imgadm list -j'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
@@ -198,8 +185,7 @@ def show(uuid):
salt '*' imgadm.show e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} show {1}'.format(imgadm, uuid)
+ cmd = 'imgadm show {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@@ -223,8 +209,7 @@ def get(uuid):
salt '*' imgadm.get e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} get {1}'.format(imgadm, uuid)
+ cmd = 'imgadm get {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@@ -250,8 +235,7 @@ def import_image(uuid, verbose=False):
salt '*' imgadm.import e42f8c84-bbea-11e2-b920-078fab2aab1f [verbose=True]
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} import {1}'.format(imgadm, uuid)
+ cmd = 'imgadm import {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@@ -275,8 +259,7 @@ def delete(uuid):
salt '*' imgadm.delete e42f8c84-bbea-11e2-b920-078fab2aab1f
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} delete {1}'.format(imgadm, uuid)
+ cmd = 'imgadm delete {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
retcode = res['retcode']
if retcode != 0:
@@ -305,8 +288,7 @@ def vacuum(verbose=False):
salt '*' imgadm.vacuum [verbose=True]
'''
ret = {}
- imgadm = _check_imgadm()
- cmd = '{0} vacuum -f'.format(imgadm)
+ cmd = 'imgadm vacuum -f'
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
diff --git a/salt/modules/smartos_nictagadm.py b/salt/modules/smartos_nictagadm.py
index fd95c5569f..18445d9df2 100644
--- a/salt/modules/smartos_nictagadm.py
+++ b/salt/modules/smartos_nictagadm.py
@@ -17,7 +17,6 @@ import logging
# Import Salt libs
import salt.utils.path
import salt.utils.platform
-import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
@@ -30,31 +29,17 @@ __func_alias__ = {
__virtualname__ = 'nictagadm'
-@decorators.memoize
-def _check_nictagadm():
- '''
- Looks to see if nictagadm is present on the system
- '''
- return salt.utils.path.which('nictagadm')
-
-
-def _check_dladm():
- '''
- Looks to see if dladm is present on the system
- '''
- return salt.utils.path.which('dladm')
-
-
def __virtual__():
'''
Provides nictagadm on SmartOS
'''
- if salt.utils.platform.is_smartos_globalzone() \
- and _check_nictagadm() and _check_dladm():
+ if salt.utils.platform.is_smartos_globalzone() and \
+ salt.utils.path.which('dladm') and \
+ salt.utils.path.which('nictagadm'):
return __virtualname__
return (
False,
- '{0} module can only be loaded on SmartOS computed nodes'.format(
+ '{0} module can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
@@ -74,10 +59,8 @@ def list_nictags(include_etherstubs=True):
salt '*' nictagadm.list
'''
ret = {}
- nictagadm = _check_nictagadm()
- cmd = '{nictagadm} list -d "|" -p{estubs}'.format(
- nictagadm=nictagadm,
- estubs=' -L' if not include_etherstubs else ''
+ cmd = 'nictagadm list -d "|" -p{0}'.format(
+ ' -L' if not include_etherstubs else ''
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
@@ -109,11 +92,7 @@ def vms(nictag):
salt '*' nictagadm.vms admin
'''
ret = {}
- nictagadm = _check_nictagadm()
- cmd = '{nictagadm} vms {nictag}'.format(
- nictagadm=nictagadm,
- nictag=nictag
- )
+ cmd = 'nictagadm vms {0}'.format(nictag)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
@@ -139,14 +118,10 @@ def exists(*nictag, **kwargs):
salt '*' nictagadm.exists admin
'''
ret = {}
- nictagadm = _check_nictagadm()
if len(nictag) == 0:
return {'Error': 'Please provide at least one nictag to check.'}
- cmd = '{nictagadm} exists -l {nictags}'.format(
- nictagadm=nictagadm,
- nictags=' '.join(nictag)
- )
+ cmd = 'nictagadm exists -l {0}'.format(' '.join(nictag))
res = __salt__['cmd.run_all'](cmd)
if not kwargs.get('verbose', False):
@@ -178,29 +153,23 @@ def add(name, mac, mtu=1500):
salt '*' nictagadm.add trunk 'DE:AD:OO:OO:BE:EF' 9000
'''
ret = {}
- nictagadm = _check_nictagadm()
- dladm = _check_dladm()
if mtu > 9000 or mtu < 1500:
return {'Error': 'mtu must be a value between 1500 and 9000.'}
if mac != 'etherstub':
- cmd = '{dladm} show-phys -m -p -o address'.format(
- dladm=dladm
- )
+ cmd = 'dladm show-phys -m -p -o address'
res = __salt__['cmd.run_all'](cmd)
if mac not in res['stdout'].splitlines():
return {'Error': '{0} is not present on this system.'.format(mac)}
if mac == 'etherstub':
- cmd = '{nictagadm} add -l -p mtu={mtu} {name}'.format(
- nictagadm=nictagadm,
+ cmd = 'nictagadm add -l -p mtu={mtu} {name}'.format(
mtu=mtu,
name=name
)
res = __salt__['cmd.run_all'](cmd)
else:
- cmd = '{nictagadm} add -p mtu={mtu},mac={mac} {name}'.format(
- nictagadm=nictagadm,
+ cmd = 'nictagadm add -p mtu={mtu},mac={mac} {name}'.format(
mtu=mtu,
mac=mac,
name=name
@@ -231,8 +200,6 @@ def update(name, mac=None, mtu=None):
salt '*' nictagadm.update trunk mtu=9000
'''
ret = {}
- nictagadm = _check_nictagadm()
- dladm = _check_dladm()
if name not in list_nictags():
return {'Error': 'nictag {0} does not exists.'.format(name)}
@@ -245,9 +212,7 @@ def update(name, mac=None, mtu=None):
if mac == 'etherstub':
return {'Error': 'cannot update a nic with "etherstub".'}
else:
- cmd = '{dladm} show-phys -m -p -o address'.format(
- dladm=dladm
- )
+ cmd = 'dladm show-phys -m -p -o address'
res = __salt__['cmd.run_all'](cmd)
if mac not in res['stdout'].splitlines():
return {'Error': '{0} is not present on this system.'.format(mac)}
@@ -259,8 +224,7 @@ def update(name, mac=None, mtu=None):
elif mtu:
properties = "mtu={0}".format(mtu) if mtu else ""
- cmd = '{nictagadm} update -p {properties} {name}'.format(
- nictagadm=nictagadm,
+ cmd = 'nictagadm update -p {properties} {name}'.format(
properties=properties,
name=name
)
@@ -288,13 +252,11 @@ def delete(name, force=False):
salt '*' nictagadm.exists admin
'''
ret = {}
- nictagadm = _check_nictagadm()
if name not in list_nictags():
return True
- cmd = '{nictagadm} delete {force}{name}'.format(
- nictagadm=nictagadm,
+ cmd = 'nictagadm delete {force}{name}'.format(
force="-f " if force else "",
name=name
)
diff --git a/salt/modules/smartos_virt.py b/salt/modules/smartos_virt.py
index a7063c5ccb..ad1272fa4d 100644
--- a/salt/modules/smartos_virt.py
+++ b/salt/modules/smartos_virt.py
@@ -27,7 +27,7 @@ def __virtual__():
return __virtualname__
return (
False,
- '{0} module can only be loaded on SmartOS computed nodes'.format(
+ '{0} module can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
diff --git a/salt/modules/smartos_vmadm.py b/salt/modules/smartos_vmadm.py
index 411c01d49e..085a76b133 100644
--- a/salt/modules/smartos_vmadm.py
+++ b/salt/modules/smartos_vmadm.py
@@ -14,7 +14,6 @@ except ImportError:
# Import Salt libs
import salt.utils.args
-import salt.utils.decorators as decorators
import salt.utils.files
import salt.utils.json
import salt.utils.path
@@ -36,30 +35,17 @@ __func_alias__ = {
__virtualname__ = 'vmadm'
-@decorators.memoize
-def _check_vmadm():
- '''
- Looks to see if vmadm is present on the system
- '''
- return salt.utils.path.which('vmadm')
-
-
-def _check_zfs():
- '''
- Looks to see if zfs is present on the system
- '''
- return salt.utils.path.which('zfs')
-
-
def __virtual__():
'''
Provides vmadm on SmartOS
'''
- if salt.utils.platform.is_smartos_globalzone() and _check_vmadm():
+ if salt.utils.platform.is_smartos_globalzone() and \
+ salt.utils.path.which('vmadm') and \
+ salt.utils.path.which('zfs'):
return __virtualname__
return (
False,
- '{0} module can only be loaded on SmartOS computed nodes'.format(
+ '{0} module can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
@@ -80,13 +66,11 @@ def _create_update_from_file(mode='create', uuid=None, path=None):
Create vm from file
'''
ret = {}
- vmadm = _check_vmadm()
if not os.path.isfile(path) or path is None:
ret['Error'] = 'File ({0}) does not exists!'.format(path)
return ret
# vmadm validate create|update [-f ]
- cmd = '{vmadm} validate {mode} {brand} -f {path}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm validate {mode} {brand} -f {path}'.format(
mode=mode,
brand=get(uuid)['brand'] if uuid is not None else '',
path=path
@@ -102,8 +86,7 @@ def _create_update_from_file(mode='create', uuid=None, path=None):
ret['Error'] = res['stderr']
return ret
# vmadm create|update [-f ]
- cmd = '{vmadm} {mode} {uuid} -f {path}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm {mode} {uuid} -f {path}'.format(
mode=mode,
uuid=uuid if uuid is not None else '',
path=path
@@ -129,7 +112,6 @@ def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None):
Create vm from configuration
'''
ret = {}
- vmadm = _check_vmadm()
# write json file
vmadm_json_file = __salt__['temp.file'](prefix='vmadm-')
@@ -137,8 +119,7 @@ def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None):
salt.utils.json.dump(vmcfg, vmadm_json)
# vmadm validate create|update [-f ]
- cmd = '{vmadm} validate {mode} {brand} -f {vmadm_json_file}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm validate {mode} {brand} -f {vmadm_json_file}'.format(
mode=mode,
brand=get(uuid)['brand'] if uuid is not None else '',
vmadm_json_file=vmadm_json_file
@@ -154,8 +135,7 @@ def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None):
ret['Error'] = res['stderr']
return ret
# vmadm create|update [-f ]
- cmd = '{vmadm} {mode} {uuid} -f {vmadm_json_file}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm {mode} {uuid} -f {vmadm_json_file}'.format(
mode=mode,
uuid=uuid if uuid is not None else '',
vmadm_json_file=vmadm_json_file
@@ -202,7 +182,6 @@ def start(vm, options=None, key='uuid'):
salt '*' vmadm.start vm=nina.example.org key=hostname
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -210,8 +189,7 @@ def start(vm, options=None, key='uuid'):
if 'Error' in vm:
return vm
# vmadm start [option=value ...]
- cmd = '{vmadm} start {uuid} {options}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm start {uuid} {options}'.format(
uuid=vm,
options=options if options else ''
)
@@ -244,7 +222,6 @@ def stop(vm, force=False, key='uuid'):
salt '*' vmadm.stop vm=nina.example.org key=hostname
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -252,8 +229,7 @@ def stop(vm, force=False, key='uuid'):
if 'Error' in vm:
return vm
# vmadm stop [-F]
- cmd = '{vmadm} stop {force} {uuid}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm stop {force} {uuid}'.format(
force='-F' if force else '',
uuid=vm
)
@@ -286,7 +262,6 @@ def reboot(vm, force=False, key='uuid'):
salt '*' vmadm.reboot vm=nina.example.org key=hostname
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -294,8 +269,7 @@ def reboot(vm, force=False, key='uuid'):
if 'Error' in vm:
return vm
# vmadm reboot [-F]
- cmd = '{vmadm} reboot {force} {uuid}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm reboot {force} {uuid}'.format(
force='-F' if force else '',
uuid=vm
)
@@ -331,10 +305,8 @@ def list_vms(search=None, sort=None, order='uuid,type,ram,state,alias', keyed=Tr
salt '*' vmadm.list search='type=KVM'
'''
ret = {}
- vmadm = _check_vmadm()
# vmadm list [-p] [-H] [-o field,...] [-s field,...] [field=value ...]
- cmd = '{vmadm} list -p -H {order} {sort} {search}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm list -p -H {order} {sort} {search}'.format(
order='-o {0}'.format(order) if order else '',
sort='-s {0}'.format(sort) if sort else '',
search=search if search else ''
@@ -387,10 +359,8 @@ def lookup(search=None, order=None, one=False):
salt '*' vmadm.lookup search='alias=nacl' one=True
'''
ret = {}
- vmadm = _check_vmadm()
# vmadm lookup [-j|-1] [-o field,...] [field=value ...]
- cmd = '{vmadm} lookup {one} {order} {search}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm lookup {one} {order} {search}'.format(
one='-1' if one else '-j',
order='-o {0}'.format(order) if order else '',
search=search if search else ''
@@ -431,7 +401,6 @@ def sysrq(vm, action='nmi', key='uuid'):
salt '*' vmadm.sysrq nacl nmi key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -442,8 +411,7 @@ def sysrq(vm, action='nmi', key='uuid'):
if 'Error' in vm:
return vm
# vmadm sysrq
- cmd = '{vmadm} sysrq {uuid} {action}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm sysrq {uuid} {action}'.format(
uuid=vm,
action=action
)
@@ -472,7 +440,6 @@ def delete(vm, key='uuid'):
salt '*' vmadm.delete nacl key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -480,10 +447,7 @@ def delete(vm, key='uuid'):
if 'Error' in vm:
return vm
# vmadm delete
- cmd = '{vmadm} delete {uuid}'.format(
- vmadm=vmadm,
- uuid=vm
- )
+ cmd = 'vmadm delete {0}'.format(vm)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
@@ -509,7 +473,6 @@ def get(vm, key='uuid'):
salt '*' vmadm.get nacl key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -517,10 +480,7 @@ def get(vm, key='uuid'):
if 'Error' in vm:
return vm
# vmadm get
- cmd = '{vmadm} get {uuid}'.format(
- vmadm=vmadm,
- uuid=vm
- )
+ cmd = 'vmadm get {0}'.format(vm)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
@@ -550,7 +510,6 @@ def info(vm, info_type='all', key='uuid'):
salt '*' vmadm.info nacl vnc key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']:
ret['Error'] = 'Requested info_type is not available'
return ret
@@ -561,8 +520,7 @@ def info(vm, info_type='all', key='uuid'):
if 'Error' in vm:
return vm
# vmadm info [type,...]
- cmd = '{vmadm} info {uuid} {type}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm info {uuid} {type}'.format(
uuid=vm,
type=info_type
)
@@ -596,7 +554,6 @@ def create_snapshot(vm, name, key='uuid'):
salt '*' vmadm.create_snapshot nacl baseline key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -614,8 +571,7 @@ def create_snapshot(vm, name, key='uuid'):
ret['Error'] = 'VM must be running to take a snapshot'
return ret
# vmadm create-snapshot
- cmd = '{vmadm} create-snapshot {uuid} {snapshot}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm create-snapshot {uuid} {snapshot}'.format(
snapshot=name,
uuid=vm
)
@@ -649,7 +605,6 @@ def delete_snapshot(vm, name, key='uuid'):
salt '*' vmadm.delete_snapshot nacl baseline key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -664,8 +619,7 @@ def delete_snapshot(vm, name, key='uuid'):
ret['Error'] = 'VM must be of type OS'
return ret
# vmadm delete-snapshot
- cmd = '{vmadm} delete-snapshot {uuid} {snapshot}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm delete-snapshot {uuid} {snapshot}'.format(
snapshot=name,
uuid=vm
)
@@ -699,7 +653,6 @@ def rollback_snapshot(vm, name, key='uuid'):
salt '*' vmadm.rollback_snapshot nacl baseline key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -714,8 +667,7 @@ def rollback_snapshot(vm, name, key='uuid'):
ret['Error'] = 'VM must be of type OS'
return ret
# vmadm rollback-snapshot
- cmd = '{vmadm} rollback-snapshot {uuid} {snapshot}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm rollback-snapshot {uuid} {snapshot}'.format(
snapshot=name,
uuid=vm
)
@@ -746,7 +698,6 @@ def reprovision(vm, image, key='uuid'):
salt '*' vmadm.reprovision nacl c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 key=alias
'''
ret = {}
- vmadm = _check_vmadm()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -757,8 +708,7 @@ def reprovision(vm, image, key='uuid'):
ret['Error'] = 'Image ({0}) is not present on this host'.format(image)
return ret
# vmadm reprovision [-f ]
- cmd = six.text_type('echo {image} | {vmadm} reprovision {uuid}').format(
- vmadm=salt.utils.stringutils.to_unicode(vmadm),
+ cmd = six.text_type('echo {image} | vmadm reprovision {uuid}').format(
uuid=salt.utils.stringutils.to_unicode(vm),
image=_quote_args(salt.utils.json.dumps({'image_uuid': image}))
)
@@ -821,7 +771,6 @@ def update(vm, from_file=None, key='uuid', **kwargs):
salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
'''
ret = {}
- vmadm = _check_vmadm()
# prepare vmcfg
vmcfg = {}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
@@ -860,8 +809,6 @@ def send(vm, target, key='uuid'):
salt '*' vmadm.send vm=nacl target=/opt/backups key=alias
'''
ret = {}
- vmadm = _check_vmadm()
- zfs = _check_zfs()
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
@@ -872,8 +819,7 @@ def send(vm, target, key='uuid'):
if 'Error' in vm:
return vm
# vmadm send [target]
- cmd = '{vmadm} send {uuid} > {target}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm send {uuid} > {target}'.format(
uuid=vm,
target=os.path.join(target, '{0}.vmdata'.format(vm))
)
@@ -890,8 +836,7 @@ def send(vm, target, key='uuid'):
for dataset in vmobj['datasets']:
name = dataset.split('/')
name = name[-1]
- cmd = '{zfs} send {dataset} > {target}'.format(
- zfs=zfs,
+ cmd = 'zfs send {dataset} > {target}'.format(
dataset=dataset,
target=os.path.join(target, '{0}-{1}.zfsds'.format(vm, name))
)
@@ -919,8 +864,6 @@ def receive(uuid, source):
salt '*' vmadm.receive 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
'''
ret = {}
- vmadm = _check_vmadm()
- zfs = _check_zfs()
if not os.path.isdir(source):
ret['Error'] = 'Source must be a directory or host'
return ret
@@ -928,8 +871,7 @@ def receive(uuid, source):
ret['Error'] = 'Unknow vm with uuid in {0}'.format(source)
return ret
# vmadm receive
- cmd = '{vmadm} receive < {source}'.format(
- vmadm=vmadm,
+ cmd = 'vmadm receive < {source}'.format(
source=os.path.join(source, '{0}.vmdata'.format(uuid))
)
res = __salt__['cmd.run_all'](cmd, python_shell=True)
@@ -945,8 +887,7 @@ def receive(uuid, source):
for dataset in vmobj['datasets']:
name = dataset.split('/')
name = name[-1]
- cmd = '{zfs} receive {dataset} < {source}'.format(
- zfs=zfs,
+ cmd = 'zfs receive {dataset} < {source}'.format(
dataset=dataset,
source=os.path.join(source, '{0}-{1}.zfsds'.format(uuid, name))
)
@@ -955,10 +896,7 @@ def receive(uuid, source):
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
- cmd = '{vmadm} install {uuid}'.format(
- vmadm=vmadm,
- uuid=uuid
- )
+ cmd = 'vmadm install {0}'.format(uuid)
res = __salt__['cmd.run_all'](cmd, python_shell=True)
retcode = res['retcode']
if retcode != 0 and not res['stderr'].endswith('datasets'):
diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py
index e271e6dbf7..8a5108d594 100644
--- a/salt/modules/ssh.py
+++ b/salt/modules/ssh.py
@@ -173,7 +173,8 @@ def _replace_auth_key(
# Re-open the file writable after properly closing it
with salt.utils.files.fopen(full, 'w') as _fh:
# Write out any changes
- _fh.writelines(lines)
+ for line in lines:
+ _fh.write(line)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Problem reading or writing to key file: {0}'.format(exc)
diff --git a/salt/modules/state.py b/salt/modules/state.py
index e9c3a09df3..f357283d26 100644
--- a/salt/modules/state.py
+++ b/salt/modules/state.py
@@ -231,7 +231,7 @@ def soft_kill(jid, state_id=None):
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
- at the begining of the next state run.
+ at the beginning of the next state run.
The given state id is the id got a given state execution, so given a state
that looks like this:
@@ -264,7 +264,7 @@ def pause(jid, state_id=None, duration=None):
Set up a state id pause, this instructs a running state to pause at a given
state id. This needs to pass in the jid of the running state and can
optionally pass in a duration in seconds. If a state_id is not passed then
- the jid referenced will be paused at the begining of the next state run.
+ the jid referenced will be paused at the beginning of the next state run.
The given state id is the id got a given state execution, so given a state
that looks like this:
@@ -778,19 +778,18 @@ def request(mods=None,
'kwargs': kwargs
}
})
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- log.error(
- 'Unable to write state request file %s. Check permission.',
- notify_path
- )
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write state request file %s. Check permission.',
+ notify_path
+ )
return ret
@@ -844,19 +843,18 @@ def clear_request(name=None):
req.pop(name)
else:
return False
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- log.error(
- 'Unable to write state request file %s. Check permission.',
- notify_path
- )
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write state request file %s. Check permission.',
+ notify_path
+ )
return True
@@ -1249,13 +1247,12 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
- umask = os.umask(0o77)
- if kwargs.get('cache'):
- if os.path.isfile(cfn):
- with salt.utils.files.fopen(cfn, 'rb') as fp_:
- high_ = serial.load(fp_)
- return st_.state.call_high(high_, orchestration_jid)
- os.umask(umask)
+ with salt.utils.files.set_umask(0o077):
+ if kwargs.get('cache'):
+ if os.path.isfile(cfn):
+ with salt.utils.files.fopen(cfn, 'rb') as fp_:
+ high_ = serial.load(fp_)
+ return st_.state.call_high(high_, orchestration_jid)
mods = salt.utils.args.split_input(mods)
@@ -1280,36 +1277,36 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__['cachedir'], 'sls.p')
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
- with salt.utils.files.fopen(cache_file, 'w+b') as fp_:
- serial.dump(ret, fp_)
- except (IOError, OSError):
- log.error(
- 'Unable to write to SLS cache file %s. Check permission.',
- cache_file
- )
- _set_retcode(ret, high_)
- # Work around Windows multiprocessing bug, set __opts__['test'] back to
- # value from before this function was run.
- __opts__['test'] = orig_test
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
+ with salt.utils.files.fopen(cache_file, 'w+b') as fp_:
+ serial.dump(ret, fp_)
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write to SLS cache file %s. Check permission.',
+ cache_file
+ )
+ _set_retcode(ret, high_)
+ # Work around Windows multiprocessing bug, set __opts__['test'] back to
+ # value from before this function was run.
+ __opts__['test'] = orig_test
+
+ try:
+ with salt.utils.files.fopen(cfn, 'w+b') as fp_:
+ try:
+ serial.dump(high_, fp_)
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+ except (IOError, OSError):
+ log.error(
+ 'Unable to write to highstate cache file %s. Do you have permissions?',
+ cfn
+ )
- try:
- with salt.utils.files.fopen(cfn, 'w+b') as fp_:
- try:
- serial.dump(high_, fp_)
- except TypeError:
- # Can't serialize pydsl
- pass
- except (IOError, OSError):
- log.error(
- 'Unable to write to highstate cache file %s. Do you have permissions?',
- cfn
- )
- os.umask(cumask)
_snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre)
return ret
diff --git a/salt/modules/suse_apache.py b/salt/modules/suse_apache.py
index d2fa584421..9607b9bc49 100644
--- a/salt/modules/suse_apache.py
+++ b/salt/modules/suse_apache.py
@@ -23,7 +23,7 @@ def __virtual__():
'''
Only load the module if apache is installed.
'''
- if salt.utils.path.which('apache2ctl') and __grains__['os_family'] == 'SUSE':
+ if salt.utils.path.which('apache2ctl') and __grains__['os_family'] == 'Suse':
return __virtualname__
return (False, 'apache execution module not loaded: apache not installed.')
diff --git a/salt/modules/trafficserver.py b/salt/modules/trafficserver.py
index f7aeb751c1..423c5ad4bf 100644
--- a/salt/modules/trafficserver.py
+++ b/salt/modules/trafficserver.py
@@ -16,7 +16,6 @@ import subprocess
# Import salt libs
import salt.utils.path
import salt.utils.stringutils
-import salt.utils.versions
__virtualname__ = 'trafficserver'
@@ -203,28 +202,6 @@ def restart_local(drain=False):
return _subprocess(cmd)
-def match_var(regex):
- '''
- Display the current values of all performance statistics or configuration
- variables whose names match the given regular expression.
-
- .. deprecated:: Fluorine
- Use ``match_metric`` or ``match_config`` instead.
-
- .. code-block:: bash
-
- salt '*' trafficserver.match_var regex
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'The \'match_var\' function has been deprecated and will be removed in Salt '
- '{version}. Please use \'match_metric\' or \'match_config\' instead.'
- )
- cmd = _traffic_line('-m', regex)
- log.debug('Running: %s', cmd)
- return _subprocess(cmd)
-
-
def match_metric(regex):
'''
Display the current values of all metrics whose names match the
@@ -345,55 +322,6 @@ def set_config(variable, value):
return _subprocess(cmd)
-def read_var(*args):
- '''
- Read variable definitions from the traffic_line command.
-
- .. deprecated:: Fluorine
- Use ``read_metric`` or ``read_config`` instead. Note that this
- function does not work for Traffic Server versions >= 7.0.
-
- .. code-block:: bash
-
- salt '*' trafficserver.read_var proxy.process.http.tcp_hit_count_stat
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'The \'read_var\' function has been deprecated and will be removed in Salt '
- '{version}. Please use \'read_metric\' or \'read_config\' instead.'
- )
-
- ret = {}
-
- try:
- for arg in args:
- log.debug('Querying: %s', arg)
- cmd = '{0} {1} {2}'.format(_TRAFFICLINE, '-r', arg)
- ret[arg] = _subprocess(cmd)
- except KeyError:
- pass
-
- return ret
-
-
-def set_var(variable, value):
- '''
- .. code-block:: bash
-
- .. deprecated:: Fluorine
- Use ``set_config`` instead. Note that this function does
- not work for Traffic Server versions >= 7.0.
-
- salt '*' trafficserver.set_var proxy.config.http.server_ports
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'The \'set_var\' function has been deprecated and will be removed in Salt '
- '{version}. Please use \'set_config\' instead.'
- )
- return set_config(variable, value)
-
-
def shutdown():
'''
Shut down Traffic Server on the local node.
diff --git a/salt/modules/upstart.py b/salt/modules/upstart.py
index 54007734ec..eb6362b2d9 100644
--- a/salt/modules/upstart.py
+++ b/salt/modules/upstart.py
@@ -98,7 +98,7 @@ def _find_utmp():
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
- if result > 0:
+ if len(result):
return result[sorted(result).pop()]
else:
return False
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 7fb8e16895..385e968c51 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -410,8 +410,7 @@ def _qemu_image_create(vm_name,
log.debug('Copying %s to %s', sfn, img_dest)
salt.utils.files.copyfile(sfn, img_dest)
- mask = os.umask(0)
- os.umask(mask)
+ mask = salt.utils.files.get_umask()
if disk_size and qcow2:
log.debug('Resize qcow2 image to %sM', disk_size)
@@ -433,8 +432,7 @@ def _qemu_image_create(vm_name,
else:
# Create empty disk
try:
- mask = os.umask(0)
- os.umask(mask)
+ mask = salt.utils.files.get_umask()
if disk_size:
log.debug('Create empty image with size %sM', disk_size)
diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py
index 37f7d6f0fd..7fc971a621 100644
--- a/salt/modules/vsphere.py
+++ b/salt/modules/vsphere.py
@@ -3944,7 +3944,7 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None):
Note: The ``dvs_name`` param will override any name set in ``dvs_dict``.
dvs_dict
- Dict representation of the new DVS (exmaple in salt.states.dvs)
+ Dict representation of the new DVS (example in salt.states.dvs)
dvs_name
Name of the DVS to be created.
@@ -4019,7 +4019,7 @@ def update_dvs(dvs_dict, dvs, service_instance=None):
dvs_dict
Dictionary with the values the DVS should be update with
- (exmaple in salt.states.dvs)
+ (example in salt.states.dvs)
dvs
Name of the DVS to be updated.
@@ -4479,7 +4479,7 @@ def create_dvportgroup(portgroup_dict, portgroup_name, dvs,
portgroup_dict
Dictionary with the config values the portgroup should be created with
- (exmaple in salt.states.dvs).
+ (example in salt.states.dvs).
portgroup_name
Name of the portgroup to be created.
@@ -4526,7 +4526,7 @@ def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=True):
portgroup_dict
Dictionary with the values the portgroup should be update with
- (exmaple in salt.states.dvs).
+ (example in salt.states.dvs).
portgroup
Name of the portgroup to be updated.
@@ -4813,7 +4813,7 @@ def create_storage_policy(policy_name, policy_dict, service_instance=None):
policy_dict
Dictionary containing the changes to apply to the policy.
- (exmaple in salt.states.pbm)
+ (example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
@@ -4853,7 +4853,7 @@ def update_storage_policy(policy, policy_dict, service_instance=None):
policy_dict
Dictionary containing the changes to apply to the policy.
- (exmaple in salt.states.pbm)
+ (example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
@@ -6472,7 +6472,7 @@ def configure_host_cache(enabled, datastore=None, swap_size_MiB=None,
swap_size_MiB
Swap size in Mibibytes. Needs to be set if enabled is ``true``. Must be
- smaller thant the datastore size.
+ smaller than the datastore size.
service_instance
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
@@ -7595,7 +7595,7 @@ def _apply_hard_disk(unit_number, key, operation, disk_label=None, size=None,
Action which should be done on the device add or edit
disk_label
- Label of the new disk, can be overriden
+ Label of the new disk, can be overridden
size
Size of the disk
diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py
index 8eb8ddd60c..d321bd538e 100644
--- a/salt/modules/win_file.py
+++ b/salt/modules/win_file.py
@@ -1605,7 +1605,7 @@ def check_perms(path,
``True``.
reset (bool):
- ``True`` wil show what permisisons will be removed by resetting the
+ ``True`` will show what permissions will be removed by resetting the
DACL. ``False`` will do nothing. Default is ``False``.
Returns:
diff --git a/salt/modules/win_update.py b/salt/modules/win_update.py
deleted file mode 100644
index 778d4803c8..0000000000
--- a/salt/modules/win_update.py
+++ /dev/null
@@ -1,750 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Module for running windows updates.
-
-This module is being deprecated and will be removed in Salt Fluorine. Please use
-the ``win_wua`` module instead.
-
-:depends: - win32com
- - win32con
- - win32api
- - pywintypes
-
-.. versionadded:: 2014.7.0
-
-Set windows updates to run by category. Default behavior is to install
-all updates that do not require user interaction to complete.
-Optionally set ``categories`` to a category of your choice to only
-install certain updates. Default is to set to install all available but driver updates.
-The following example will install all Security and Critical Updates,
-and download but not install standard updates.
-
-.. code-block:: bash
-
- salt '*' win_update.install_updates categories="['Critical Updates', 'Security Updates']"
-
-You can also specify a number of features about the update to have a
-fine grain approach to specific types of updates. These are the following
-features/states of updates available for configuring:
-.. code-block:: text
- 'UI' - User interaction required, skipped by default
- 'downloaded' - Already downloaded, included by default
- 'present' - Present on computer, included by default
- 'installed' - Already installed, skipped by default
- 'reboot' - Reboot required, included by default
- 'hidden' - Skip hidden updates, skipped by default
- 'software' - Software updates, included by default
- 'driver' - Driver updates, included by default
-
-The following example installs all updates that don't require a reboot:
-.. code-block:: bash
-
- salt '*' win_update.install_updates skips="[{'reboot':True}]"
-
-
-Once installed Salt will return a similar output:
-
-.. code-block:: bash
-
- 2 : Windows Server 2012 Update (KB123456)
- 4 : Internet Explorer Security Update (KB098765)
- 2 : Malware Definition Update (KB321456)
- ...
-
-The number at the beginning of the line is an OperationResultCode from the Windows Update Agent,
-it's enumeration is described here: https://msdn.microsoft.com/en-us/library/windows/desktop/aa387095(v=vs.85).aspx.
-The result code is then followed by the update name and its KB identifier.
-
-'''
-# pylint: disable=invalid-name,missing-docstring
-
-# Import Python libs
-from __future__ import absolute_import, unicode_literals, print_function
-import logging
-
-# Import 3rd-party libs
-# pylint: disable=import-error
-from salt.ext import six
-from salt.ext.six.moves import range # pylint: disable=no-name-in-module,redefined-builtin
-try:
- import win32com.client
- import pythoncom
- HAS_DEPENDENCIES = True
-except ImportError:
- HAS_DEPENDENCIES = False
-# pylint: enable=import-error
-
-# Import Salt libs
-import salt.utils.platform
-import salt.utils.locales
-import salt.utils.versions
-
-log = logging.getLogger(__name__)
-
-
-def __virtual__():
- '''
- Only works on Windows systems
- '''
- if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'The \'win_update\' module is being deprecated and will be removed '
- 'in Salt {version}. Please use the \'win_wua\' module instead.'
- )
- return True
- return (False, "Module win_update: module has failed dependencies or is not on Windows client")
-
-
-def _gather_update_categories(updateCollection):
- '''
- this is a convenience method to gather what categories of updates are available in any update
- collection it is passed. Typically though, the download_collection.
- Some known categories:
- Updates
- Windows 7
- Critical Updates
- Security Updates
- Update Rollups
- '''
- categories = []
- for i in range(updateCollection.Count):
- update = updateCollection.Item(i)
- for j in range(update.Categories.Count):
- name = update.Categories.Item(j).Name
- if name not in categories:
- log.debug('found category: %s', name)
- categories.append(name)
- return categories
-
-
-class PyWinUpdater(object):
- def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
- skipInstalled=True, skipReboot=False, skipPresent=False,
- skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
- log.debug('CoInitializing the pycom system')
- pythoncom.CoInitialize()
-
- self.skipUI = skipUI
- self.skipDownloaded = skipDownloaded
- self.skipInstalled = skipInstalled
- self.skipReboot = skipReboot
- self.skipPresent = skipPresent
- self.skipHidden = skipHidden
-
- self.skipSoftwareUpdates = skipSoftwareUpdates
- self.skipDriverUpdates = skipDriverUpdates
-
- # the list of categories that the user wants to be searched for.
- self.categories = categories
-
- # the list of categories that are present in the updates found.
- self.foundCategories = []
- # careful not to get those two confused.
-
- log.debug('dispatching update_session to keep the session object.')
- self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
-
- log.debug('update_session got. Now creating a win_searcher to seek out the updates')
- self.win_searcher = self.update_session.CreateUpdateSearcher()
-
- # list of updates that are applicable by current settings.
- self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
-
- # list of updates to be installed.
- self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
-
- # the object responsible for fetching the actual downloads.
- self.win_downloader = self.update_session.CreateUpdateDownloader()
- self.win_downloader.Updates = self.download_collection
-
- # the object responsible for the installing of the updates.
- self.win_installer = self.update_session.CreateUpdateInstaller()
- self.win_installer.Updates = self.install_collection
-
- # the results of the download process
- self.download_results = None
-
- # the results of the installation process
- self.install_results = None
-
- # search results from CreateUpdateSearcher()
- self.search_results = None
-
- def Search(self, searchString):
- try:
- log.debug('beginning search of the passed string: %s', searchString)
- self.search_results = self.win_searcher.Search(searchString)
- log.debug('search completed successfully.')
- except Exception as exc:
- log.info('search for updates failed. %s', exc)
- return exc
-
- log.debug('parsing results. %s updates were found.',
- self.search_results.Updates.Count)
-
- try:
- # step through the list of the updates to ensure that the updates match the
- # features desired.
- for update in self.search_results.Updates:
- # this skipps an update if UI updates are not desired.
- if update.InstallationBehavior.CanRequestUserInput:
- log.debug(U'Skipped update {0} - requests user input'.format(update.title))
- continue
-
- # if this update is already downloaded, it doesn't need to be in
- # the download_collection. so skipping it unless the user mandates re-download.
- if self.skipDownloaded and update.IsDownloaded:
- log.debug(
- 'Skipped update %s - already downloaded',
- update.title
- )
- continue
-
- # check this update's categories against the ones desired.
- for category in update.Categories:
- # this is a zero guard. these tests have to be in this order
- # or it will error out when the user tries to search for
- # updates with out specifying categories.
- if self.categories is None or category.Name in self.categories:
- # adds it to the list to be downloaded.
- self.download_collection.Add(update)
- log.debug('added update %s', update.title)
- # ever update has 2 categories. this prevents the
- # from being added twice.
- break
- log.debug('download_collection made. gathering found categories.')
-
- # gets the categories of the updates available in this collection of updates
- self.foundCategories = _gather_update_categories(self.download_collection)
- log.debug('found categories: %s',
- six.text_type(self.foundCategories))
- return True
- except Exception as exc:
- log.info('parsing updates failed. %s', exc)
- return exc
-
- def AutoSearch(self):
- '''
- this function generates a search string. simplifying the search function while
- still providing as many features as possible.
- '''
- search_string = ''
- searchParams = []
-
- if self.skipInstalled:
- searchParams.append('IsInstalled=0')
- else:
- searchParams.append('IsInstalled=1')
-
- if self.skipHidden:
- searchParams.append('IsHidden=0')
- else:
- searchParams.append('IsHidden=1')
-
- if self.skipReboot:
- searchParams.append('RebootRequired=0')
- else:
- searchParams.append('RebootRequired=1')
-
- if self.skipPresent:
- searchParams.append('IsPresent=0')
- else:
- searchParams.append('IsPresent=1')
-
- for i in searchParams:
- search_string += '{0} and '.format(i)
-
- if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
- search_string += 'Type=\'Software\' or Type=\'Driver\''
- elif not self.skipSoftwareUpdates:
- search_string += 'Type=\'Software\''
- elif not self.skipDriverUpdates:
- search_string += 'Type=\'Driver\''
- else:
- return False
- # if there is no type, the is nothing to search.
- log.debug('generated search string: %s', search_string)
- return self.Search(search_string)
-
- def Download(self):
- # chase the download_collection! do the actual download process.
- try:
- # if the download_collection is empty. no need to download things.
- if self.download_collection.Count != 0:
- self.download_results = self.win_downloader.Download()
- else:
- log.debug('Skipped downloading, all updates were already cached.')
- return True
- except Exception as exc:
- log.debug('failed in the downloading %s.', exc)
- return exc
-
- def Install(self):
- # beat those updates into place!
- try:
- # this does not draw from the download_collection. important thing to know.
- # the blugger is created regardless of what the download_collection has done. but it
- # will only download those updates which have been downloaded and are ready.
- for update in self.search_results.Updates:
- if update.IsDownloaded:
- self.install_collection.Add(update)
- log.debug('Updates prepared. beginning installation')
- except Exception as exc:
- log.info('Preparing install list failed: %s', exc)
- return exc
-
- # accept eula if not accepted
- try:
- for update in self.search_results.Updates:
- if not update.EulaAccepted:
- log.debug('Accepting EULA: %s', update.Title)
- update.AcceptEula()
- except Exception as exc:
- log.info('Accepting Eula failed: %s', exc)
- return exc
-
- # if the blugger is empty. no point it starting the install process.
- if self.install_collection.Count != 0:
- log.debug('Install list created, about to install')
- try:
- # the call to install.
- self.install_results = self.win_installer.Install()
- log.info('Installation of updates complete')
- return True
- except Exception as exc:
- log.info('Installation failed: %s', exc)
- return exc
- else:
- log.info('no new updates.')
- return True
-
- def GetInstallationResults(self):
- '''
- this gets results of installation process.
- '''
- # if the blugger is empty, the results are nil.
- log.debug('blugger has {0} updates in it'.format(self.install_collection.Count))
- if self.install_collection.Count == 0:
- return {}
-
- updates = []
- log.debug('repairing update list')
- for i in range(self.install_collection.Count):
- # this gets the result from install_results, but the title comes from the update
- # collection install_collection.
- updates.append('{0}: {1}'.format(
- self.install_results.GetUpdateResult(i).ResultCode,
- self.install_collection.Item(i).Title))
-
- log.debug('Update results enumerated, now making a library to pass back')
- results = {}
-
- # translates the list of update results into a library that salt expects.
- for i, update in enumerate(updates):
- results['update {0}'.format(i)] = update
-
- log.debug('Update information complied. returning')
- return results
-
- def GetInstallationResultsPretty(self):
- '''
- converts the installation results into a pretty print.
- '''
- updates = self.GetInstallationResults()
- ret = 'The following are the updates and their return codes.\n'
- for i in updates:
- ret += '\t{0}\n'.format(updates[i])
- return ret
-
- def GetDownloadResults(self):
- updates = []
- for i in range(self.download_collection.Count):
- updates.append('{0}: {1}'.format(
- six.text_type(self.download_results.GetUpdateResult(i).ResultCode),
- six.text_type(self.download_collection.Item(i).Title)))
- results = {}
- for i, update in enumerate(updates):
- results['update {0}'.format(i)] = update
- return results
-
- def GetSearchResultsVerbose(self):
- updates = []
- log.debug('parsing results. %s updates were found.',
- self.download_collection.count)
-
- for update in self.download_collection:
- if update.InstallationBehavior.CanRequestUserInput:
- log.debug('Skipped update %s', update.title)
- continue
- # More fields can be added from https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
- update_com_fields = ['Categories', 'Deadline', 'Description',
- 'Identity', 'IsMandatory',
- 'KBArticleIDs', 'MaxDownloadSize', 'MinDownloadSize',
- 'MoreInfoUrls', 'MsrcSeverity', 'ReleaseNotes',
- 'SecurityBulletinIDs', 'SupportUrl', 'Title']
- simple_enums = ['KBArticleIDs', 'MoreInfoUrls', 'SecurityBulletinIDs']
- # update_dict = {k: getattr(update, k) for k in update_com_fields}
- update_dict = {}
- for f in update_com_fields:
- v = getattr(update, f)
- if not any([isinstance(v, bool), isinstance(v, six.string_types)]):
- # Fields that require special evaluation.
- if f in simple_enums:
- v = [x for x in v]
- elif f == 'Categories':
- v = [{'Name': cat.Name, 'Description': cat.Description} for cat in v]
- elif f == 'Deadline':
- # Deadline will be useful and should be added.
- # However, until it can be tested with a date object
- # as returned by the COM, it is unclear how to
- # handle this field.
- continue
- elif f == 'Identity':
- v = {'RevisionNumber': v.RevisionNumber,
- 'UpdateID': v.UpdateID}
- update_dict[f] = v
- updates.append(update_dict)
- log.debug('added update %s', update.title)
- return updates
-
- def GetSearchResults(self, fields=None):
- """Reduce full updates information to the most important information."""
- updates_verbose = self.GetSearchResultsVerbose()
- if fields is not None:
- updates = [dict((k, v) for k, v in update.items() if k in fields)
- for update in updates_verbose]
- return updates
- # Return list of titles.
- return [update['Title'] for update in updates_verbose]
-
- def SetCategories(self, categories):
- self.categories = categories
-
- def GetCategories(self):
- return self.categories
-
- def GetAvailableCategories(self):
- return self.foundCategories
-
- def SetSkips(self, skips):
- if skips:
- for i in skips:
- value = i[next(six.iterkeys(i))]
- skip = next(six.iterkeys(i))
- self.SetSkip(skip, value)
- log.debug('was asked to set %s to %s', skip, value)
-
- def SetSkip(self, skip, state):
- if skip == 'UI':
- self.skipUI = state
- elif skip == 'downloaded':
- self.skipDownloaded = state
- elif skip == 'installed':
- self.skipInstalled = state
- elif skip == 'reboot':
- self.skipReboot = state
- elif skip == 'present':
- self.skipPresent = state
- elif skip == 'hidden':
- self.skipHidden = state
- elif skip == 'software':
- self.skipSoftwareUpdates = state
- elif skip == 'driver':
- self.skipDriverUpdates = state
- log.debug('new search state: \n\tUI: %s\n\tDownload: %s\n\tInstalled: %s\n\treboot :%s\n\tPresent: %s\n\thidden: %s\n\tsoftware: %s\n\tdriver: %s',
- self.skipUI, self.skipDownloaded, self.skipInstalled, self.skipReboot,
- self.skipPresent, self.skipHidden, self.skipSoftwareUpdates, self.skipDriverUpdates)
-
- def __str__(self):
- results = 'There are {0} updates, by category there are:\n'.format(
- self.download_collection.count)
- for category in self.foundCategories:
- count = 0
- for update in self.download_collection:
- for cat in update.Categories:
- if category == cat.Name:
- count += 1
- results += '\t{0}: {1}\n'.format(category, count)
- return results
-
-
-def _search(quidditch, retries=5):
- '''
- a wrapper method for the pywinupdater class. I might move this into the class, but right now,
- that is to much for one class I think.
- '''
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('Searching. tries left: %s', retries)
- # let the updater make its own search string. MORE POWER this way.
- passed = quidditch.AutoSearch()
- log.debug('Done searching: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(str(retries))
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, True, retries)
- passed = False
- if clean:
- # bragging rights.
- comment += 'Search was done without error.\n'
-
- return (comment, True, retries)
-
-
-def _download(quidditch, retries=5):
- '''
- another wrapper method.
- '''
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('Downloading. tries left: %s', retries)
- passed = quidditch.Download()
- log.debug('Done downloading: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(str(passed))
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(str(retries))
- passed = False
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, False, retries)
- if clean:
- comment += 'Download was done without error.\n'
- return (comment, True, retries)
-
-
-def _install(quidditch, retries=5):
- '''
- and the last wrapper method. keeping things simple.
- '''
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('download_collection is this long: %s',
- quidditch.install_collection.Count)
- log.debug('Installing. tries left: %s', retries)
- passed = quidditch.Install()
- log.info('Done installing: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(str(passed))
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(str(retries))
- passed = False
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, False, retries)
- if clean:
- comment += 'Install was done without error.\n'
- return (comment, True, retries)
-
-
-# this is where the actual functions available to salt begin.
-
-def list_updates(verbose=False, fields=None, skips=None, retries=5, categories=None):
- '''
- Returns a summary of available updates, grouped into their non-mutually
- exclusive categories.
-
- verbose
- Return full set of results, including several fields from the COM.
-
- fields
- Return a list of specific fields for each update. The optional
- values here are those at the root level of the verbose list. This
- is superseded by the verbose option.
-
- retries
- Number of retries to make before giving up. This is total, not per
- step.
-
- categories
- Specify the categories to list. Must be passed as a list.
-
- .. code-block:: bash
-
- salt '*' win_update.list_updates categories="['Updates']"
-
- Categories include, but are not limited to, the following:
-
- * Updates
- * Windows 7
- * Critical Updates
- * Security Updates
- * Update Rollups
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage
- salt '*' win_update.list_updates
-
- # Specific Fields
- salt '*' win_update.list_updates fields="['Title', 'Description']"
-
- # List all critical updates list in verbose detail
- salt '*' win_update.list_updates categories="['Critical Updates']" verbose=True
-
- '''
-
- log.debug('categories to search for are: %s', categories)
- updates = PyWinUpdater()
- if categories:
- updates.SetCategories(categories)
- updates.SetSkips(skips)
-
- # this is where we be seeking the things! yar!
- comment, passed, retries = _search(updates, retries)
- if not passed:
- return (comment, str(passed))
- log.debug('verbose: %s', verbose)
- if verbose:
- return updates.GetSearchResultsVerbose()
- return updates.GetSearchResults(fields=fields)
-
-
-def download_updates(skips=None, retries=5, categories=None):
- '''
- Downloads all available updates, skipping those that require user
- interaction.
-
- Various aspects of the updates can be included or excluded. this feature is
- still in development.
-
- retries
- Number of retries to make before giving up. This is total, not per
- step.
-
- categories
- Specify the categories to update. Must be passed as a list.
-
- .. code-block:: bash
-
- salt '*' win_update.download_updates categories="['Updates']"
-
- Categories include the following:
-
- * Updates
- * Windows 7
- * Critical Updates
- * Security Updates
- * Update Rollups
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage
- salt '*' win_update.download_updates
-
- # Download critical updates only
- salt '*' win_update.download_updates categories="['Critical Updates']"
-
- '''
-
- log.debug('categories to search for are: %s', categories)
- quidditch = PyWinUpdater(skipDownloaded=True)
- quidditch.SetCategories(categories)
- quidditch.SetSkips(skips)
-
- # this is where we be seeking the things! yar!
- comment, passed, retries = _search(quidditch, retries)
- if not passed:
- return (comment, str(passed))
-
- # this is where we get all the things! i.e. download updates.
- comment, passed, retries = _download(quidditch, retries)
- if not passed:
- return (comment, str(passed))
-
- try:
- comment = quidditch.GetDownloadResults()
- except Exception as exc:
- comment = 'could not get results, but updates were installed. {0}'.format(exc)
- return 'Windows is up to date. \n{0}'.format(comment)
-
-
-def install_updates(skips=None, retries=5, categories=None):
- '''
- Downloads and installs all available updates, skipping those that require
- user interaction.
-
- Add ``cached`` to only install those updates which have already been downloaded.
-
- you can set the maximum number of retries to ``n`` in the search process by
- adding: ``retries=n``
-
- various aspects of the updates can be included or excluded. This function is
- still under development.
-
- retries
- Number of retries to make before giving up. This is total, not per
- step.
-
- categories
- Specify the categories to install. Must be passed as a list.
-
- .. code-block:: bash
-
- salt '*' win_update.install_updates categories="['Updates']"
-
- Categories include the following:
-
- * Updates
- * Windows 7
- * Critical Updates
- * Security Updates
- * Update Rollups
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage
- salt '*' win_update.install_updates
-
- # Install all critical updates
- salt '*' win_update.install_updates categories="['Critical Updates']"
-
- '''
-
- log.debug('categories to search for are: %s', categories)
- quidditch = PyWinUpdater()
- quidditch.SetCategories(categories)
- quidditch.SetSkips(skips)
-
- # this is where we be seeking the things! yar!
- comment, passed, retries = _search(quidditch, retries)
- if not passed:
- return (comment, str(passed))
-
- # this is where we get all the things! i.e. download updates.
- comment, passed, retries = _download(quidditch, retries)
- if not passed:
- return (comment, str(passed))
-
- # this is where we put things in their place!
- comment, passed, retries = _install(quidditch, retries)
- if not passed:
- return (comment, str(passed))
-
- try:
- comment = quidditch.GetInstallationResultsPretty()
- except Exception as exc:
- comment = 'Could not get results, but updates were installed. {0}'.format(exc)
- return 'Windows is up to date. \n{0}'.format(comment)
diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py
index 222a35ec21..b37d7676ca 100644
--- a/salt/modules/win_wua.py
+++ b/salt/modules/win_wua.py
@@ -56,7 +56,6 @@ import logging
# Import Salt libs
import salt.utils.platform
-import salt.utils.versions
import salt.utils.win_update
from salt.exceptions import CommandExecutionError
@@ -228,87 +227,6 @@ def available(software=True,
return updates.summary() if summary else updates.list()
-def list_update(name, download=False, install=False):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`get` instead
-
- Returns details for all updates that match the search criteria
-
- Args:
-
- name (str):
- The name of the update you're searching for. This can be the GUID, a
- KB number, or any part of the name of the update. GUIDs and KBs are
- preferred. Run ``list_updates`` to get the GUID for the update
- you're looking for.
-
- download (bool):
- Download the update returned by this function. Run this function
- first to see if the update exists, then set ``download=True`` to
- download the update.
-
- install (bool):
- Install the update returned by this function. Run this function
- first to see if the update exists, then set ``install=True`` to
- install the update.
-
- Returns:
-
- dict: Returns a dict containing a list of updates that match the name if
- download and install are both set to False. Should usually be a single
- update, but can return multiple if a partial name is given.
-
- If download or install is set to true it will return the results of the
- operation.
-
- .. code-block:: cfg
-
- List of Updates:
- {'': {'Title': ,
- 'KB': ,
- 'GUID':
- 'Description': ,
- 'Downloaded': ,
- 'Installed': ,
- 'Mandatory': ,
- 'UserInput': ,
- 'EULAAccepted': ,
- 'Severity': ,
- 'NeedsReboot': ,
- 'RebootBehavior': ,
- 'Categories': [ '',
- '',
- ...]
- }
- }
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Recommended Usage using GUID without braces
- # Use this to find the status of a specific update
- salt '*' win_wua.list_update 12345678-abcd-1234-abcd-1234567890ab
-
- # Use the following if you don't know the GUID:
-
- # Using a KB number (could possibly return multiple results)
- # Not all updates have an associated KB
- salt '*' win_wua.list_update KB3030298
-
- # Using part or all of the name of the update
- # Could possibly return multiple results
- # Not all updates have an associated KB
- salt '*' win_wua.list_update 'Microsoft Camera Codec Pack'
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'get\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return get(name, download, install)
-
-
def get(name, download=False, install=False):
'''
.. versionadded:: 2017.7.0
@@ -401,142 +319,6 @@ def get(name, download=False, install=False):
return ret if ret else updates.list()
-def list_updates(software=True,
- drivers=False,
- summary=False,
- skip_installed=True,
- categories=None,
- severities=None,
- download=False,
- install=False):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`list` instead
-
- Returns a detailed list of available updates or a summary. If download or
- install is True the same list will be downloaded and/or installed.
-
- Args:
-
- software (bool):
- Include software updates in the results (default is True)
-
- drivers (bool):
- Include driver updates in the results (default is False)
-
- summary (bool):
- - True: Return a summary of updates available for each category.
- - False (default): Return a detailed list of available updates.
-
- skip_installed (bool):
- Skip installed updates in the results (default is False)
-
- download (bool):
- (Overrides reporting functionality) Download the list of updates
- returned by this function. Run this function first with
- ``download=False`` to see what will be downloaded, then set
- ``download=True`` to download the updates.
-
- install (bool):
- (Overrides reporting functionality) Install the list of updates
- returned by this function. Run this function first with
- ``install=False`` to see what will be installed, then set
- ``install=True`` to install the updates.
-
- categories (list):
- Specify the categories to list. Must be passed as a list. All
- categories returned by default.
-
- Categories include the following:
-
- * Critical Updates
- * Definition Updates
- * Drivers (make sure you set drivers=True)
- * Feature Packs
- * Security Updates
- * Update Rollups
- * Updates
- * Update Rollups
- * Windows 7
- * Windows 8.1
- * Windows 8.1 drivers
- * Windows 8.1 and later drivers
- * Windows Defender
-
- severities (list):
- Specify the severities to include. Must be passed as a list. All
- severities returned by default.
-
- Severities include the following:
-
- * Critical
- * Important
-
- Returns:
-
- dict: Returns a dict containing either a summary or a list of updates:
-
- .. code-block:: cfg
-
- List of Updates:
- {'': {'Title': ,
- 'KB': ,
- 'GUID':
- 'Description': ,
- 'Downloaded': ,
- 'Installed': ,
- 'Mandatory': ,
- 'UserInput': ,
- 'EULAAccepted': ,
- 'Severity': ,
- 'NeedsReboot': ,
- 'RebootBehavior': ,
- 'Categories': [ '',
- '',
- ...]
- }
- }
-
- Summary of Updates:
- {'Total': ,
- 'Available': ,
- 'Downloaded': ,
- 'Installed': ,
- 'Categories': { : ,
- : ,
- ... }
- }
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage (list all software updates)
- salt '*' win_wua.list_updates
-
- # List all updates with categories of Critical Updates and Drivers
- salt '*' win_wua.list_updates categories=['Critical Updates','Drivers']
-
- # List all Critical Security Updates
- salt '*' win_wua.list_updates categories=['Security Updates'] severities=['Critical']
-
- # List all updates with a severity of Critical
- salt '*' win_wua.list_updates severities=['Critical']
-
- # A summary of all available updates
- salt '*' win_wua.list_updates summary=True
-
- # A summary of all Feature Packs and Windows 8.1 Updates
- salt '*' win_wua.list_updates categories=['Feature Packs','Windows 8.1'] summary=True
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'list\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return list(software, drivers, summary, skip_installed, categories,
- severities, download, install)
-
-
def list(software=True,
drivers=False,
summary=False,
@@ -688,74 +470,6 @@ def list(software=True,
return ret
-def download_update(name):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`download` instead
-
- Downloads a single update.
-
- Args:
-
- name (str):
- The name of the update to download. This can be a GUID, a KB number,
- or any part of the name. To ensure a single item is matched the GUID
- is preferred.
-
- .. note::
- If more than one result is returned an error will be raised.
-
- Returns:
-
- dict: A dictionary containing the results of the download
-
- CLI Examples:
-
- .. code-block:: bash
-
- salt '*' win_wua.download_update 12345678-abcd-1234-abcd-1234567890ab
-
- salt '*' win_wua.download_update KB12312321
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'download\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return download(name)
-
-
-def download_updates(names):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`download` instead
-
- Downloads updates that match the list of passed identifiers. It's easier to
- use this function by using list_updates and setting install=True.
-
- Args:
-
- names (list):
- A list of updates to download. This can be any combination of GUIDs,
- KB numbers, or names. GUIDs or KBs are preferred.
-
- Returns:
-
- dict: A dictionary containing the details about the downloaded updates
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage
- salt '*' win_wua.download_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233']
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'download\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return download(names)
-
-
def download(names):
'''
.. versionadded:: 2017.7.0
@@ -808,73 +522,6 @@ def download(names):
return wua.download(updates)
-def install_update(name):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`install` instead
-
- Installs a single update
-
- Args:
-
- name (str): The name of the update to install. This can be a GUID, a KB
- number, or any part of the name. To ensure a single item is matched the
- GUID is preferred.
-
- .. note::
- If no results or more than one result is returned an error will be
- raised.
-
- Returns:
-
- dict: A dictionary containing the results of the install
-
- CLI Examples:
-
- .. code-block:: bash
-
- salt '*' win_wua.install_update 12345678-abcd-1234-abcd-1234567890ab
-
- salt '*' win_wua.install_update KB12312231
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'install\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return install(name)
-
-
-def install_updates(names):
- '''
- .. deprecated:: 2017.7.0
- Use :func:`install` instead
-
- Installs updates that match the list of identifiers. It may be easier to use
- the list_updates function and set install=True.
-
- Args:
-
- names (list): A list of updates to install. This can be any combination
- of GUIDs, KB numbers, or names. GUIDs or KBs are preferred.
-
- Returns:
-
- dict: A dictionary containing the details about the installed updates
-
- CLI Examples:
-
- .. code-block:: bash
-
- # Normal Usage
- salt '*' win_wua.install_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB12323211']
- '''
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'This function is replaced by \'install\' as of Salt 2017.7.0. This '
- 'warning will be removed in Salt Fluorine.')
- return install(names)
-
-
def install(names):
'''
.. versionadded:: 2017.7.0
diff --git a/salt/modules/x509.py b/salt/modules/x509.py
index 108379635e..f741cf085e 100644
--- a/salt/modules/x509.py
+++ b/salt/modules/x509.py
@@ -756,28 +756,27 @@ def write_pem(text, path, overwrite=True, pem_type=None):
"-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." \\
path=/etc/pki/mycert.crt
'''
- old_umask = os.umask(0o77)
- text = get_pem_entry(text, pem_type=pem_type)
- _dhparams = ''
- _private_key = ''
- if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and \
- not overwrite:
- _filecontents = _text_or_file(path)
- try:
- _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
- except salt.exceptions.SaltInvocationError:
- pass
- try:
- _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
- except salt.exceptions.SaltInvocationError:
- pass
- with salt.utils.files.fopen(path, 'w') as _fp:
- if pem_type and pem_type == 'CERTIFICATE' and _private_key:
- _fp.write(salt.utils.stringutils.to_str(_private_key))
- _fp.write(text)
- if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
- _fp.write(salt.utils.stringutils.to_str(_dhparams))
- os.umask(old_umask)
+ with salt.utils.files.set_umask(0o077):
+ text = get_pem_entry(text, pem_type=pem_type)
+ _dhparams = ''
+ _private_key = ''
+ if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and \
+ not overwrite:
+ _filecontents = _text_or_file(path)
+ try:
+ _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
+ except salt.exceptions.SaltInvocationError:
+ pass
+ try:
+ _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
+ except salt.exceptions.SaltInvocationError:
+ pass
+ with salt.utils.files.fopen(path, 'w') as _fp:
+ if pem_type and pem_type == 'CERTIFICATE' and _private_key:
+ _fp.write(salt.utils.stringutils.to_str(_private_key))
+ _fp.write(text)
+ if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
+ _fp.write(salt.utils.stringutils.to_str(_dhparams))
return 'PEM written to {0}'.format(path)
diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py
index b84d8abcc9..f0d7aaafab 100644
--- a/salt/modules/zypper.py
+++ b/salt/modules/zypper.py
@@ -1058,7 +1058,7 @@ def install(name=None,
This parameter is ignored if ``pkgs`` or ``sources`` is passed.
resolve_capabilities
- If this option is set to True zypper will take capabilites into
+ If this option is set to True zypper will take capabilities into
account. In this case names which are just provided by a package
will get installed. Default is False.
diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py
index 7f927e9473..a2f8bc8115 100644
--- a/salt/netapi/rest_tornado/saltnado.py
+++ b/salt/netapi/rest_tornado/saltnado.py
@@ -307,9 +307,9 @@ class EventListener(object):
'''
if request not in self.request_map:
return
- for tag, future in self.request_map[request]:
+ for tag, matcher, future in self.request_map[request]:
# timeout the future
- self._timeout_future(tag, future)
+ self._timeout_future(tag, matcher, future)
# remove the timeout
if future in self.timeout_map:
tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
@@ -317,9 +317,22 @@ class EventListener(object):
del self.request_map[request]
+ @staticmethod
+ def prefix_matcher(mtag, tag):
+ if mtag is None or tag is None:
+ raise TypeError('mtag or tag can not be None')
+ return mtag.startswith(tag)
+
+ @staticmethod
+ def exact_matcher(mtag, tag):
+ if mtag is None or tag is None:
+ raise TypeError('mtag or tag can not be None')
+ return mtag == tag
+
def get_event(self,
request,
tag='',
+ matcher=prefix_matcher.__func__,
callback=None,
timeout=None
):
@@ -339,43 +352,52 @@ class EventListener(object):
tornado.ioloop.IOLoop.current().add_callback(callback, future)
future.add_done_callback(handle_future)
# add this tag and future to the callbacks
- self.tag_map[tag].append(future)
- self.request_map[request].append((tag, future))
+ self.tag_map[(tag, matcher)].append(future)
+ self.request_map[request].append((tag, matcher, future))
if timeout:
- timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future)
+ timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future)
self.timeout_map[future] = timeout_future
return future
- def _timeout_future(self, tag, future):
+ def _timeout_future(self, tag, matcher, future):
'''
Timeout a specific future
'''
- if tag not in self.tag_map:
+ if (tag, matcher) not in self.tag_map:
return
if not future.done():
future.set_exception(TimeoutException())
- self.tag_map[tag].remove(future)
- if len(self.tag_map[tag]) == 0:
- del self.tag_map[tag]
+ self.tag_map[(tag, matcher)].remove(future)
+ if len(self.tag_map[(tag, matcher)]) == 0:
+ del self.tag_map[(tag, matcher)]
def _handle_event_socket_recv(self, raw):
'''
Callback for events on the event sub socket
'''
mtag, data = self.event.unpack(raw, self.event.serial)
+
# see if we have any futures that need this info:
- for tag_prefix, futures in six.iteritems(self.tag_map):
- if mtag.startswith(tag_prefix):
- for future in futures:
- if future.done():
- continue
- future.set_result({'data': data, 'tag': mtag})
- self.tag_map[tag_prefix].remove(future)
- if future in self.timeout_map:
- tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
- del self.timeout_map[future]
+ for (tag, matcher), futures in six.iteritems(self.tag_map):
+ try:
+ is_matched = matcher(mtag, tag)
+ except Exception as e:
+ log.error('Failed to run a matcher.', exc_info=True)
+ is_matched = False
+
+ if not is_matched:
+ continue
+
+ for future in futures:
+ if future.done():
+ continue
+ future.set_result({'data': data, 'tag': mtag})
+ self.tag_map[(tag, matcher)].remove(future)
+ if future in self.timeout_map:
+ tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
+ del self.timeout_map[future]
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
@@ -924,64 +946,83 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
if self.application.opts['order_masters']:
syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])
- job_not_running = self.job_not_running(pub_data['jid'],
- chunk['tgt'],
- f_call['kwargs']['tgt_type'],
- minions_remaining=minions_remaining
- )
+ # To ensure job_not_running and all_return are terminated by each other, communicate using a future
+ is_finished = Future()
+ job_not_running_future = self.job_not_running(pub_data['jid'],
+ chunk['tgt'],
+ f_call['kwargs']['tgt_type'],
+ is_finished,
+ minions_remaining=list(minions_remaining),
+ )
# if we have a min_wait, do that
if syndic_min_wait is not None:
yield syndic_min_wait
- # we are completed when either all minions return or the job isn't running anywhere
- chunk_ret = yield self.all_returns(pub_data['jid'],
- finish_futures=[job_not_running],
- minions_remaining=minions_remaining,
- )
- raise tornado.gen.Return(chunk_ret)
+ all_return_future = self.all_returns(pub_data['jid'],
+ is_finished,
+ minions_remaining=list(minions_remaining),
+ )
+ yield job_not_running_future
+ raise tornado.gen.Return((yield all_return_future))
@tornado.gen.coroutine
def all_returns(self,
jid,
- finish_futures=None,
+ is_finished,
minions_remaining=None,
):
'''
Return a future which will complete once all returns are completed
- (according to minions_remaining), or one of the passed in "finish_futures" completes
+ (according to minions_remaining), or one of the passed in "is_finished" completes
'''
- if finish_futures is None:
- finish_futures = []
if minions_remaining is None:
minions_remaining = []
- ret_tag = tagify([jid, 'ret'], 'job')
chunk_ret = {}
+
+ minion_events = {}
+ for minion in minions_remaining:
+ tag = tagify([jid, 'ret', minion], 'job')
+ minion_event = self.application.event_listener.get_event(self,
+ tag=tag,
+ matcher=EventListener.exact_matcher,
+ timeout=self.application.opts['timeout'])
+ minion_events[minion_event] = minion
+
while True:
- ret_event = self.application.event_listener.get_event(self,
- tag=ret_tag,
- )
- f = yield Any([ret_event] + finish_futures)
- if f in finish_futures:
- raise tornado.gen.Return(chunk_ret)
- event = f.result()
- chunk_ret[event['data']['id']] = event['data']['return']
- # its possible to get a return that wasn't in the minion_remaining list
+ f = yield Any(minion_events.keys() + [is_finished])
try:
- minions_remaining.remove(event['data']['id'])
+ if f is is_finished:
+ for event in minion_events:
+ if not event.done():
+ event.set_result(None)
+ raise tornado.gen.Return(chunk_ret)
+ f_result = f.result()
+ chunk_ret[f_result['data']['id']] = f_result['data']['return']
+ except TimeoutException:
+ pass
+
+ # clear finished event future
+ try:
+ minions_remaining.remove(minion_events[f])
+ del minion_events[f]
except ValueError:
pass
+
if len(minions_remaining) == 0:
+ if not is_finished.done():
+ is_finished.set_result(True)
raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def job_not_running(self,
- jid,
- tgt,
- tgt_type,
- minions_remaining=None,
- ):
+ jid,
+ tgt,
+ tgt_type,
+ is_finished,
+ minions_remaining=None,
+ ):
'''
Return a future which will complete once jid (passed in) is no longer
running on tgt
@@ -998,12 +1039,21 @@ class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
minion_running = False
while True:
try:
- event = yield self.application.event_listener.get_event(self,
- tag=ping_tag,
- timeout=self.application.opts['gather_job_timeout'],
- )
+ event = self.application.event_listener.get_event(self,
+ tag=ping_tag,
+ timeout=self.application.opts['gather_job_timeout'],
+ )
+ f = yield Any([event, is_finished])
+ # When the entire routine is finished, clean up the other futures and return the result
+ if f is is_finished:
+ if not event.done():
+ event.set_result(None)
+ raise tornado.gen.Return(True)
+ event = f.result()
except TimeoutException:
if not minion_running:
+ if not is_finished.done():
+ is_finished.set_result(True)
raise tornado.gen.Return(True)
else:
ping_pub_data = yield self.saltclients['local'](tgt,
diff --git a/salt/payload.py b/salt/payload.py
index d58fbfd2be..de15afc22d 100644
--- a/salt/payload.py
+++ b/salt/payload.py
@@ -17,6 +17,7 @@ import salt.log
import salt.crypt
import salt.transport.frame
import salt.utils.immutabletypes as immutabletypes
+import salt.utils.stringutils
from salt.exceptions import SaltReqTimeoutError
# Import third party libs
@@ -132,15 +133,21 @@ class Serial(object):
the contents cannot be converted.
'''
try:
+ def ext_type_decoder(code, data):
+ if code == 78:
+ data = salt.utils.stringutils.to_unicode(data)
+ return datetime.datetime.strptime(data, '%Y%m%dT%H:%M:%S.%f')
+ return data
+
gc.disable() # performance optimization for msgpack
if msgpack.version >= (0, 4, 0):
# msgpack only supports 'encoding' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
- ret = msgpack.loads(msg, use_list=True, encoding=encoding)
+ ret = msgpack.loads(msg, use_list=True, ext_hook=ext_type_decoder, encoding=encoding)
else:
- ret = msgpack.loads(msg, use_list=True)
+ ret = msgpack.loads(msg, use_list=True, ext_hook=ext_type_decoder)
if six.PY3 and encoding is None and not raw:
ret = salt.transport.frame.decode_embedded_strs(ret)
except Exception as exc:
@@ -179,149 +186,62 @@ class Serial(object):
Since this changes the wire protocol, this
option should not be used outside of IPC.
'''
- # Got this trick from msgpack.fallback._pack
- while True:
- try:
- if msgpack.version >= (0, 4, 0):
- # msgpack only supports 'use_bin_type' starting in 0.4.0.
- # Due to this, if we don't need it, don't pass it at all so
- # that under Python 2 we can still work with older versions
- # of msgpack.
- return msgpack.dumps(msg, use_bin_type=use_bin_type)
- else:
- return msgpack.dumps(msg)
- except (OverflowError, msgpack.exceptions.PackValueError):
+ def ext_type_encoder(obj):
+ if isinstance(obj, six.integer_types):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
- # We borrow the technique used by TypeError below
- def verylong_encoder(obj):
- if isinstance(obj, dict):
- for key, value in six.iteritems(obj.copy()):
- obj[key] = verylong_encoder(value)
- return dict(obj)
- elif isinstance(obj, (list, tuple)):
- obj = list(obj)
- for idx, entry in enumerate(obj):
- obj[idx] = verylong_encoder(entry)
- return obj
- # This is a spurious lint failure as we are gating this check
- # behind a check for six.PY2.
- if six.PY2 and isinstance(obj, long) and long > pow(2, 64): # pylint: disable=incompatible-py3-code
- return six.text_type(obj)
- elif six.PY3 and isinstance(obj, int) and int > pow(2, 64):
- return six.text_type(obj)
- else:
- return obj
- if msgpack.version >= (0, 4, 0):
- return msgpack.dumps(verylong_encoder(msg), use_bin_type=use_bin_type)
- else:
- return msgpack.dumps(verylong_encoder(msg))
- except TypeError as exc:
- # msgpack doesn't support datetime.datetime or datetime.date datatype
- # So here we convert it to a string.
- # Note that if you want to be able to decode data, you will have to wrap
- # the object in a msgpack.ExtType-object. The typenumber is not predefined,
- # (I couldn't find a list anywhere), so keep a list somewhere!
- # Also, if you wrap objects as msgpack.ExtType, you HAVE TO provide
- # an ext_hook callable when unpacking (decoding) the msgpack.ExtType object.
- # See also: https://pypi.python.org/pypi/msgpack-python
- def datetime_encode(obj):
- '''
- Convert datetime.datetime object to formatted string
- '''
- return obj.strftime('%Y%m%dT%H:%M:%S.%f')
+ return six.text_type(obj)
+ elif isinstance(obj, (datetime.datetime, datetime.date)):
+ # msgpack doesn't support datetime.datetime and datetime.date datatypes.
+ # So here we convert these types to a custom datatype.
+ # This is msgpack Extended type number 78.
+ return msgpack.ExtType(78, salt.utils.stringutils.to_bytes(
+ obj.strftime('%Y%m%dT%H:%M:%S.%f')))
+ # The same for immutable types
+ elif isinstance(obj, immutabletypes.ImmutableDict):
+ return dict(obj)
+ elif isinstance(obj, immutabletypes.ImmutableList):
+ return list(obj)
+ elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
+ # msgpack can't handle set so translate it to tuple
+ return tuple(obj)
+ # No known exception types found. Let msgpack raise its own.
+ return obj
- def date_encode(obj):
- '''
- Convert datetime.date object to formatted string
- '''
- return obj.strftime('%Y%m%d')
-
- def recursive_encoder(obj, datatype, fn_encode):
- '''
- Recursively encodes every instance of datatype found in obj,
- which can be a dict, list or value of type datatype.
- Uses fn_encode to do the encoding.
- '''
- if datatype in [list, dict, tuple]:
- raise TypeError('Recursive_encoder called with '
- 'unsupported datatype: {}'
- .format(datatype))
- if isinstance(obj, dict):
- for key, value in six.iteritems(obj.copy()):
- encodedkey = recursive_encoder(key, datatype, fn_encode)
- if key != encodedkey:
- del obj[key]
- key = encodedkey
- obj[key] = recursive_encoder(value, datatype, fn_encode)
- return dict(obj)
- elif isinstance(obj, (list, tuple)):
- obj = list(obj)
- for idx, entry in enumerate(obj):
- obj[idx] = recursive_encoder(entry, datatype, fn_encode)
- return obj
- if isinstance(obj, datatype):
- return fn_encode(obj)
- else:
- return obj
-
- def immutable_encoder(obj):
- '''
- Convert immutable dict,list,set to regular dict,list,set
- '''
- log.debug('IMMUTABLE OBJ: %s', obj)
- if isinstance(obj, immutabletypes.ImmutableDict):
- return dict(obj)
- if isinstance(obj, immutabletypes.ImmutableList):
- return list(obj)
- if isinstance(obj, immutabletypes.ImmutableSet):
- return set(obj)
-
- fixed_message_data = None
-
- if 'datetime.datetime' in six.text_type(exc):
- fixed_message_data = recursive_encoder(msg, datetime.datetime, datetime_encode)
- elif 'datetime.date' in six.text_type(exc):
- fixed_message_data = recursive_encoder(msg, datetime.date, date_encode)
- elif 'Immutable' in six.text_type(exc):
- fixed_message_data = immutable_encoder(msg)
-
- if fixed_message_data is not None:
- msg = fixed_message_data
- # Retry packing the data, just in case it contains multiple
- # unsupported types
- continue
-
- if msgpack.version >= (0, 2, 0):
- # Should support OrderedDict serialization, so, let's
- # raise the exception
- raise
-
- # msgpack is < 0.2.0, let's make its life easier
- # Since OrderedDict is identified as a dictionary, we can't
- # make use of msgpack custom types, we will need to convert by
- # hand.
- # This means iterating through all elements of a dictionary or
- # list/tuple
- def odict_encoder(obj):
- if isinstance(obj, dict):
- for key, value in six.iteritems(obj.copy()):
- obj[key] = odict_encoder(value)
- return dict(obj)
- elif isinstance(obj, (list, tuple)):
- obj = list(obj)
- for idx, entry in enumerate(obj):
- obj[idx] = odict_encoder(entry)
- return obj
+ try:
+ if msgpack.version >= (0, 4, 0):
+ # msgpack only supports 'use_bin_type' starting in 0.4.0.
+ # Due to this, if we don't need it, don't pass it at all so
+ # that under Python 2 we can still work with older versions
+ # of msgpack.
+ return msgpack.dumps(msg, default=ext_type_encoder, use_bin_type=use_bin_type)
+ else:
+ return msgpack.dumps(msg, default=ext_type_encoder)
+ except (OverflowError, msgpack.exceptions.PackValueError):
+ # msgpack<=0.4.6 doesn't call the ext encoder on very long integers, raising the error instead.
+ # Convert any very long longs to strings and call dumps again.
+ def verylong_encoder(obj):
+ if isinstance(obj, dict):
+ for key, value in six.iteritems(obj.copy()):
+ obj[key] = verylong_encoder(value)
+ return dict(obj)
+ elif isinstance(obj, (list, tuple)):
+ obj = list(obj)
+ for idx, entry in enumerate(obj):
+ obj[idx] = verylong_encoder(entry)
return obj
- # We're on < 0.2.0, so no use_bin_type is possible
- return msgpack.dumps(odict_encoder(msg))
- except (SystemError, TypeError) as exc: # pylint: disable=W0705
- log.critical(
- 'Unable to serialize message! Consider upgrading msgpack. '
- 'Message which failed was %s, with exception %s', msg, exc
- )
- break
+ # A value of an Integer object is limited from -(2^63) up to (2^64)-1 by MessagePack
+ # spec. Here we care only of JIDs that are positive integers.
+ if isinstance(obj, six.integer_types) and obj >= pow(2, 64):
+ return six.text_type(obj)
+ else:
+ return obj
+
+ msg = verylong_encoder(msg)
+ if msgpack.version >= (0, 4, 0):
+ return msgpack.dumps(msg, default=ext_type_encoder, use_bin_type=use_bin_type)
+ else:
+ return msgpack.dumps(msg, default=ext_type_encoder)
def dump(self, msg, fn_):
'''
diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py
index a70f7f9bdd..ceecaa0256 100644
--- a/salt/pillar/__init__.py
+++ b/salt/pillar/__init__.py
@@ -469,10 +469,7 @@ class Pillar(object):
'''
Pull the file server environments out of the master options
'''
- envs = set(['base'])
- if 'file_roots' in self.opts:
- envs.update(list(self.opts['file_roots']))
- return envs
+ return set(['base']) | set(self.opts.get('file_roots', []))
def get_tops(self):
'''
@@ -484,6 +481,7 @@ class Pillar(object):
errors = []
# Gather initial top files
try:
+ saltenvs = set()
if self.opts['pillarenv']:
# If the specified pillarenv is not present in the available
# pillar environments, do not cache the pillar top file.
@@ -494,42 +492,24 @@ class Pillar(object):
self.opts['pillarenv'], ', '.join(self.opts['file_roots'])
)
else:
- top = self.client.cache_file(self.opts['state_top'], self.opts['pillarenv'])
- if top:
- tops[self.opts['pillarenv']] = [
- compile_template(
- top,
- self.rend,
- self.opts['renderer'],
- self.opts['renderer_blacklist'],
- self.opts['renderer_whitelist'],
- self.opts['pillarenv'],
- _pillar_rend=True,
- )
- ]
+ saltenvs.add(self.opts['pillarenv'])
else:
- for saltenv in self._get_envs():
- if self.opts.get('pillar_source_merging_strategy', None) == "none":
- if self.saltenv and saltenv != self.saltenv:
- continue
- if not self.saltenv and not saltenv == 'base':
- continue
- top = self.client.cache_file(
- self.opts['state_top'],
- saltenv
- )
- if top:
- tops[saltenv].append(
- compile_template(
- top,
- self.rend,
- self.opts['renderer'],
- self.opts['renderer_blacklist'],
- self.opts['renderer_whitelist'],
- saltenv=saltenv,
- _pillar_rend=True,
- )
- )
+ saltenvs = self._get_envs()
+ if self.opts.get('pillar_source_merging_strategy', None) == "none":
+ saltenvs &= set([self.saltenv or 'base'])
+
+ for saltenv in saltenvs:
+ top = self.client.cache_file(self.opts['state_top'], saltenv)
+ if top:
+ tops[saltenv].append(compile_template(
+ top,
+ self.rend,
+ self.opts['renderer'],
+ self.opts['renderer_blacklist'],
+ self.opts['renderer_whitelist'],
+ saltenv=saltenv,
+ _pillar_rend=True,
+ ))
except Exception as exc:
errors.append(
('Rendering Primary Top file failed, render error:\n{0}'
@@ -912,7 +892,8 @@ class Pillar(object):
self.opts,
self.ext['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
git_pillar.fetch_remotes()
except TypeError:
# Handle malformed ext_pillar
diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py
index a011fb88a1..1e2cc15a95 100644
--- a/salt/pillar/git_pillar.py
+++ b/salt/pillar/git_pillar.py
@@ -347,6 +347,7 @@ from salt.ext import six
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
PER_REMOTE_ONLY = ('name', 'mountpoint')
+GLOBAL_ONLY = ('base', 'branch')
# Set up logging
log = logging.getLogger(__name__)
@@ -385,7 +386,8 @@ def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument
opts,
repos,
per_remote_overrides=PER_REMOTE_OVERRIDES,
- per_remote_only=PER_REMOTE_ONLY)
+ per_remote_only=PER_REMOTE_ONLY,
+ global_only=GLOBAL_ONLY)
if __opts__.get('__role') == 'minion':
# If masterless, fetch the remotes. We'll need to remove this once
# we make the minion daemon able to run standalone.
diff --git a/salt/pillar/http_json.py b/salt/pillar/http_json.py
index e24b80a2f4..5f0fd9a7fa 100644
--- a/salt/pillar/http_json.py
+++ b/salt/pillar/http_json.py
@@ -29,7 +29,7 @@ in <> brackets) in the url in order to populate pillar data based on the grain v
.. versionchanged:: 2018.3.0
- If %s is present in the url, it will be automaticaly replaced by the minion_id:
+ If %s is present in the url, it will be automatically replaced by the minion_id:
.. code-block:: yaml
diff --git a/salt/pillar/http_yaml.py b/salt/pillar/http_yaml.py
index e3523ad044..7ba64aef65 100644
--- a/salt/pillar/http_yaml.py
+++ b/salt/pillar/http_yaml.py
@@ -29,7 +29,7 @@ in <> brackets) in the url in order to populate pillar data based on the grain v
.. versionchanged:: 2018.3.0
- If %s is present in the url, it will be automaticaly replaced by the minion_id:
+ If %s is present in the url, it will be automatically replaced by the minion_id:
.. code-block:: yaml
diff --git a/salt/pillar/netbox.py b/salt/pillar/netbox.py
new file mode 100644
index 0000000000..5bd7ce79a4
--- /dev/null
+++ b/salt/pillar/netbox.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+'''
+A module that adds data to the Pillar structure from a NetBox API.
+
+
+Configuring the NetBox ext_pillar
+====================================
+
+.. code-block:: yaml
+
+ ext_pillar:
+ - netbox:
+ api_url: http://netbox_url.com/api/
+
+The following are optional, and determine whether or not the module will
+attempt to configure the ``proxy`` pillar data for use with the napalm
+proxy-minion:
+
+.. code-block:: yaml
+
+ proxy_return: True
+ proxy_username: admin
+ api_token: 123abc
+
+Create a token in your NetBox instance at
+http://netbox_url.com/user/api-tokens/
+
+By default, this module will query the NetBox API for the platform associated
+with the device, and use the 'NAPALM driver' field to set the napalm
+proxy-minion driver. (Currently only 'napalm' is supported for drivertype.)
+
+This module assumes you will use SSH keys to authenticate to the network device.
+If password authentication is desired, it is recommended to create another
+``proxy`` key in pillar_roots (or git_pillar) with just the ``passwd`` key and
+use :py:func:`salt.renderers.gpg <salt.renderers.gpg>` to encrypt the value.
+If any additional options for the proxy setup are needed they should also be
+configured in pillar_roots.
+'''
+
+
+from __future__ import absolute_import, print_function, unicode_literals
+import logging
+
+try:
+ import requests
+ import ipaddress
+ _HAS_DEPENDENCIES = True
+except ImportError:
+ _HAS_DEPENDENCIES = False
+
+log = logging.getLogger(__name__)
+
+
+def __virtual__():
+ return _HAS_DEPENDENCIES
+
+
+def ext_pillar(minion_id, pillar, *args, **kwargs):
+ '''
+ Query NetBox API for minion data
+ '''
+
+ # Pull settings from kwargs
+ api_url = kwargs['api_url'].rstrip('/')
+
+ api_token = kwargs.get('api_token', None)
+ proxy_username = kwargs.get('proxy_username', None)
+ proxy_return = kwargs.get('proxy_return', True)
+
+ ret = {}
+
+ headers = {}
+ if api_token:
+ headers['Authorization'] = 'Token ' + api_token
+
+ # Fetch device from API
+ device_results = requests.get(
+ api_url + '/dcim/devices/',
+ params={'name': minion_id, },
+ headers=headers,
+ )
+
+ # Check status code for API call
+ if device_results.status_code != requests.codes.ok:
+ log.warn('API query failed for "%s", status code: %d',
+ minion_id, device_results.status_code)
+
+ # Assign results from API call to "netbox" key
+ try:
+ devices = device_results.json()['results']
+ if len(devices) == 1:
+ ret['netbox'] = devices[0]
+ elif len(devices) > 1:
+ log.error('More than one device found for "%s"', minion_id)
+ except Exception:
+ log.error('Device not found for "%s"', minion_id)
+ if proxy_return:
+ # Attempt to add "proxy" key, based on platform API call
+ try:
+ # Fetch device from API
+ platform_results = requests.get(
+ ret['netbox']['platform']['url'],
+ headers=headers,
+ )
+
+ # Check status code for API call
+ if platform_results.status_code != requests.codes.ok:
+ log.info('API query failed for "%s", status code: %d',
+ minion_id, platform_results.status_code)
+
+ # Assign results from API call to "proxy" key if the platform has a
+ # napalm_driver defined.
+ napalm_driver = platform_results.json().get('napalm_driver')
+ if napalm_driver:
+ ret['proxy'] = {
+ 'host': str(ipaddress.IPv4Interface(
+ ret['netbox']['primary_ip4']['address']).ip),
+ 'driver': napalm_driver,
+ 'proxytype': 'napalm',
+ }
+
+ if proxy_username:
+ ret['proxy']['username'] = proxy_username
+
+ except Exception:
+ log.debug(
+ 'Could not create proxy config data for "%s"', minion_id)
+
+ return ret
diff --git a/salt/pillar/pillar_ldap.py b/salt/pillar/pillar_ldap.py
index 696f66d51c..5fb3a60d31 100644
--- a/salt/pillar/pillar_ldap.py
+++ b/salt/pillar/pillar_ldap.py
@@ -58,8 +58,9 @@ The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- filtering for:
- members of the group ``it-admins``
- objects with ``objectclass=user``
-- returning the data of users (``mode: map``), where each user is a dictionary
- containing the configured string or list attributes.
+- returning the data of users (``mode: map``) as a list of dictionaries, where
+ each user is a dictionary containing the configured string or list attributes,
+ and the user dictionaries are combined into a list.
**Configuration:**
@@ -106,6 +107,118 @@ The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
- cn=team02,ou=groups,dc=company
+Dict Mode
+---------
+
+The ``it-admins`` configuration below returns the Pillar ``it-admins`` by:
+
+- filtering for:
+ - members of the group ``it-admins``
+ - objects with ``objectclass=user``
+- returning the data of users (``mode: dict``), where each user is a dictionary
+ containing the configured string or list attributes, and the user dictionaries
+ are combined into a dictionary using the value of the LDAP attribute defined in the
+ ``dict_key_attr`` configuration option (defaults to ``dn`` or ``distinguishedName``)
+ as the key.
+
+
+ **Configuration:**
+
+.. code-block:: yaml
+
+ salt-users:
+ server: ldap.company.tld
+ port: 389
+ tls: true
+ dn: 'dc=company,dc=tld'
+ binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
+ bindpw: bi7ieBai5Ano
+ referrals: false
+ anonymous: false
+ mode: dict
+ dn: 'ou=users,dc=company,dc=tld'
+ filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
+ attrs:
+ - cn
+ - displayName
+ - givenName
+ - sn
+ lists:
+ - memberOf
+
+
+ **Result:**
+
+.. code-block:: yaml
+
+ salt-users:
+ cn=johndoe,ou=users,dc=company,dc=tld:
+ - cn: cn=johndoe,ou=users,dc=company,dc=tld
+ displayName: John Doe
+ givenName: John
+ sn: Doe
+ memberOf:
+ - cn=it-admins,ou=groups,dc=company,dc=tld
+ - cn=team01,ou=groups,dc=company
+ cn=janedoe,ou=users,dc=company,dc=tld:
+ - cn: cn=janedoe,ou=users,dc=company,dc=tld
+ displayName: Jane Doe
+ givenName: Jane
+ sn: Doe
+ memberOf:
+ - cn=it-admins,ou=groups,dc=company,dc=tld
+ - cn=team02,ou=groups,dc=company
+
+
+ **Configuration:**
+
+.. code-block:: yaml
+
+ salt-users:
+ server: ldap.company.tld
+ port: 389
+ tls: true
+ dn: 'dc=company,dc=tld'
+ binddn: 'cn=salt-pillars,ou=users,dc=company,dc=tld'
+ bindpw: bi7ieBai5Ano
+ referrals: false
+ anonymous: false
+ mode: dict
+ dict_key_attr: displayName
+ dn: 'ou=users,dc=company,dc=tld'
+ filter: '(&(memberof=cn=it-admins,ou=groups,dc=company,dc=tld)(objectclass=user))'
+ attrs:
+ - dn
+ - cn
+ - givenName
+ - sn
+ lists:
+ - memberOf
+
+
+ **Result:**
+
+.. code-block:: yaml
+
+ salt-users:
+ John Doe:
+ - dn: cn=johndoe,ou=users,dc=company,dc=tld
+ cn: cn=johndoe,ou=users,dc=company,dc=tld
+ givenName: John
+ sn: Doe
+ memberOf:
+ - cn=it-admins,ou=groups,dc=company,dc=tld
+ - cn=team01,ou=groups,dc=company
+ Jane Doe:
+ - dn: cn=janedoe,ou=users,dc=company,dc=tld
+ cn: cn=janedoe,ou=users,dc=company,dc=tld
+ givenName: Jane
+ sn: Doe
+ memberOf:
+ - cn=it-admins,ou=groups,dc=company,dc=tld
+ - cn=team02,ou=groups,dc=company
+
+
List Mode
---------
@@ -193,6 +306,7 @@ def _result_to_dict(data, result, conf, source):
'''
attrs = _config('attrs', conf) or []
lists = _config('lists', conf) or []
+ dict_key_attr = _config('dict_key_attr', conf) or 'dn'
# TODO:
# deprecate the default 'mode: split' and make the more
# straightforward 'mode: dict' the new default
@@ -213,6 +327,30 @@ def _result_to_dict(data, result, conf, source):
if key in lists:
ret[key] = record.get(key)
data[source].append(ret)
+ elif mode == 'dict':
+ data[source] = {}
+ for record in result:
+ ret = {}
+ distinguished_name = record[0]
+ log.debug('dn: %s', distinguished_name)
+ if 'dn' in attrs or 'distinguishedName' in attrs:
+ ret['dn'] = distinguished_name
+ record = record[1]
+ log.debug('record: %s', record)
+ for key in record:
+ if key in attrs:
+ for item in record.get(key):
+ ret[key] = item
+ if key in lists:
+ ret[key] = record.get(key)
+ if dict_key_attr in ['dn', 'distinguishedName']:
+ dict_key = distinguished_name
+ else:
+ dict_key = ','.join(sorted(record.get(dict_key_attr, [])))
+ try:
+ data[source][dict_key].append(ret)
+ except KeyError:
+ data[source][dict_key] = [ret]
elif mode == 'split':
for key in result[0][1]:
if key in attrs:
@@ -250,7 +388,8 @@ def _do_search(conf):
scope = _config('scope', conf)
_lists = _config('lists', conf) or []
_attrs = _config('attrs', conf) or []
- attrs = _lists + _attrs
+ _dict_key_attr = _config('dict_key_attr', conf) or 'dn'
+ attrs = _lists + _attrs + [_dict_key_attr]
if not attrs:
attrs = None
# Perform the search
diff --git a/salt/pillar/stack.py b/salt/pillar/stack.py
index 554942981d..1363f311bd 100644
--- a/salt/pillar/stack.py
+++ b/salt/pillar/stack.py
@@ -413,7 +413,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs):
stack_config_files += cfgs
for cfg in stack_config_files:
if not os.path.isfile(cfg):
- log.warning(
+ log.info(
'Ignoring pillar stack cfg "%s": file does not exist', cfg)
continue
stack = _process_stack_cfg(cfg, stack, minion_id, pillar)
@@ -424,10 +424,6 @@ def _to_unix_slashes(path):
return posixpath.join(*path.split(os.sep))
-def _construct_unicode(loader, node):
- return node.value
-
-
def _process_stack_cfg(cfg, stack, minion_id, pillar):
log.debug('Config: %s', cfg)
basedir, filename = os.path.split(cfg)
@@ -437,7 +433,8 @@ def _process_stack_cfg(cfg, stack, minion_id, pillar):
"__salt__": __salt__,
"__grains__": __grains__,
"__stack__": {
- 'traverse': salt.utils.data.traverse_dict_and_list
+ 'traverse': salt.utils.data.traverse_dict_and_list,
+ 'cfg_path': cfg,
},
"minion_id": minion_id,
"pillar": pillar,
@@ -448,7 +445,7 @@ def _process_stack_cfg(cfg, stack, minion_id, pillar):
continue # silently ignore whitespace or empty lines
paths = glob.glob(os.path.join(basedir, item))
if not paths:
- log.warning(
+ log.info(
'Ignoring pillar stack template "%s": can\'t find from root '
'dir "%s"', item, basedir
)
@@ -457,7 +454,7 @@ def _process_stack_cfg(cfg, stack, minion_id, pillar):
log.debug('YAML: basedir=%s, path=%s', basedir, path)
# FileSystemLoader always expects unix-style paths
unix_path = _to_unix_slashes(os.path.relpath(path, basedir))
- obj = salt.utils.yaml.safe_load(jenv.get_template(unix_path).render(stack=stack))
+ obj = salt.utils.yaml.safe_load(jenv.get_template(unix_path).render(stack=stack, ymlpath=path))
if not isinstance(obj, dict):
log.info('Ignoring pillar stack template "%s": Can\'t parse '
'as a valid yaml dictionary', path)
diff --git a/salt/pillar/vault.py b/salt/pillar/vault.py
index 80b830fbdf..d4fc339f43 100644
--- a/salt/pillar/vault.py
+++ b/salt/pillar/vault.py
@@ -52,9 +52,6 @@ Multiple Vault sources may also be used:
from __future__ import absolute_import, print_function, unicode_literals
import logging
-# Import Salt libs
-import salt.utils.versions
-
log = logging.getLogger(__name__)
__func_alias__ = {
@@ -77,12 +74,6 @@ def ext_pillar(minion_id, # pylint: disable=W0613
'''
comps = conf.split()
- if not comps[0].startswith('path='):
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'The \'profile\' argument has been deprecated. Any parts up until '
- 'and following the first "path=" are discarded'
- )
paths = [comp for comp in comps if comp.startswith('path=')]
if not paths:
log.error('"%s" is not a valid Vault ext_pillar config', conf)
diff --git a/salt/renderers/aws_kms.py b/salt/renderers/aws_kms.py
new file mode 100644
index 0000000000..ed5f2dae37
--- /dev/null
+++ b/salt/renderers/aws_kms.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+r'''
+Renderer that will decrypt ciphers encrypted using `AWS KMS Envelope Encryption`_.
+
+.. _`AWS KMS Envelope Encryption`: https://docs.aws.amazon.com/kms/latest/developerguide/workflow.html
+
+Any key in the data to be rendered can be a urlsafe_b64encoded string, and this renderer will attempt
+to decrypt it before passing it off to Salt. This allows you to safely store secrets in
+source control, in such a way that only your Salt master can decrypt them and
+distribute them only to the minions that need them.
+
+The typical use-case would be to use ciphers in your pillar data, and keep the encrypted
+data key on your master. This way developers with appropriate AWS IAM privileges can add new secrets
+quickly and easily.
+
+This renderer requires the boto3_ Python library.
+
+.. _boto3: https://boto3.readthedocs.io/
+
+Setup
+-----
+
+First, set up your AWS client. For complete instructions on configuration the AWS client,
+please read the `boto3 configuration documentation`_. By default, this renderer will use
+the default AWS profile. You can override the profile name in salt configuration.
+For example, if you have a profile in your aws client configuration named "salt",
+you can add the following salt configuration:
+
+.. code-block:: yaml
+
+ aws_kms:
+ profile_name: salt
+
+.. _boto3 configuration documentation: https://boto3.readthedocs.io/en/latest/guide/configuration.html
+
+The rest of these instructions assume that you will use the default profile for key generation
+and setup. If not, export AWS_PROFILE and set it to the desired value.
+
+Once the aws client is configured, generate a KMS customer master key and use that to generate
+a local data key.
+
+.. code-block:: bash
+
+ # data_key=$(aws kms generate-data-key --key-id your-key-id --key-spec AES_256
+ --query 'CiphertextBlob' --output text)
+ # echo 'aws_kms:' >> config/master
+ # printf '  data_key: !!binary "%s"\n' "$data_key" >> config/master
+
+To apply the renderer on a file-by-file basis add the following line to the
+top of any pillar with aws_kms data in it:
+
+.. code-block:: yaml
+
+ #!yaml|aws_kms
+
+Now with your renderer configured, you can include your ciphers in your pillar
+data like so:
+
+.. code-block:: yaml
+
+ #!yaml|aws_kms
+
+ a-secret: gAAAAABaj5uzShPI3PEz6nL5Vhk2eEHxGXSZj8g71B84CZsVjAAtDFY1mfjNRl-1Su9YVvkUzNjI4lHCJJfXqdcTvwczBYtKy0Pa7Ri02s10Wn1tF0tbRwk=
+'''
+
+# Import python libs
+from __future__ import absolute_import, print_function, unicode_literals
+import logging
+import base64
+
+# Import salt libs
+import salt.utils.stringio
+
+# Import 3rd-party libs
+from salt.ext import six
+
+try:
+ import botocore.exceptions
+ import boto3
+ logging.getLogger('boto3').setLevel(logging.CRITICAL)
+except ImportError:
+ pass
+
+try:
+ import cryptography.fernet as fernet
+ HAS_FERNET = True
+except ImportError:
+ HAS_FERNET = False
+
+
+def __virtual__():
+ '''
+ Only load if the cryptography (fernet) and boto3 libraries are available and
+ boto meets the minimum version requirements.
+ '''
+ return HAS_FERNET and salt.utils.versions.check_boto_reqs()
+
+log = logging.getLogger(__name__)
+
+
+def _cfg(key, default=None):
+ '''
+ Return the requested value from the aws_kms key in salt configuration.
+
+ If it's not set, return the default.
+ '''
+ root_cfg = __salt__.get('config.get', __opts__.get)
+ kms_cfg = root_cfg('aws_kms', {})
+ return kms_cfg.get(key, default)
+
+
+def _cfg_data_key():
+ '''
+ Return the encrypted KMS data key from configuration.
+
+ Raises SaltConfigurationError if not set.
+ '''
+ data_key = _cfg('data_key', '')
+ if data_key:
+ return data_key
+ raise salt.exceptions.SaltConfigurationError('aws_kms:data_key is not set')
+
+
+def _session():
+ '''
+ Return the boto3 session to use for the KMS client.
+
+ If aws_kms:profile_name is set in the salt configuration, use that profile.
+ Otherwise, fall back on the default aws profile.
+
+ We use the boto3 profile system to avoid having to duplicate
+ individual boto3 configuration settings in salt configuration.
+ '''
+ profile_name = _cfg('profile_name')
+ if profile_name:
+ log.info('Using the "%s" aws profile.', profile_name)
+ else:
+ log.info('aws_kms:profile_name is not set in salt. Falling back on default profile.')
+ try:
+ return boto3.Session(profile_name=profile_name)
+ except botocore.exceptions.ProfileNotFound as orig_exc:
+ err_msg = 'Boto3 could not find the "{}" profile configured in Salt.'.format(
+ profile_name or 'default')
+ config_error = salt.exceptions.SaltConfigurationError(err_msg)
+ six.raise_from(config_error, orig_exc)
+ except botocore.exceptions.NoRegionError as orig_exc:
+ err_msg = ('Boto3 was unable to determine the AWS '
+ 'endpoint region using the {} profile.').format(profile_name or 'default')
+ config_error = salt.exceptions.SaltConfigurationError(err_msg)
+ six.raise_from(config_error, orig_exc)
+
+
+def _kms():
+ '''
+ Return the boto3 client for the KMS API.
+ '''
+ session = _session()
+ return session.client('kms')
+
+
+def _api_decrypt():
+ '''
+ Return the response dictionary from the KMS decrypt API call.
+ '''
+ kms = _kms()
+ data_key = _cfg_data_key()
+ try:
+ return kms.decrypt(CiphertextBlob=data_key)
+ except botocore.exceptions.ClientError as orig_exc:
+ error_code = orig_exc.response.get('Error', {}).get('Code', '')
+ if error_code != 'InvalidCiphertextException':
+ raise
+ err_msg = 'aws_kms:data_key is not a valid KMS data key'
+ config_error = salt.exceptions.SaltConfigurationError(err_msg)
+ six.raise_from(config_error, orig_exc)
+
+
+def _plaintext_data_key():
+ '''
+ Return the configured KMS data key decrypted and encoded in urlsafe base64.
+
+ Cache the result to minimize API calls to AWS.
+ '''
+ response = getattr(_plaintext_data_key, 'response', None)
+ cache_hit = response is not None
+ if not cache_hit:
+ response = _api_decrypt()
+ setattr(_plaintext_data_key, 'response', response)
+ key_id = response['KeyId']
+ plaintext = response['Plaintext']
+ if hasattr(plaintext, 'encode'):
+ plaintext = plaintext.encode(__salt_system_encoding__)
+ log.debug('Using key %s from %s', key_id, 'cache' if cache_hit else 'api call')
+ return plaintext
+
+
+def _base64_plaintext_data_key():
+ '''
+ Return the configured KMS data key decrypted and encoded in urlsafe base64.
+ '''
+ plaintext_data_key = _plaintext_data_key()
+ return base64.urlsafe_b64encode(plaintext_data_key)
+
+
+def _decrypt_ciphertext(cipher, translate_newlines=False):
+ '''
+ Given a blob of ciphertext as a bytestring, try to decrypt
+ the cipher and return the decrypted string. If the cipher cannot be
+ decrypted, log the error, and return the ciphertext back out.
+ '''
+ if translate_newlines:
+ cipher = cipher.replace(r'\n', '\n')
+ if hasattr(cipher, 'encode'):
+ cipher = cipher.encode(__salt_system_encoding__)
+
+ # Decryption
+ data_key = _base64_plaintext_data_key()
+ plain_text = fernet.Fernet(data_key).decrypt(cipher)
+ if hasattr(plain_text, 'decode'):
+ plain_text = plain_text.decode(__salt_system_encoding__)
+ return six.text_type(plain_text)
+
+
+def _decrypt_object(obj, translate_newlines=False):
+ '''
+ Recursively try to decrypt any object.
+ Recur on objects that are not strings.
+ Decrypt strings that are valid Fernet tokens.
+ Return the rest unchanged.
+ '''
+ if salt.utils.stringio.is_readable(obj):
+ return _decrypt_object(obj.getvalue(), translate_newlines)
+ if isinstance(obj, six.string_types):
+ try:
+ return _decrypt_ciphertext(obj,
+ translate_newlines=translate_newlines)
+ except (fernet.InvalidToken, TypeError):
+ return obj
+
+ elif isinstance(obj, dict):
+ for key, value in six.iteritems(obj):
+ obj[key] = _decrypt_object(value,
+ translate_newlines=translate_newlines)
+ return obj
+ elif isinstance(obj, list):
+ for key, value in enumerate(obj):
+ obj[key] = _decrypt_object(value,
+ translate_newlines=translate_newlines)
+ return obj
+ else:
+ return obj
+
+
+def render(data, saltenv='base', sls='', argline='', **kwargs): # pylint: disable=unused-argument
+ '''
+ Decrypt the data to be rendered that was encrypted using AWS KMS envelope encryption.
+ '''
+ translate_newlines = kwargs.get('translate_newlines', False)
+ return _decrypt_object(data, translate_newlines=translate_newlines)
diff --git a/salt/roster/cache.py b/salt/roster/cache.py
index 6b5c94776b..5dfd160891 100644
--- a/salt/roster/cache.py
+++ b/salt/roster/cache.py
@@ -93,17 +93,16 @@ This should be especially useful for the other roster keys:
- ssh:auth:private_key
'''
-from __future__ import absolute_import, print_function, unicode_literals
-# Python
+# Import Python libs
+from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
import copy
-# Salt libs
+# Import Salt libs
import salt.utils.data
import salt.utils.minions
-import salt.utils.versions
import salt.cache
from salt._compat import ipaddress
from salt.ext import six
@@ -132,25 +131,6 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
roster_order = __opts__.get('roster_order', {
'host': ('ipv6-private', 'ipv6-global', 'ipv4-private', 'ipv4-public')
})
- if isinstance(roster_order, (tuple, list)):
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'Using legacy syntax for roster_order'
- )
- roster_order = {
- 'host': roster_order
- }
- for config_key, order in roster_order.items():
- for idx, key in enumerate(order):
- if key in ('public', 'private', 'local'):
- salt.utils.versions.warn_until(
- 'Fluorine',
- 'roster_order {0} will include IPv6 soon. '
- 'Set order to ipv4-{0} if needed.'.format(key)
- )
- order[idx] = 'ipv4-' + key
-
- # log.debug(roster_order)
ret = {}
for minion_id in minions:
diff --git a/salt/runners/cache.py b/salt/runners/cache.py
index 0e4da67431..9f616c697f 100644
--- a/salt/runners/cache.py
+++ b/salt/runners/cache.py
@@ -355,7 +355,8 @@ def clear_git_lock(role, remote=None, **kwargs):
__opts__,
ext_pillar['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
git_objects.append(obj)
elif role == 'winrepo':
winrepo_dir = __opts__['winrepo_dir']
@@ -371,6 +372,7 @@ def clear_git_lock(role, remote=None, **kwargs):
remotes,
per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
+ global_only=salt.runners.winrepo.GLOBAL_ONLY,
cache_root=base_dir)
git_objects.append(obj)
else:
diff --git a/salt/runners/fileserver.py b/salt/runners/fileserver.py
index e4abc256de..27391ba0c2 100644
--- a/salt/runners/fileserver.py
+++ b/salt/runners/fileserver.py
@@ -164,6 +164,20 @@ def file_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+ .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what files are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master
+ <salt.modules.cp.list_master>` to see what files the minion can see,
+ and always remember to restart the salt-master daemon when updating
+ the fileserver configuration.
+
CLI Examples:
.. code-block:: bash
@@ -196,6 +210,20 @@ def symlink_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+ .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what symlinks are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master_symlinks
+ <salt.modules.cp.list_master_symlinks>` to see what symlinks the minion
+ can see, and always remember to restart the salt-master daemon when
+ updating the fileserver configuration.
+
CLI Example:
.. code-block:: bash
@@ -228,6 +256,20 @@ def dir_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+ .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what dirs are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master_dirs
+ <salt.modules.cp.list_master_dirs>` to see what dirs the minion can see,
+ and always remember to restart the salt-master daemon when updating
+ the fileserver configuration.
+
CLI Example:
.. code-block:: bash
diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py
index b567891bac..436740b10c 100644
--- a/salt/runners/git_pillar.py
+++ b/salt/runners/git_pillar.py
@@ -70,7 +70,8 @@ def update(branch=None, repo=None):
__opts__,
pillar_conf,
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
diff --git a/salt/runners/http.py b/salt/runners/http.py
index 3cae7daca1..c0f2998096 100644
--- a/salt/runners/http.py
+++ b/salt/runners/http.py
@@ -19,7 +19,8 @@ def query(url, output=True, **kwargs):
'''
Query a resource, and decode the return data
- .. versionadded:: 2015.5.0
+ Passes through all the parameters described in the
+ :py:func:`utils.http.query <salt.utils.http.query>` function:
CLI Example:
diff --git a/salt/runners/nacl.py b/salt/runners/nacl.py
index 53af19ec20..5727356258 100644
--- a/salt/runners/nacl.py
+++ b/salt/runners/nacl.py
@@ -8,9 +8,6 @@ This is often useful if you wish to store your pillars in source control or
share your pillar data with others that you trust. I don't advise making your pillars public
regardless if they are encrypted or not.
-When generating keys and encrypting passwords use --local when using salt-call for extra
-security. Also consider using just the salt runner nacl when encrypting pillar passwords.
-
:configuration: The following configuration defaults can be
define (pillar or config files) Avoid storing private keys in pillars! Ensure master does not have `pillar_opts=True`:
@@ -30,7 +27,7 @@ security. Also consider using just the salt runner nacl when encrypting pillar p
.. code-block:: bash
- salt-call nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
+ salt-run nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
The nacl lib uses 32byte keys, these keys are base64 encoded to make your life more simple.
@@ -38,9 +35,9 @@ To generate your `sk_file` and `pk_file` use:
.. code-block:: bash
- salt-call --local nacl.keygen sk_file=/etc/salt/pki/master/nacl
+ salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl
# or if you want to work without files.
- salt-call --local nacl.keygen
+ salt-run nacl.keygen
local:
----------
pk:
@@ -59,14 +56,14 @@ Sealedbox only has one key that is for both encryption and decryption.
.. code-block:: bash
- salt-call --local nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=
+ salt-run nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=
tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=
To decrypt the data:
.. code-block:: bash
- salt-call --local nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \
+ salt-run nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \
sk='SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow='
When the keys are defined in the master config you can use them from the nacl runner
@@ -94,7 +91,7 @@ The developer can then use a less-secure system to encrypt data.
.. code-block:: bash
- salt-call --local nacl.enc apassword
+ salt-run nacl.enc apassword
Pillar files can include protected data that the salt master decrypts:
@@ -111,42 +108,7 @@ Larger files like certificates can be encrypted with:
.. code-block:: bash
- salt-call nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl
- # or more advanced
- cert=$(cat /tmp/cert.crt)
- salt-call --out=newline_values_only nacl.enc_pub data="$cert" > /tmp/cert.nacl
-
-In pillars rended with jinja be sure to include `|json` so line breaks are encoded:
-
-.. code-block:: jinja
-
- cert: "{{salt.nacl.dec('S2uogToXkgENz9...085KYt')|json}}"
-
-In states rendered with jinja it is also good pratice to include `|json`:
-
-.. code-block:: jinja
-
- {{sls}} private key:
- file.managed:
- - name: /etc/ssl/private/cert.key
- - mode: 700
- - contents: "{{pillar['pillarexample']['cert_key']|json}}"
-
-
-Optional small program to encrypt data without needing salt modules.
-
-.. code-block:: python
-
- #!/bin/python3
- import sys, base64, libnacl.sealed
- pk = base64.b64decode('YOURPUBKEY')
- b = libnacl.sealed.SealedBox(pk)
- data = sys.stdin.buffer.read()
- print(base64.b64encode(b.encrypt(data)).decode())
-
-.. code-block:: bash
-
- echo 'apassword' | nacl_enc.py
+ salt-run nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl
'''
@@ -158,6 +120,7 @@ import os
# Import Salt libs
import salt.utils.files
import salt.utils.platform
+import salt.utils.stringutils
import salt.utils.win_functions
import salt.utils.win_dacl
import salt.syspaths
@@ -186,9 +149,9 @@ def _get_config(**kwargs):
config = {
'box_type': 'sealedbox',
'sk': None,
- 'sk_file': '/etc/salt/pki/master/nacl',
+ 'sk_file': os.path.join(__opts__['pki_dir'], 'nacl'),
'pk': None,
- 'pk_file': '/etc/salt/pki/master/nacl.pub',
+ 'pk_file': os.path.join(__opts__['pki_dir'], 'nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
@@ -233,7 +196,7 @@ def _get_pk(**kwargs):
return base64.b64decode(pubkey)
-def keygen(sk_file=None, pk_file=None):
+def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
@@ -248,11 +211,20 @@ def keygen(sk_file=None, pk_file=None):
.. code-block:: bash
- salt-call nacl.keygen
- salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl
- salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
- salt-call --local nacl.keygen
+ salt-run nacl.keygen
+ salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl
+ salt-run nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
+ salt-run nacl.keygen
'''
+
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ sk_file = kwargs['keyfile']
+
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
@@ -313,6 +285,26 @@ def enc(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
+
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ kwargs['sk_file'] = kwargs['keyfile']
+
+ if 'key' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'key\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk\' argument instead.'
+ )
+ kwargs['sk'] = kwargs['key']
+
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_encrypt(data, **kwargs)
@@ -334,7 +326,6 @@ def enc_file(name, out=None, **kwargs):
.. code-block:: bash
salt-run nacl.enc_file name=/tmp/id_rsa
- salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert
salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
@@ -360,6 +351,31 @@ def dec(data, **kwargs):
box_type: secretbox, sealedbox(default)
'''
+ if 'keyfile' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'keyfile\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk_file\' argument instead.'
+ )
+ kwargs['sk_file'] = kwargs['keyfile']
+
+ # set boxtype to `secretbox` to maintain backward compatibility
+ kwargs['box_type'] = 'secretbox'
+
+ if 'key' in kwargs:
+ salt.utils.versions.warn_until(
+ 'Fluorine',
+ 'The \'key\' argument has been deprecated and will be removed in Salt '
+ '{version}. Please use \'sk\' argument instead.'
+ )
+ kwargs['sk'] = kwargs['key']
+
+ # set boxtype to `secretbox` to maintain backward compatibility
+ kwargs['box_type'] = 'secretbox'
+
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
box_type = _get_config(**kwargs)['box_type']
if box_type == 'sealedbox':
return sealedbox_decrypt(data, **kwargs)
@@ -381,7 +397,6 @@ def dec_file(name, out=None, **kwargs):
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
- salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
@@ -411,9 +426,10 @@ def sealedbox_encrypt(data, **kwargs):
.. code-block:: bash
salt-run nacl.sealedbox_encrypt datatoenc
- salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
- salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
@@ -427,12 +443,16 @@ def sealedbox_decrypt(data, **kwargs):
.. code-block:: bash
- salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A=
- salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
- salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
+ salt-run nacl.sealedbox_decrypt pEXHQM6cuaF7A=
+ salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
+ salt-run nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
+
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
@@ -449,9 +469,12 @@ def secretbox_encrypt(data, **kwargs):
.. code-block:: bash
salt-run nacl.secretbox_encrypt datatoenc
- salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
- salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
+ salt-run nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
+ salt-run nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
@@ -466,12 +489,16 @@ def secretbox_decrypt(data, **kwargs):
.. code-block:: bash
- salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A=
- salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
- salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
+ salt-run nacl.secretbox_decrypt pEXHQM6cuaF7A=
+ salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
+ salt-run nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
+
+ # ensure data is bytes
+ data = salt.utils.stringutils.to_bytes(data)
+
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))
diff --git a/salt/runners/saltutil.py b/salt/runners/saltutil.py
index 4a6e7b3ed2..b61aa2bef6 100644
--- a/salt/runners/saltutil.py
+++ b/salt/runners/saltutil.py
@@ -63,6 +63,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
ret['fileserver'] = sync_fileserver(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['tops'] = sync_tops(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['tokens'] = sync_eauth_tokens(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
+ ret['serializers'] = sync_serializers(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
return ret
@@ -580,3 +581,29 @@ def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=No
'''
return salt.utils.extmods.sync(__opts__, 'tokens', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
+
+
+def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
+ '''
+ .. versionadded:: Fluorine
+
+ Sync serializer modules from ``salt://_serializers`` to the master
+
+ saltenv : base
+ The fileserver environment from which to sync. To sync from more than
+ one environment, pass a comma-separated list.
+
+ extmod_whitelist : None
+ comma-separated list of modules to sync
+
+ extmod_blacklist : None
+ comma-separated list of modules to blacklist based on type
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt-run saltutil.sync_serializers
+ '''
+ return salt.utils.extmods.sync(__opts__, 'serializers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
+ extmod_blacklist=extmod_blacklist)[0]
diff --git a/salt/runners/winrepo.py b/salt/runners/winrepo.py
index 716ba30e9b..480a3138b6 100644
--- a/salt/runners/winrepo.py
+++ b/salt/runners/winrepo.py
@@ -37,6 +37,7 @@ PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
# runners and other modules that import salt.runners.winrepo.
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
+GLOBAL_ONLY = ('branch',)
def genrepo(opts=None, fire_event=True):
@@ -164,7 +165,8 @@ def update_git_repos(opts=None, clean=False, masterless=False):
ret = {}
for remotes, base_dir in winrepo_cfg:
- if not any((salt.utils.gitfs.HAS_GITPYTHON, salt.utils.gitfs.HAS_PYGIT2)):
+ if not any((salt.utils.gitfs.GITPYTHON_VERSION,
+ salt.utils.gitfs.PYGIT2_VERSION)):
# Use legacy code
winrepo_result = {}
for remote_info in remotes:
@@ -217,6 +219,7 @@ def update_git_repos(opts=None, clean=False, masterless=False):
remotes,
per_remote_overrides=PER_REMOTE_OVERRIDES,
per_remote_only=PER_REMOTE_ONLY,
+ global_only=GLOBAL_ONLY,
cache_root=base_dir)
winrepo.fetch_remotes()
# Since we're not running update(), we need to manually call
diff --git a/salt/sdb/vault.py b/salt/sdb/vault.py
index 6ef616b287..598054333b 100644
--- a/salt/sdb/vault.py
+++ b/salt/sdb/vault.py
@@ -27,7 +27,7 @@ Once configured you can access data using a URL such as:
.. code-block:: yaml
- password: sdb://myvault/secret/passwords?mypassword
+ password: sdb://myvault/secret/passwords/mypassword
In this URL, ``myvault`` refers to the configuration profile,
``secret/passwords`` is the path where the data resides, and ``mypassword`` is
@@ -56,9 +56,17 @@ def set_(key, value, profile=None):
'''
Set a key/value pair in the vault service
'''
- comps = key.split('?')
- path = comps[0]
- key = comps[1]
+ if '?' in key:
+ __utils__['versions.warn_until'](
+ 'Neon',
+ (
+ 'Using ? to separate between the path and key for vault has been deprecated '
+ 'and will be removed in {version}. Please just use a /.'
+ ),
+ )
+ path, key = key.split('?')
+ else:
+ path, key = key.rsplit('/', 1)
try:
url = 'v1/{0}'.format(path)
@@ -81,9 +89,17 @@ def get(key, profile=None):
'''
Get a value from the vault service
'''
- comps = key.split('?')
- path = comps[0]
- key = comps[1]
+ if '?' in key:
+ __utils__['versions.warn_until'](
+ 'Neon',
+ (
+ 'Using ? to separate between the path and key for vault has been deprecated '
+ 'and will be removed in {version}. Please just use a /.'
+ ),
+ )
+ path, key = key.split('?')
+ else:
+ path, key = key.rsplit('/', 1)
try:
url = 'v1/{0}'.format(path)
diff --git a/salt/state.py b/salt/state.py
index 45511d6039..3d368b2c0f 100644
--- a/salt/state.py
+++ b/salt/state.py
@@ -720,8 +720,12 @@ class State(object):
except AttributeError:
pillar_enc = six.text_type(pillar_enc).lower()
self._pillar_enc = pillar_enc
- if initial_pillar:
+ if initial_pillar and not self._pillar_override:
self.opts['pillar'] = initial_pillar
+ else:
+ # Compile pillar data
+ self.opts['pillar'] = self._gather_pillar()
+ # Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts['pillar'] = salt.utils.dictupdate.merge(
self.opts['pillar'],
@@ -729,8 +733,6 @@ class State(object):
self.opts.get('pillar_source_merging_strategy', 'smart'),
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
- else:
- self.opts['pillar'] = self._gather_pillar()
self.state_con = context or {}
self.load_modules()
self.active = set()
@@ -3919,24 +3921,23 @@ class BaseHighState(object):
return err
if not high:
return ret
- cumask = os.umask(0o77)
- try:
- if salt.utils.platform.is_windows():
- # Make sure cache file isn't read-only
- self.state.functions['cmd.run'](
- ['attrib', '-R', cfn],
- python_shell=False,
- output_loglevel='quiet')
- with salt.utils.files.fopen(cfn, 'w+b') as fp_:
- try:
- self.serial.dump(high, fp_)
- except TypeError:
- # Can't serialize pydsl
- pass
- except (IOError, OSError):
- log.error('Unable to write to "state.highstate" cache file %s', cfn)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.platform.is_windows():
+ # Make sure cache file isn't read-only
+ self.state.functions['cmd.run'](
+ ['attrib', '-R', cfn],
+ python_shell=False,
+ output_loglevel='quiet')
+ with salt.utils.files.fopen(cfn, 'w+b') as fp_:
+ try:
+ self.serial.dump(high, fp_)
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+ except (IOError, OSError):
+ log.error('Unable to write to "state.highstate" cache file %s', cfn)
- os.umask(cumask)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
diff --git a/salt/states/archive.py b/salt/states/archive.py
index c4ea3019b1..9e2aa93ab5 100644
--- a/salt/states/archive.py
+++ b/salt/states/archive.py
@@ -696,7 +696,7 @@ def extracted(name,
# True
# >>> os.path.isfile('/tmp/foo.txt/')
# False
- name = name.rstrip('/')
+ name = name.rstrip(os.sep)
if os.path.isfile(name):
ret['comment'] = '{0} exists and is not a directory'.format(name)
return ret
@@ -729,6 +729,11 @@ def extracted(name,
)
return ret
+ if if_missing is not None and os.path.exists(if_missing):
+ ret['result'] = True
+ ret['comment'] = 'Path {0} exists'.format(if_missing)
+ return ret
+
if user or group:
if salt.utils.platform.is_windows():
ret['comment'] = \
@@ -1511,7 +1516,7 @@ def extracted(name,
if not if_missing:
# If is_missing was used, and both a) the archive had never been
# extracted, and b) the path referred to by if_missing exists, then
- # enforce_missing would contain paths of top_levle dirs/files that
+ # enforce_missing would contain paths of top_level dirs/files that
# _would_ have been extracted. Since if_missing can be used as a
# semaphore to conditionally extract, we don't want to make this a
# case where the state fails, so we only fail the state if
diff --git a/salt/states/azurearm_resource.py b/salt/states/azurearm_resource.py
index d0c555c90d..79f124b5da 100644
--- a/salt/states/azurearm_resource.py
+++ b/salt/states/azurearm_resource.py
@@ -81,13 +81,13 @@ parameters are sensitive, it's recommended to pass them to the states via pillar
'''
-# Python libs
+# Import Python libs
from __future__ import absolute_import
import json
import logging
-# Salt libs
-import salt.utils
+# Import Salt libs
+import salt.utils.files
__virtualname__ = 'azurearm_resource'
@@ -424,7 +424,7 @@ def policy_definition_present(name, policy_rule=None, policy_type=None, mode=Non
return ret
try:
- with salt.utils.fopen(sfn, 'r') as prf:
+ with salt.utils.files.fopen(sfn, 'r') as prf:
temp_rule = json.load(prf)
except Exception as exc:
ret['comment'] = 'Unable to load policy rule file "{0}"! ({1})'.format(policy_rule_file, exc)
diff --git a/salt/states/boto_asg.py b/salt/states/boto_asg.py
index 13bd2c3b2d..3530f80bbe 100644
--- a/salt/states/boto_asg.py
+++ b/salt/states/boto_asg.py
@@ -487,9 +487,10 @@ def present(
iargs = {'ami_name': image_name, 'region': region, 'key': key,
'keyid': keyid, 'profile': profile}
image_ids = __salt__['boto_ec2.find_images'](**iargs)
- if len(image_ids):
+ if image_ids: # find_images() returns False on failure
launch_config[index]['image_id'] = image_ids[0]
else:
+ log.warning("Couldn't find AMI named `%s`, passing literally.", image_name)
launch_config[index]['image_id'] = image_name
del launch_config[index]['image_name']
break
diff --git a/salt/states/boto_secgroup.py b/salt/states/boto_secgroup.py
index d396ac1b71..8fd6aee8a8 100644
--- a/salt/states/boto_secgroup.py
+++ b/salt/states/boto_secgroup.py
@@ -58,6 +58,11 @@ passed in as a dict, or as a string to pull from pillars or minion config:
from_port: -1
to_port: -1
source_group_name: mysecgroup
+ - ip_protocol: tcp
+ from_port: 8080
+ to_port: 8080
+ source_group_name: MyOtherSecGroup
+ source_group_name_vpc: MyPeeredVPC
- rules_egress:
- ip_protocol: all
from_port: -1
@@ -422,8 +427,14 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
for rule in rules:
_source_group_name = rule.get('source_group_name', None)
if _source_group_name:
+ _group_vpc_name = vpc_name
+ _group_vpc_id = vpc_id
+ _source_group_name_vpc = rule.get('source_group_name_vpc', None)
+ if _source_group_name_vpc:
+ _group_vpc_name = _source_group_name_vpc
+ _group_vpc_id = None
_group_id = __salt__['boto_secgroup.get_group_id'](
- name=_source_group_name, vpc_id=vpc_id, vpc_name=vpc_name,
+ name=_source_group_name, vpc_id=_group_vpc_id, vpc_name=_group_vpc_name,
region=region, key=key, keyid=keyid, profile=profile
)
if not _group_id:
@@ -432,6 +443,8 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
'source group id.'.format(_source_group_name)
)
rule['source_group_name'] = None
+ if _source_group_name_vpc:
+ rule.pop('source_group_name_vpc')
rule['source_group_group_id'] = _group_id
# rules = rules that exist in salt state
# sg['rules'] = that exist in present group
@@ -508,8 +521,14 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
for rule in rules_egress:
_source_group_name = rule.get('source_group_name', None)
if _source_group_name:
+ _group_vpc_name = vpc_name
+ _group_vpc_id = vpc_id
+ _source_group_name_vpc = rule.get('source_group_name_vpc', None)
+ if _source_group_name_vpc:
+ _group_vpc_name = _source_group_name_vpc
+ _group_vpc_id = None
_group_id = __salt__['boto_secgroup.get_group_id'](
- name=_source_group_name, vpc_id=vpc_id, vpc_name=vpc_name,
+ name=_source_group_name, vpc_id=_group_vpc_id, vpc_name=_group_vpc_name,
region=region, key=key, keyid=keyid, profile=profile
)
if not _group_id:
@@ -518,6 +537,8 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
'source group id.'.format(_source_group_name)
)
rule['source_group_name'] = None
+ if _source_group_name_vpc:
+ rule.pop('source_group_name_vpc')
rule['source_group_group_id'] = _group_id
# rules_egress = rules that exist in salt state
# sg['rules_egress'] = that exist in present group
diff --git a/salt/states/cmd.py b/salt/states/cmd.py
index 2ae3bbaf7c..2a93458ffb 100644
--- a/salt/states/cmd.py
+++ b/salt/states/cmd.py
@@ -199,8 +199,7 @@ executed when the state it is watching changes. Example:
``cmd.wait`` itself does not do anything; all functionality is inside its ``mod_watch``
function, which is called by ``watch`` on changes.
-``cmd.wait`` will be deprecated in future due to the confusion it causes. The
-preferred format is using the :ref:`onchanges Requisite `, which
+The preferred format is using the :ref:`onchanges Requisite `, which
works on ``cmd.run`` as well as on any other state. The example would then look as follows:
.. code-block:: yaml
diff --git a/salt/states/composer.py b/salt/states/composer.py
index a154714669..9496b16fe1 100644
--- a/salt/states/composer.py
+++ b/salt/states/composer.py
@@ -62,7 +62,8 @@ def installed(name,
no_dev=None,
quiet=False,
composer_home='/root',
- always_check=True):
+ always_check=True,
+ env=None):
'''
Verify that the correct versions of composer dependencies are present.
@@ -111,6 +112,9 @@ def installed(name,
If ``True``, *always* run ``composer install`` in the directory. This is the
default behavior. If ``False``, only run ``composer install`` if there is no
vendor directory present.
+
+ env
+ A list of environment variables to be set prior to execution.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
@@ -153,7 +157,8 @@ def installed(name,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
- composer_home=composer_home
+ composer_home=composer_home,
+ env=env
)
except (SaltException) as err:
ret['result'] = False
@@ -188,7 +193,8 @@ def update(name,
optimize=None,
no_dev=None,
quiet=False,
- composer_home='/root'):
+ composer_home='/root',
+ env=None):
'''
Composer update the directory to ensure we have the latest versions
of all project dependencies.
@@ -233,6 +239,9 @@ def update(name,
composer_home
``$COMPOSER_HOME`` environment variable
+
+ env
+ A list of environment variables to be set prior to execution.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
@@ -267,7 +276,8 @@ def update(name,
optimize=optimize,
no_dev=no_dev,
quiet=quiet,
- composer_home=composer_home
+ composer_home=composer_home,
+ env=env
)
except (SaltException) as err:
ret['result'] = False
diff --git a/salt/states/docker.py b/salt/states/docker.py
deleted file mode 100644
index f143f3fedf..0000000000
--- a/salt/states/docker.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-States to manage Docker containers, images, volumes, and networks
-
-.. versionchanged:: 2017.7.0
- The legacy Docker state and execution module have been removed, and the
- new modules (formerly called ``dockerng`` have taken their places).
-
-.. important::
- As of the 2017.7.0 release, the states in this module have been separated
- into the following four state modules:
-
- - :mod:`docker_container ` - States to manage
- Docker containers
- - :mod:`docker_image ` - States to manage Docker
- images
- - :mod:`docker_volume ` - States to manage
- Docker volumes
- - :mod:`docker_network ` - States to manage
- Docker networks
-
- The reason for this change was to make states and requisites more clear.
- For example, imagine this SLS:
-
- .. code-block:: yaml
-
- myuser/appimage:
- docker.image_present:
- - sls: docker.images.appimage
-
- myapp:
- docker.running:
- - image: myuser/appimage
- - require:
- - docker: myuser/appimage
-
- The new syntax would be:
-
- .. code-block:: yaml
-
- myuser/appimage:
- docker_image.present:
- - sls: docker.images.appimage
-
- myapp:
- docker_container.running:
- - image: myuser/appimage
- - require:
- - docker_image: myuser/appimage
-
- This is similar to how Salt handles MySQL, MongoDB, Zabbix, and other cases
- where the same execution module is used to manage several different kinds
- of objects (users, databases, roles, etc.).
-
- The old syntax will continue to work until the **Fluorine** release of
- Salt.
-'''
-from __future__ import absolute_import, print_function, unicode_literals
-import copy
-import logging
-
-# Import salt libs
-import salt.utils.args
-import salt.utils.versions
-
-# Enable proper logging
-log = logging.getLogger(__name__) # pylint: disable=invalid-name
-
-# Define the module's virtual name
-__virtualname__ = 'docker'
-__virtual_aliases__ = ('dockerng', 'moby')
-
-
-def __virtual__():
- '''
- Only load if the docker execution module is available
- '''
- if 'docker.version' in __salt__:
- return __virtualname__
- return (False, __salt__.missing_fun_string('docker.version'))
-
-
-def running(name, **kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_container.running
- `.
- '''
- ret = __states__['docker_container.running'](
- name,
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.running state has been renamed to '
- 'docker_container.running. To get rid of this warning, update your '
- 'SLS to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def stopped(**kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_container.stopped
- `.
- '''
- ret = __states__['docker_container.stopped'](
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.stopped state has been renamed to '
- 'docker_container.stopped. To get rid of this warning, update your '
- 'SLS to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def absent(name, **kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_container.absent
- `.
- '''
- ret = __states__['docker_container.absent'](
- name,
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.absent state has been renamed to '
- 'docker_container.absent. To get rid of this warning, update your '
- 'SLS to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def network_present(name, **kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_network.present
- `.
- '''
- ret = __states__['docker_network.present'](
- name,
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.network_present state has been renamed to '
- 'docker_network.present. To get rid of this warning, update your SLS '
- 'to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def network_absent(name, **kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_network.absent
- `.
- '''
- ret = __states__['docker_network.absent'](
- name,
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.network_absent state has been renamed to '
- 'docker_network.absent. To get rid of this warning, update your SLS '
- 'to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def image_present(name, **kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_image.present
- `.
- '''
- ret = __states__['docker_image.present'](
- name,
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.image_present state has been renamed to '
- 'docker_image.present. To get rid of this warning, update your SLS '
- 'to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def image_absent(**kwargs):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_image.absent
- `.
- '''
- ret = __states__['docker_image.absent'](
- **salt.utils.args.clean_kwargs(**kwargs)
- )
- msg = (
- 'The docker.image_absent state has been renamed to '
- 'docker_image.absent. To get rid of this warning, update your SLS to '
- 'use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def volume_present(name, driver=None, driver_opts=None, force=False):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_volume.present
- `.
- '''
- ret = __states__['docker_volume.present'](name,
- driver=driver,
- driver_opts=driver_opts,
- force=force)
- msg = (
- 'The docker.volume_present state has been renamed to '
- 'docker_volume.present. To get rid of this warning, update your SLS '
- 'to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-def volume_absent(name, driver=None):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`docker_volume.absent
- `.
- '''
- ret = __states__['docker_volume.absent'](name, driver=driver)
- msg = (
- 'The docker.volume_absent state has been renamed to '
- 'docker_volume.absent. To get rid of this warning, update your SLS '
- 'to use the new name.'
- )
- salt.utils.versions.warn_until('Fluorine', msg)
- ret.setdefault('warnings', []).append(msg)
- return ret
-
-
-# Handle requisites
-def mod_watch(name, sfun=None, **kwargs):
- if sfun == 'running':
- watch_kwargs = copy.deepcopy(kwargs)
- if watch_kwargs.get('watch_action', 'force') == 'force':
- watch_kwargs['force'] = True
- else:
- watch_kwargs['send_signal'] = True
- watch_kwargs['force'] = False
- return running(name, **watch_kwargs)
-
- if sfun == 'image_present':
- # Force image to be updated
- kwargs['force'] = True
- return image_present(name, **kwargs)
-
- return {'name': name,
- 'changes': {},
- 'result': False,
- 'comment': ('watch requisite is not'
- ' implemented for {0}'.format(sfun))}
diff --git a/salt/states/docker_container.py b/salt/states/docker_container.py
index ecee5077b8..afdeadbf6a 100644
--- a/salt/states/docker_container.py
+++ b/salt/states/docker_container.py
@@ -1564,14 +1564,14 @@ def running(name,
.. code-block:: yaml
foo:
- dockerng.running:
+ docker_container.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
.. code-block:: yaml
foo:
- dockerng.running:
+ docker_container.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py
index 4457e6d0e6..6742c00b73 100644
--- a/salt/states/docker_network.py
+++ b/salt/states/docker_network.py
@@ -890,7 +890,7 @@ def present(name,
return ret
-def absent(name, driver=None):
+def absent(name):
'''
Ensure that a network is absent.
@@ -909,12 +909,6 @@ def absent(name, driver=None):
'result': False,
'comment': ''}
- if driver is not None:
- ret.setdefault('warnings', []).append(
- 'The \'driver\' argument has no function and will be removed in '
- 'the Fluorine release.'
- )
-
try:
network = __salt__['docker.inspect_network'](name)
except CommandExecutionError as exc:
diff --git a/salt/states/esxi.py b/salt/states/esxi.py
index f179840182..f44533c1be 100644
--- a/salt/states/esxi.py
+++ b/salt/states/esxi.py
@@ -1061,7 +1061,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
erase_disks
Specifies whether to erase all partitions on all disks member of the
- disk group before the disk group is created. Default vaule is False.
+ disk group before the disk group is created. Default value is False.
'''
proxy_details = __salt__['esxi.get_details']()
hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
@@ -1371,7 +1371,7 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
erase_backing_disk
Specifies whether to erase all partitions on the backing disk before
- the datastore is created. Default vaule is False.
+ the datastore is created. Default value is False.
'''
log.trace('enabled = %s', enabled)
log.trace('datastore = %s', datastore)
diff --git a/salt/states/etcd_mod.py b/salt/states/etcd_mod.py
index cef4a915ec..da112d7e76 100644
--- a/salt/states/etcd_mod.py
+++ b/salt/states/etcd_mod.py
@@ -41,7 +41,7 @@ or clusters are available.
as this makes all master configuration settings available in all minion's
pillars.
-Etcd profile configuration can be overriden using following arguments: ``host``,
+Etcd profile configuration can be overridden using following arguments: ``host``,
``port``, ``username``, ``password``, ``ca``, ``client_key`` and ``client_cert``.
.. code-block:: yaml
diff --git a/salt/states/file.py b/salt/states/file.py
index 5ab8b4ca7c..5b303c882a 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
@@ -4160,11 +4160,20 @@ def blockreplace(
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
- show_changes=True):
+ show_changes=True,
+ append_newline=None):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
+ .. versionchanged:: 2017.7.5,2018.3.1
+ ``append_newline`` argument added. Additionally, to improve
+ idempotence, if the string represented by ``marker_end`` is found in
+ the middle of the line, the content preceding the marker will be
+ removed when the block is replaced. This allows one to remove
+ ``append_newline: False`` from the SLS and have the block properly
+ replaced if the end of the content block is immediately followed by the
+ ``marker_end`` (i.e. no newline before the marker).
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal. This can help you
@@ -4249,41 +4258,54 @@ def blockreplace(
See the ``source_hash`` parameter description for :mod:`file.managed
` function for more details and examples.
- template
- The named templating engine will be used to render the downloaded file.
- Defaults to ``jinja``. The following templates are supported:
+ template : jinja
+ Templating engine to be used to render the downloaded file. The
+ following engines are supported:
- - :mod:`cheetah`
- - :mod:`genshi`
- - :mod:`jinja`
- - :mod:`mako`
- - :mod:`py`
- - :mod:`wempy`
+ - :mod:`cheetah `
+ - :mod:`genshi `
+ - :mod:`jinja `
+ - :mod:`mako `
+ - :mod:`py `
+ - :mod:`wempy `
context
- Overrides default context variables passed to the template.
+ Overrides default context variables passed to the template
defaults
- Default context passed to the template.
+ Default context passed to the template
- append_if_not_found
- If markers are not found and set to True then the markers and content
- will be appended to the file. Default is ``False``
+ append_if_not_found : False
+ If markers are not found and this option is set to ``True``, the
+ content block will be appended to the file.
- prepend_if_not_found
- If markers are not found and set to True then the markers and content
- will be prepended to the file. Default is ``False``
+ prepend_if_not_found : False
+ If markers are not found and this option is set to ``True``, the
+ content block will be prepended to the file.
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
- dry_run
- Don't make any edits to the file
+ dry_run : False
+ If ``True``, do not make any edits to the file and simply return the
+ changes that *would* be made.
- show_changes
- Output a unified diff of the old file and the new file. If ``False``
- return a boolean if any changes were made
+ show_changes : True
+ Controls how changes are presented. If ``True``, the ``Changes``
+ section of the state return will contain a unified diff of the changes
+ made. If False, then it will contain a boolean (``True`` if any changes
+ were made, otherwise ``False``).
+
+ append_newline
+ Controls whether or not a newline is appended to the content block. If
+ the value of this argument is ``True`` then a newline will be added to
+ the content block. If it is ``False``, then a newline will *not* be
+ added to the content block. If it is unspecified, then a newline will
+ only be added to the content block if it does not already end in a
+ newline.
+
+ .. versionadded:: 2017.7.5,2018.3.1
Example of usage with an accumulator and with a variable:
@@ -4385,17 +4407,25 @@ def blockreplace(
for index, item in enumerate(text):
content += six.text_type(item)
- changes = __salt__['file.blockreplace'](
- name,
- marker_start,
- marker_end,
- content=content,
- append_if_not_found=append_if_not_found,
- prepend_if_not_found=prepend_if_not_found,
- backup=backup,
- dry_run=__opts__['test'],
- show_changes=show_changes
- )
+ try:
+ changes = __salt__['file.blockreplace'](
+ name,
+ marker_start,
+ marker_end,
+ content=content,
+ append_if_not_found=append_if_not_found,
+ prepend_if_not_found=prepend_if_not_found,
+ backup=backup,
+ dry_run=__opts__['test'],
+ show_changes=show_changes,
+ append_newline=append_newline)
+ except Exception as exc:
+ log.exception('Encountered error managing block')
+ ret['comment'] = (
+ 'Encountered error managing block: {0}. '
+ 'See the log for details.'.format(exc)
+ )
+ return ret
if changes:
ret['pchanges'] = {'diff': changes}
diff --git a/salt/states/git.py b/salt/states/git.py
index 72b1ce8e87..63c987d35d 100644
--- a/salt/states/git.py
+++ b/salt/states/git.py
@@ -110,26 +110,30 @@ def _get_branch_opts(branch, local_branch, all_local_branches,
return ret
-def _get_local_rev_and_branch(target, user, password):
+def _get_local_rev_and_branch(target, user, password, output_encoding=None):
'''
Return the local revision for before/after comparisons
'''
log.info('Checking local revision for %s', target)
try:
- local_rev = __salt__['git.revision'](target,
- user=user,
- password=password,
- ignore_retcode=True)
+ local_rev = __salt__['git.revision'](
+ target,
+ user=user,
+ password=password,
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
log.info('No local revision for %s', target)
local_rev = None
log.info('Checking local branch for %s', target)
try:
- local_branch = __salt__['git.current_branch'](target,
- user=user,
- password=password,
- ignore_retcode=True)
+ local_branch = __salt__['git.current_branch'](
+ target,
+ user=user,
+ password=password,
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
log.info('No local branch for %s', target)
local_branch = None
@@ -260,6 +264,7 @@ def latest(name,
unless=False,
refspec_branch='*',
refspec_tag='*',
+ output_encoding=None,
**kwargs):
'''
Make sure the repository is cloned to the given directory and is
@@ -521,6 +526,30 @@ def latest(name,
.. versionadded:: 2017.7.0
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch
.. note::
@@ -698,7 +727,8 @@ def latest(name,
https_user=https_user,
https_pass=https_pass,
ignore_retcode=False,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _fail(
ret,
@@ -808,18 +838,29 @@ def latest(name,
check = 'refs' if bare else '.git'
gitdir = os.path.join(target, check)
comments = []
- if os.path.isdir(gitdir) or __salt__['git.is_worktree'](target,
- user=user,
- password=password):
+ if os.path.isdir(gitdir) \
+ or __salt__['git.is_worktree'](
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding):
# Target directory is a git repository or git worktree
try:
all_local_branches = __salt__['git.list_branches'](
- target, user=user, password=password)
- all_local_tags = __salt__['git.list_tags'](target,
- user=user,
- password=password)
- local_rev, local_branch = \
- _get_local_rev_and_branch(target, user, password)
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding)
+ all_local_tags = __salt__['git.list_tags'](
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding)
+ local_rev, local_branch = _get_local_rev_and_branch(
+ target,
+ user,
+ password,
+ output_encoding)
if not bare and remote_rev is None and local_rev is not None:
return _fail(
@@ -855,7 +896,8 @@ def latest(name,
branch + '^{commit}',
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _fail(
ret,
@@ -867,7 +909,8 @@ def latest(name,
remotes = __salt__['git.remotes'](target,
user=user,
password=password,
- redact_auth=False)
+ redact_auth=False,
+ output_encoding=output_encoding)
revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type)
try:
@@ -879,7 +922,8 @@ def latest(name,
__salt__['git.diff'](target,
'HEAD',
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
)
except CommandExecutionError:
# No need to capture the error and log it, the _git_run()
@@ -933,7 +977,8 @@ def latest(name,
remote_rev + '^{commit}',
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
# Local checkout doesn't have the remote_rev
pass
@@ -954,7 +999,8 @@ def latest(name,
desired_upstream,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
pass
else:
@@ -974,7 +1020,8 @@ def latest(name,
rev + '^{commit}',
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
# Shouldn't happen if the tag exists
# locally but account for this just in
@@ -1044,7 +1091,8 @@ def latest(name,
is_ancestor=True,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
if fast_forward is False:
if not force_reset:
@@ -1075,7 +1123,8 @@ def latest(name,
opts=['--abbrev-ref'],
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
# There is a local branch but the rev-parse command
# failed, so that means there is no upstream tracking
@@ -1144,7 +1193,8 @@ def latest(name,
user=user,
password=password,
https_user=https_user,
- https_pass=https_pass)
+ https_pass=https_pass,
+ output_encoding=output_encoding)
if fetch_url is None:
comments.append(
'Remote \'{0}\' set to {1}'.format(
@@ -1318,7 +1368,7 @@ def latest(name,
identity=identity,
saltenv=__env__,
ignore_retcode=True,
- ).keys() if '^{}' not in x
+ output_encoding=output_encoding) if '^{}' not in x
])
if set(all_local_tags) != remote_tags:
has_remote_rev = False
@@ -1336,7 +1386,8 @@ def latest(name,
user=user,
password=password,
identity=identity,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
@@ -1352,7 +1403,8 @@ def latest(name,
remote_rev + '^{commit}',
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _fail(
ret,
@@ -1384,7 +1436,8 @@ def latest(name,
refs=[base_rev, remote_rev],
is_ancestor=True,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
if fast_forward is False and not force_reset:
return _not_fast_forward(
@@ -1427,7 +1480,8 @@ def latest(name,
force=force_checkout,
opts=checkout_opts,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
if '-b' in checkout_opts:
comments.append(
'New branch \'{0}\' was checked out, with {1} '
@@ -1450,7 +1504,7 @@ def latest(name,
opts=['--hard', remote_rev],
user=user,
password=password,
- )
+ output_encoding=output_encoding)
ret['changes']['forced update'] = True
comments.append(
'Repository was hard-reset to {0}'.format(remote_loc)
@@ -1461,7 +1515,8 @@ def latest(name,
target,
opts=branch_opts,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(upstream_action)
# Fast-forward to the desired revision
@@ -1474,12 +1529,14 @@ def latest(name,
# trying to merge changes. (The call to
# git.symbolic_ref will only return output if HEAD
# points to a branch.)
- if __salt__['git.symbolic_ref'](target,
- 'HEAD',
- opts=['--quiet'],
- user=user,
- password=password,
- ignore_retcode=True):
+ if __salt__['git.symbolic_ref'](
+ target,
+ 'HEAD',
+ opts=['--quiet'],
+ user=user,
+ password=password,
+ ignore_retcode=True,
+ output_encoding=output_encoding):
if git_ver >= _LooseVersion('1.8.1.6'):
# --ff-only added in version 1.8.1.6. It's not
@@ -1499,7 +1556,8 @@ def latest(name,
rev=remote_rev,
opts=merge_opts,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Repository was fast-forwarded to {0}'
.format(remote_loc)
@@ -1518,7 +1576,8 @@ def latest(name,
opts=['--hard',
remote_rev if rev == 'HEAD' else rev],
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Repository was reset to {0} (fast-forward)'
.format(rev)
@@ -1535,7 +1594,8 @@ def latest(name,
user=user,
password=password,
identity=identity,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
elif bare:
@@ -1557,7 +1617,8 @@ def latest(name,
user=user,
password=password,
identity=identity,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _failed_fetch(ret, exc, comments)
else:
@@ -1574,7 +1635,8 @@ def latest(name,
cwd=target,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
new_rev = None
@@ -1682,7 +1744,8 @@ def latest(name,
identity=identity,
https_user=https_user,
https_pass=https_pass,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
msg = 'Clone failed: {0}'.format(_strip_exc(exc))
return _fail(ret, msg, comments)
@@ -1715,7 +1778,10 @@ def latest(name,
else:
if remote_rev_type == 'tag' \
and rev not in __salt__['git.list_tags'](
- target, user=user, password=password):
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding):
return _fail(
ret,
'Revision \'{0}\' does not exist in clone'
@@ -1728,18 +1794,21 @@ def latest(name,
__salt__['git.list_branches'](
target,
user=user,
- password=password):
+ password=password,
+ output_encoding=output_encoding):
if rev == 'HEAD':
checkout_rev = remote_rev
else:
checkout_rev = desired_upstream \
if desired_upstream \
else rev
- __salt__['git.checkout'](target,
- checkout_rev,
- opts=['-b', branch],
- user=user,
- password=password)
+ __salt__['git.checkout'](
+ target,
+ checkout_rev,
+ opts=['-b', branch],
+ user=user,
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Branch \'{0}\' checked out, with {1} '
'as a starting point'.format(
@@ -1748,8 +1817,11 @@ def latest(name,
)
)
- local_rev, local_branch = \
- _get_local_rev_and_branch(target, user, password)
+ local_rev, local_branch = _get_local_rev_and_branch(
+ target,
+ user,
+ password,
+ output_encoding=output_encoding)
if local_branch is None \
and remote_rev is not None \
@@ -1771,7 +1843,8 @@ def latest(name,
target,
opts=['--hard', remote_rev],
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Repository was reset to {0}'.format(remote_loc)
)
@@ -1783,7 +1856,8 @@ def latest(name,
opts=['--abbrev-ref'],
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
upstream = False
@@ -1796,9 +1870,11 @@ def latest(name,
branch_opts = _get_branch_opts(
branch,
local_branch,
- __salt__['git.list_branches'](target,
- user=user,
- password=password),
+ __salt__['git.list_branches'](
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding),
desired_upstream,
git_ver)
elif upstream and desired_upstream is False:
@@ -1821,9 +1897,11 @@ def latest(name,
branch_opts = _get_branch_opts(
branch,
local_branch,
- __salt__['git.list_branches'](target,
- user=user,
- password=password),
+ __salt__['git.list_branches'](
+ target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding),
desired_upstream,
git_ver)
else:
@@ -1834,17 +1912,20 @@ def latest(name,
target,
opts=branch_opts,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(upstream_action)
if submodules and remote_rev:
try:
- __salt__['git.submodule'](target,
- 'update',
- opts=['--init', '--recursive'],
- user=user,
- password=password,
- identity=identity)
+ __salt__['git.submodule'](
+ target,
+ 'update',
+ opts=['--init', '--recursive'],
+ user=user,
+ password=password,
+ identity=identity,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
return _failed_submodule_update(ret, exc, comments)
@@ -1853,7 +1934,8 @@ def latest(name,
cwd=target,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
new_rev = None
@@ -1883,7 +1965,8 @@ def present(name,
separate_git_dir=None,
shared=None,
user=None,
- password=None):
+ password=None,
+ output_encoding=None):
'''
Ensure that a repository exists in the given directory
@@ -1943,6 +2026,30 @@ def present(name,
.. versionadded:: 2016.3.4
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
.. _`git-init(1)`: http://git-scm.com/docs/git-init
.. _`worktree`: http://git-scm.com/docs/git-worktree
'''
@@ -1954,7 +2061,10 @@ def present(name,
return ret
elif not bare and \
(os.path.isdir(os.path.join(name, '.git')) or
- __salt__['git.is_worktree'](name, user=user, password=password)):
+ __salt__['git.is_worktree'](name,
+ user=user,
+ password=password,
+ output_encoding=output_encoding)):
return ret
# Directory exists and is not a git repo, if force is set destroy the
# directory and recreate, otherwise throw an error
@@ -2013,7 +2123,8 @@ def present(name,
separate_git_dir=separate_git_dir,
shared=shared,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
actions = [
'Initialized {0}repository in {1}'.format(
@@ -2050,6 +2161,7 @@ def detached(name,
https_pass=None,
onlyif=False,
unless=False,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2016.3.0
@@ -2065,10 +2177,6 @@ def detached(name,
If a branch or tag is specified it will be resolved to a commit ID
and checked out.
- ref
- .. deprecated:: 2017.7.0
- Use ``rev`` instead.
-
target
Name of the target directory where repository is about to be cloned.
@@ -2132,11 +2240,33 @@ def detached(name,
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
- ref = kwargs.pop('ref', None)
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
return _fail(
@@ -2144,15 +2274,6 @@ def detached(name,
salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
)
- if ref is not None:
- rev = ref
- deprecation_msg = (
- 'The \'ref\' argument has been renamed to \'rev\' for '
- 'consistency. Please update your SLS to reflect this.'
- )
- ret.setdefault('warnings', []).append(deprecation_msg)
- salt.utils.versions.warn_until('Fluorine', deprecation_msg)
-
if not rev:
return _fail(
ret,
@@ -2252,10 +2373,17 @@ def detached(name,
gitdir = os.path.join(target, '.git')
if os.path.isdir(gitdir) \
- or __salt__['git.is_worktree'](target, user=user, password=password):
+ or __salt__['git.is_worktree'](target,
+ user=user,
+ password=password,
+ output_encoding=output_encoding):
# Target directory is a git repository or git worktree
- local_commit_id = _get_local_rev_and_branch(target, user, password)[0]
+ local_commit_id = _get_local_rev_and_branch(
+ target,
+ user,
+ password,
+ output_encoding=output_encoding)[0]
if remote_rev_type is 'hash':
try:
@@ -2263,7 +2391,8 @@ def detached(name,
rev,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
hash_exists_locally = False
else:
@@ -2274,7 +2403,8 @@ def detached(name,
remotes = __salt__['git.remotes'](target,
user=user,
password=password,
- redact_auth=False)
+ redact_auth=False,
+ output_encoding=output_encoding)
if remote in remotes and name in remotes[remote]['fetch']:
pass
@@ -2300,7 +2430,8 @@ def detached(name,
user=user,
password=password,
https_user=https_user,
- https_pass=https_pass)
+ https_pass=https_pass,
+ output_encoding=output_encoding)
comments.append(
'Remote {0} updated from \'{1}\' to \'{2}\''.format(
remote,
@@ -2380,7 +2511,8 @@ def detached(name,
identity=identity,
https_user=https_user,
https_pass=https_pass,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
comments.append('{0} cloned to {1}'.format(name, target))
except Exception as exc:
@@ -2417,7 +2549,8 @@ def detached(name,
user=user,
password=password,
identity=identity,
- saltenv=__env__)
+ saltenv=__env__,
+ output_encoding=output_encoding)
except CommandExecutionError as exc:
msg = 'Fetch failed'
msg += ':\n\n' + six.text_type(exc)
@@ -2429,10 +2562,15 @@ def detached(name,
'refs'.format(remote)
)
- #get refs and checkout
+ # get refs and checkout
checkout_commit_id = ''
if remote_rev_type is 'hash':
- if __salt__['git.describe'](target, rev, user=user, password=password):
+ if __salt__['git.describe'](
+ target,
+ rev,
+ user=user,
+ password=password,
+ output_encoding=output_encoding):
checkout_commit_id = rev
else:
return _fail(
@@ -2448,7 +2586,8 @@ def detached(name,
identity=identity,
https_user=https_user,
https_pass=https_pass,
- ignore_retcode=False)
+ ignore_retcode=False,
+ output_encoding=output_encoding)
if 'refs/remotes/'+remote+'/'+rev in all_remote_refs:
checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev]
@@ -2476,7 +2615,8 @@ def detached(name,
target,
opts=['--hard', 'HEAD'],
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Repository was reset to HEAD before checking out revision'
)
@@ -2499,7 +2639,8 @@ def detached(name,
checkout_commit_id,
force=force_checkout,
user=user,
- password=password)
+ password=password,
+ output_encoding=output_encoding)
comments.append(
'Commit ID {0} was checked out at {1}'.format(
checkout_commit_id,
@@ -2512,7 +2653,8 @@ def detached(name,
cwd=target,
user=user,
password=password,
- ignore_retcode=True)
+ ignore_retcode=True,
+ output_encoding=output_encoding)
except CommandExecutionError:
new_rev = None
@@ -2522,7 +2664,8 @@ def detached(name,
opts=['--init', '--recursive'],
user=user,
password=password,
- identity=identity)
+ identity=identity,
+ output_encoding=output_encoding)
comments.append(
'Submodules were updated'
)
@@ -2544,6 +2687,7 @@ def config_unset(name,
repo=None,
user=None,
password=None,
+ output_encoding=None,
**kwargs):
r'''
.. versionadded:: 2015.8.0
@@ -2586,6 +2730,30 @@ def config_unset(name,
global : False
If ``True``, this will set a global git config option
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
**Examples:**
@@ -2658,6 +2826,7 @@ def config_unset(name,
user=user,
password=password,
ignore_retcode=True,
+ output_encoding=output_encoding,
**{'global': global_}
)
@@ -2707,6 +2876,7 @@ def config_unset(name,
user=user,
password=password,
ignore_retcode=True,
+ output_encoding=output_encoding,
**{'global': global_}
)
@@ -2722,6 +2892,7 @@ def config_unset(name,
all=all_,
user=user,
password=password,
+ output_encoding=output_encoding,
**{'global': global_}
)
except CommandExecutionError as exc:
@@ -2746,6 +2917,7 @@ def config_unset(name,
user=user,
password=password,
ignore_retcode=True,
+ output_encoding=output_encoding,
**{'global': global_}
)
@@ -2766,6 +2938,7 @@ def config_unset(name,
user=user,
password=password,
ignore_retcode=True,
+ output_encoding=output_encoding,
**{'global': global_}
)
@@ -2787,6 +2960,7 @@ def config_set(name,
repo=None,
user=None,
password=None,
+ output_encoding=None,
**kwargs):
'''
.. versionadded:: 2014.7.0
@@ -2829,6 +3003,30 @@ def config_set(name,
global : False
If ``True``, this will set a global git config option
+ output_encoding
+ Use this option to specify which encoding to use to decode the output
+ from any git commands which are run. This should not be needed in most
+ cases.
+
+ .. note::
+
+ On Windows, this option works slightly differently in the git state
+ and execution module than it does in the :mod:`"cmd" execution
+ module <salt.modules.cmdmod>`. The filenames in most git
+ repositories are created using a UTF-8 locale, and the system
+ encoding on Windows (CP1252) will successfully (but incorrectly)
+ decode many UTF-8 characters. This makes interacting with
+ repositories containing UTF-8 filenames on Windows unreliable.
+ Therefore, Windows will default to decoding the output from git
+ commands using UTF-8 unless this option is explicitly used to
+ specify the encoding.
+
+ On non-Windows platforms, the default output decoding behavior will
+ be observed (i.e. the encoding specified by the locale will be
+ tried first, and if that fails, UTF-8 will be used as a fallback).
+
+ .. versionadded:: 2018.3.1
+
**Local Config Example:**
.. code-block:: yaml
@@ -2922,6 +3120,7 @@ def config_set(name,
user=user,
password=password,
ignore_retcode=True,
+ output_encoding=output_encoding,
**{'all': True, 'global': global_}
)
@@ -2952,6 +3151,7 @@ def config_set(name,
multivar=multivar,
user=user,
password=password,
+ output_encoding=output_encoding,
**{'global': global_}
)
except CommandExecutionError as exc:
diff --git a/salt/states/hipchat.py b/salt/states/hipchat.py
index 53f870f4da..cb31d58dd9 100644
--- a/salt/states/hipchat.py
+++ b/salt/states/hipchat.py
@@ -63,7 +63,7 @@ def send_message(name,
- api_url: https://hipchat.myteam.com
- api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
- api_version: v1
- - color: green
+ - message_color: green
- notify: True
The following parameters are required:
@@ -96,7 +96,7 @@ def send_message(name,
The api version for Hipchat to use,
if not specified in the configuration options of master or minion.
- color
+ message_color
The color the Hipchat message should be displayed in. One of the following, default: yellow
"yellow", "red", "green", "purple", "gray", or "random".
diff --git a/salt/states/http.py b/salt/states/http.py
index b30bd51171..0bcd373c3e 100644
--- a/salt/states/http.py
+++ b/salt/states/http.py
@@ -24,7 +24,8 @@ def query(name, match=None, match_type='string', status=None, wait_for=None, **k
'''
Perform an HTTP query and statefully return the result
- .. versionadded:: 2015.5.0
+ Passes through all the parameters described in the
+ :py:func:`utils.http.query function <salt.utils.http.query>`:
name
The name of the query.
diff --git a/salt/states/infoblox_host_record.py b/salt/states/infoblox_host_record.py
index ff02bd8698..f61ba499e3 100644
--- a/salt/states/infoblox_host_record.py
+++ b/salt/states/infoblox_host_record.py
@@ -104,7 +104,7 @@ def present(name=None, data=None, ensure_data=True, **api_opts):
addr['ipv4addr'] = ip
found_matches += 1
if found_matches > 1:
- ret['comment'] = 'infoblox record cant updated because ipaddress {0} matches mutiple func:nextavailableip'.format(ip)
+ ret['comment'] = 'infoblox record cant updated because ipaddress {0} matches multiple func:nextavailableip'.format(ip)
ret['result'] = False
return ret
diff --git a/salt/states/k8s.py b/salt/states/k8s.py
deleted file mode 100644
index 90c4584318..0000000000
--- a/salt/states/k8s.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Manage Kubernetes
-
-.. versionadded:: 2016.3.0
-
-.. code-block:: yaml
-
- kube_label_1:
- k8s.label_present:
- - name: mylabel
- - value: myvalue
- - node: myothernodename
- - apiserver: http://mykubeapiserer:8080
-
- kube_label_2:
- k8s.label_absent:
- - name: mylabel
- - node: myothernodename
- - apiserver: http://mykubeapiserer:8080
-
- kube_label_3:
- k8s.label_folder_present:
- - name: mylabel
- - node: myothernodename
- - apiserver: http://mykubeapiserer:8080
-'''
-from __future__ import absolute_import, unicode_literals, print_function
-
-# Import salt libs
-import salt.utils.versions
-
-
-__virtualname__ = 'k8s'
-
-
-def __virtual__():
- '''Load only if kubernetes module is available.'''
- if 'k8s.get_labels' not in __salt__:
- return False
- return True
-
-
-def label_present(
- name,
- value,
- node=None,
- apiserver=None):
- '''
- .. deprecated:: 2017.7.0
- This state has been moved to :py:func:`kubernetes.node_label_present
- {1}) not permitted, set allow_uid_change to '
+ 'True to force this change. Note that this will not change file '
+ 'ownership.'.format(lusr['uid'], uid)
+ )
+ if not allow_gid_change and 'gid' in change:
+ errors.append(
+ 'Changing gid ({0} -> {1}) not permitted, set allow_gid_change to '
+ 'True to force this change. Note that this will not change file '
+ 'ownership.'.format(lusr['gid'], gid)
+ )
+ if errors:
+ raise CommandExecutionError(
+ 'Encountered error checking for needed changes',
+ info=errors
+ )
+
return change
@@ -225,7 +247,9 @@ def present(name,
win_profile=None,
win_logonscript=None,
win_description=None,
- nologinit=False):
+ nologinit=False,
+ allow_uid_change=False,
+ allow_gid_change=False):
'''
Ensure that the named user is present with the specified properties
@@ -233,16 +257,28 @@ def present(name,
The name of the user to manage
uid
- The user id to assign, if left empty then the next available user id
- will be assigned
+ The user id to assign. If not specified, and the user does not exist,
+ then the next available uid will be assigned.
gid
- The default group id. Also accepts group name.
+ The id of the default group to assign to the user. Either a group name
+ or gid can be used. If not specified, and the user does not exist, then
+ he next available gid will be assigned.
- gid_from_name
- If True, the default group id will be set to the id of the group with
- the same name as the user. If the group does not exist the state will
- fail. Default is ``False``.
+ gid_from_name : False
+ If ``True``, the default group id will be set to the id of the group
+ with the same name as the user. If the group does not exist the state
+ will fail.
+
+ allow_uid_change : False
+ Set to ``True`` to allow the state to update the uid.
+
+ .. versionadded:: 2018.3.1
+
+ allow_gid_change : False
+ Set to ``True`` to allow the state to update the gid.
+
+ .. versionadded:: 2018.3.1
groups
A list of groups to assign the user to, pass a list object. If a group
@@ -466,33 +502,40 @@ def present(name,
ret['result'] = False
return ret
- changes = _changes(name,
- uid,
- gid,
- groups,
- present_optgroups,
- remove_groups,
- home,
- createhome,
- password,
- enforce_password,
- empty_password,
- shell,
- fullname,
- roomnumber,
- workphone,
- homephone,
- loginclass,
- date,
- mindays,
- maxdays,
- inactdays,
- warndays,
- expire,
- win_homedrive,
- win_profile,
- win_logonscript,
- win_description)
+ try:
+ changes = _changes(name,
+ uid,
+ gid,
+ groups,
+ present_optgroups,
+ remove_groups,
+ home,
+ createhome,
+ password,
+ enforce_password,
+ empty_password,
+ shell,
+ fullname,
+ roomnumber,
+ workphone,
+ homephone,
+ loginclass,
+ date,
+ mindays,
+ maxdays,
+ inactdays,
+ warndays,
+ expire,
+ win_homedrive,
+ win_profile,
+ win_logonscript,
+ win_description,
+ allow_uid_change,
+ allow_gid_change)
+ except CommandExecutionError as exc:
+ ret['result'] = False
+ ret['comment'] = exc.strerror
+ return ret
if changes:
if __opts__['test']:
@@ -621,7 +664,13 @@ def present(name,
win_homedrive,
win_profile,
win_logonscript,
- win_description)
+ win_description,
+ allow_uid_change=True,
+ allow_gid_change=True)
+ # allow_uid_change and allow_gid_change passed as True to avoid race
+ # conditions where a uid/gid is modified outside of Salt. If an
+ # unauthorized change was requested, it would have been caught the
+ # first time we ran _changes().
if changes:
ret['comment'] = 'These values could not be changed: {0}'.format(
diff --git a/salt/states/vagrant.py b/salt/states/vagrant.py
index 86e2f13797..6b63616b6a 100644
--- a/salt/states/vagrant.py
+++ b/salt/states/vagrant.py
@@ -319,7 +319,7 @@ def powered_off(name):
def destroyed(name):
'''
- Stops a VM (or VMs) and removes all refences to it (them). (Runs ``vagrant destroy``.)
+ Stops a VM (or VMs) and removes all references to it (them). (Runs ``vagrant destroy``.)
Subsequent re-use of the same machine will requere another operation of ``vagrant.running``
or a call to the ``vagrant.init`` execution module.
diff --git a/salt/states/win_update.py b/salt/states/win_update.py
deleted file mode 100644
index 0d9e9cba41..0000000000
--- a/salt/states/win_update.py
+++ /dev/null
@@ -1,587 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Management of the windows update agent
-======================================
-
-This module is being deprecated and will be removed in Salt Fluorine. Please use
-the ``win_wua`` state module instead.
-
-.. versionadded:: 2014.7.0
-
-Set windows updates to run by category. Default behavior is to install
-all updates that do not require user interaction to complete.
-
-Optionally set ``category`` to a category of your choice to only
-install certain updates. Default is to set to install all available updates.
-
-The following example will install all Security and Critical Updates,
-and download but not install standard updates.
-
-.. code-block:: yaml
-
- updates:
- win_update.installed:
- - categories:
- - 'Critical Updates'
- - 'Security Updates'
- - skips:
- - downloaded
- win_update.downloaded:
- - categories:
- - 'Updates'
- - skips:
- - downloaded
-
-You can also specify a number of features about the update to have a
-fine grain approach to specific types of updates. These are the following
-features/states of updates available for configuring:
-
-.. code-block:: text
-
- 'UI' - User interaction required, skipped by default
- 'downloaded' - Already downloaded, included by default
- 'present' - Present on computer, skipped by default
- 'installed' - Already installed, skipped by default
- 'reboot' - Reboot required, included by default
- 'hidden' - Skip updates that have been hidden, skipped by default
- 'software' - Software updates, included by default
- 'driver' - driver updates, included by default
-
-The following example installs all driver updates that don't require a reboot:
-.. code-block:: yaml
-
- gryffindor:
- win_update.installed:
- - skips:
- - driver: True
- - software: False
- - reboot: False
-
-To just update your windows machine, add this your sls:
-
-.. code-block:: yaml
-
- updates:
- win_update.installed
-'''
-
-# Import Python libs
-from __future__ import absolute_import, unicode_literals, print_function
-import logging
-
-# Import 3rd-party libs
-# pylint: disable=import-error
-from salt.ext import six
-from salt.ext.six.moves import range # pylint: disable=redefined-builtin
-try:
- import win32com.client
- import pythoncom
- HAS_DEPENDENCIES = True
-except ImportError:
- HAS_DEPENDENCIES = False
-# pylint: enable=import-error
-
-# Import Salt libs
-import salt.utils.platform
-import salt.utils.versions
-
-log = logging.getLogger(__name__)
-
-
-def __virtual__():
- '''
- Only works on Windows systems
- '''
- if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
- return True
- return False
-
-
-def _gather_update_categories(updateCollection):
- '''
- this is a convenience method to gather what categories of updates are available in any update
- collection it is passed. Typically though, the download_collection.
- Some known categories:
- Updates
- Windows 7
- Critical Updates
- Security Updates
- Update Rollups
- '''
- categories = []
- for i in range(updateCollection.Count):
- update = updateCollection.Item(i)
- for j in range(update.Categories.Count):
- name = update.Categories.Item(j).Name
- if name not in categories:
- log.debug('found category: {0}'.format(name))
- categories.append(name)
- return categories
-
-
-class PyWinUpdater(object):
- def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
- skipInstalled=True, skipReboot=False, skipPresent=False,
- skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
- log.debug('CoInitializing the pycom system')
- pythoncom.CoInitialize()
-
- # pylint: disable=invalid-name
- self.skipUI = skipUI
- self.skipDownloaded = skipDownloaded
- self.skipInstalled = skipInstalled
- self.skipReboot = skipReboot
- self.skipPresent = skipPresent
- self.skipHidden = skipHidden
-
- self.skipSoftwareUpdates = skipSoftwareUpdates
- self.skipDriverUpdates = skipDriverUpdates
- self.categories = categories
- self.foundCategories = None
- # pylint: enable=invalid-name
-
- log.debug('dispatching update_session to keep the session object.')
- self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
-
- log.debug('update_session got. Now creating a win_searcher to seek out the updates')
- self.win_searcher = self.update_session.CreateUpdateSearcher()
-
- # list of updates that are applicable by current settings.
- self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
-
- # list of updates to be installed.
- self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
-
- # the object responsible for fetching the actual downloads.
- self.win_downloader = self.update_session.CreateUpdateDownloader()
- self.win_downloader.Updates = self.download_collection
-
- # the object responsible for the installing of the updates.
- self.win_installer = self.update_session.CreateUpdateInstaller()
- self.win_installer.Updates = self.install_collection
-
- # the results of the download process
- self.download_results = None
-
- # the results of the installation process
- self.install_results = None
-
- def Search(self, searchString):
- try:
- log.debug('beginning search of the passed string: %s',
- searchString)
- self.search_results = self.win_searcher.Search(searchString)
- log.debug('search completed successfully.')
- except Exception as exc:
- log.info('search for updates failed. %s', exc)
- return exc
-
- log.debug('parsing results. %s updates were found.',
- self.search_results.Updates.Count)
- try:
- for update in self.search_results.Updates:
- if update.InstallationBehavior.CanRequestUserInput:
- log.debug('Skipped update %s', update.title)
- continue
- for category in update.Categories:
- if self.skipDownloaded and update.IsDownloaded:
- continue
- if self.categories is None or category.Name in self.categories:
- self.download_collection.Add(update)
- log.debug('added update %s', update.title)
- self.foundCategories = _gather_update_categories(self.download_collection)
- return True
- except Exception as exc:
- log.info('parsing updates failed. %s', exc)
- return exc
-
- def AutoSearch(self):
- search_string = ''
- searchParams = []
- if self.skipInstalled:
- searchParams.append('IsInstalled=0')
- else:
- searchParams.append('IsInstalled=1')
-
- if self.skipHidden:
- searchParams.append('IsHidden=0')
- else:
- searchParams.append('IsHidden=1')
-
- if self.skipReboot:
- searchParams.append('RebootRequired=0')
- else:
- searchParams.append('RebootRequired=1')
-
- if self.skipPresent:
- searchParams.append('IsPresent=0')
- else:
- searchParams.append('IsPresent=1')
-
- if len(searchParams) > 1:
- for i in searchParams:
- search_string += '{0} and '.format(i)
- else:
- search_string += '{0} and '.format(searchParams[1])
-
- if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
- search_string += 'Type=\'Software\' or Type=\'Driver\''
- elif not self.skipSoftwareUpdates:
- search_string += 'Type=\'Software\''
- elif not self.skipDriverUpdates:
- search_string += 'Type=\'Driver\''
- else:
- return False
- # if there is no type, the is nothing to search.
- log.debug('generated search string: %s', search_string)
- return self.Search(search_string)
-
- def Download(self):
- try:
- if self.download_collection.Count != 0:
- self.download_results = self.win_downloader.Download()
- else:
- log.debug('Skipped downloading, all updates were already cached.')
- return True
- except Exception as exc:
- log.debug('failed in the downloading %s.', exc)
- return exc
-
- def Install(self):
- try:
- for update in self.search_results.Updates:
- if update.IsDownloaded:
- self.install_collection.Add(update)
- log.debug('Updates prepared. beginning installation')
- except Exception as exc:
- log.info('Preparing install list failed: %s', exc)
- return exc
-
- # accept eula if not accepted
- try:
- for update in self.search_results.Updates:
- if not update.EulaAccepted:
- log.debug('Accepting EULA: %s', update.Title)
- update.AcceptEula()
- except Exception as exc:
- log.info('Accepting Eula failed: %s', exc)
- return exc
-
- if self.install_collection.Count != 0:
- log.debug('Install list created, about to install')
- updates = []
- try:
- self.install_results = self.win_installer.Install()
- log.info('Installation of updates complete')
- return True
- except Exception as exc:
- log.info('Installation failed: %s', exc)
- return exc
- else:
- log.info('no new updates.')
- return True
-
- def GetInstallationResults(self):
- log.debug('bluger has %s updates in it', self.install_collection.Count)
- updates = []
- if self.install_collection.Count == 0:
- return {}
- for i in range(self.install_collection.Count):
- updates.append('{0}: {1}'.format(
- self.install_results.GetUpdateResult(i).ResultCode,
- self.install_collection.Item(i).Title))
-
- log.debug('Update results enumerated, now making a list to pass back')
- results = {}
- for i, update in enumerate(updates):
- results['update {0}'.format(i)] = update
-
- log.debug('Update information complied. returning')
- return results
-
- def GetDownloadResults(self):
- updates = []
- for i in range(self.download_collection.Count):
- updates.append('{0}: {1}'.format(
- self.download_results.GetUpdateResult(i).ResultCode,
- self.download_collection.Item(i).Title))
- results = {}
- for i, update in enumerate(updates):
- results['update {0}'.format(i)] = update
- return results
-
- def SetCategories(self, categories):
- self.categories = categories
-
- def GetCategories(self):
- return self.categories
-
- def GetAvailableCategories(self):
- return self.foundCategories
-
- def SetSkips(self, skips):
- if skips:
- for i in skips:
- value = i[next(six.iterkeys(i))]
- skip = next(six.iterkeys(i))
- self.SetSkip(skip, value)
- log.debug('was asked to set %s to %s', skip, value)
-
- def SetSkip(self, skip, state):
- if skip == 'UI':
- self.skipUI = state
- elif skip == 'downloaded':
- self.skipDownloaded = state
- elif skip == 'installed':
- self.skipInstalled = state
- elif skip == 'reboot':
- self.skipReboot = state
- elif skip == 'present':
- self.skipPresent = state
- elif skip == 'hidden':
- self.skipHidden = state
- elif skip == 'software':
- self.skipSoftwareUpdates = state
- elif skip == 'driver':
- self.skipDriverUpdates = state
- log.debug('new search state: \n\tUI: %s\n\tDownload: %s\n'
- '\tInstalled: %s\n\treboot :%s\n\tPresent: %s\n'
- '\thidden: %s\n\tsoftware: %s\n\tdriver: %s',
- self.skipUI, self.skipDownloaded, self.skipInstalled,
- self.skipReboot, self.skipPresent, self.skipHidden,
- self.skipSoftwareUpdates, self.skipDriverUpdates)
-
-
-def _search(win_updater, retries=5):
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('Searching. tries left: %s', retries)
- passed = win_updater.AutoSearch()
- log.debug('Done searching: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(retries)
- passed = False
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, True, retries)
- passed = False
- if clean:
- comment += 'Search was done without error.\n'
- return (comment, True, retries)
-
-
-def _download(win_updater, retries=5):
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('Downloading. tries left: %s', retries)
- passed = win_updater.Download()
- log.debug('Done downloading: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(passed)
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(retries)
- passed = False
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, False, retries)
- if clean:
- comment += 'Download was done without error.\n'
- return (comment, True, retries)
-
-
-def _install(win_updater, retries=5):
- passed = False
- clean = True
- comment = ''
- while not passed:
- log.debug('download_collection is this long: %s',
- win_updater.install_collection.Count)
- log.debug('Installing. tries left: %s', retries)
- passed = win_updater.Install()
- log.info('Done installing: %s', passed)
- if isinstance(passed, Exception):
- clean = False
- comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(passed)
- retries -= 1
- if retries:
- comment += '{0} tries to go. retrying\n'.format(retries)
- passed = False
- else:
- comment += 'out of retries. this update round failed.\n'
- return (comment, False, retries)
- if clean:
- comment += 'Install was done without error.\n'
- return (comment, True, retries)
-
-
-def installed(name, categories=None, skips=None, retries=10):
- '''
- Install specified windows updates.
-
- name:
- if categories is left empty, it will be assumed that you are passing the category option
- through the name. These are separate because you can only have one name, but can have
- multiple categories.
-
- categories:
- the list of categories to be downloaded. These are simply strings in the update's
- information, so there is no enumeration of the categories available. Some known categories:
-
- .. code-block:: text
-
- Updates
- Windows 7
- Critical Updates
- Security Updates
- Update Rollups
-
- skips:
- a list of features of the updates to cull by. Available features:
-
- .. code-block:: text
-
- 'UI' - User interaction required, skipped by default
- 'downloaded' - Already downloaded, skipped by default (downloading)
- 'present' - Present on computer, included by default (installing)
- 'installed' - Already installed, skipped by default
- 'reboot' - Reboot required, included by default
- 'hidden' - skip those updates that have been hidden.
- 'software' - Software updates, included by default
- 'driver' - driver updates, skipped by default
-
- retries
- Number of retries to make before giving up. This is total, not per
- step.
- '''
-
- ret = {'name': name,
- 'result': True,
- 'changes': {},
- 'comment': ''}
- deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \
- 'removed in Salt Fluorine. Please use the \'win_wua\' ' \
- 'module instead.'
- salt.utils.versions.warn_until('Fluorine', deprecation_msg)
- ret.setdefault('warnings', []).append(deprecation_msg)
- if not categories:
- categories = [name]
- log.debug('categories to search for are: %s', categories)
- win_updater = PyWinUpdater()
- win_updater.SetCategories(categories)
- win_updater.SetSkips(skips)
-
- # this is where we be seeking the things! yar!
- comment, passed, retries = _search(win_updater, retries)
- ret['comment'] += comment
- if not passed:
- ret['result'] = False
- return ret
-
- # this is where we get all the things! i.e. download updates.
- comment, passed, retries = _download(win_updater, retries)
- ret['comment'] += comment
- if not passed:
- ret['result'] = False
- return ret
-
- # this is where we put things in their place!
- comment, passed, retries = _install(win_updater, retries)
- ret['comment'] += comment
- if not passed:
- ret['result'] = False
- return ret
-
- try:
- ret['changes'] = win_updater.GetInstallationResults()
- except Exception:
- ret['comment'] += 'could not get results, but updates were installed.'
- return ret
-
-
-def downloaded(name, categories=None, skips=None, retries=10):
- '''
- Cache updates for later install.
-
- name:
- if categories is left empty, it will be assumed that you are passing the category option
- through the name. These are separate because you can only have one name, but can have
- multiple categories.
-
- categories:
- the list of categories to be downloaded. These are simply strings in the update's
- information, so there is no enumeration of the categories available. Some known categories:
-
- .. code-block:: text
-
- Updates
- Windows 7
- Critical Updates
- Security Updates
- Update Rollups
-
- skips:
- a list of features of the updates to cull by. Available features:
-
- .. code-block:: text
-
- 'UI' - User interaction required, skipped by default
- 'downloaded' - Already downloaded, skipped by default (downloading)
- 'present' - Present on computer, included by default (installing)
- 'installed' - Already installed, skipped by default
- 'reboot' - Reboot required, included by default
- 'hidden' - skip those updates that have been hidden.
- 'software' - Software updates, included by default
- 'driver' - driver updates, skipped by default
-
- retries
- Number of retries to make before giving up. This is total, not per
- step.
- '''
- ret = {'name': name,
- 'result': True,
- 'changes': {},
- 'comment': ''}
-
- deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \
- 'removed in Salt Fluorine. Please use the \'win_wua\' ' \
- 'module instead.'
- salt.utils.versions.warn_until('Fluorine', deprecation_msg)
- ret.setdefault('warnings', []).append(deprecation_msg)
-
- if not categories:
- categories = [name]
- log.debug('categories to search for are: %s', categories)
- win_updater = PyWinUpdater()
- win_updater.SetCategories(categories)
- win_updater.SetSkips(skips)
-
- # this is where we be seeking the things! yar!
- comment, passed, retries = _search(win_updater, retries)
- ret['comment'] += comment
- if not passed:
- ret['result'] = False
- return ret
-
- # this is where we get all the things! i.e. download updates.
- comment, passed, retries = _download(win_updater, retries)
- ret['comment'] += comment
- if not passed:
- ret['result'] = False
- return ret
-
- try:
- ret['changes'] = win_updater.GetDownloadResults()
- except Exception:
- ret['comment'] += 'could not get results, but updates were downloaded.'
-
- return ret
diff --git a/salt/states/zabbix_host.py b/salt/states/zabbix_host.py
index 65f8303162..ed54b845a1 100644
--- a/salt/states/zabbix_host.py
+++ b/salt/states/zabbix_host.py
@@ -193,7 +193,7 @@ def present(host, groups, interfaces, **kwargs):
host_exists = __salt__['zabbix.host_exists'](host, **connection_args)
if host_exists:
- host = __salt__['zabbix.host_get'](name=host, **connection_args)[0]
+ host = __salt__['zabbix.host_get'](host=host, **connection_args)[0]
hostid = host['hostid']
update_proxy = False
@@ -457,7 +457,7 @@ def assign_templates(host, templates, **kwargs):
ret['comment'] = comment_host_templ_notupdated
return ret
- host_info = __salt__['zabbix.host_get'](name=host, **connection_args)[0]
+ host_info = __salt__['zabbix.host_get'](host=host, **connection_args)[0]
hostid = host_info['hostid']
if not templates:
diff --git a/salt/templates/rh_ip/rh5_eth.jinja b/salt/templates/rh_ip/rh5_eth.jinja
index 5ccc188b2b..f11da8619a 100644
--- a/salt/templates/rh_ip/rh5_eth.jinja
+++ b/salt/templates/rh_ip/rh5_eth.jinja
@@ -1,5 +1,6 @@
DEVICE={{name}}
-{% if addr %}HWADDR={{addr}}
+{%endif%}{% if hwaddr %}HWADDR="{{hwaddr}}"
+{%endif%}{% if macaddr %}MACADDR="{{macaddr}}"
{%endif%}{% if userctl %}USERCTL={{userctl}}
{%endif%}{% if master %}MASTER={{master}}
{%endif%}{% if slave %}SLAVE={{slave}}
diff --git a/salt/templates/rh_ip/rh6_eth.jinja b/salt/templates/rh_ip/rh6_eth.jinja
index df1eccf375..3234190c43 100644
--- a/salt/templates/rh_ip/rh6_eth.jinja
+++ b/salt/templates/rh_ip/rh6_eth.jinja
@@ -1,5 +1,6 @@
{% if "range" not in name %}DEVICE="{{name}}"
-{%endif%}{% if addr %}HWADDR="{{addr}}"
+{%endif%}{% if hwaddr %}HWADDR="{{hwaddr}}"
+{%endif%}{% if macaddr %}MACADDR="{{macaddr}}"
{%endif%}{% if userctl %}USERCTL="{{userctl}}"
{%endif%}{% if master %}MASTER="{{master}}"
{%endif%}{% if slave %}SLAVE="{{slave}}"
diff --git a/salt/templates/rh_ip/rh7_eth.jinja b/salt/templates/rh_ip/rh7_eth.jinja
index 5fec4fd7b0..c05546b647 100644
--- a/salt/templates/rh_ip/rh7_eth.jinja
+++ b/salt/templates/rh_ip/rh7_eth.jinja
@@ -1,6 +1,7 @@
DEVICE="{{name}}"
{% if nickname %}NAME="{{nickname}}"
-{%endif%}{% if addr %}HWADDR="{{addr}}"
+{%endif%}{% if hwaddr %}HWADDR="{{hwaddr}}"
+{%endif%}{% if macaddr %}MACADDR="{{macaddr}}"
{%endif%}{% if uuid %}UUID="{{uuid}}"
{%endif%}{% if userctl %}USERCTL="{{userctl}}"
{%endif%}{% if master %}MASTER="{{master}}"
diff --git a/salt/thorium/check.py b/salt/thorium/check.py
index ea4e99c12d..41b47d9397 100644
--- a/salt/thorium/check.py
+++ b/salt/thorium/check.py
@@ -373,7 +373,7 @@ def len_gte(name, value):
def len_lt(name, value):
'''
- Only succeed if the lenght of the given register location is less than
+ Only succeed if the length of the given register location is less than
the given value.
USAGE:
diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py
index 334ed0a3ad..5a2529f789 100644
--- a/salt/transport/tcp.py
+++ b/salt/transport/tcp.py
@@ -21,6 +21,7 @@ import errno
import salt.crypt
import salt.utils.async
import salt.utils.event
+import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
@@ -144,8 +145,8 @@ if USE_LOAD_BALANCER:
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
- def __init__(self, opts, socket_queue, log_queue=None):
- super(LoadBalancerServer, self).__init__(log_queue=log_queue)
+ def __init__(self, opts, socket_queue, **kwargs):
+ super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
@@ -159,13 +160,17 @@ if USE_LOAD_BALANCER:
self.__init__(
state['opts'],
state['socket_queue'],
- log_queue=state['log_queue']
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
)
def __getstate__(self):
- return {'opts': self.opts,
- 'socket_queue': self.socket_queue,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'socket_queue': self.socket_queue,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def close(self):
if self._socket is not None:
@@ -1347,14 +1352,18 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
- def _publish_daemon(self, log_queue=None):
+ def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
+ log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
+ log_queue_level = kwargs.get('log_queue_level')
+ if log_queue_level is not None:
+ salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
@@ -1386,11 +1395,8 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
pull_sock.start()
- finally:
- os.umask(old_umask)
# run forever
try:
@@ -1409,6 +1415,9 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue()
)
+ kwargs['log_queue_level'] = (
+ salt.log.setup.get_multiprocessing_logging_level()
+ )
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py
index ad3dbc27e9..89c0292bde 100644
--- a/salt/transport/zeromq.py
+++ b/salt/transport/zeromq.py
@@ -19,6 +19,7 @@ from random import randint
import salt.auth
import salt.crypt
import salt.utils.event
+import salt.utils.files
import salt.utils.minions
import salt.utils.process
import salt.utils.stringutils
@@ -806,11 +807,8 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
- finally:
- os.umask(old_umask)
try:
while True:
diff --git a/salt/utils/args.py b/salt/utils/args.py
index f2c9a10d51..91e71193f8 100644
--- a/salt/utils/args.py
+++ b/salt/utils/args.py
@@ -142,7 +142,8 @@ def yamlify_arg(arg):
return arg
if arg.strip() == '':
- # Because YAML loads empty strings as None, we return the original string
+ # Because YAML loads empty (or all whitespace) strings as None, we
+ # return the original string
# >>> import yaml
# >>> yaml.load('') is None
# True
@@ -151,6 +152,9 @@ def yamlify_arg(arg):
return arg
elif '_' in arg and all([x in '0123456789_' for x in arg.strip()]):
+ # When the stripped string contains only digits and underscores, YAML
+ # ignores the underscores and parses the remaining digits as a single
+ # int. We don't want that, so return the original value.
return arg
try:
@@ -177,6 +181,14 @@ def yamlify_arg(arg):
else:
return arg
+ elif isinstance(arg, list):
+ # lists must be wrapped in brackets
+ if (isinstance(original_arg, six.string_types) and
+ not original_arg.startswith('[')):
+ return original_arg
+ else:
+ return arg
+
elif arg is None \
or isinstance(arg, (list, float, six.integer_types, six.string_types)):
# yaml.safe_load will load '|' as '', don't let it do that.
diff --git a/salt/utils/aws.py b/salt/utils/aws.py
index 059450e7ca..a605647349 100644
--- a/salt/utils/aws.py
+++ b/salt/utils/aws.py
@@ -89,9 +89,7 @@ def creds(provider):
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
result.raise_for_status()
- role = result.text.encode(
- result.encoding if result.encoding else 'utf-8'
- )
+ role = result.text
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider['id'], provider['key'], ''
diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py
index 7337a58364..de0e147960 100644
--- a/salt/utils/cloud.py
+++ b/salt/utils/cloud.py
@@ -2263,7 +2263,7 @@ def check_auth(name, sock_dir=None, queue=None, timeout=300):
ret = event.get_event(full=True)
if ret is None:
continue
- if ret['tag'] == 'minion_start' and ret['data']['id'] == name:
+ if ret['tag'] == 'salt/minion/{0}/start'.format(name):
queue.put(name)
newtimeout = 0
log.debug('Minion %s is ready to receive commands', name)
diff --git a/salt/utils/docker/__init__.py b/salt/utils/docker/__init__.py
index c665f0d7fc..268992389d 100644
--- a/salt/utils/docker/__init__.py
+++ b/salt/utils/docker/__init__.py
@@ -8,8 +8,8 @@ input as formatted by states.
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
+import copy
import logging
-import os
# Import Salt libs
import salt.utils.args
@@ -183,7 +183,7 @@ def translate_input(translator,
of that tuple will have their translation skipped. Optionally,
skip_translate can be set to True to skip *all* translation.
'''
- kwargs = salt.utils.args.clean_kwargs(**kwargs)
+ kwargs = copy.deepcopy(salt.utils.args.clean_kwargs(**kwargs))
invalid = {}
collisions = []
diff --git a/salt/utils/event.py b/salt/utils/event.py
index 4681bccb04..fced9229b6 100644
--- a/salt/utils/event.py
+++ b/salt/utils/event.py
@@ -75,6 +75,7 @@ import salt.payload
import salt.utils.async
import salt.utils.cache
import salt.utils.dicttrim
+import salt.utils.files
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
@@ -1018,12 +1019,9 @@ class AsyncEventPublisher(object):
)
log.info('Starting pull socket on {0}'.format(epull_uri))
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
- finally:
- os.umask(old_umask)
def handle_publish(self, package, _):
'''
@@ -1056,8 +1054,8 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
The interface that takes master events and republishes them out to anyone
who wants to listen
'''
- def __init__(self, opts, log_queue=None):
- super(EventPublisher, self).__init__(log_queue=log_queue)
+ def __init__(self, opts, **kwargs):
+ super(EventPublisher, self).__init__(**kwargs)
self.opts = salt.config.DEFAULT_MASTER_OPTS.copy()
self.opts.update(opts)
self._closing = False
@@ -1067,11 +1065,18 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['opts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def run(self):
'''
@@ -1106,8 +1111,7 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
)
# Start the master event publisher
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
if (self.opts['ipc_mode'] != 'tcp' and (
@@ -1115,8 +1119,6 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
self.opts['external_auth'])):
os.chmod(os.path.join(
self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
- finally:
- os.umask(old_umask)
# Make sure the IO loop and respective sockets are closed and
# destroyed
@@ -1171,13 +1173,13 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
instance = super(EventReturn, cls).__new__(cls, *args, **kwargs)
return instance
- def __init__(self, opts, log_queue=None):
+ def __init__(self, opts, **kwargs):
'''
Initialize the EventReturn system
Return an EventReturn instance
'''
- super(EventReturn, self).__init__(log_queue=log_queue)
+ super(EventReturn, self).__init__(**kwargs)
self.opts = opts
self.event_return_queue = self.opts['event_return_queue']
@@ -1192,11 +1194,18 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['opts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def _handle_signals(self, signum, sigframe):
# Flush and terminate
diff --git a/salt/utils/extmods.py b/salt/utils/extmods.py
index 7114d190d9..18a794fab1 100644
--- a/salt/utils/extmods.py
+++ b/salt/utils/extmods.py
@@ -11,6 +11,7 @@ import shutil
# Import salt libs
import salt.fileclient
+import salt.utils.files
import salt.utils.hashutils
import salt.utils.path
import salt.utils.url
@@ -71,85 +72,83 @@ def sync(opts,
remote = set()
source = salt.utils.url.create('_' + form)
mod_dir = os.path.join(opts['extension_modules'], '{0}'.format(form))
- cumask = os.umask(0o77)
touched = False
- try:
- if not os.path.isdir(mod_dir):
- log.info('Creating module dir \'%s\'', mod_dir)
- try:
- os.makedirs(mod_dir)
- except (IOError, OSError):
- log.error(
- 'Cannot create cache module directory %s. Check '
- 'permissions.', mod_dir
- )
- fileclient = salt.fileclient.get_file_client(opts)
- for sub_env in saltenv:
- log.info(
- 'Syncing %s for environment \'%s\'', form, sub_env
- )
- cache = []
- log.info(
- 'Loading cache from {0}, for {1})'.format(source, sub_env)
- )
- # Grab only the desired files (.py, .pyx, .so)
- cache.extend(
- fileclient.cache_dir(
- source, sub_env, include_empty=False,
- include_pat=r'E@\.(pyx?|so|zip)$', exclude_pat=None
- )
- )
- local_cache_dir = os.path.join(
- opts['cachedir'],
- 'files',
- sub_env,
- '_{0}'.format(form)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if not os.path.isdir(mod_dir):
+ log.info('Creating module dir \'%s\'', mod_dir)
+ try:
+ os.makedirs(mod_dir)
+ except (IOError, OSError):
+ log.error(
+ 'Cannot create cache module directory %s. Check '
+ 'permissions.', mod_dir
)
- log.debug('Local cache dir: \'%s\'', local_cache_dir)
- for fn_ in cache:
- relpath = os.path.relpath(fn_, local_cache_dir)
- relname = os.path.splitext(relpath)[0].replace(os.sep, '.')
- if extmod_whitelist and form in extmod_whitelist and relname not in extmod_whitelist[form]:
- continue
- if extmod_blacklist and form in extmod_blacklist and relname in extmod_blacklist[form]:
- continue
- remote.add(relpath)
- dest = os.path.join(mod_dir, relpath)
- log.info('Copying \'%s\' to \'%s\'', fn_, dest)
- if os.path.isfile(dest):
- # The file is present, if the sum differs replace it
- hash_type = opts.get('hash_type', 'md5')
- src_digest = salt.utils.hashutils.get_hash(fn_, hash_type)
- dst_digest = salt.utils.hashutils.get_hash(dest, hash_type)
- if src_digest != dst_digest:
- # The downloaded file differs, replace!
+ fileclient = salt.fileclient.get_file_client(opts)
+ for sub_env in saltenv:
+ log.info(
+ 'Syncing %s for environment \'%s\'', form, sub_env
+ )
+ cache = []
+ log.info(
+ 'Loading cache from {0}, for {1})'.format(source, sub_env)
+ )
+ # Grab only the desired files (.py, .pyx, .so)
+ cache.extend(
+ fileclient.cache_dir(
+ source, sub_env, include_empty=False,
+ include_pat=r'E@\.(pyx?|so|zip)$', exclude_pat=None
+ )
+ )
+ local_cache_dir = os.path.join(
+ opts['cachedir'],
+ 'files',
+ sub_env,
+ '_{0}'.format(form)
+ )
+ log.debug('Local cache dir: \'%s\'', local_cache_dir)
+ for fn_ in cache:
+ relpath = os.path.relpath(fn_, local_cache_dir)
+ relname = os.path.splitext(relpath)[0].replace(os.sep, '.')
+ if extmod_whitelist and form in extmod_whitelist and relname not in extmod_whitelist[form]:
+ continue
+ if extmod_blacklist and form in extmod_blacklist and relname in extmod_blacklist[form]:
+ continue
+ remote.add(relpath)
+ dest = os.path.join(mod_dir, relpath)
+ log.info('Copying \'%s\' to \'%s\'', fn_, dest)
+ if os.path.isfile(dest):
+ # The file is present, if the sum differs replace it
+ hash_type = opts.get('hash_type', 'md5')
+ src_digest = salt.utils.hashutils.get_hash(fn_, hash_type)
+ dst_digest = salt.utils.hashutils.get_hash(dest, hash_type)
+ if src_digest != dst_digest:
+ # The downloaded file differs, replace!
+ shutil.copyfile(fn_, dest)
+ ret.append('{0}.{1}'.format(form, relname))
+ else:
+ dest_dir = os.path.dirname(dest)
+ if not os.path.isdir(dest_dir):
+ os.makedirs(dest_dir)
shutil.copyfile(fn_, dest)
ret.append('{0}.{1}'.format(form, relname))
- else:
- dest_dir = os.path.dirname(dest)
- if not os.path.isdir(dest_dir):
- os.makedirs(dest_dir)
- shutil.copyfile(fn_, dest)
- ret.append('{0}.{1}'.format(form, relname))
- touched = bool(ret)
- if opts['clean_dynamic_modules'] is True:
- current = set(_listdir_recursively(mod_dir))
- for fn_ in current - remote:
- full = os.path.join(mod_dir, fn_)
- if os.path.isfile(full):
- touched = True
- os.remove(full)
- # Cleanup empty dirs
- while True:
- emptydirs = _list_emptydirs(mod_dir)
- if not emptydirs:
- break
- for emptydir in emptydirs:
- touched = True
- shutil.rmtree(emptydir, ignore_errors=True)
- except Exception as exc:
- log.error('Failed to sync %s module: %s', form, exc)
- finally:
- os.umask(cumask)
+ touched = bool(ret)
+ if opts['clean_dynamic_modules'] is True:
+ current = set(_listdir_recursively(mod_dir))
+ for fn_ in current - remote:
+ full = os.path.join(mod_dir, fn_)
+ if os.path.isfile(full):
+ touched = True
+ os.remove(full)
+ # Cleanup empty dirs
+ while True:
+ emptydirs = _list_emptydirs(mod_dir)
+ if not emptydirs:
+ break
+ for emptydir in emptydirs:
+ touched = True
+ shutil.rmtree(emptydir, ignore_errors=True)
+ except Exception as exc:
+ log.error('Failed to sync %s module: %s', form, exc)
return ret, touched
diff --git a/salt/utils/files.py b/salt/utils/files.py
index 397b2206fb..eddb53b410 100644
--- a/salt/utils/files.py
+++ b/salt/utils/files.py
@@ -299,20 +299,29 @@ def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
log.trace('Write lock for %s (%s) released', path, lock_fn)
+def get_umask():
+ '''
+ Returns the current umask
+ '''
+ ret = os.umask(0) # pylint: disable=blacklisted-function
+ os.umask(ret) # pylint: disable=blacklisted-function
+ return ret
+
+
@contextlib.contextmanager
def set_umask(mask):
'''
Temporarily set the umask and restore once the contextmanager exits
'''
- if salt.utils.platform.is_windows():
- # Don't attempt on Windows
+ if mask is None or salt.utils.platform.is_windows():
+ # Don't attempt on Windows, or if no mask was passed
yield
else:
try:
- orig_mask = os.umask(mask)
+ orig_mask = os.umask(mask) # pylint: disable=blacklisted-function
yield
finally:
- os.umask(orig_mask)
+ os.umask(orig_mask) # pylint: disable=blacklisted-function
def fopen(*args, **kwargs):
diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
index 275fad4c83..526b9aa8fb 100644
--- a/salt/utils/gitfs.py
+++ b/salt/utils/gitfs.py
@@ -55,6 +55,9 @@ VALID_REF_TYPES = _DEFAULT_MASTER_OPTS['gitfs_ref_types']
# Optional per-remote params that can only be used on a per-remote basis, and
# thus do not have defaults in salt/config.py.
PER_REMOTE_ONLY = ('name',)
+# Params which are global only and cannot be overridden for a single remote.
+GLOBAL_ONLY = ()
+
SYMLINK_RECURSE_DEPTH = 100
# Auth support (auth params can be global or per-remote, too)
@@ -357,7 +360,7 @@ class GitProvider(object):
salt.utils.url.strip_proto(saltenv_ptr['mountpoint'])
for key, val in six.iteritems(self.conf):
- if key not in PER_SALTENV_PARAMS:
+ if key not in PER_SALTENV_PARAMS and not hasattr(self, key):
setattr(self, key, val)
for key in PER_SALTENV_PARAMS:
@@ -489,7 +492,7 @@ class GitProvider(object):
@classmethod
def add_conf_overlay(cls, name):
'''
- Programatically determine config value based on the desired saltenv
+ Programmatically determine config value based on the desired saltenv
'''
def _getconf(self, tgt_env='base'):
strip_sep = lambda x: x.rstrip(os.sep) \
@@ -973,13 +976,13 @@ class GitProvider(object):
'''
Resolve dynamically-set branch
'''
- if self.branch == '__env__':
+ if self.role == 'git_pillar' and self.branch == '__env__':
target = self.opts.get('pillarenv') \
or self.opts.get('saltenv') \
or 'base'
- return self.opts['{0}_base'.format(self.role)] \
+ return self.base \
if target == 'base' \
- else target
+ else six.text_type(target)
return self.branch
def get_tree(self, tgt_env):
@@ -1021,7 +1024,7 @@ class GitProvider(object):
try:
self.branch, self.url = self.id.split(None, 1)
except ValueError:
- self.branch = self.opts['{0}_branch'.format(self.role)]
+ self.branch = self.conf['branch']
self.url = self.id
else:
self.url = self.id
@@ -2026,8 +2029,8 @@ class GitBase(object):
Base class for gitfs/git_pillar
'''
def __init__(self, opts, remotes=None, per_remote_overrides=(),
- per_remote_only=PER_REMOTE_ONLY, git_providers=None,
- cache_root=None, init_remotes=True):
+ per_remote_only=PER_REMOTE_ONLY, global_only=GLOBAL_ONLY,
+ git_providers=None, cache_root=None, init_remotes=True):
'''
IMPORTANT: If specifying a cache_root, understand that this is also
where the remotes will be cloned. A non-default cache_root is only
@@ -2085,10 +2088,12 @@ class GitBase(object):
self.init_remotes(
remotes if remotes is not None else [],
per_remote_overrides,
- per_remote_only)
+ per_remote_only,
+ global_only)
def init_remotes(self, remotes, per_remote_overrides=(),
- per_remote_only=PER_REMOTE_ONLY):
+ per_remote_only=PER_REMOTE_ONLY,
+ global_only=GLOBAL_ONLY):
'''
Initialize remotes
'''
@@ -2121,7 +2126,9 @@ class GitBase(object):
failhard(self.role)
per_remote_defaults = {}
- for param in override_params:
+ global_values = set(override_params)
+ global_values.update(set(global_only))
+ for param in global_values:
key = '{0}_{1}'.format(self.role, param)
if key not in self.opts:
log.critical(
@@ -2967,8 +2974,7 @@ class GitPillar(GitBase):
if repo.env:
env = repo.env
else:
- base_branch = self.opts['{0}_base'.format(self.role)]
- env = 'base' if repo.branch == base_branch else repo.branch
+ env = 'base' if repo.branch == repo.base else repo.branch
if repo._mountpoint:
if self.link_mountpoint(repo):
self.pillar_dirs[repo.linkdir] = env
@@ -3095,6 +3101,9 @@ class WinRepo(GitBase):
Functionality specific to the winrepo runner
'''
role = 'winrepo'
+ # Need to define this in case we try to reference it before checking
+ # out the repos.
+ winrepo_dirs = {}
def checkout(self):
'''
diff --git a/salt/utils/master.py b/salt/utils/master.py
index 9fd3cd369d..c78eafdbf4 100644
--- a/salt/utils/master.py
+++ b/salt/utils/master.py
@@ -438,11 +438,11 @@ class CacheWorker(MultiprocessingProcess):
main-loop when refreshing minion-list
'''
- def __init__(self, opts, log_queue=None):
+ def __init__(self, opts, **kwargs):
'''
Sets up the zmq-connection to the ConCache
'''
- super(CacheWorker, self).__init__(log_queue=log_queue)
+ super(CacheWorker, self).__init__(**kwargs)
self.opts = opts
# __setstate__ and __getstate__ are only used on Windows.
@@ -450,11 +450,18 @@ class CacheWorker(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['opts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def run(self):
'''
@@ -475,11 +482,11 @@ class ConnectedCache(MultiprocessingProcess):
the master publisher port.
'''
- def __init__(self, opts, log_queue=None):
+ def __init__(self, opts, **kwargs):
'''
starts the timer and inits the cache itself
'''
- super(ConnectedCache, self).__init__(log_queue=log_queue)
+ super(ConnectedCache, self).__init__(**kwargs)
log.debug('ConCache initializing...')
# the possible settings for the cache
@@ -506,11 +513,18 @@ class ConnectedCache(MultiprocessingProcess):
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
- self.__init__(state['opts'], log_queue=state['log_queue'])
+ self.__init__(
+ state['opts'],
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def signal_handler(self, sig, frame):
'''
diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py
index b551c63e09..9fafd124f7 100644
--- a/salt/utils/napalm.py
+++ b/salt/utils/napalm.py
@@ -259,7 +259,7 @@ def get_device_opts(opts, salt_obj=None):
if opts.get('proxy') or opts.get('napalm'):
opts['multiprocessing'] = device_dict.get('multiprocessing', False)
# Most NAPALM drivers are SSH-based, so multiprocessing should default to False.
- # But the user can be allows to have a different value for the multiprocessing, which will
+ # But the user is allowed to have a different value for the multiprocessing, which will
# override the opts.
if salt_obj and not device_dict:
# get the connection details from the opts
diff --git a/salt/utils/network.py b/salt/utils/network.py
index 171e5b99ff..16ea009377 100644
--- a/salt/utils/network.py
+++ b/salt/utils/network.py
@@ -168,7 +168,11 @@ def generate_minion_id():
:return:
'''
- return _generate_minion_id().first() or 'localhost'
+ try:
+ ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first())
+ except TypeError:
+ ret = None
+ return ret or 'localhost'
def get_socket(addr, type=socket.SOCK_STREAM, proto=0):
@@ -1751,42 +1755,35 @@ def refresh_dns():
def dns_check(addr, port, safe=False, ipv6=None):
'''
Return the ip resolved by dns, but do not exit on failure, only raise an
- exception. Obeys system preference for IPv4/6 address resolution.
+ exception. Obeys system preference for IPv4/6 address resolution - this
+ can be overridden by the ipv6 flag.
Tries to connect to the address before considering it useful. If no address
can be reached, the first one resolved is used as a fallback.
'''
error = False
lookup = addr
seen_ipv6 = False
+ family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC
try:
refresh_dns()
- hostnames = socket.getaddrinfo(
- addr, None, socket.AF_UNSPEC, socket.SOCK_STREAM
- )
+ hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM)
if not hostnames:
error = True
else:
resolved = False
candidates = []
for h in hostnames:
- # It's an IP address, just return it
+ # Input is IP address, passed through unchanged, just return it
if h[4][0] == addr:
resolved = salt.utils.zeromq.ip_bracket(addr)
break
- if h[0] == socket.AF_INET and ipv6 is True:
- continue
- if h[0] == socket.AF_INET6 and ipv6 is False:
- continue
-
candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0])
-
- if h[0] != socket.AF_INET6 or ipv6 is not None:
- candidates.append(candidate_addr)
+ candidates.append(candidate_addr)
try:
s = socket.socket(h[0], socket.SOCK_STREAM)
- s.connect((candidate_addr.strip('[]'), port))
+ s.connect((candidate_addr.strip('[]'), h[1]))
s.close()
resolved = candidate_addr
diff --git a/salt/utils/openstack/neutron.py b/salt/utils/openstack/neutron.py
index 2a7a65f137..0af2de3140 100644
--- a/salt/utils/openstack/neutron.py
+++ b/salt/utils/openstack/neutron.py
@@ -81,6 +81,13 @@ class SaltNeutron(NeutronShell):
'''
Set up neutron credentials
'''
+ __utils__['versions.warn_until'](
+ 'Neon',
+ (
+ 'The neutron module has been deprecated and will be removed in {version}. '
+ 'Please update to using the neutronng module'
+ ),
+ )
if not HAS_NEUTRON:
return None
diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
index 88ce272fa9..b99e7597a5 100644
--- a/salt/utils/parsers.py
+++ b/salt/utils/parsers.py
@@ -715,9 +715,8 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# verify the default
if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
# Logfile is not using Syslog, verify
- current_umask = os.umask(0o027)
- verify_files([logfile], self.config['user'])
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o027):
+ verify_files([logfile], self.config['user'])
if logfile is None:
# Use the default setting if the logfile wasn't explicity set
@@ -862,21 +861,30 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
)
def _setup_mp_logging_client(self, *args): # pylint: disable=unused-argument
- if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
- # On Windows, all logging including console and
- # log file logging will go through the multiprocessing
- # logging listener if it exists.
- # This will allow log file rotation on Windows
- # since only one process can own the log file
- # for log file rotation to work.
- log.setup_multiprocessing_logging(
- self._get_mp_logging_listener_queue()
- )
- # Remove the temp logger and any other configured loggers since all of
- # our logging is going through the multiprocessing logging listener.
- log.shutdown_temp_logging()
- log.shutdown_console_logging()
- log.shutdown_logfile_logging()
+ if self._setup_mp_logging_listener_:
+ # Set multiprocessing logging level even in non-Windows
+ # environments. In non-Windows environments, this setting will
+ # propagate from process to process via fork behavior and will be
+ # used by child processes if they invoke the multiprocessing
+ # logging client.
+ log.set_multiprocessing_logging_level_by_opts(self.config)
+
+ if salt.utils.platform.is_windows():
+ # On Windows, all logging including console and
+ # log file logging will go through the multiprocessing
+ # logging listener if it exists.
+ # This will allow log file rotation on Windows
+ # since only one process can own the log file
+ # for log file rotation to work.
+ log.setup_multiprocessing_logging(
+ self._get_mp_logging_listener_queue()
+ )
+ # Remove the temp logger and any other configured loggers since
+ # all of our logging is going through the multiprocessing
+ # logging listener.
+ log.shutdown_temp_logging()
+ log.shutdown_console_logging()
+ log.shutdown_logfile_logging()
def __setup_console_logger_config(self, *args): # pylint: disable=unused-argument
# Since we're not going to be a daemon, setup the console logger
@@ -1411,7 +1419,7 @@ class ExecutionOptionsMixIn(six.with_metaclass(MixInMeta, object)):
nargs=2,
default=None,
metavar=' ',
- help='Perform an function that may be specific to this cloud '
+ help='Perform a function that may be specific to this cloud '
'provider, that does not apply to an instance. This '
'argument requires a provider to be specified (i.e.: nova).'
)
diff --git a/salt/utils/process.py b/salt/utils/process.py
index 6dafaf0441..72d3c6825d 100644
--- a/salt/utils/process.py
+++ b/salt/utils/process.py
@@ -80,7 +80,7 @@ def daemonize(redirect_out=True):
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
- os.umask(18)
+ os.umask(0o022) # pylint: disable=blacklisted-function
# do second fork
try:
@@ -376,20 +376,30 @@ class ProcessManager(object):
kwargs = {}
if salt.utils.platform.is_windows():
- # Need to ensure that 'log_queue' is correctly transferred to
- # processes that inherit from 'MultiprocessingProcess'.
+ # Need to ensure that 'log_queue' and 'log_queue_level' is
+ # correctly transferred to processes that inherit from
+ # 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
- if need_log_queue and 'log_queue' not in kwargs:
- if hasattr(self, 'log_queue'):
- kwargs['log_queue'] = self.log_queue
- else:
- kwargs['log_queue'] = (
- salt.log.setup.get_multiprocessing_logging_queue())
+ if need_log_queue:
+ if 'log_queue' not in kwargs:
+ if hasattr(self, 'log_queue'):
+ kwargs['log_queue'] = self.log_queue
+ else:
+ kwargs['log_queue'] = (
+ salt.log.setup.get_multiprocessing_logging_queue()
+ )
+ if 'log_queue_level' not in kwargs:
+ if hasattr(self, 'log_queue_level'):
+ kwargs['log_queue_level'] = self.log_queue_level
+ else:
+ kwargs['log_queue_level'] = (
+ salt.log.setup.get_multiprocessing_logging_level()
+ )
# create a nicer name for the debug log
if name is None:
@@ -686,8 +696,14 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
+ self.log_queue_level = kwargs.pop('log_queue_level', None)
+ if self.log_queue_level is None:
+ self.log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
+ else:
+ salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
+
# Call __init__ from 'multiprocessing.Process' only after removing
- # 'log_queue' from kwargs.
+ # 'log_queue' and 'log_queue_level' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.platform.is_windows():
@@ -732,6 +748,8 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
+ if 'log_queue_level' not in kwargs:
+ kwargs['log_queue_level'] = self.log_queue_level
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py
index 903283b138..6b6e95cbfb 100644
--- a/salt/utils/reactor.py
+++ b/salt/utils/reactor.py
@@ -50,8 +50,8 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
'cmd': 'local',
}
- def __init__(self, opts, log_queue=None):
- super(Reactor, self).__init__(log_queue=log_queue)
+ def __init__(self, opts, **kwargs):
+ super(Reactor, self).__init__(**kwargs)
local_minion_opts = opts.copy()
local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts)
@@ -66,11 +66,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
self._is_child = True
Reactor.__init__(
self, state['opts'],
- log_queue=state['log_queue'])
+ log_queue=state['log_queue'],
+ log_queue_level=state['log_queue_level']
+ )
def __getstate__(self):
- return {'opts': self.opts,
- 'log_queue': self.log_queue}
+ return {
+ 'opts': self.opts,
+ 'log_queue': self.log_queue,
+ 'log_queue_level': self.log_queue_level
+ }
def render_reaction(self, glob_ref, tag, data):
'''
diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py
index d6eb78225b..4cf2e928f1 100644
--- a/salt/utils/schedule.py
+++ b/salt/utils/schedule.py
@@ -426,23 +426,27 @@ class Schedule(object):
# Grab run, assume True
run = data.get('run', True)
+ run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
if run:
- multiprocessing_enabled = self.opts.get('multiprocessing', True)
- if multiprocessing_enabled:
- thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
- else:
- thread_cls = threading.Thread
+ if run_schedule_jobs_in_background:
+ multiprocessing_enabled = self.opts.get('multiprocessing', True)
+ if multiprocessing_enabled:
+ thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
+ else:
+ thread_cls = threading.Thread
- if multiprocessing_enabled:
- with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
+ if multiprocessing_enabled:
+ with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
+ proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
+ # Reset current signals before starting the process in
+ # order not to inherit the current signal handlers
+ proc.start()
+ proc.join()
+ else:
proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
- # Reset current signals before starting the process in
- # order not to inherit the current signal handlers
proc.start()
- proc.join()
else:
- proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data))
- proc.start()
+ func(data)
def enable_schedule(self):
'''
@@ -907,40 +911,30 @@ class Schedule(object):
'must be a dict. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
- __when = self.opts['pillar']['whens'][i]
- try:
- when__ = dateutil_parser.parse(__when)
- except ValueError:
- data['_error'] = ('Invalid date string. '
- 'Ignoring job {0}.'.format(job))
- log.error(data['_error'])
return data
+ when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
- data['_error'] = ('Grain "whens" must be dict.'
- 'Ignoring job {0}.'.format(job))
- log.error(data['_error'])
- return data
- __when = self.opts['grains']['whens'][i]
- try:
- when__ = dateutil_parser.parse(__when)
- except ValueError:
- data['_error'] = ('Invalid date string. '
+ data['_error'] = ('Grain "whens" must be a dict.'
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
+ when_ = self.opts['grains']['whens'][i]
else:
+ when_ = i
+
+ if not isinstance(when_, datetime.datetime):
try:
- when__ = dateutil_parser.parse(i)
+ when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, job))
log.error(data['_error'])
return data
- _when.append(when__)
+ _when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
@@ -988,32 +982,21 @@ class Schedule(object):
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
- _when = self.opts['pillar']['whens'][data['when']]
- try:
- when = dateutil_parser.parse(_when)
- except ValueError:
- data['_error'] = ('Invalid date string. '
- 'Ignoring job {0}.'.format(job))
- log.error(data['_error'])
- return data
+ when = self.opts['pillar']['whens'][data['when']]
elif ('whens' in self.opts['grains'] and
data['when'] in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'], dict):
- data['_error'] = ('Grain "whens" must be dict. '
- 'Ignoring job {0}.'.format(job))
- log.error(data['_error'])
- return data
- _when = self.opts['grains']['whens'][data['when']]
- try:
- when = dateutil_parser.parse(_when)
- except ValueError:
- data['_error'] = ('Invalid date string. '
+ data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
+ when = self.opts['grains']['whens'][data['when']]
else:
+ when = data['when']
+
+ if not isinstance(when, datetime.datetime):
try:
- when = dateutil_parser.parse(data['when'])
+ when = dateutil_parser.parse(when)
except ValueError:
data['_error'] = ('Invalid date string. '
'Ignoring job {0}.'.format(job))
@@ -1142,22 +1125,26 @@ class Schedule(object):
return data
else:
if isinstance(data['skip_during_range'], dict):
- try:
- start = dateutil_parser.parse(data['skip_during_range']['start'])
- except ValueError:
- data['_error'] = ('Invalid date string for start in '
- 'skip_during_range. Ignoring '
- 'job {0}.'.format(job))
- log.error(data['_error'])
- return data
- try:
- end = dateutil_parser.parse(data['skip_during_range']['end'])
- except ValueError:
- data['_error'] = ('Invalid date string for end in '
- 'skip_during_range. Ignoring '
- 'job {0}.'.format(job))
- log.error(data['_error'])
- return data
+ start = data['skip_during_range']['start']
+ end = data['skip_during_range']['end']
+ if not isinstance(start, datetime.datetime):
+ try:
+ start = dateutil_parser.parse(start)
+ except ValueError:
+ data['_error'] = ('Invalid date string for start in '
+ 'skip_during_range. Ignoring '
+ 'job {0}.'.format(job))
+ log.error(data['_error'])
+ return data
+ if not isinstance(end, datetime.datetime):
+ try:
+ end = dateutil_parser.parse(end)
+ except ValueError:
+ data['_error'] = ('Invalid date string for end in '
+ 'skip_during_range. Ignoring '
+ 'job {0}.'.format(job))
+ log.error(data['_error'])
+ return data
# Check to see if we should run the job immediately
# after the skip_during_range is over
@@ -1192,7 +1179,7 @@ class Schedule(object):
return data
else:
data['_error'] = ('schedule.handle_func: Invalid, range '
- 'must be specified as a dictionary '
+ 'must be specified as a dictionary. '
'Ignoring job {0}.'.format(job))
log.error(data['_error'])
return data
@@ -1209,20 +1196,24 @@ class Schedule(object):
return data
else:
if isinstance(data['range'], dict):
- try:
- start = dateutil_parser.parse(data['range']['start'])
- except ValueError:
- data['_error'] = ('Invalid date string for start. '
- 'Ignoring job {0}.'.format(job))
- log.error(data['_error'])
- return data
- try:
- end = dateutil_parser.parse(data['range']['end'])
- except ValueError:
- data['_error'] = ('Invalid date string for end.'
- ' Ignoring job {0}.'.format(job))
- log.error(data['_error'])
- return data
+ start = data['range']['start']
+ end = data['range']['end']
+ if not isinstance(start, datetime.datetime):
+ try:
+ start = dateutil_parser.parse(start)
+ except ValueError:
+ data['_error'] = ('Invalid date string for start. '
+ 'Ignoring job {0}.'.format(job))
+ log.error(data['_error'])
+ return data
+ if not isinstance(end, datetime.datetime):
+ try:
+ end = dateutil_parser.parse(end)
+ except ValueError:
+ data['_error'] = ('Invalid date string for end.'
+ ' Ignoring job {0}.'.format(job))
+ log.error(data['_error'])
+ return data
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
@@ -1262,7 +1253,9 @@ class Schedule(object):
'Ignoring job {0}'.format(job))
log.error(data['_error'])
else:
- after = dateutil_parser.parse(data['after'])
+ after = data['after']
+ if not isinstance(after, datetime.datetime):
+ after = dateutil_parser.parse(after)
if after >= now:
log.debug(
@@ -1286,7 +1279,9 @@ class Schedule(object):
'Ignoring job {0}'.format(job))
log.error(data['_error'])
else:
- until = dateutil_parser.parse(data['until'])
+ until = data['until']
+ if not isinstance(until, datetime.datetime):
+ until = dateutil_parser.parse(until)
if until <= now:
log.debug(
@@ -1474,7 +1469,7 @@ class Schedule(object):
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
- if 'skip_during_range' not in data:
+ if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
diff --git a/salt/utils/ssdp.py b/salt/utils/ssdp.py
index 73f3f6e2de..1d6cb91d74 100644
--- a/salt/utils/ssdp.py
+++ b/salt/utils/ssdp.py
@@ -193,10 +193,10 @@ class SSDPFactory(SSDPBase):
self.log.debug('Received "%s" from %s:%s', message, *addr)
self._sendto(
- str('{0}:@:{1}').format( # future lint: disable=blacklisted-function
+ salt.utils.stringutils.to_bytes(str('{0}:@:{1}').format( # future lint: disable=blacklisted-function
self.signature,
salt.utils.json.dumps(self.answer, _json_module=_json)
- ),
+ )),
addr
)
else:
diff --git a/salt/utils/user.py b/salt/utils/user.py
index a4f660554d..a6493ae3a1 100644
--- a/salt/utils/user.py
+++ b/salt/utils/user.py
@@ -16,6 +16,7 @@ import sys
# Import Salt libs
import salt.utils.path
import salt.utils.platform
+import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.decorators.jinja import jinja_filter
@@ -55,12 +56,13 @@ def get_user():
Get the current user
'''
if HAS_PWD:
- return pwd.getpwuid(os.geteuid()).pw_name
+ ret = pwd.getpwuid(os.geteuid()).pw_name
elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:
- return salt.utils.win_functions.get_current_user()
+ ret = salt.utils.win_functions.get_current_user()
else:
raise CommandExecutionError(
'Required external library (pwd or win32api) not installed')
+ return salt.utils.stringutils.to_unicode(ret)
@jinja_filter('get_uid')
@@ -252,7 +254,7 @@ def chugid_and_umask(runas, umask, group=None):
if set_runas or set_grp:
chugid(runas_user, runas_grp)
if umask is not None:
- os.umask(umask)
+ os.umask(umask) # pylint: disable=blacklisted-function
def get_default_group(user):
diff --git a/salt/utils/verify.py b/salt/utils/verify.py
index 0eeb95b402..9dd8dd4b84 100644
--- a/salt/utils/verify.py
+++ b/salt/utils/verify.py
@@ -230,12 +230,11 @@ def verify_env(
continue
if not os.path.isdir(dir_):
try:
- cumask = os.umask(18) # 077
- os.makedirs(dir_)
+ with salt.utils.files.set_umask(0o022):
+ os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
- os.umask(cumask)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
diff --git a/tests/integration/files/conf/cloud.providers.d/profitbricks.conf b/tests/integration/files/conf/cloud.providers.d/profitbricks.conf
index 292e2e3230..cb75738d68 100644
--- a/tests/integration/files/conf/cloud.providers.d/profitbricks.conf
+++ b/tests/integration/files/conf/cloud.providers.d/profitbricks.conf
@@ -1,6 +1,6 @@
profitbricks-config:
- username: ''
- password: ''
+ username: 'foo'
+ password: 'bar'
datacenter_id: 74d65326-d9b7-41c3-9f51-73ffe0fcd16d
driver: profitbricks
ssh_public_key: ~/.ssh/id_rsa.pub
diff --git a/tests/integration/files/file/base/issue-46127-pip-env-vars.sls b/tests/integration/files/file/base/issue-46127-pip-env-vars.sls
new file mode 100644
index 0000000000..e9e415eb87
--- /dev/null
+++ b/tests/integration/files/file/base/issue-46127-pip-env-vars.sls
@@ -0,0 +1,38 @@
+{%- set virtualenv_base = salt['runtests_helpers.get_salt_temp_dir_for_path']('virtualenv-12-base-1') -%}
+{%- set virtualenv_test = salt['runtests_helpers.get_salt_temp_dir_for_path']('issue-46127-pip-env-vars') -%}
+
+{{ virtualenv_base }}:
+ virtualenv.managed:
+ - system_site_packages: False
+ - distribute: True
+
+install_older_venv_1:
+ pip.installed:
+ - name: 'virtualenv < 13.0'
+ - bin_env: {{ virtualenv_base }}
+ - require:
+ - virtualenv: {{ virtualenv_base }}
+
+# For this test we need to make sure that the virtualenv used in the
+# 'issue-46127-setup' pip.installed state below was created using
+# virtualenv < 13.0. virtualenvs created using later versions make
+# packages with custom setuptools prefixes relative to the virtualenv
+# itself, which makes the use of env_vars obsolete.
+# Thus, the two states above ensure that the 'base' venv has
+# a version old enough to exhibit the behavior we want to test.
+
+setup_test_virtualenv_1:
+ cmd.run:
+ - name: {{ virtualenv_base }}/bin/virtualenv {{ virtualenv_test }}
+ - onchanges:
+ - pip: install_older_venv_1
+
+issue-46127-setup:
+ pip.installed:
+ - name: 'carbon < 1.3'
+ - no_deps: True
+ - env_vars:
+ PYTHONPATH: "/opt/graphite/lib/:/opt/graphite/webapp/"
+ - bin_env: {{ virtualenv_test }}
+ - require:
+ - cmd: setup_test_virtualenv_1
diff --git a/tests/integration/files/saltclass/examples/classes/roles/app.yml b/tests/integration/files/saltclass/examples/classes/roles/app.yml
index af244e402c..dc8b9864f1 100644
--- a/tests/integration/files/saltclass/examples/classes/roles/app.yml
+++ b/tests/integration/files/saltclass/examples/classes/roles/app.yml
@@ -17,5 +17,5 @@ pillars:
- app-backend
# Safe minion_id matching
{% if minion_id == 'zrh.node3' %}
- safe_pillar: '_only_ zrh.node3 will see this pillar and this cannot be overriden like grains'
+ safe_pillar: '_only_ zrh.node3 will see this pillar and this cannot be overridden like grains'
{% endif %}
diff --git a/tests/integration/modules/test_git.py b/tests/integration/modules/test_git.py
index 6ede0f96f1..1172e70332 100644
--- a/tests/integration/modules/test_git.py
+++ b/tests/integration/modules/test_git.py
@@ -147,7 +147,8 @@ class GitModuleTest(ModuleCase):
TODO: maybe move this behavior to ModuleCase itself?
'''
return salt.utils.data.decode(
- super(GitModuleTest, self).run_function(*args, **kwargs)
+ super(GitModuleTest, self).run_function(*args, **kwargs),
+ encoding='utf-8'
)
def tearDown(self):
@@ -206,7 +207,8 @@ class GitModuleTest(ModuleCase):
self.run_function('cmd.run', ['cp ' + tar_archive + ' /root/'])
with closing(tarfile.open(tar_archive, 'r')) as tar_obj:
self.assertEqual(
- sorted(salt.utils.data.decode(tar_obj.getnames())),
+ sorted(salt.utils.data.decode(tar_obj.getnames(),
+ encoding='utf-8')),
sorted([
'foo', 'foo/bar', 'foo/baz', 'foo/foo', 'foo/питон',
'foo/qux', 'foo/qux/bar', 'foo/qux/baz', 'foo/qux/foo',
@@ -236,7 +238,8 @@ class GitModuleTest(ModuleCase):
self.assertTrue(tarfile.is_tarfile(tar_archive))
with closing(tarfile.open(tar_archive, 'r')) as tar_obj:
self.assertEqual(
- sorted(salt.utils.data.decode(tar_obj.getnames())),
+ sorted(salt.utils.data.decode(tar_obj.getnames(),
+ encoding='utf-8')),
sorted(['foo', 'foo/bar', 'foo/baz', 'foo/foo', 'foo/питон'])
)
finally:
diff --git a/tests/integration/modules/test_nacl.py b/tests/integration/modules/test_nacl.py
new file mode 100644
index 0000000000..10a2c50572
--- /dev/null
+++ b/tests/integration/modules/test_nacl.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+'''
+Tests for the nacl execution module
+'''
+# Import Python libs
+from __future__ import absolute_import, print_function, unicode_literals
+
+import salt.utils.stringutils
+
+# Import Salt Testing libs
+from tests.support.case import ModuleCase
+from tests.support.unit import skipIf
+
+try:
+ import libnacl # pylint: disable=unused-import
+ HAS_LIBNACL = True
+except ImportError:
+ HAS_LIBNACL = False
+
+
+@skipIf(not HAS_LIBNACL, 'skipping test_nacl, libnacl is unavailable')
+class NaclTest(ModuleCase):
+ '''
+ Test the nacl execution module
+ '''
+ def test_keygen(self):
+ '''
+ Test keygen
+ '''
+ # Store the data
+ ret = self.run_function(
+ 'nacl.keygen',
+ )
+ self.assertIn('pk', ret)
+ self.assertIn('sk', ret)
+
+ def test_enc_dec(self):
+ '''
+ Generate keys, encrypt, then decrypt.
+ '''
+ # Store the data
+ ret = self.run_function(
+ 'nacl.keygen',
+ )
+ self.assertIn('pk', ret)
+ self.assertIn('sk', ret)
+ pk = ret['pk']
+ sk = ret['sk']
+
+ unencrypted_data = salt.utils.stringutils.to_bytes('hello')
+
+ # Encrypt with pk
+ ret = self.run_function(
+ 'nacl.enc',
+ data=unencrypted_data,
+ pk=pk,
+ )
+ encrypted_data = ret
+
+ # Decrypt with sk
+ ret = self.run_function(
+ 'nacl.dec',
+ data=encrypted_data,
+ sk=sk,
+ )
+ self.assertEqual(unencrypted_data, ret)
diff --git a/tests/integration/modules/test_saltutil.py b/tests/integration/modules/test_saltutil.py
index 156fed358b..09d24bf948 100644
--- a/tests/integration/modules/test_saltutil.py
+++ b/tests/integration/modules/test_saltutil.py
@@ -94,7 +94,8 @@ class SaltUtilSyncModuleTest(ModuleCase):
'sdb': [],
'proxymodules': [],
'output': [],
- 'thorium': []}
+ 'thorium': [],
+ 'serializers': []}
ret = self.run_function('saltutil.sync_all')
self.assertEqual(ret, expected_return)
@@ -115,7 +116,8 @@ class SaltUtilSyncModuleTest(ModuleCase):
'sdb': [],
'proxymodules': [],
'output': [],
- 'thorium': []}
+ 'thorium': [],
+ 'serializers': []}
ret = self.run_function('saltutil.sync_all', extmod_whitelist={'modules': ['salttest']})
self.assertEqual(ret, expected_return)
@@ -138,7 +140,8 @@ class SaltUtilSyncModuleTest(ModuleCase):
'sdb': [],
'proxymodules': [],
'output': [],
- 'thorium': []}
+ 'thorium': [],
+ 'serializers': []}
ret = self.run_function('saltutil.sync_all', extmod_blacklist={'modules': ['runtests_decorators']})
self.assertEqual(ret, expected_return)
@@ -159,7 +162,8 @@ class SaltUtilSyncModuleTest(ModuleCase):
'sdb': [],
'proxymodules': [],
'output': [],
- 'thorium': []}
+ 'thorium': [],
+ 'serializers': []}
ret = self.run_function('saltutil.sync_all', extmod_whitelist={'modules': ['runtests_decorators']},
extmod_blacklist={'modules': ['runtests_decorators']})
self.assertEqual(ret, expected_return)
diff --git a/tests/integration/runners/test_nacl.py b/tests/integration/runners/test_nacl.py
new file mode 100644
index 0000000000..4c93da6211
--- /dev/null
+++ b/tests/integration/runners/test_nacl.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+'''
+Tests for the salt-run command
+'''
+# Import Python libs
+from __future__ import absolute_import, print_function, unicode_literals
+
+# Import Salt Testing libs
+from tests.support.case import ShellCase
+from tests.support.unit import skipIf
+
+try:
+ import libnacl # pylint: disable=unused-import
+ HAS_LIBNACL = True
+except ImportError:
+ HAS_LIBNACL = False
+
+
+@skipIf(not HAS_LIBNACL, 'skipping test_nacl, libnacl is unavailable')
+class NaclTest(ShellCase):
+ '''
+ Test the nacl runner
+ '''
+ def test_keygen(self):
+ '''
+ Test keygen
+ '''
+ # Store the data
+ ret = self.run_run_plus(
+ 'nacl.keygen',
+ )
+ self.assertIn('pk', ret['return'])
+ self.assertIn('sk', ret['return'])
+
+ def test_enc(self):
+ '''
+ Test nacl.enc
+ '''
+ # Store the data
+ ret = self.run_run_plus(
+ 'nacl.keygen',
+ )
+ self.assertIn('pk', ret['return'])
+ self.assertIn('sk', ret['return'])
+ pk = ret['return']['pk']
+ sk = ret['return']['sk']
+
+ unencrypted_data = 'hello'
+
+ # Encrypt with pk
+ ret = self.run_run_plus(
+ 'nacl.enc',
+ data=unencrypted_data,
+ pk=pk,
+ )
+ self.assertIn('return', ret)
+
+ def test_enc_dec(self):
+ '''
+ Generate keys, encrypt, then decrypt.
+ '''
+ # Store the data
+ ret = self.run_run_plus(
+ 'nacl.keygen',
+ )
+ self.assertIn('pk', ret['return'])
+ self.assertIn('sk', ret['return'])
+ pk = ret['return']['pk']
+ sk = ret['return']['sk']
+
+ unencrypted_data = 'hello'
+
+ # Encrypt with pk
+ ret = self.run_run_plus(
+ 'nacl.enc',
+ data=unencrypted_data,
+ pk=pk,
+ )
+ self.assertIn('return', ret)
+ encrypted_data = ret['return']
+
+ # Decrypt with sk
+ ret = self.run_run_plus(
+ 'nacl.dec',
+ data=encrypted_data,
+ sk=sk,
+ )
+ self.assertIn('return', ret)
+ self.assertEqual(unencrypted_data, ret['return'])
diff --git a/tests/integration/scheduler/test_error.py b/tests/integration/scheduler/test_error.py
new file mode 100644
index 0000000000..87813d5938
--- /dev/null
+++ b/tests/integration/scheduler/test_error.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+
+# Import Python libs
+from __future__ import absolute_import
+import copy
+import datetime
+import logging
+import os
+import random
+
+import dateutil.parser as dateutil_parser
+
+# Import Salt Testing libs
+from tests.support.case import ModuleCase
+from tests.support.mixins import SaltReturnAssertsMixin
+
+# Import Salt Testing Libs
+from tests.support.mock import MagicMock, patch
+from tests.support.unit import skipIf
+import tests.integration as integration
+
+# Import Salt libs
+import salt.utils.schedule
+
+from salt.modules.test import ping as ping
+
+try:
+ import croniter # pylint: disable=W0611
+ HAS_CRONITER = True
+except ImportError:
+ HAS_CRONITER = False
+
+log = logging.getLogger(__name__)
+ROOT_DIR = os.path.join(integration.TMP, 'schedule-unit-tests')
+SOCK_DIR = os.path.join(ROOT_DIR, 'test-socks')
+
+DEFAULT_CONFIG = salt.config.minion_config(None)
+DEFAULT_CONFIG['conf_dir'] = ROOT_DIR
+DEFAULT_CONFIG['root_dir'] = ROOT_DIR
+DEFAULT_CONFIG['sock_dir'] = SOCK_DIR
+DEFAULT_CONFIG['pki_dir'] = os.path.join(ROOT_DIR, 'pki')
+DEFAULT_CONFIG['cachedir'] = os.path.join(ROOT_DIR, 'cache')
+
+
+class SchedulerErrorTest(ModuleCase, SaltReturnAssertsMixin):
+ '''
+ Test scheduler error conditions
+ '''
+ def setUp(self):
+ with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
+ functions = {'test.ping': ping}
+ self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
+ self.schedule.opts['loop_interval'] = 1
+
+ self.schedule.opts['grains']['whens'] = {'tea time': '11/29/2017 12:00pm'}
+
+ def tearDown(self):
+ self.schedule.reset()
+
+ @skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
+ def test_eval_cron_invalid(self):
+ '''
+ verify that an invalid cron string is flagged as an error
+ '''
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'cron': '0 16 29 13 *'
+ }
+ }
+ }
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+ with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
+ self.schedule.eval(now=run_time)
+
+ ret = self.schedule.job_status('job1')
+ self.assertEqual(ret['_error'],
+ 'Invalid cron string. Ignoring job job1.')
+
+ def test_eval_when_invalid_date(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'when': '13/29/2017 1:00pm',
+ }
+ }
+ }
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # Evaluate 1 second before the run time
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ self.assertEqual(ret['_error'],
+ 'Invalid date string. Ignoring job job1.')
+
+ def test_eval_whens_grain_not_dict(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'when': 'tea time',
+ }
+ }
+ }
+
+ self.schedule.opts['grains']['whens'] = ['tea time']
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # Evaluate 1 second before the run time
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ self.assertEqual(ret['_error'],
+ 'Grain "whens" must be a dict. Ignoring job job1.')
+
+ def test_eval_once_invalid_datestring(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'once': '2017-13-13T13:00:00',
+ }
+ }
+ }
+ run_time = dateutil_parser.parse('12/13/2017 1:00pm')
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # Evaluate 1 second at the run time
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ _expected = ('Date string could not be parsed: '
+ '2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
+ 'Ignoring job job1.')
+ self.assertEqual(ret['_error'], _expected)
+
+ def test_eval_skip_during_range_invalid_date(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'hours': 1,
+ 'skip_during_range': {'start': '1:00pm', 'end': '25:00pm'}
+
+ }
+ }
+ }
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # eval at 3:00pm to prime, simulate minion start up.
+ run_time = dateutil_parser.parse('11/29/2017 3:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+
+ # eval at 4:00pm to prime
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ _expected = ('Invalid date string for end in '
+ 'skip_during_range. Ignoring '
+ 'job job1.')
+ self.assertEqual(ret['_error'], _expected)
+
+ def test_eval_skip_during_range_end_before_start(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'hours': 1,
+ 'skip_during_range': {'start': '1:00pm', 'end': '12:00pm'}
+
+ }
+ }
+ }
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # eval at 3:00pm to prime, simulate minion start up.
+ run_time = dateutil_parser.parse('11/29/2017 3:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+
+ # eval at 4:00pm to prime
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ _expected = ('schedule.handle_func: Invalid '
+ 'range, end must be larger than '
+ 'start. Ignoring job job1.')
+ self.assertEqual(ret['_error'], _expected)
+
+ def test_eval_skip_during_range_not_dict(self):
+ '''
+ verify that scheduled job does not run
+ and returns the right error
+ '''
+
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'hours': 1,
+ 'skip_during_range': ['start', '1:00pm', 'end', '12:00pm']
+
+ }
+ }
+ }
+
+ # Add the job to the scheduler
+ self.schedule.opts.update(job)
+
+ # eval at 3:00pm to prime, simulate minion start up.
+ run_time = dateutil_parser.parse('11/29/2017 3:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+
+ # eval at 4:00pm to prime
+ run_time = dateutil_parser.parse('11/29/2017 4:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ _expected = ('schedule.handle_func: Invalid, '
+ 'range must be specified as a '
+ 'dictionary. Ignoring job job1.')
+ self.assertEqual(ret['_error'], _expected)
+
diff --git a/tests/integration/scheduler/test_eval.py b/tests/integration/scheduler/test_eval.py
index c16f0495e6..9e7fe4f899 100644
--- a/tests/integration/scheduler/test_eval.py
+++ b/tests/integration/scheduler/test_eval.py
@@ -274,30 +274,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)
- @skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
- def test_eval_cron_invalid(self):
- '''
- verify that scheduled job runs
- '''
- job = {
- 'schedule': {
- 'job1': {
- 'function': 'test.ping',
- 'cron': '0 16 29 13 *'
- }
- }
- }
-
- # Add the job to the scheduler
- self.schedule.opts.update(job)
-
- run_time = dateutil_parser.parse('11/29/2017 4:00pm')
- with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
- self.schedule.eval(now=run_time)
-
- ret = self.schedule.job_status('job1')
- self.assertEqual(ret['_error'], 'Invalid cron string. Ignoring job job1.')
-
@skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
def test_eval_cron_loop_interval(self):
'''
@@ -325,56 +301,6 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time)
- def test_eval_when_invalid_date(self):
- '''
- verify that scheduled job does not run
- and returns the right error
- '''
- run_time = dateutil_parser.parse('11/29/2017 4:00pm')
-
- job = {
- 'schedule': {
- 'job1': {
- 'function': 'test.ping',
- 'when': '13/29/2017 1:00pm',
- }
- }
- }
-
- # Add the job to the scheduler
- self.schedule.opts.update(job)
-
- # Evaluate 1 second before the run time
- self.schedule.eval(now=run_time)
- ret = self.schedule.job_status('job1')
- self.assertEqual(ret['_error'], 'Invalid date string. Ignoring job job1.')
-
- def test_eval_once_invalid_datestring(self):
- '''
- verify that scheduled job does not run
- and returns the right error
- '''
- job = {
- 'schedule': {
- 'job1': {
- 'function': 'test.ping',
- 'once': '2017-13-13T13:00:00',
- }
- }
- }
- run_time = dateutil_parser.parse('12/13/2017 1:00pm')
-
- # Add the job to the scheduler
- self.schedule.opts.update(job)
-
- # Evaluate 1 second at the run time
- self.schedule.eval(now=run_time)
- ret = self.schedule.job_status('job1')
- _expected = ('Date string could not be parsed: '
- '2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
- 'Ignoring job job1.')
- self.assertEqual(ret['_error'], _expected)
-
def test_eval_until(self):
'''
verify that scheduled job is skipped once the current
@@ -515,3 +441,32 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.schedule.job_status('job1')
self.assertNotIn('_last_run', ret)
self.assertEqual(ret['_skip_reason'], 'disabled')
+
+ def test_eval_run_on_start(self):
+ '''
+ verify that scheduled job is run when minion starts
+ '''
+ job = {
+ 'schedule': {
+ 'job1': {
+ 'function': 'test.ping',
+ 'hours': '1',
+ 'run_on_start': True
+ }
+ }
+ }
+
+ # Add job to schedule
+ self.schedule.opts.update(job)
+
+ # eval at 2:00pm, will run.
+ run_time = dateutil_parser.parse('11/29/2017 2:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ self.assertEqual(ret['_last_run'], run_time)
+
+ # eval at 3:00pm, will run.
+ run_time = dateutil_parser.parse('11/29/2017 3:00pm')
+ self.schedule.eval(now=run_time)
+ ret = self.schedule.job_status('job1')
+ self.assertEqual(ret['_last_run'], run_time)
diff --git a/tests/integration/shell/test_call.py b/tests/integration/shell/test_call.py
index 3dff8fea5e..7d51174fd9 100644
--- a/tests/integration/shell/test_call.py
+++ b/tests/integration/shell/test_call.py
@@ -363,58 +363,56 @@ class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin
def test_issue_14979_output_file_permissions(self):
output_file = os.path.join(TMP, 'issue-14979')
- current_umask = os.umask(0o077)
- try:
- # Let's create an initial output file with some data
- self.run_script(
- 'salt-call',
- '-c {0} --output-file={1} -g'.format(
- self.get_config_dir(),
- output_file
- ),
- catch_stderr=True,
- with_retcode=True
- )
- stat1 = os.stat(output_file)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ # Let's create an initial output file with some data
+ self.run_script(
+ 'salt-call',
+ '-c {0} --output-file={1} -g'.format(
+ self.get_config_dir(),
+ output_file
+ ),
+ catch_stderr=True,
+ with_retcode=True
+ )
+ stat1 = os.stat(output_file)
- # Let's change umask
- os.umask(0o777)
+ # Let's change umask
+ os.umask(0o777) # pylint: disable=blacklisted-function
- self.run_script(
- 'salt-call',
- '-c {0} --output-file={1} --output-file-append -g'.format(
- self.get_config_dir(),
- output_file
- ),
- catch_stderr=True,
- with_retcode=True
- )
- stat2 = os.stat(output_file)
- self.assertEqual(stat1.st_mode, stat2.st_mode)
- # Data was appeneded to file
- self.assertTrue(stat1.st_size < stat2.st_size)
+ self.run_script(
+ 'salt-call',
+ '-c {0} --output-file={1} --output-file-append -g'.format(
+ self.get_config_dir(),
+ output_file
+ ),
+ catch_stderr=True,
+ with_retcode=True
+ )
+ stat2 = os.stat(output_file)
+ self.assertEqual(stat1.st_mode, stat2.st_mode)
+ # Data was appended to file
+ self.assertTrue(stat1.st_size < stat2.st_size)
- # Let's remove the output file
- os.unlink(output_file)
-
- # Not appending data
- self.run_script(
- 'salt-call',
- '-c {0} --output-file={1} -g'.format(
- self.get_config_dir(),
- output_file
- ),
- catch_stderr=True,
- with_retcode=True
- )
- stat3 = os.stat(output_file)
- # Mode must have changed since we're creating a new log file
- self.assertNotEqual(stat1.st_mode, stat3.st_mode)
- finally:
- if os.path.exists(output_file):
+ # Let's remove the output file
os.unlink(output_file)
- # Restore umask
- os.umask(current_umask)
+
+ # Not appending data
+ self.run_script(
+ 'salt-call',
+ '-c {0} --output-file={1} -g'.format(
+ self.get_config_dir(),
+ output_file
+ ),
+ catch_stderr=True,
+ with_retcode=True
+ )
+ stat3 = os.stat(output_file)
+ # Mode must have changed since we're creating a new log file
+ self.assertNotEqual(stat1.st_mode, stat3.st_mode)
+ finally:
+ if os.path.exists(output_file):
+ os.unlink(output_file)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_42116_cli_pillar_override(self):
diff --git a/tests/integration/ssh/test_master.py b/tests/integration/ssh/test_master.py
index 2fa5cf95cd..76196e3f5c 100644
--- a/tests/integration/ssh/test_master.py
+++ b/tests/integration/ssh/test_master.py
@@ -26,10 +26,15 @@ class SSHMasterTestCase(ModuleCase):
def test_service(self):
service = 'cron'
os_family = self.run_function('grains.get', ['os_family'], minion_tgt='localhost')
+ os_release = self.run_function('grains.get', ['osrelease'], minion_tgt='localhost')
if os_family == 'RedHat':
service = 'crond'
elif os_family == 'Arch':
service = 'sshd'
+ elif os_family == 'MacOS':
+ service = 'org.ntp.ntpd'
+ if int(os_release.split('.')[1]) >= 13:
+ service = 'com.apple.AirPlayXPCHelper'
ret = self.run_function('service.get_all', minion_tgt='localhost')
self.assertIn(service, ret)
self.run_function('service.stop', [service], minion_tgt='localhost')
@@ -40,8 +45,12 @@ class SSHMasterTestCase(ModuleCase):
self.assertTrue(ret)
def test_grains_items(self):
+ os_family = self.run_function('grains.get', ['os_family'], minion_tgt='localhost')
ret = self.run_function('grains.items', minion_tgt='localhost')
- self.assertEqual(ret['kernel'], 'Linux')
+ if os_family == 'MacOS':
+ self.assertEqual(ret['kernel'], 'Darwin')
+ else:
+ self.assertEqual(ret['kernel'], 'Linux')
def test_state_apply(self):
ret = self.run_function('state.apply', ['core'], minion_tgt='localhost')
diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py
index 5c8e7f1f90..d0c0c06cd4 100644
--- a/tests/integration/states/test_file.py
+++ b/tests/integration/states/test_file.py
@@ -27,6 +27,7 @@ from tests.support.paths import FILES, TMP, TMP_STATE_TREE
from tests.support.helpers import (
skip_if_not_root,
with_system_user_and_group,
+ with_tempfile,
Webserver,
)
from tests.support.mixins import SaltReturnAssertsMixin
@@ -35,6 +36,7 @@ from tests.support.mixins import SaltReturnAssertsMixin
import salt.utils.files
import salt.utils.path
import salt.utils.platform
+import salt.utils.stringutils
HAS_PWD = True
try:
@@ -134,6 +136,16 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the file state
'''
+ def tearDown(self):
+ '''
+ remove files created in previous tests
+ '''
+ for path in (FILEPILLAR, FILEPILLARDEF, FILEPILLARGIT):
+ try:
+ os.remove(path)
+ except OSError as exc:
+ if exc.errno != os.errno.ENOENT:
+ log.error('Failed to remove %s: %s', path, exc)
def test_symlink(self):
'''
@@ -2531,15 +2543,1208 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
self.assertEqual(desired['group'], result['group'])
self.assertEqual(desired['mode'], result['mode'].lstrip('0Oo'))
- def tearDown(self):
+
+class BlockreplaceTest(ModuleCase, SaltReturnAssertsMixin):
+ marker_start = '# start'
+ marker_end = '# end'
+ content = textwrap.dedent('''\
+ Line 1 of block
+ Line 2 of block
+ ''')
+ without_block = textwrap.dedent('''\
+ Hello world!
+
+ # comment here
+ ''')
+ with_non_matching_block = textwrap.dedent('''\
+ Hello world!
+
+ # start
+ No match here
+ # end
+ # comment here
+ ''')
+ with_non_matching_block_and_marker_end_not_after_newline = textwrap.dedent('''\
+ Hello world!
+
+ # start
+ No match here# end
+ # comment here
+ ''')
+ with_matching_block = textwrap.dedent('''\
+ Hello world!
+
+ # start
+ Line 1 of block
+ Line 2 of block
+ # end
+ # comment here
+ ''')
+ with_matching_block_and_extra_newline = textwrap.dedent('''\
+ Hello world!
+
+ # start
+ Line 1 of block
+ Line 2 of block
+
+ # end
+ # comment here
+ ''')
+ with_matching_block_and_marker_end_not_after_newline = textwrap.dedent('''\
+ Hello world!
+
+ # start
+ Line 1 of block
+ Line 2 of block# end
+ # comment here
+ ''')
+ content_explicit_posix_newlines = ('Line 1 of block\n'
+ 'Line 2 of block\n')
+ content_explicit_windows_newlines = ('Line 1 of block\r\n'
+ 'Line 2 of block\r\n')
+ without_block_explicit_posix_newlines = ('Hello world!\n\n'
+ '# comment here\n')
+ without_block_explicit_windows_newlines = ('Hello world!\r\n\r\n'
+ '# comment here\r\n')
+ with_block_prepended_explicit_posix_newlines = ('# start\n'
+ 'Line 1 of block\n'
+ 'Line 2 of block\n'
+ '# end\n'
+ 'Hello world!\n\n'
+ '# comment here\n')
+ with_block_prepended_explicit_windows_newlines = ('# start\r\n'
+ 'Line 1 of block\r\n'
+ 'Line 2 of block\r\n'
+ '# end\r\n'
+ 'Hello world!\r\n\r\n'
+ '# comment here\r\n')
+ with_block_appended_explicit_posix_newlines = ('Hello world!\n\n'
+ '# comment here\n'
+ '# start\n'
+ 'Line 1 of block\n'
+ 'Line 2 of block\n'
+ '# end\n')
+ with_block_appended_explicit_windows_newlines = ('Hello world!\r\n\r\n'
+ '# comment here\r\n'
+ '# start\r\n'
+ 'Line 1 of block\r\n'
+ 'Line 2 of block\r\n'
+ '# end\r\n')
+
+ @staticmethod
+ def _write(dest, content):
+ with salt.utils.files.fopen(dest, 'wb') as fp_:
+ fp_.write(salt.utils.stringutils.to_bytes(content))
+
+ @staticmethod
+ def _read(src):
+ with salt.utils.files.fopen(src, 'rb') as fp_:
+ return salt.utils.stringutils.to_unicode(fp_.read())
+
+ @with_tempfile
+ def test_prepend(self, name):
'''
- remove files created in previous tests
+ Test blockreplace when prepend_if_not_found=True and block doesn't
+ exist in file.
'''
- all_files = [FILEPILLAR, FILEPILLARDEF, FILEPILLARGIT]
- for file in all_files:
- check_file = self.run_function('file.file_exists', [file])
- if check_file:
- self.run_function('file.remove', [file])
+ expected = self.marker_start + os.linesep + self.content + \
+ self.marker_end + os.linesep + self.without_block
+
+ # Pass 1: content ends in newline
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_prepend_append_newline(self, name):
+ '''
+ Test blockreplace when prepend_if_not_found=True and block doesn't
+ exist in file. Test with append_newline explicitly set to True.
+ '''
+ # Pass 1: content ends in newline
+ expected = self.marker_start + os.linesep + self.content + \
+ os.linesep + self.marker_end + os.linesep + self.without_block
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ expected = self.marker_start + os.linesep + self.content + \
+ self.marker_end + os.linesep + self.without_block
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_prepend_no_append_newline(self, name):
+ '''
+ Test blockreplace when prepend_if_not_found=True and block doesn't
+ exist in file. Test with append_newline explicitly set to False.
+ '''
+ # Pass 1: content ends in newline
+ expected = self.marker_start + os.linesep + self.content + \
+ self.marker_end + os.linesep + self.without_block
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ expected = self.marker_start + os.linesep + \
+ self.content.rstrip('\r\n') + self.marker_end + os.linesep + \
+ self.without_block
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_append(self, name):
+ '''
+ Test blockreplace when append_if_not_found=True and block doesn't
+ exist in file.
+ '''
+ expected = self.without_block + self.marker_start + os.linesep + \
+ self.content + self.marker_end + os.linesep
+
+ # Pass 1: content ends in newline
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_append_append_newline(self, name):
+ '''
+ Test blockreplace when append_if_not_found=True and block doesn't
+ exist in file. Test with append_newline explicitly set to True.
+ '''
+ # Pass 1: content ends in newline
+ expected = self.without_block + self.marker_start + os.linesep + \
+ self.content + os.linesep + self.marker_end + os.linesep
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ expected = self.without_block + self.marker_start + os.linesep + \
+ self.content + self.marker_end + os.linesep
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_append_no_append_newline(self, name):
+ '''
+ Test blockreplace when append_if_not_found=True and block doesn't
+ exist in file. Test with append_newline explicitly set to False.
+ '''
+ # Pass 1: content ends in newline
+ expected = self.without_block + self.marker_start + os.linesep + \
+ self.content + self.marker_end + os.linesep
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ # Pass 2: content does not end in newline
+ expected = self.without_block + self.marker_start + os.linesep + \
+ self.content.rstrip('\r\n') + self.marker_end + os.linesep
+ self._write(name, self.without_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), expected)
+
+ @with_tempfile
+ def test_prepend_auto_line_separator(self, name):
+ '''
+ This tests the line separator auto-detection when prepending the block
+ '''
+ # POSIX newlines to Windows newlines
+ self._write(name, self.without_block_explicit_windows_newlines)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_posix_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_prepended_explicit_windows_newlines)
+ # Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_posix_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_prepended_explicit_windows_newlines)
+
+ # Windows newlines to POSIX newlines
+ self._write(name, self.without_block_explicit_posix_newlines)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_windows_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_prepended_explicit_posix_newlines)
+ # Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_windows_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ prepend_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_prepended_explicit_posix_newlines)
+
+ @with_tempfile
+ def test_append_auto_line_separator(self, name):
+ '''
+ This tests the line separator auto-detection when appending the block
+ '''
+ # POSIX newlines to Windows newlines
+ self._write(name, self.without_block_explicit_windows_newlines)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_posix_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_appended_explicit_windows_newlines)
+ # Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_posix_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_appended_explicit_windows_newlines)
+
+ # Windows newlines to POSIX newlines
+ self._write(name, self.without_block_explicit_posix_newlines)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_windows_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_appended_explicit_posix_newlines)
+ # Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content_explicit_windows_newlines,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_if_not_found=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_block_appended_explicit_posix_newlines)
+
+ @with_tempfile
+ def test_non_matching_block(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a
+ match.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_non_matching_block_append_newline(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a
+ match. Test with append_newline explicitly set to True.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_non_matching_block_no_append_newline(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a
+ match. Test with append_newline explicitly set to False.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_non_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+
+ @with_tempfile
+ def test_non_matching_block_and_marker_not_after_newline(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a
+ match, and the marker_end is not directly preceded by a newline.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_non_matching_block_and_marker_not_after_newline_append_newline(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a match,
+ and the marker_end is not directly preceded by a newline. Test with
+ append_newline explicitly set to True.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_non_matching_block_and_marker_not_after_newline_no_append_newline(self, name):
+ '''
+ Test blockreplace when block exists but its contents are not a match,
+ and the marker_end is not directly preceded by a newline. Test with
+ append_newline explicitly set to False.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_non_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+
+ @with_tempfile
+ def test_matching_block(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match. No
+ changes should be made.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_matching_block_append_newline(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match. Test
+ with append_newline explicitly set to True. This will result in an
+ extra newline when the content ends in a newline, and will not when the
+ content does not end in a newline.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_matching_block_no_append_newline(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match. Test
+ with append_newline explicitly set to False. This will result in the
+ marker_end not being directly preceded by a newline when the content
+ does not end in a newline.
+ '''
+ # Pass 1: content ends in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(name, self.with_matching_block)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+
+ @with_tempfile
+ def test_matching_block_and_marker_not_after_newline(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match, but
+ the marker_end is not directly preceded by a newline.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_matching_block_and_marker_not_after_newline_append_newline(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match, but
+ the marker_end is not directly preceded by a newline. Test with
+ append_newline explicitly set to True. This will result in an extra
+ newline when the content ends in a newline, and will not when the
+ content does not end in a newline.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_extra_newline)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=True)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ @with_tempfile
+ def test_matching_block_and_marker_not_after_newline_no_append_newline(self, name):
+ '''
+ Test blockreplace when block exists and its contents are a match, but
+ the marker_end is not directly preceded by a newline. Test with
+ append_newline explicitly set to False.
+ '''
+ # Pass 1: content ends in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertTrue(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+ # Pass 1a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content,
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(self._read(name), self.with_matching_block)
+
+ # Pass 2: content does not end in newline
+ self._write(
+ name,
+ self.with_matching_block_and_marker_end_not_after_newline)
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
+ # Pass 2a: Re-run state, no changes should be made
+ ret = self.run_state('file.blockreplace',
+ name=name,
+ content=self.content.rstrip('\r\n'),
+ marker_start=self.marker_start,
+ marker_end=self.marker_end,
+ append_newline=False)
+ self.assertSaltTrueReturn(ret)
+ self.assertFalse(ret[next(iter(ret))]['changes'])
+ self.assertEqual(
+ self._read(name),
+ self.with_matching_block_and_marker_end_not_after_newline)
class RemoteFileTest(ModuleCase, SaltReturnAssertsMixin):
diff --git a/tests/integration/states/test_pip.py b/tests/integration/states/test_pip.py
index 0ea7d8cd5f..692067eb7f 100644
--- a/tests/integration/states/test_pip.py
+++ b/tests/integration/states/test_pip.py
@@ -524,3 +524,78 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
+
+ def test_46127_pip_env_vars(self):
+ '''
+ Test that checks if env_vars passed to pip.installed are also passed
+ to pip.freeze while checking for existing installations
+ '''
+ # This issue is most easily checked while installing carbon
+ # Much of the code here comes from the test_weird_install function above
+ ographite = '/opt/graphite'
+ if os.path.isdir(ographite):
+ self.skipTest(
+ 'You already have \'{0}\'. This test would overwrite this '
+ 'directory'.format(ographite)
+ )
+ try:
+ os.makedirs(ographite)
+ except OSError as err:
+ if err.errno == errno.EACCES:
+ # Permission denied
+ self.skipTest(
+ 'You don\'t have the required permissions to run this test'
+ )
+ finally:
+ if os.path.isdir(ographite):
+ shutil.rmtree(ographite)
+
+ venv_dir = os.path.join(RUNTIME_VARS.TMP, 'issue-46127-pip-env-vars')
+ try:
+ # We may be able to remove this, I had to add it because the custom
+ # modules from the test suite weren't available in the jinja
+ # context when running the call to state.sls that comes after.
+ self.run_function('saltutil.sync_modules')
+            # Since we don't have the virtualenv created, pip.installed will
+            # throw an error.
+ ret = self.run_function(
+ 'state.sls', mods='issue-46127-pip-env-vars'
+ )
+ self.assertSaltTrueReturn(ret)
+ for key in six.iterkeys(ret):
+ self.assertTrue(ret[key]['result'])
+ if ret[key]['name'] != 'carbon < 1.3':
+ continue
+ self.assertEqual(
+ ret[key]['comment'],
+ 'All packages were successfully installed'
+ )
+ break
+ else:
+ raise Exception('Expected state did not run')
+ # Run the state again. Now the already installed message should
+ # appear
+ ret = self.run_function(
+ 'state.sls', mods='issue-46127-pip-env-vars'
+ )
+ self.assertSaltTrueReturn(ret)
+ # We cannot use assertInSaltComment here because we need to skip
+ # some of the state return parts
+ for key in six.iterkeys(ret):
+ self.assertTrue(ret[key]['result'])
+ # As we are re-running the formula, some states will not be run
+ # and "name" may or may not be present, so we use .get() pattern
+ if ret[key].get('name', '') != 'carbon < 1.3':
+ continue
+ self.assertEqual(
+ ret[key]['comment'],
+ ('Python package carbon < 1.3 was already installed\n'
+ 'All packages were successfully installed'))
+ break
+ else:
+ raise Exception('Expected state did not run')
+ finally:
+ if os.path.isdir(ographite):
+ shutil.rmtree(ographite)
+ if os.path.isdir(venv_dir):
+ shutil.rmtree(venv_dir)
diff --git a/tests/support/helpers.py b/tests/support/helpers.py
index 05989848b7..5864c83b14 100644
--- a/tests/support/helpers.py
+++ b/tests/support/helpers.py
@@ -24,6 +24,7 @@ import signal
import socket
import string
import sys
+import tempfile
import threading
import time
import tornado.ioloop
@@ -50,7 +51,7 @@ except ImportError:
# Import Salt Tests Support libs
from tests.support.unit import skip, _id
from tests.support.mock import patch
-from tests.support.paths import FILES
+from tests.support.paths import FILES, TMP
log = logging.getLogger(__name__)
@@ -954,6 +955,24 @@ def with_system_user_and_group(username, group,
return decorator
+def with_tempfile(func):
+ '''
+ Generates a tempfile and cleans it up when test completes.
+ '''
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ fd_, name = tempfile.mkstemp(prefix='__salt.test.', dir=TMP)
+ os.close(fd_)
+ del fd_
+ ret = func(self, name, *args, **kwargs)
+ try:
+ os.remove(name)
+ except Exception:
+ pass
+ return ret
+ return wrapper
+
+
def requires_system_grains(func):
'''
Function decorator which loads and passes the system's grains to the test
diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py
index 243b144fbe..d9a6aa7c9f 100644
--- a/tests/unit/config/test_config.py
+++ b/tests/unit/config/test_config.py
@@ -27,6 +27,7 @@ import salt.utils.files
import salt.utils.network
import salt.utils.platform
import salt.utils.yaml
+from salt.ext import six
from salt.syspaths import CONFIG_DIR
from salt import config as sconfig
from salt.exceptions import (
@@ -604,6 +605,91 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
self.assertEqual(syndic_opts['_master_conf_file'], minion_conf_path)
self.assertEqual(syndic_opts['_minion_conf_file'], syndic_conf_path)
+ def _get_tally(self, conf_func):
+ '''
+ This ensures that any strings which are loaded are unicode strings
+ '''
+ tally = {}
+
+ def _count_strings(config):
+ if isinstance(config, dict):
+ for key, val in six.iteritems(config):
+ log.debug('counting strings in dict key: %s', key)
+ log.debug('counting strings in dict val: %s', val)
+ _count_strings(key)
+ _count_strings(val)
+ elif isinstance(config, list):
+ log.debug('counting strings in list: %s', config)
+ for item in config:
+ _count_strings(item)
+ else:
+ if isinstance(config, six.string_types):
+ if isinstance(config, six.text_type):
+ tally['unicode'] = tally.get('unicode', 0) + 1
+ else:
+ # We will never reach this on PY3
+ tally.setdefault('non_unicode', []).append(config)
+
+ fpath = salt.utils.files.mkstemp(dir=TMP)
+ try:
+ with salt.utils.files.fopen(fpath, 'w') as wfh:
+ wfh.write(textwrap.dedent('''
+ foo: bar
+ mylist:
+ - somestring
+ - 9
+ - 123.456
+ - True
+ - nested:
+ - key: val
+ - nestedlist:
+ - foo
+ - bar
+ - baz
+ mydict:
+ - somestring: 9
+ - 123.456: 789
+ - True: False
+ - nested:
+ - key: val
+ - nestedlist:
+ - foo
+ - bar
+ - baz'''))
+ if conf_func is sconfig.master_config:
+ wfh.write('\n\n')
+ wfh.write(textwrap.dedent('''
+ rest_cherrypy:
+ port: 8000
+ disable_ssl: True
+ app_path: /beacon_demo
+ app: /srv/web/html/index.html
+ static: /srv/web/static'''))
+ config = conf_func(fpath)
+ _count_strings(config)
+ return tally
+ finally:
+ if os.path.isfile(fpath):
+ os.unlink(fpath)
+
+ def test_conf_file_strings_are_unicode_for_master(self):
+ '''
+ This ensures that any strings which are loaded are unicode strings
+ '''
+ tally = self._get_tally(sconfig.master_config)
+ non_unicode = tally.get('non_unicode', [])
+ self.assertEqual(len(non_unicode), 8 if six.PY2 else 0, non_unicode)
+ self.assertTrue(tally['unicode'] > 0)
+
+ def test_conf_file_strings_are_unicode_for_minion(self):
+ '''
+ This ensures that any strings which are loaded are unicode strings
+ '''
+ tally = self._get_tally(sconfig.minion_config)
+ non_unicode = tally.get('non_unicode', [])
+ self.assertEqual(len(non_unicode), 0, non_unicode)
+ self.assertTrue(tally['unicode'] > 0)
+
# <---- Salt Cloud Configuration Tests ---------------------------------------------
# cloud_config tests
diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py
index 5582317fd8..ba0a8ffd31 100644
--- a/tests/unit/grains/test_core.py
+++ b/tests/unit/grains/test_core.py
@@ -927,7 +927,7 @@ SwapTotal: 4789244 kB'''
('rinzler.evil-corp.com', [], ['5.6.7.8']),
('foo.bar.baz', [], ['fe80::a8b2:93ff:fe00:0']),
('bluesniff.foo.bar', [], ['fe80::a8b2:93ff:dead:beef'])]
- ret = {'fqdns': ['rinzler.evil-corp.com', 'foo.bar.baz', 'bluesniff.foo.bar']}
+ ret = {'fqdns': ['bluesniff.foo.bar', 'foo.bar.baz', 'rinzler.evil-corp.com']}
self._run_fqdns_test(reverse_resolv_mock, ret)
def _run_fqdns_test(self, reverse_resolv_mock, ret):
diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py
index e697ae61e1..ccd06b52e4 100644
--- a/tests/unit/modules/test_dockermod.py
+++ b/tests/unit/modules/test_dockermod.py
@@ -755,6 +755,35 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
ret = docker_mod.compare_containers('container1', 'container2')
self.assertEqual(ret, {})
+ def test_compare_container_ulimits_order(self):
+ '''
+ Test comparing two containers when the order of the Ulimits HostConfig
+ values are different, but the values are the same.
+ '''
+ def _inspect_container_effect(id_):
+ return {
+ 'container1': {'Config': {},
+ 'HostConfig': {
+ 'Ulimits': [
+ {u'Hard': -1, u'Soft': -1, u'Name': u'core'},
+ {u'Hard': 65536, u'Soft': 65536, u'Name': u'nofile'}
+ ]
+ }},
+ 'container2': {'Config': {},
+ 'HostConfig': {
+ 'Ulimits': [
+ {u'Hard': 65536, u'Soft': 65536, u'Name': u'nofile'},
+ {u'Hard': -1, u'Soft': -1, u'Name': u'core'}
+ ]
+ }},
+ }[id_]
+
+ inspect_container_mock = MagicMock(side_effect=_inspect_container_effect)
+
+ with patch.object(docker_mod, 'inspect_container', inspect_container_mock):
+ ret = docker_mod.compare_container('container1', 'container2')
+ self.assertEqual(ret, {})
+
def test_resolve_tag(self):
'''
Test the resolve_tag function. It runs docker.insect_image on the image
diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py
index c7ec9eff86..309330fe92 100644
--- a/tests/unit/modules/test_file.py
+++ b/tests/unit/modules/test_file.py
@@ -285,10 +285,11 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
"We shall say 'Ni' again to you, if you do not appease us."
])
filemod.blockreplace(self.tfile.name,
- '#-- START BLOCK 1',
- '#-- END BLOCK 1',
- new_multiline_content,
- backup=False)
+ marker_start='#-- START BLOCK 1',
+ marker_end='#-- END BLOCK 1',
+ content=new_multiline_content,
+ backup=False,
+ append_newline=None)
with salt.utils.files.fopen(self.tfile.name, 'rb') as fp:
filecontent = fp.read()
@@ -306,9 +307,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
- '#-- START BLOCK 2',
- '#-- END BLOCK 2',
- new_content,
+ marker_start='#-- START BLOCK 2',
+ marker_end='#-- END BLOCK 2',
+ content=new_content,
append_if_not_found=False,
backup=False
)
@@ -319,9 +320,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
)
filemod.blockreplace(self.tfile.name,
- '#-- START BLOCK 2',
- '#-- END BLOCK 2',
- new_content,
+ marker_start='#-- START BLOCK 2',
+ marker_end='#-- END BLOCK 2',
+ content=new_content,
backup=False,
append_if_not_found=True)
@@ -382,9 +383,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
- '#-- START BLOCK 2',
- '#-- END BLOCK 2',
- new_content,
+ marker_start='#-- START BLOCK 2',
+ marker_end='#-- END BLOCK 2',
+ content=new_content,
prepend_if_not_found=False,
backup=False
)
@@ -396,8 +397,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
fp.read())
filemod.blockreplace(self.tfile.name,
- '#-- START BLOCK 2', '#-- END BLOCK 2',
- new_content,
+ marker_start='#-- START BLOCK 2',
+ marker_end='#-- END BLOCK 2',
+ content=new_content,
backup=False,
prepend_if_not_found=True)
@@ -410,9 +412,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
def test_replace_partial_marked_lines(self):
filemod.blockreplace(self.tfile.name,
- '// START BLOCK',
- '// END BLOCK',
- 'new content 1',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 1',
backup=False)
with salt.utils.files.fopen(self.tfile.name, 'r') as fp:
@@ -420,7 +422,7 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
self.assertIn('new content 1', filecontent)
self.assertNotIn('to be removed', filecontent)
self.assertIn('first part of start line', filecontent)
- self.assertIn('first part of end line', filecontent)
+ self.assertNotIn('first part of end line', filecontent)
self.assertIn('part of start line not removed', filecontent)
self.assertIn('part of end line not removed', filecontent)
@@ -430,7 +432,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
filemod.blockreplace(
self.tfile.name,
- '// START BLOCK', '// END BLOCK', 'new content 2',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 2',
backup=fext)
self.assertTrue(os.path.exists(bak_file))
@@ -441,22 +445,27 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
bak_file = '{0}{1}'.format(self.tfile.name, fext)
filemod.blockreplace(self.tfile.name,
- '// START BLOCK', '// END BLOCK', 'new content 3',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 3',
backup=False)
self.assertFalse(os.path.exists(bak_file))
def test_no_modifications(self):
filemod.blockreplace(self.tfile.name,
- '// START BLOCK', '// END BLOCK',
- 'new content 4',
- backup=False)
+ marker_start='#-- START BLOCK 1',
+ marker_end='#-- END BLOCK 1',
+ content='new content 4',
+ backup=False,
+ append_newline=None)
before_ctime = os.stat(self.tfile.name).st_mtime
filemod.blockreplace(self.tfile.name,
- '// START BLOCK',
- '// END BLOCK',
- 'new content 4',
- backup=False)
+ marker_start='#-- START BLOCK 1',
+ marker_end='#-- END BLOCK 1',
+ content='new content 4',
+ backup=False,
+ append_newline=None)
after_ctime = os.stat(self.tfile.name).st_mtime
self.assertEqual(before_ctime, after_ctime)
@@ -464,9 +473,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
def test_dry_run(self):
before_ctime = os.stat(self.tfile.name).st_mtime
filemod.blockreplace(self.tfile.name,
- '// START BLOCK',
- '// END BLOCK',
- 'new content 5',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 5',
dry_run=True)
after_ctime = os.stat(self.tfile.name).st_mtime
@@ -474,18 +483,18 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
def test_show_changes(self):
ret = filemod.blockreplace(self.tfile.name,
- '// START BLOCK',
- '// END BLOCK',
- 'new content 6',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 6',
backup=False,
show_changes=True)
self.assertTrue(ret.startswith('---')) # looks like a diff
ret = filemod.blockreplace(self.tfile.name,
- '// START BLOCK',
- '// END BLOCK',
- 'new content 7',
+ marker_start='// START BLOCK',
+ marker_end='// END BLOCK',
+ content='new content 7',
backup=False,
show_changes=False)
@@ -496,9 +505,9 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
CommandExecutionError,
filemod.blockreplace,
self.tfile.name,
- '#-- START BLOCK UNFINISHED',
- '#-- END BLOCK UNFINISHED',
- 'foobar',
+ marker_start='#-- START BLOCK UNFINISHED',
+ marker_end='#-- END BLOCK UNFINISHED',
+ content='foobar',
backup=False
)
diff --git a/tests/unit/modules/test_gentoo_service.py b/tests/unit/modules/test_gentoo_service.py
index b8e614566a..d88a02bad5 100644
--- a/tests/unit/modules/test_gentoo_service.py
+++ b/tests/unit/modules/test_gentoo_service.py
@@ -151,7 +151,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.start('name'))
- mock.assert_called_once_with('/etc/init.d/name start', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name start',
+ ignore_retcode=False,
+ python_shell=False)
def test_stop(self):
'''
@@ -160,7 +162,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.stop('name'))
- mock.assert_called_once_with('/etc/init.d/name stop', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name stop',
+ ignore_retcode=False,
+ python_shell=False)
def test_restart(self):
'''
@@ -169,7 +173,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.restart('name'))
- mock.assert_called_once_with('/etc/init.d/name restart', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name restart',
+ ignore_retcode=False,
+ python_shell=False)
def test_reload_(self):
'''
@@ -178,7 +184,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.reload_('name'))
- mock.assert_called_once_with('/etc/init.d/name reload', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name reload',
+ ignore_retcode=False,
+ python_shell=False)
def test_zap(self):
'''
@@ -187,7 +195,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=True)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.zap('name'))
- mock.assert_called_once_with('/etc/init.d/name zap', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name zap',
+ ignore_retcode=False,
+ python_shell=False)
def test_status(self):
'''
@@ -201,25 +211,33 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertTrue(gentoo_service.status('name'))
- mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name status',
+ ignore_retcode=True,
+ python_shell=False)
# service is not running
mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
- mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name status',
+ ignore_retcode=True,
+ python_shell=False)
# service is stopped
mock = MagicMock(return_value=3)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
- mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name status',
+ ignore_retcode=True,
+ python_shell=False)
# service has crashed
mock = MagicMock(return_value=32)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': mock}):
self.assertFalse(gentoo_service.status('name'))
- mock.assert_called_once_with('/etc/init.d/name status', python_shell=False)
+ mock.assert_called_once_with('/etc/init.d/name status',
+ ignore_retcode=True,
+ python_shell=False)
def test_enable(self):
'''
@@ -228,7 +246,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
rc_update_mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name'))
- rc_update_mock.assert_called_once_with('rc-update add name', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update add name',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service from 'l1' to 'l2' runlevel
@@ -238,8 +258,12 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels='l2'))
- rc_update_mock.assert_has_calls([call('rc-update delete name l1', python_shell=False),
- call('rc-update add name l2', python_shell=False)])
+ rc_update_mock.assert_has_calls([call('rc-update delete name l1',
+ ignore_retcode=False,
+ python_shell=False),
+ call('rc-update add name l2',
+ ignore_retcode=False,
+ python_shell=False)])
rc_update_mock.reset_mock()
# requested levels are the same as the current ones
@@ -260,7 +284,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2', 'l1']))
- rc_update_mock.assert_called_once_with('rc-update add name l2', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update add name l2',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# remove service from 'l1' runlevel
@@ -269,15 +295,21 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2']))
- rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service from 'l2' add to 'l3', leaving at l1
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l1', 'l3']))
- rc_update_mock.assert_has_calls([call('rc-update delete name l2', python_shell=False),
- call('rc-update add name l3', python_shell=False)])
+ rc_update_mock.assert_has_calls([call('rc-update delete name l2',
+ ignore_retcode=False,
+ python_shell=False),
+ call('rc-update add name l3',
+ ignore_retcode=False,
+ python_shell=False)])
rc_update_mock.reset_mock()
# remove from l1, l3, and add to l2, l4, and leave at l5
@@ -286,15 +318,21 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.enable('name', runlevels=['l2', 'l4', 'l5']))
- rc_update_mock.assert_has_calls([call('rc-update delete name l1 l3', python_shell=False),
- call('rc-update add name l2 l4', python_shell=False)])
+ rc_update_mock.assert_has_calls([call('rc-update delete name l1 l3',
+ ignore_retcode=False,
+ python_shell=False),
+ call('rc-update add name l2 l4',
+ ignore_retcode=False,
+ python_shell=False)])
rc_update_mock.reset_mock()
# rc-update failed
rc_update_mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name'))
- rc_update_mock.assert_called_once_with('rc-update add name', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update add name',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service delete failed
@@ -303,7 +341,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name', runlevels='l2'))
- rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service delete succeeds. add fails
@@ -312,8 +352,12 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.enable('name', runlevels='l2'))
- rc_update_mock.assert_has_calls([call('rc-update delete name l1', python_shell=False),
- call('rc-update add name l2', python_shell=False)])
+ rc_update_mock.assert_has_calls([call('rc-update delete name l1',
+ ignore_retcode=False,
+ python_shell=False),
+ call('rc-update add name l2',
+ ignore_retcode=False,
+ python_shell=False)])
rc_update_mock.reset_mock()
def test_disable(self):
@@ -323,7 +367,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
rc_update_mock = MagicMock(return_value=0)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name'))
- rc_update_mock.assert_called_once_with('rc-update delete name', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update delete name',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# disable service
@@ -334,6 +380,7 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels='l1'))
rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
python_shell=False)
rc_update_mock.reset_mock()
@@ -344,6 +391,7 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1']))
rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
python_shell=False)
rc_update_mock.reset_mock()
@@ -354,6 +402,7 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1']))
rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
python_shell=False)
rc_update_mock.reset_mock()
@@ -373,6 +422,7 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertTrue(gentoo_service.disable('name', runlevels=['l1', 'l3']))
rc_update_mock.assert_called_once_with('rc-update delete name l1 l3',
+ ignore_retcode=False,
python_shell=False)
rc_update_mock.reset_mock()
@@ -380,7 +430,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
rc_update_mock = MagicMock(return_value=1)
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name'))
- rc_update_mock.assert_called_once_with('rc-update delete name', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update delete name',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service delete failed
@@ -389,7 +441,9 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.run': level_list_mock}):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name', runlevels='l1'))
- rc_update_mock.assert_called_once_with('rc-update delete name l1', python_shell=False)
+ rc_update_mock.assert_called_once_with('rc-update delete name l1',
+ ignore_retcode=False,
+ python_shell=False)
rc_update_mock.reset_mock()
# move service delete succeeds. add fails
@@ -399,6 +453,7 @@ class GentooServicesTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(gentoo_service.__salt__, {'cmd.retcode': rc_update_mock}):
self.assertFalse(gentoo_service.disable('name', runlevels=['l1', 'l3']))
rc_update_mock.assert_called_once_with('rc-update delete name l1 l3',
+ ignore_retcode=False,
python_shell=False)
rc_update_mock.reset_mock()
diff --git a/tests/unit/modules/test_gpg.py b/tests/unit/modules/test_gpg.py
new file mode 100644
index 0000000000..564510e9c2
--- /dev/null
+++ b/tests/unit/modules/test_gpg.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Gareth J. Greenaway <gareth@saltstack.com>`
+'''
+
+# Import Python libs
+from __future__ import absolute_import, print_function, unicode_literals
+
+import datetime
+import time
+
+# Import Salt Testing Libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.unit import TestCase, skipIf
+from tests.support.mock import (
+ MagicMock,
+ patch,
+ NO_MOCK,
+ NO_MOCK_REASON
+)
+
+# Import Salt Libs
+import salt.modules.gpg as gpg
+
+
+try:
+ import gnupg # pylint: disable=import-error,unused-import
+ HAS_GPG = True
+except ImportError:
+ HAS_GPG = False
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class GpgTestCase(TestCase, LoaderModuleMockMixin):
+ '''
+ Test cases for salt.modules.gpg
+ '''
+ def setup_loader_modules(self):
+ return {gpg: {'__salt__': {}}}
+
+ @skipIf(not HAS_GPG, 'GPG Module Unavailable')
+ def test_list_keys(self):
+ '''
+ Test gpg.list_keys
+ '''
+
+ _user_mock = {u'shell': u'/bin/bash',
+ u'workphone': u'',
+ u'uid': 0,
+ u'passwd': u'x',
+ u'roomnumber': u'',
+ u'gid': 0,
+ u'groups': [
+ u'root'
+ ],
+ u'home': u'/root',
+ u'fullname': u'root',
+ u'homephone': u'',
+ u'name': u'root'}
+
+ _list_result = [{u'dummy': u'',
+ u'keyid': u'xxxxxxxxxxxxxxxx',
+ u'expires': u'2011188692',
+ u'sigs': [],
+ u'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
+ u'length': u'4096',
+ u'ownertrust': u'-',
+ u'sig': u'',
+ u'algo': u'1',
+ u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ u'date': u'1506612692',
+ u'trust': u'-',
+ u'type': u'pub',
+                         u'uids': [u'GPG Person <person@example.com>']}]
+
+ _expected_result = [{u'keyid': u'xxxxxxxxxxxxxxxx',
+                             u'uids': [u'GPG Person <person@example.com>'],
+ u'created': '2017-09-28',
+ u'expires': '2033-09-24',
+ u'keyLength': u'4096',
+ u'ownerTrust': u'Unknown',
+ u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ u'trust': u'Unknown'}]
+
+ mock_opt = MagicMock(return_value='root')
+ with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
+ with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
+ with patch.object(gpg, '_list_keys', return_value=_list_result):
+ self.assertEqual(gpg.list_keys(), _expected_result)
+
+ @skipIf(not HAS_GPG, 'GPG Module Unavailable')
+ def test_get_key(self):
+ '''
+ Test gpg.get_key
+ '''
+
+ _user_mock = {u'shell': u'/bin/bash',
+ u'workphone': u'',
+ u'uid': 0,
+ u'passwd': u'x',
+ u'roomnumber': u'',
+ u'gid': 0,
+ u'groups': [
+ u'root'
+ ],
+ u'home': u'/root',
+ u'fullname': u'root',
+ u'homephone': u'',
+ u'name': u'root'}
+
+ _list_result = [{u'dummy': u'',
+ u'keyid': u'xxxxxxxxxxxxxxxx',
+ u'expires': u'2011188692',
+ u'sigs': [],
+ u'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
+ u'length': u'4096',
+ u'ownertrust': u'-',
+ u'sig': u'',
+ u'algo': u'1',
+ u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ u'date': u'1506612692',
+ u'trust': u'-',
+ u'type': u'pub',
+                         u'uids': [u'GPG Person <person@example.com>']}]
+
+ _expected_result = {u'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ u'keyid': u'xxxxxxxxxxxxxxxx',
+                            u'uids': [u'GPG Person <person@example.com>'],
+ u'created': u'2017-09-28',
+ u'trust': u'Unknown',
+ u'ownerTrust': u'Unknown',
+ u'expires': u'2033-09-24',
+ u'keyLength': u'4096'}
+
+ mock_opt = MagicMock(return_value='root')
+ with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
+ with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
+ with patch.object(gpg, '_list_keys', return_value=_list_result):
+ ret = gpg.get_key('xxxxxxxxxxxxxxxx')
+ self.assertEqual(ret, _expected_result)
+
+ @skipIf(not HAS_GPG, 'GPG Module Unavailable')
+ def test_delete_key(self):
+ '''
+ Test gpg.delete_key
+ '''
+
+ _user_mock = {u'shell': u'/bin/bash',
+ u'workphone': u'',
+ u'uid': 0,
+ u'passwd': u'x',
+ u'roomnumber': u'',
+ u'gid': 0,
+ u'groups': [
+ u'root'
+ ],
+ u'home': u'/root',
+ u'fullname': u'root',
+ u'homephone': u'',
+ u'name': u'root'}
+
+ _list_result = [{'dummy': u'',
+ 'keyid': u'xxxxxxxxxxxxxxxx',
+ 'expires': u'2011188692',
+ 'sigs': [],
+ 'subkeys': [[u'xxxxxxxxxxxxxxxx', u'e', u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']],
+ 'length': u'4096',
+ 'ownertrust': u'-',
+ 'sig': u'',
+ 'algo': u'1',
+ 'fingerprint': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+ 'date': u'1506612692',
+ 'trust': u'-',
+ 'type': u'pub',
+                         'uids': [u'GPG Person <person@example.com>']}]
+
+ _expected_result = {u'res': True,
+ u'message': u'Secret key for xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx deleted\nPublic key for xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx deleted'}
+
+ mock_opt = MagicMock(return_value='root')
+ with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
+ with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
+ with patch.object(gpg, '_list_keys', return_value=_list_result):
+ with patch('salt.modules.gpg.gnupg.GPG.delete_keys', MagicMock(return_value='ok')):
+ ret = gpg.delete_key('xxxxxxxxxxxxxxxx', delete_secret=True)
+ self.assertEqual(ret, _expected_result)
+
+ @skipIf(not HAS_GPG, 'GPG Module Unavailable')
+ def test_search_keys(self):
+ '''
+ Test gpg.search_keys
+ '''
+
+ _user_mock = {'shell': '/bin/bash',
+ 'workphone': '',
+ 'uid': 0,
+ 'passwd': 'x',
+ 'roomnumber': '',
+ 'gid': 0,
+ 'groups': [
+ 'root'
+ ],
+ 'home': '/root',
+ 'fullname': 'root',
+ 'homephone': '',
+ 'name': 'root'}
+
+ _search_result = [{u'keyid': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
+                           u'uids': [u'GPG Person <person@example.com>'],
+ u'expires': u'',
+ u'sigs': [],
+ u'length': u'1024',
+ u'algo': u'17',
+ u'date': int(time.mktime(datetime.datetime(2004, 11, 13).timetuple())),
+ u'type': u'pub'}]
+
+    _expected_result = [{u'uids': [u'GPG Person <person@example.com>'],
+ 'created': '2004-11-13',
+ u'keyLength': u'1024',
+ u'keyid': u'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'}]
+
+ mock_opt = MagicMock(return_value='root')
+ with patch.dict(gpg.__salt__, {'user.info': MagicMock(return_value=_user_mock)}):
+ with patch.dict(gpg.__salt__, {'config.option': mock_opt}):
+ with patch.object(gpg, '_search_keys', return_value=_search_result):
+ ret = gpg.search_keys('person@example.com')
+ self.assertEqual(ret, _expected_result)
diff --git a/tests/unit/modules/test_kapacitor.py b/tests/unit/modules/test_kapacitor.py
index f8b3adcaa4..5eb592adce 100644
--- a/tests/unit/modules/test_kapacitor.py
+++ b/tests/unit/modules/test_kapacitor.py
@@ -14,6 +14,10 @@ from tests.support.mock import Mock, patch
class KapacitorTestCase(TestCase, LoaderModuleMockMixin):
+ env = {
+ 'KAPACITOR_UNSAFE_SSL': 'false',
+ 'KAPACITOR_URL': 'http://localhost:9092'
+ }
def setup_loader_modules(self):
return {
@@ -51,22 +55,22 @@ class KapacitorTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(kapacitor.__salt__, {'cmd.run_all': cmd_mock}):
kapacitor.define_task('taskname', '/tmp/script.tick')
cmd_mock.assert_called_once_with('kapacitor define taskname '
- '-tick /tmp/script.tick -type stream')
+ '-tick /tmp/script.tick -type stream', env=self.__class__.env)
def test_enable_task(self):
cmd_mock = Mock(return_value={'retcode': 0})
with patch.dict(kapacitor.__salt__, {'cmd.run_all': cmd_mock}):
kapacitor.enable_task('taskname')
- cmd_mock.assert_called_once_with('kapacitor enable taskname')
+ cmd_mock.assert_called_once_with('kapacitor enable taskname', env=self.__class__.env)
def test_disable_task(self):
cmd_mock = Mock(return_value={'retcode': 0})
with patch.dict(kapacitor.__salt__, {'cmd.run_all': cmd_mock}):
kapacitor.disable_task('taskname')
- cmd_mock.assert_called_once_with('kapacitor disable taskname')
+ cmd_mock.assert_called_once_with('kapacitor disable taskname', env=self.__class__.env)
def test_delete_task(self):
cmd_mock = Mock(return_value={'retcode': 0})
with patch.dict(kapacitor.__salt__, {'cmd.run_all': cmd_mock}):
kapacitor.delete_task('taskname')
- cmd_mock.assert_called_once_with('kapacitor delete tasks taskname')
+ cmd_mock.assert_called_once_with('kapacitor delete tasks taskname', env=self.__class__.env)
diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py
index 9f24df40ac..4cdcf6036a 100644
--- a/tests/unit/modules/test_kubernetes.py
+++ b/tests/unit/modules/test_kubernetes.py
@@ -5,6 +5,7 @@
# Import Python Libs
from __future__ import absolute_import
+import os
from contextlib import contextmanager
@@ -18,6 +19,7 @@ from tests.support.mock import (
NO_MOCK_REASON
)
+import salt.utils.files
from salt.modules import kubernetes
@@ -29,9 +31,6 @@ def mock_kubernetes_library():
test, which blows up. This prevents that specific blow-up once
"""
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
- mock_kubernetes_lib.client.configuration.ssl_ca_cert = ''
- mock_kubernetes_lib.client.configuration.cert_file = ''
- mock_kubernetes_lib.client.configuration.key_file = ''
yield mock_kubernetes_lib
@@ -56,7 +55,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
:return:
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_node.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_node_name'}}]}}
@@ -70,7 +69,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
:return:
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"list_namespaced_deployment.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_deployment_name'}}]}}
@@ -85,7 +84,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
:return:
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_namespaced_service.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_service_name'}}]}}
@@ -99,7 +98,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
:return:
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_namespaced_pod.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_pod_name'}}]}}
@@ -115,7 +114,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
with patch('salt.modules.kubernetes.show_deployment', Mock(return_value=None)):
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="")
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': ''}}
@@ -131,7 +130,7 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
:return:
'''
with mock_kubernetes_library() as mock_kubernetes_lib:
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"create_namespaced_deployment.return_value.to_dict.return_value": {}}
)
@@ -141,23 +140,47 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
kubernetes.kubernetes.client.ExtensionsV1beta1Api().
create_namespaced_deployment().to_dict.called)
- def test_setup_client_key_file(self):
+ @staticmethod
+ def settings(name, value=None):
'''
- Test that the `kubernetes.client-key-file` configuration isn't overwritten
+ Test helper
+ :return: settings or default
+ '''
+ data = {
+ 'kubernetes.kubeconfig': '/home/testuser/.minikube/kubeconfig.cfg',
+ 'kubernetes.context': 'minikube'
+ }
+ return data.get(name, value)
+
+ def test_setup_kubeconfig_file(self):
+ '''
+ Test that the `kubernetes.kubeconfig` configuration isn't overwritten
:return:
'''
- def settings(name, value=None):
- data = {
- 'kubernetes.client-key-file': '/home/testuser/.minikube/client.key',
- }
- return data.get(name, value)
+ with mock_kubernetes_library() as mock_kubernetes_lib:
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
+ mock_kubernetes_lib.config.load_kube_config = Mock()
+ config = kubernetes._setup_conn()
+ self.assertEqual(
+ self.settings('kubernetes.kubeconfig'),
+ config['kubeconfig'],
+ )
- with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=settings)}):
- config = kubernetes._setup_conn()
- self.assertEqual(
- settings('kubernetes.client-key-file'),
- config['key_file'],
- )
+ def test_setup_kubeconfig_data_overwrite(self):
+ '''
+ Test that provided `kubernetes.kubeconfig` configuration is overwritten
+ by provided kubeconfig_data in the command
+ :return:
+ '''
+ with mock_kubernetes_library() as mock_kubernetes_lib:
+ with patch.dict(kubernetes.__salt__, {'config.option': Mock(side_effect=self.settings)}):
+ mock_kubernetes_lib.config.load_kube_config = Mock()
+ config = kubernetes._setup_conn(kubeconfig_data='MTIzNDU2Nzg5MAo=', context='newcontext')
+ self.assertTrue(config['kubeconfig'].startswith('/tmp/salt-kubeconfig-'))
+ self.assertTrue(os.path.exists(config['kubeconfig']))
+ with salt.utils.files.fopen(config['kubeconfig'], 'r') as kcfg:
+ self.assertEqual('1234567890\n', kcfg.read())
+ kubernetes._cleanup(**config)
def test_node_labels(self):
'''
diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py
index eb8ad1225b..6e17148de1 100644
--- a/tests/unit/modules/test_openscap.py
+++ b/tests/unit/modules/test_openscap.py
@@ -28,8 +28,10 @@ class OpenscapTestCase(TestCase):
policy_file = '/usr/share/openscap/policy-file-xccdf.xml'
def setUp(self):
+ import salt.modules.openscap
+ salt.modules.openscap.__salt__ = MagicMock()
patchers = [
- patch('salt.modules.openscap.Caller', MagicMock()),
+ patch('salt.modules.openscap.__salt__', MagicMock()),
patch('salt.modules.openscap.shutil.rmtree', Mock()),
patch(
'salt.modules.openscap.tempfile.mkdtemp',
@@ -68,8 +70,7 @@ class OpenscapTestCase(TestCase):
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE)
- openscap.Caller().cmd.assert_called_once_with(
- 'cp.push_dir', self.random_temp_dir)
+ openscap.__salt__['cp.push_dir'].assert_called_once_with(self.random_temp_dir)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
@@ -106,8 +107,7 @@ class OpenscapTestCase(TestCase):
cwd=openscap.tempfile.mkdtemp.return_value,
stderr=PIPE,
stdout=PIPE)
- openscap.Caller().cmd.assert_called_once_with(
- 'cp.push_dir', self.random_temp_dir)
+ openscap.__salt__['cp.push_dir'].assert_called_once_with(self.random_temp_dir)
self.assertEqual(openscap.shutil.rmtree.call_count, 1)
self.assertEqual(
response,
diff --git a/tests/unit/modules/test_pip.py b/tests/unit/modules/test_pip.py
index 25f8db567f..bb346ba2cc 100644
--- a/tests/unit/modules/test_pip.py
+++ b/tests/unit/modules/test_pip.py
@@ -937,6 +937,27 @@ class PipTestCase(TestCase, LoaderModuleMockMixin):
)
self.assertEqual(ret, eggs)
+ mock = MagicMock(
+ return_value={
+ 'retcode': 0,
+ 'stdout': '\n'.join(eggs)
+ }
+ )
+ # Passing env_vars passes them to underlying command?
+ with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
+ with patch('salt.modules.pip.version',
+ MagicMock(return_value='6.1.1')):
+ ret = pip.freeze(env_vars={"foo": "bar"})
+ mock.assert_called_once_with(
+ ['pip', 'freeze'],
+ cwd=None,
+ runas=None,
+ use_vt=False,
+ python_shell=False,
+ env={"foo": "bar"}
+ )
+ self.assertEqual(ret, eggs)
+
# Non zero returncode raises exception?
mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
diff --git a/tests/unit/netapi/rest_tornado/test_utils.py b/tests/unit/netapi/rest_tornado/test_utils.py
index 5df66cb2d1..5ba08a31f3 100644
--- a/tests/unit/netapi/rest_tornado/test_utils.py
+++ b/tests/unit/netapi/rest_tornado/test_utils.py
@@ -91,7 +91,7 @@ class TestEventListener(AsyncTestCase):
{'sock_dir': SOCK_DIR,
'transport': 'zeromq'})
self._finished = False # fit to event_listener's behavior
- event_future = event_listener.get_event(self, 'evt1', self.stop) # get an event future
+ event_future = event_listener.get_event(self, 'evt1', callback=self.stop) # get an event future
me.fire_event({'data': 'foo2'}, 'evt2') # fire an event we don't want
me.fire_event({'data': 'foo1'}, 'evt1') # fire an event we do want
self.wait() # wait for the future
diff --git a/tests/unit/renderers/test_aws_kms.py b/tests/unit/renderers/test_aws_kms.py
new file mode 100644
index 0000000000..5855859f74
--- /dev/null
+++ b/tests/unit/renderers/test_aws_kms.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+'''
+Unit tests for AWS KMS Decryption Renderer.
+'''
+# pylint: disable=protected-access
+
+# Import Python Libs
+from __future__ import absolute_import, print_function, unicode_literals
+
+# Import Salt Testing libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.unit import skipIf, TestCase
+from tests.support.mock import (
+ NO_MOCK,
+ NO_MOCK_REASON,
+ MagicMock,
+ patch
+)
+
+# Import Salt libs
+import salt.exceptions
+import salt.renderers.aws_kms as aws_kms
+
+try:
+ import botocore.exceptions
+ import botocore.session
+ import botocore.stub
+ NO_BOTOCORE = False
+except ImportError:
+ NO_BOTOCORE = True
+
+try:
+ import cryptography.fernet as fernet
+ NO_FERNET = False
+except ImportError:
+ NO_FERNET = True
+
+
+PLAINTEXT_SECRET = 'Use more salt.'
+ENCRYPTED_DATA_KEY = 'encrypted-data-key'
+PLAINTEXT_DATA_KEY = b'plaintext-data-key'
+BASE64_DATA_KEY = b'cGxhaW50ZXh0LWRhdGEta2V5'
+AWS_PROFILE = 'test-profile'
+REGION_NAME = 'us-test-1'
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(NO_BOTOCORE, 'Unable to import botocore libraries')
+class AWSKMSTestCase(TestCase, LoaderModuleMockMixin):
+
+ '''
+ unit test AWS KMS renderer
+ '''
+
+ def setup_loader_modules(self):
+ return {aws_kms: {}}
+
+ def test__cfg_data_key(self):
+ '''
+ _cfg_data_key returns the aws_kms:data_key from configuration.
+ '''
+ config = {'aws_kms': {'data_key': ENCRYPTED_DATA_KEY}}
+ with patch.dict(aws_kms.__salt__, {'config.get': config.get}): # pylint: disable=no-member
+ self.assertEqual(aws_kms._cfg_data_key(), ENCRYPTED_DATA_KEY,
+ '_cfg_data_key did not return the data key configured in __salt__.')
+ with patch.dict(aws_kms.__opts__, config): # pylint: disable=no-member
+ self.assertEqual(aws_kms._cfg_data_key(), ENCRYPTED_DATA_KEY,
+ '_cfg_data_key did not return the data key configured in __opts__.')
+
+ def test__cfg_data_key_no_key(self):
+ '''
+ When no aws_kms:data_key is configured,
+ calling _cfg_data_key should raise a SaltConfigurationError
+ '''
+ self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._cfg_data_key)
+
+ def test__session_profile(self): # pylint: disable=no-self-use
+ '''
+ _session instantiates boto3.Session with the configured profile_name
+ '''
+ with patch.object(aws_kms, '_cfg', lambda k: AWS_PROFILE):
+ with patch('boto3.Session') as session:
+ aws_kms._session()
+ session.assert_called_with(profile_name=AWS_PROFILE)
+
+ def test__session_noprofile(self):
+ '''
+ _session raises a SaltConfigurationError
+ when boto3 raises botocore.exceptions.ProfileNotFound.
+ '''
+ with patch('boto3.Session') as session:
+ session.side_effect = botocore.exceptions.ProfileNotFound(profile=AWS_PROFILE)
+ self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._session)
+
+ def test__session_noregion(self):
+ '''
+ _session raises a SaltConfigurationError
+ when boto3 raises botocore.exceptions.NoRegionError
+ '''
+ with patch('boto3.Session') as session:
+ session.side_effect = botocore.exceptions.NoRegionError
+ self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._session)
+
+ def test__kms(self): # pylint: disable=no-self-use
+ '''
+ _kms calls boto3.Session.client with 'kms' as its only argument.
+ '''
+ with patch('boto3.Session.client') as client:
+ aws_kms._kms()
+ client.assert_called_with('kms')
+
+ def test__kms_noregion(self):
+ '''
+ _kms raises a SaltConfigurationError
+ when boto3 raises a NoRegionError.
+ '''
+ with patch('boto3.Session') as session:
+ session.side_effect = botocore.exceptions.NoRegionError
+ self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._kms)
+
+ def test__api_decrypt(self): # pylint: disable=no-self-use
+ '''
+ _api_decrypt_response calls kms.decrypt with the
+ configured data key as the CiphertextBlob kwarg.
+ '''
+ kms_client = MagicMock()
+ with patch.object(aws_kms, '_kms') as kms_getter:
+ kms_getter.return_value = kms_client
+ with patch.object(aws_kms, '_cfg_data_key', lambda: ENCRYPTED_DATA_KEY):
+ aws_kms._api_decrypt()
+ kms_client.decrypt.assert_called_with(CiphertextBlob=ENCRYPTED_DATA_KEY) # pylint: disable=no-member
+
+ def test__api_decrypt_badkey(self):
+ '''
+ _api_decrypt_response raises SaltConfigurationError
+ when kms.decrypt raises a botocore.exceptions.ClientError
+ with an error_code of 'InvalidCiphertextException'.
+ '''
+ kms_client = MagicMock()
+ kms_client.decrypt.side_effect = botocore.exceptions.ClientError( # pylint: disable=no-member
+ error_response={'Error': {'Code': 'InvalidCiphertextException'}},
+ operation_name='Decrypt',
+ )
+ with patch.object(aws_kms, '_kms') as kms_getter:
+ kms_getter.return_value = kms_client
+ with patch.object(aws_kms, '_cfg_data_key', lambda: ENCRYPTED_DATA_KEY):
+ self.assertRaises(salt.exceptions.SaltConfigurationError, aws_kms._api_decrypt)
+
+ def test__plaintext_data_key(self):
+ '''
+ _plaintext_data_key returns the 'Plaintext' value from the response.
+ It caches the response and only calls _api_decrypt exactly once.
+ '''
+ with patch.object(aws_kms, '_api_decrypt', return_value={'KeyId': 'key-id', 'Plaintext': PLAINTEXT_DATA_KEY}) as api_decrypt:
+ self.assertEqual(aws_kms._plaintext_data_key(), PLAINTEXT_DATA_KEY)
+ aws_kms._plaintext_data_key()
+ api_decrypt.assert_called_once()
+
+ def test__base64_plaintext_data_key(self):
+ '''
+ _base64_plaintext_data_key returns the urlsafe base64 encoded plain text data key.
+ '''
+ with patch.object(aws_kms, '_plaintext_data_key', return_value=PLAINTEXT_DATA_KEY):
+ self.assertEqual(aws_kms._base64_plaintext_data_key(), BASE64_DATA_KEY)
+
+ @skipIf(NO_FERNET, 'Failed to import cryptography.fernet')
+ def test__decrypt_ciphertext(self):
+ '''
+ test _decrypt_ciphertext
+ '''
+ test_key = fernet.Fernet.generate_key()
+ crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())
+ with patch.object(aws_kms, '_base64_plaintext_data_key', return_value=test_key):
+ self.assertEqual(aws_kms._decrypt_ciphertext(crypted), PLAINTEXT_SECRET)
+
+ @skipIf(NO_FERNET, 'Failed to import cryptography.fernet')
+ def test__decrypt_object(self):
+ '''
+ Test _decrypt_object
+ '''
+ test_key = fernet.Fernet.generate_key()
+ crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())
+ secret_map = {'secret': PLAINTEXT_SECRET}
+ crypted_map = {'secret': crypted}
+
+ secret_list = [PLAINTEXT_SECRET]
+ crypted_list = [crypted]
+
+ with patch.object(aws_kms, '_base64_plaintext_data_key', return_value=test_key):
+ self.assertEqual(aws_kms._decrypt_object(PLAINTEXT_SECRET), PLAINTEXT_SECRET)
+ self.assertEqual(aws_kms._decrypt_object(crypted), PLAINTEXT_SECRET)
+ self.assertEqual(aws_kms._decrypt_object(crypted_map), secret_map)
+ self.assertEqual(aws_kms._decrypt_object(crypted_list), secret_list)
+ self.assertEqual(aws_kms._decrypt_object(None), None)
+
+ @skipIf(NO_FERNET, 'Failed to import cryptography.fernet')
+ def test_render(self):
+ '''
+ Test that we can decrypt some data.
+ '''
+ test_key = fernet.Fernet.generate_key()
+ crypted = fernet.Fernet(test_key).encrypt(PLAINTEXT_SECRET.encode())
+ with patch.object(aws_kms, '_base64_plaintext_data_key', return_value=test_key):
+ self.assertEqual(aws_kms.render(crypted), PLAINTEXT_SECRET)
diff --git a/tests/unit/ssh/test_ssh.py b/tests/unit/ssh/test_ssh.py
new file mode 100644
index 0000000000..b80483b0b7
--- /dev/null
+++ b/tests/unit/ssh/test_ssh.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+'''
+ :codeauthor: :email:`Daniel Wallace <danielwallace@gatech.edu>`
-'''
-
-# Import Python Libs
-from __future__ import absolute_import, unicode_literals, print_function
-
-# Import Salt Testing Libs
-from tests.support.mixins import LoaderModuleMockMixin
-from tests.support.unit import TestCase, skipIf
-from tests.support.mock import (
- MagicMock,
- patch,
- NO_MOCK,
- NO_MOCK_REASON
-)
-
-# Import Salt Libs
-import salt.states.win_update as win_update
-
-
-class MockPyWinUpdater(object):
- '''
- Mock PyWinUpdater class
- '''
- def __init__(self):
- pass
-
- @staticmethod
- def SetCategories(arg):
- '''
- Mock SetCategories
- '''
- return arg
-
- @staticmethod
- def SetIncludes(arg):
- '''
- Mock SetIncludes
- '''
- return arg
-
- @staticmethod
- def GetInstallationResults():
- '''
- Mock GetInstallationResults
- '''
- return True
-
- @staticmethod
- def GetDownloadResults():
- '''
- Mock GetDownloadResults
- '''
- return True
-
- @staticmethod
- def SetSkips(arg):
- return True
-
-
-@skipIf(NO_MOCK, NO_MOCK_REASON)
-class WinUpdateTestCase(TestCase, LoaderModuleMockMixin):
- '''
- Validate the win_update state
- '''
- def setup_loader_modules(self):
- return {win_update: {'PyWinUpdater': MockPyWinUpdater}}
-
- def test_installed(self):
- '''
- Test to install specified windows updates
- '''
- ret = {'name': 'salt',
- 'changes': {},
- 'result': False,
- 'comment': '',
- 'warnings': ["The 'win_update' module is deprecated, and will "
- "be removed in Salt Fluorine. Please use the "
- "'win_wua' module instead."]}
-
- mock = MagicMock(side_effect=[['Saltstack', False, 5],
- ['Saltstack', True, 5],
- ['Saltstack', True, 5],
- ['Saltstack', True, 5]])
- with patch.object(win_update, '_search', mock):
- ret.update({'comment': 'Saltstack'})
- self.assertDictEqual(win_update.installed('salt'), ret)
-
- mock = MagicMock(side_effect=[['dude', False, 5],
- ['dude', True, 5],
- ['dude', True, 5]])
- with patch.object(win_update, '_download', mock):
- ret.update({'comment': 'Saltstackdude'})
- self.assertDictEqual(win_update.installed('salt'), ret)
-
- mock = MagicMock(side_effect=[['@Me', False, 5],
- ['@Me', True, 5]])
- with patch.object(win_update, '_install', mock):
- ret.update({'comment': 'Saltstackdude@Me'})
- self.assertDictEqual(win_update.installed('salt'), ret)
-
- ret.update({'changes': True, 'result': True})
- self.assertDictEqual(win_update.installed('salt'), ret)
-
- def test_downloaded(self):
- '''
- Test to cache updates for later install.
- '''
- ret = {'name': 'salt',
- 'changes': {},
- 'result': False,
- 'comment': '',
- 'warnings': ["The 'win_update' module is deprecated, and will "
- "be removed in Salt Fluorine. Please use the "
- "'win_wua' module instead."]}
-
- mock = MagicMock(side_effect=[['Saltstack', False, 5],
- ['Saltstack', True, 5],
- ['Saltstack', True, 5]])
- with patch.object(win_update, '_search', mock):
- ret.update({'comment': 'Saltstack'})
- self.assertDictEqual(win_update.downloaded('salt'), ret)
-
- mock = MagicMock(side_effect=[['dude', False, 5],
- ['dude', True, 5]])
- with patch.object(win_update, '_download', mock):
- ret.update({'comment': 'Saltstackdude'})
- self.assertDictEqual(win_update.downloaded('salt'), ret)
-
- ret.update({'changes': True, 'result': True})
- self.assertDictEqual(win_update.downloaded('salt'), ret)
diff --git a/tests/unit/test_payload.py b/tests/unit/test_payload.py
index 47312bf6e7..439354f677 100644
--- a/tests/unit/test_payload.py
+++ b/tests/unit/test_payload.py
@@ -7,24 +7,24 @@
~~~~~~~~~~~~~~~~~~~~~~~
'''
-# Import Salt libs
+# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import errno
import threading
+import datetime
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
-from tests.support.helpers import MockWraps
-from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
+from tests.support.mock import NO_MOCK, NO_MOCK_REASON
-# Import salt libs
-import salt.payload
+# Import Salt libs
+from salt.utils import immutabletypes
from salt.utils.odict import OrderedDict
import salt.exceptions
+import salt.payload
# Import 3rd-party libs
-import msgpack
import zmq
from salt.ext import six
@@ -49,15 +49,109 @@ class PayloadTestCase(TestCase):
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
- with patch('msgpack.version', (0, 1, 13)):
- msgpack.dumps = MockWraps(
- msgpack.dumps, 1, TypeError('ODict TypeError Forced')
- )
- payload = salt.payload.Serial('msgpack')
- idata = {'pillar': [OrderedDict(environment='dev')]}
- odata = payload.loads(payload.dumps(idata.copy()))
- self.assertNoOrderedDict(odata)
- self.assertEqual(idata, odata)
+ payload = salt.payload.Serial('msgpack')
+ idata = {'pillar': [OrderedDict(environment='dev')]}
+ odata = payload.loads(payload.dumps(idata.copy()))
+ self.assertNoOrderedDict(odata)
+ self.assertEqual(idata, odata)
+
+ def test_datetime_dump_load(self):
+ '''
+ Check the custom datetime handler can understand itself
+ '''
+ payload = salt.payload.Serial('msgpack')
+ dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
+ idata = {dtvalue: dtvalue}
+ sdata = payload.dumps(idata.copy())
+ odata = payload.loads(sdata)
+ self.assertEqual(
+ sdata,
+ b'\x81\xc7\x18N20010203T04:05:06.000007\xc7\x18N20010203T04:05:06.000007')
+ self.assertEqual(idata, odata)
+
+ def test_verylong_dump_load(self):
+ '''
+ Test verylong encoder/decoder
+ '''
+ payload = salt.payload.Serial('msgpack')
+ idata = {'jid': 20180227140750302662}
+ sdata = payload.dumps(idata.copy())
+ odata = payload.loads(sdata)
+ idata['jid'] = '{0}'.format(idata['jid'])
+ self.assertEqual(idata, odata)
+
+ def test_immutable_dict_dump_load(self):
+ '''
+ Test immutable dict encoder/decoder
+ '''
+ payload = salt.payload.Serial('msgpack')
+ idata = {'dict': {'key': 'value'}}
+ sdata = payload.dumps({'dict': immutabletypes.ImmutableDict(idata['dict'])})
+ odata = payload.loads(sdata)
+ self.assertEqual(idata, odata)
+
+ def test_immutable_list_dump_load(self):
+ '''
+ Test immutable list encoder/decoder
+ '''
+ payload = salt.payload.Serial('msgpack')
+ idata = {'list': [1, 2, 3]}
+ sdata = payload.dumps({'list': immutabletypes.ImmutableList(idata['list'])})
+ odata = payload.loads(sdata)
+ self.assertEqual(idata, odata)
+
+ def test_immutable_set_dump_load(self):
+ '''
+ Test immutable set encoder/decoder
+ '''
+ payload = salt.payload.Serial('msgpack')
+ idata = {'set': ['red', 'green', 'blue']}
+ sdata = payload.dumps({'set': immutabletypes.ImmutableSet(idata['set'])})
+ odata = payload.loads(sdata)
+ self.assertEqual(idata, odata)
+
+ def test_odict_dump_load(self):
+ '''
+ Test that an odict round-trips correctly. It did not until msgpack 0.2.0
+ '''
+ payload = salt.payload.Serial('msgpack')
+ data = OrderedDict()
+ data['a'] = 'b'
+ data['y'] = 'z'
+ data['j'] = 'k'
+ data['w'] = 'x'
+ sdata = payload.dumps({'set': data})
+ odata = payload.loads(sdata)
+ self.assertEqual({'set': dict(data)}, odata)
+
+ def test_mixed_dump_load(self):
+ '''
+ Test we can handle all exceptions at once
+ '''
+ payload = salt.payload.Serial('msgpack')
+ dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
+ od = OrderedDict()
+ od['a'] = 'b'
+ od['y'] = 'z'
+ od['j'] = 'k'
+ od['w'] = 'x'
+ idata = {dtvalue: dtvalue, # datetime
+ 'jid': 20180227140750302662, # long int
+ 'dict': immutabletypes.ImmutableDict({'key': 'value'}), # immutable dict
+ 'list': immutabletypes.ImmutableList([1, 2, 3]), # immutable list
+ 'set': immutabletypes.ImmutableSet(('red', 'green', 'blue')), # immutable set
+ 'odict': od, # odict
+ }
+ edata = {dtvalue: dtvalue, # datetime, == input
+ 'jid': '20180227140750302662', # string repr of long int
+ 'dict': {'key': 'value'}, # builtin dict
+ 'list': [1, 2, 3], # builtin list
+ 'set': ['red', 'green', 'blue'], # set decodes to a builtin list
+ 'odict': dict(od), # builtin dict
+ }
+ sdata = payload.dumps(idata)
+ odata = payload.loads(sdata)
+ self.assertEqual(edata, odata)
class SREQTestCase(TestCase):
diff --git a/tests/unit/utils/test_args.py b/tests/unit/utils/test_args.py
index c58e2c8d06..247c319b27 100644
--- a/tests/unit/utils/test_args.py
+++ b/tests/unit/utils/test_args.py
@@ -3,6 +3,7 @@
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from collections import namedtuple
+import logging
# Import Salt Libs
from salt.exceptions import SaltInvocationError
@@ -19,6 +20,8 @@ from tests.support.mock import (
patch
)
+log = logging.getLogger(__name__)
+
class ArgsTestCase(TestCase):
'''
@@ -202,3 +205,63 @@ class ArgsTestCase(TestCase):
self.assertEqual(fun, 'amod.afunc')
self.assertEqual(args, ['double " single \'', 'double " single \''])
self.assertEqual(kwargs, {'kw1': 'equal=equal', 'kw2': 'val2'})
+
+ def test_yamlify_arg(self):
+ '''
+ Test that we properly yamlify CLI input. In several of the tests below
+ assertIs is used instead of assertEqual. This is because we want to
+ confirm that the return value is not a copy of the original, but the
+ same instance as the original.
+ '''
+ def _yamlify_arg(item):
+ log.debug('Testing yamlify_arg with %r', item)
+ return salt.utils.args.yamlify_arg(item)
+
+ # Make sure non-strings are just returned back
+ for item in (True, False, None, 123, 45.67, ['foo'], {'foo': 'bar'}):
+ self.assertIs(_yamlify_arg(item), item)
+
+ # Make sure whitespace-only isn't loaded as None
+ for item in ('', '\t', ' '):
+ self.assertIs(_yamlify_arg(item), item)
+
+ # This value would be loaded as an int (123), the underscores would be
+ # ignored. Test that we identify this case and return the original
+ # value.
+ item = '1_2_3'
+ self.assertIs(_yamlify_arg(item), item)
+
+ # The '#' is treated as a comment when not part of a data structure, we
+ # don't want that behavior
+ for item in ('# hash at beginning', 'Hello world! # hash elsewhere'):
+ self.assertIs(_yamlify_arg(item), item)
+
+ # However we _do_ want the # to be intact if it _is_ within a data
+ # structure.
+ item = '["foo", "bar", "###"]'
+ self.assertEqual(_yamlify_arg(item), ["foo", "bar", "###"])
+ item = '{"foo": "###"}'
+ self.assertEqual(_yamlify_arg(item), {"foo": "###"})
+
+ # The string "None" should load _as_ None
+ self.assertIs(_yamlify_arg('None'), None)
+
+ # Leading dashes, or strings containing colons, will result in lists
+ # and dicts, and we only want to load lists and dicts when the strings
+ # look like data structures.
+ for item in ('- foo', 'foo: bar'):
+ self.assertIs(_yamlify_arg(item), item)
+
+ # Make sure we don't load '|' as ''
+ item = '|'
+ self.assertIs(_yamlify_arg(item), item)
+
+ # Make sure we load ints, floats, and strings correctly
+ self.assertEqual(_yamlify_arg('123'), 123)
+ self.assertEqual(_yamlify_arg('45.67'), 45.67)
+ self.assertEqual(_yamlify_arg('foo'), 'foo')
+
+ # We tested list/dict loading above, but there is separate logic when
+ # the string contains a '#', so we need to test again here.
+ self.assertEqual(_yamlify_arg('["foo", "bar"]'), ["foo", "bar"])
+ self.assertEqual(_yamlify_arg('{"foo": "bar"}'), {"foo": "bar"})
diff --git a/tests/unit/utils/test_ssdp.py b/tests/unit/utils/test_ssdp.py
index e74a2ce71f..88ea176416 100644
--- a/tests/unit/utils/test_ssdp.py
+++ b/tests/unit/utils/test_ssdp.py
@@ -305,7 +305,7 @@ class SSDPFactoryTestCase(TestCase):
assert factory.log.debug.called
assert factory.disable_hidden
assert factory._sendto.called
- assert factory._sendto.call_args[0][0] == "{}:@:{{}}".format(signature)
+ assert factory._sendto.call_args[0][0] == salt.utils.stringutils.to_bytes("{}:@:{{}}".format(signature))
assert 'Received "%s" from %s:%s' in factory.log.debug.call_args[0][0]
diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py
index 695def80b8..d076e7d004 100644
--- a/tests/unit/utils/test_state.py
+++ b/tests/unit/utils/test_state.py
@@ -482,7 +482,7 @@ class UtilStateMergeSubreturnTestcase(TestCase):
res = salt.utils.state.merge_subreturn(m, s)
self.assertFalse(res['result'])
- # False result cannot be overriden
+ # False result cannot be overridden
for any_result in [True, None, False]:
m = copy.deepcopy(self.main_ret)
m['result'] = False