Merge branch 'develop' into develop

Nicole Thomas 2018-02-28 10:38:55 -05:00 committed by GitHub
commit a0869fe533
128 changed files with 12001 additions and 5017 deletions


@ -46,6 +46,8 @@ class Mock(object):
data = self.__mapping.get(name)
elif name in ('__file__', '__path__'):
data = '/dev/null'
elif name == '__qualname__':
raise AttributeError("'Mock' object has no attribute '__qualname__'")
else:
data = Mock(mapping=self.__mapping)
return data
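The added ``__qualname__`` branch matters because consumers such as Sphinx probe mocked modules with ``getattr``/``hasattr`` and expect a string there; a ``Mock`` that fabricates every attribute would hand back another ``Mock`` instead. A minimal illustrative sketch (a pared-down stand-in, not the full ``Mock`` from ``doc/conf.py``):

.. code-block:: python

    class SimpleMock(object):
        # Hypothetical, simplified stand-in for the Mock in doc/conf.py.
        def __getattr__(self, name):
            # Only reached when normal lookup fails (e.g. on Python 2,
            # where a class carries no __qualname__ of its own).
            if name in ('__file__', '__path__'):
                return '/dev/null'
            if name == '__qualname__':
                # Fabricating a Mock here breaks callers that expect a
                # string; raising lets hasattr()/getattr() fall back.
                raise AttributeError(
                    "'SimpleMock' object has no attribute '__qualname__'")
            return SimpleMock()

Where ``__getattr__`` is consulted for ``__qualname__``, ``hasattr(SimpleMock(), '__qualname__')`` is now ``False`` instead of yielding a bogus ``Mock``.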
@ -248,7 +250,7 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
project = 'Salt'
version = salt.version.__version__
latest_release = '2017.7.3' # latest release
latest_release = '2017.7.4' # latest release
previous_release = '2016.11.9' # latest release from previous branch
previous_release_dir = '2016.11' # path on web server for previous branch
next_release = '' # next release


@ -1044,6 +1044,40 @@ cache events are fired when a minion requests a minion data cache refresh.
minion_data_cache_events: True
.. conf_master:: http_connect_timeout
``http_connect_timeout``
------------------------
.. versionadded:: Fluorine
Default: ``20``
HTTP connection timeout in seconds.
Applied when fetching files using tornado back-end.
Should be greater than overall download time.
.. code-block:: yaml
http_connect_timeout: 20
.. conf_master:: http_request_timeout
``http_request_timeout``
------------------------
.. versionadded:: 2015.8.0
Default: ``3600``
HTTP request timeout in seconds.
Applied when fetching files using tornado back-end.
Should be greater than overall download time.
.. code-block:: yaml
http_request_timeout: 3600
.. _salt-ssh-configuration:
Salt-SSH Configuration


@ -1267,6 +1267,40 @@ talking to the intended master.
syndic_finger: 'ab:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:50:10'
.. conf_minion:: http_connect_timeout
``http_connect_timeout``
------------------------
.. versionadded:: Fluorine
Default: ``20``
HTTP connection timeout in seconds.
Applied when fetching files using tornado back-end.
Should be greater than overall download time.
.. code-block:: yaml
http_connect_timeout: 20
.. conf_minion:: http_request_timeout
``http_request_timeout``
------------------------
.. versionadded:: 2015.8.0
Default: ``3600``
HTTP request timeout in seconds.
Applied when fetching files using tornado back-end.
Should be greater than overall download time.
.. code-block:: yaml
http_request_timeout: 3600
.. conf_minion:: proxy_host
``proxy_host``


@ -277,6 +277,7 @@ execution modules
napalm_users
napalm_yang_mod
netaddress
netbox
netbsd_sysctl
netbsdservice
netscaler


@ -0,0 +1,7 @@
==========================
salt.modules.netbox module
==========================
.. automodule:: salt.modules.netbox
:members:


@ -223,7 +223,7 @@ branches, and dot release branches.
.. note::
GitHub will open pull requests against Salt's main branch, ``develop``,
byndefault. Be sure to check which branch is selected when creating the
by default. Be sure to check which branch is selected when creating the
pull request.
The Develop Branch


@ -1258,7 +1258,7 @@ target platform, and any other installation or usage instructions or tips.
A sample skeleton for the ``README.rst`` file:
.. code-block:: rest
.. code-block:: restructuredtext
===
foo
@ -1269,7 +1269,7 @@ A sample skeleton for the ``README.rst`` file:
.. note::
See the full `Salt Formulas installation and usage instructions
<http://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html>`_.
<https://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html>`_.
Available states
================
@ -1298,7 +1298,7 @@ A sample skeleton for the `CHANGELOG.rst` file:
:file:`CHANGELOG.rst`:
.. code-block:: rest
.. code-block:: restructuredtext
foo formula
===========


@ -1384,7 +1384,7 @@ Example:
.. code-block:: jinja
{{ 'www.google.com' | dns_check }}
{{ 'www.google.com' | dns_check(port=443) }}
Returns:


@ -2,7 +2,7 @@
Salt 2016.11.9 Release Notes
============================
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.]
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
Changes for v2016.11.8..v2016.11.9
----------------------------------------------------------------


@ -0,0 +1,80 @@
===========================
Salt 2017.7.4 Release Notes
===========================
Version 2017.7.4 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
Changes for v2017.7.3..v2017.7.4
---------------------------------------------------------------
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2018-02-13T16:29:07Z*
Statistics:
- Total Merges: **4**
- Total Issue references: **3**
- Total PR references: **7**
Changes:
- **PR** `#45981`_: (*gtmanfred*) use local config for vault when masterless
@ *2018-02-13T15:22:01Z*
- **ISSUE** `#45976`_: (*grobinson-blockchain*) 6a5e0f9 introduces regression that breaks Vault module for salt masterless
| refs: `#45981`_
* ca76a0b328 Merge pull request `#45981`_ from gtmanfred/2017.7.3
* 0d448457dc apparently local is not set by default
* 2a92f4bc16 use local config for vault when masterless
- **PR** `#45953`_: (*rallytime*) Back-port `#45928`_ to 2017.7.3
@ *2018-02-09T22:29:10Z*
- **ISSUE** `#45915`_: (*MatthiasKuehneEllerhold*) 2017.7.3: Salt-SSH & Vault Pillar: Permission denied "minion.pem"
| refs: `#45928`_
- **PR** `#45928`_: (*garethgreenaway*) [2017.7] Fixing vault when used with pillar over salt-ssh
| refs: `#45953`_
* 6530649dbc Merge pull request `#45953`_ from rallytime/`bp-45928`_-2017.7.3
* 85363189d1 Fixing vault when used with pillar over salt-ssh
- **PR** `#45934`_: (*rallytime*) Back-port `#45902`_ to 2017.7.3
@ *2018-02-09T16:31:08Z*
- **ISSUE** `#45893`_: (*CrackerJackMack*) archive.extracted ValueError "No path specified" in 2017.7.3
| refs: `#45902`_
- **PR** `#45902`_: (*terminalmage*) Check the effective saltenv for cached archive
| refs: `#45934`_
* fb378cebb0 Merge pull request `#45934`_ from rallytime/`bp-45902`_
* bb83e8b345 Add regression test for issue 45893
* cdda66d759 Remove duplicated section in docstring and fix example
* 4b6351cda6 Check the effective saltenv for cached archive
- **PR** `#45935`_: (*rallytime*) Back-port `#45742`_ to 2017.7.3
@ *2018-02-09T14:02:26Z*
- **PR** `#45742`_: (*marccardinal*) list.copy() is not compatible with python 2.7
| refs: `#45935`_
* 0d74151c71 Merge pull request `#45935`_ from rallytime/`bp-45742`_
* 6a0b5f7af3 Removed the chained copy
* ad1150fad4 list.copy() is not compatible with python 2.7
.. _`#45742`: https://github.com/saltstack/salt/pull/45742
.. _`#45893`: https://github.com/saltstack/salt/issues/45893
.. _`#45902`: https://github.com/saltstack/salt/pull/45902
.. _`#45915`: https://github.com/saltstack/salt/issues/45915
.. _`#45928`: https://github.com/saltstack/salt/pull/45928
.. _`#45934`: https://github.com/saltstack/salt/pull/45934
.. _`#45935`: https://github.com/saltstack/salt/pull/45935
.. _`#45953`: https://github.com/saltstack/salt/pull/45953
.. _`#45976`: https://github.com/saltstack/salt/issues/45976
.. _`#45981`: https://github.com/saltstack/salt/pull/45981
.. _`bp-45742`: https://github.com/saltstack/salt/pull/45742
.. _`bp-45902`: https://github.com/saltstack/salt/pull/45902
.. _`bp-45928`: https://github.com/saltstack/salt/pull/45928


@ -216,7 +216,7 @@ The functions have been moved as follows:
instead.
- ``salt.utils.refresh_dns``: use ``salt.utils.network.refresh_dns`` instead.
- ``salt.utils.dns_check``: use ``salt.utils.network.dns_check`` instead.
- ``salt.utils.get_context``: use ``salt.utils.templates.get_context`` instead.
- ``salt.utils.get_context``: use ``salt.utils.stringutils.get_context`` instead.
- ``salt.utils.get_master_key``: use ``salt.utils.master.get_master_key``
instead.
- ``salt.utils.get_values_of_matching_keys``: use


@ -302,25 +302,30 @@ can define multiple versions for the same piece of software. The lines following
the version are indented two more spaces and contain all the information needed
to install that package.
.. warning:: The package name and the ``full_name`` must be unique to all
other packages in the software repository.
.. warning::
The package name and the ``full_name`` must be unique to all other packages
in the software repository.
The version line is the version for the package to be installed. It is used when
you need to install a specific version of a piece of software.
.. warning:: The version must be enclosed in quotes, otherwise the yaml parser
will remove trailing zeros.
.. warning::
The version must be enclosed in quotes, otherwise the yaml parser will
remove trailing zeros.
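The trailing-zero behaviour is easy to reproduce; a quick sketch of what the YAML parser does with an unquoted version (illustrative, using PyYAML):

.. code-block:: python

    import yaml

    yaml.safe_load('version: 1.10')    # -> {'version': 1.1}  float, zero lost
    yaml.safe_load("version: '1.10'")  # -> {'version': '1.10'}  string kept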
.. note::
There are unique situations where previous versions are unavailable. Take
Google Chrome for example. There is only one url provided for a standalone
installation of Google Chrome.
.. note:: There are unique situations where previous versions are unavailable.
Take Google Chrome for example. There is only one url provided for a
standalone installation of Google Chrome.
(https://dl.google.com/edgedl/chrome/install/GoogleChromeStandaloneEnterprise.msi)
When a new version is released, the url just points to the new version. To
handle situations such as these, set the version to `latest`. Salt will
install the version of Chrome at the URL and report that version. Here's an
example:
.. code-block:: bash
.. code-block:: yaml
chrome:
latest:
@ -335,12 +340,13 @@ you need to install a specific version of a piece of software.
Available parameters are as follows:
:param str full_name: The Full Name for the software as shown in "Programs and
Features" in the control panel. You can also get this information by
installing the package manually and then running ``pkg.list_pkgs``. Here's
an example of the output from ``pkg.list_pkgs``:
:param str full_name:
The Full Name for the software as shown in "Programs and Features" in the
control panel. You can also get this information by installing the package
manually and then running ``pkg.list_pkgs``. Here's an example of the output
from ``pkg.list_pkgs``:
.. code-block:: bash
.. code-block:: bash
salt 'test-2008' pkg.list_pkgs
test-2008
@ -364,15 +370,16 @@ Available parameters are as follows:
Salt Minion 0.16.0:
0.16.0
Notice the Full Name for Firefox: Mozilla Firefox 17.0.0 (x86 en-US). That's
exactly what's in the ``full_name`` parameter in the software definition file.
Notice the Full Name for Firefox: ``Mozilla Firefox 17.0.0 (x86 en-US)``.
That's exactly what's in the ``full_name`` parameter in the software
definition file.
If any of the software insalled on the machine matches one of the software
definition files in the repository the full_name will be automatically renamed
to the package name. The example below shows the ``pkg.list_pkgs`` for a
machine that already has Mozilla Firefox 17.0.1 installed.
If any of the software installed on the machine matches one of the software
definition files in the repository, the full_name will be automatically
renamed to the package name. The example below shows the ``pkg.list_pkgs``
for a machine that already has Mozilla Firefox 17.0.1 installed.
.. code-block:: bash
.. code-block:: bash
test-2008:
----------
@ -395,47 +402,68 @@ machine that already has Mozilla Firefox 17.0.1 installed.
nsclient:
0.3.9.328
.. important:: The version number and ``full_name`` need to match the output
from ``pkg.list_pkgs`` so that the status can be verified when running
.. important::
The version number and ``full_name`` need to match the output from
``pkg.list_pkgs`` so that the status can be verified when running a
highstate.
.. note:: It is still possible to successfully install packages using
``pkg.install`` even if they don't match. This can make troubleshooting
difficult so be careful.
.. note::
It is still possible to successfully install packages using
``pkg.install``, even if the ``full_name`` or the version number don't
match. However, this can make troubleshooting issues difficult, so be
careful.
:param str installer: The path to the ``.exe`` or ``.msi`` to use to install the
package. This can be a path or a URL. If it is a URL or a salt path
(salt://), the package will be cached locally and then executed. If it is a
path to a file on disk or a file share, it will be executed directly.
.. tip::
To force salt to display the full name when there's already an existing
package definition file on the system, you can pass a bogus ``saltenv``
parameter to the command like so: ``pkg.list_pkgs saltenv=NotARealEnv``
:param str install_flags: Any flags that need to be passed to the installer to
make it perform a silent install. These can often be found by adding ``/?``
or ``/h`` when running the installer from the command-line. A great resource
for finding these silent install flags can be found on the WPKG project's wiki_:
:param str installer:
The path to the ``.exe`` or ``.msi`` to use to install the package. This can
be a path or a URL. If it is a URL or a salt path (``salt://``), the package
will be cached locally and then executed. If it is a path to a file on disk
or a file share, it will be executed directly.
Salt will not return if the installer is waiting for user input so these are
important.
.. note::
If storing software in the same location as the winrepo it is best
practice to place each installer in its own directory rather than the
root of winrepo. Then you can place your package definition file in the
same directory. It is best practice to name the file ``init.sls``. This
will be picked up by ``pkg.refresh_db`` and processed properly.
:param str uninstaller: The path to the program used to uninstall this software.
This can be the path to the same `exe` or `msi` used to install the
software. It can also be a GUID. You can find this value in the registry
under the following keys:
:param str install_flags:
Any flags that need to be passed to the installer to make it perform a
silent install. These can often be found by adding ``/?`` or ``/h`` when
running the installer from the command-line. A great resource for finding
these silent install flags can be found on the WPKG project's wiki_:
.. warning::
Salt will not return if the installer is waiting for user input so it is
imperative that the software package being installed has the ability to
install silently.
:param str uninstaller:
The path to the program used to uninstall this software. This can be the
path to the same `exe` or `msi` used to install the software. It can also be
a GUID. You can find this value in the registry under the following keys:
- Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall
- Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall
:param str uninstall_flags: Any flags that need to be passed to the uninstaller
to make it perform a silent uninstall. These can often be found by adding
``/?`` or ``/h`` when running the uninstaller from the command-line. A great
resource for finding these silent install flags can be found on the WPKG
project's wiki_:
:param str uninstall_flags:
Any flags that need to be passed to the uninstaller to make it perform a
silent uninstall. These can often be found by adding ``/?`` or ``/h`` when
running the uninstaller from the command-line. A great resource for finding
these silent install flags can be found on the WPKG project's wiki_:
Salt will not return if the uninstaller is waiting for user input so these are
important.
.. warning::
Salt will not return if the uninstaller is waiting for user input so it
is imperative that the software package being uninstalled has the
ability to uninstall silently.
Here are some examples of installer and uninstaller settings:
Here are some examples of installer and uninstaller settings:
.. code-block:: yaml
.. code-block:: yaml
7zip:
'9.20.00.0':
@ -447,9 +475,10 @@ Here are some examples of installer and uninstaller settings:
uninstaller: '{23170F69-40C1-2702-0920-000001000000}'
uninstall_flags: '/qn /norestart'
Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file.
Alternatively the ``uninstaller`` can also simply repeat the URL of an msi
file:
.. code-block:: yaml
.. code-block:: yaml
7zip:
'9.20.00.0':
@ -461,9 +490,12 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
uninstaller: salt://win/repo/7zip/7z920-x64.msi
uninstall_flags: '/qn /norestart'
:param msiexec: This tells salt to use ``msiexec /i`` to install the
package and ``msiexec /x`` to uninstall. This is for `.msi` installations.
Possible options are: True, False or path to msiexec on your system
:param msiexec:
This tells salt to use ``msiexec /i`` to install the package and
``msiexec /x`` to uninstall. This is for ``.msi`` installations. Possible
options are: True, False or the path to ``msiexec.exe`` on your system
.. code-block:: yaml
7zip:
'9.20.00.0':
@ -475,27 +507,34 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
uninstaller: salt://win/repo/7zip/7z920-x64.msi
uninstall_flags: '/qn /norestart'
:param str arch: This selects which ``msiexec.exe`` to use. Possible values:
``x86``, ``x64``
:param bool allusers:
This parameter is specific to ``.msi`` installations. It tells ``msiexec``
to install the software for all users. The default is ``True``.
:param bool allusers: This parameter is specific to `.msi` installations. It
tells `msiexec` to install the software for all users. The default is True.
:param bool cache_dir:
If ``True`` and the installer URL begins with ``salt://``, the entire
directory where the installer resides will be recursively cached. This is
useful for installers that depend on other files in the same directory for
installation.
:param bool cache_dir: If true when installer URL begins with salt://, the
entire directory where the installer resides will be recursively cached.
This is useful for installers that depend on other files in the same
directory for installation.
.. warning::
Be aware that all files and directories in the same location as the
installer file will be copied down to the minion. If you place your
installer file in the root of winrepo (``/srv/salt/win/repo-ng``) and
``cache_dir: True`` the entire contents of winrepo will be cached to
the minion. Therefore, it is best practice to place your installer files
in a subdirectory if they are to be stored in winrepo.
:param str cache_file:
When installer URL begins with salt://, this indicates single file to copy
down for use with the installer. Copied to the same location as the
installer. Use this over ``cache_dir`` if there are many files in the
When the installer URL begins with ``salt://``, this indicates a single file
to copy down for use with the installer. It is copied to the same location
as the installer. Use this over ``cache_dir`` if there are many files in the
directory and you only need a specific file and don't want to cache
additional files that may reside in the installer directory.
Here's an example for a software package that has dependent files:
Here's an example for a software package that has dependent files:
.. code-block:: yaml
.. code-block:: yaml
sqlexpress:
'12.0.2000.8':
@ -505,20 +544,23 @@ Here's an example for a software package that has dependent files:
install_flags: '/ACTION=install /IACCEPTSQLSERVERLICENSETERMS /Q'
cache_dir: True
:param bool use_scheduler: If true, windows will use the task scheduler to run
the installation. This is useful for running the salt installation itself as
the installation process kills any currently running instances of salt.
:param bool use_scheduler:
If ``True``, Windows will use the task scheduler to run the installation.
This is useful for running the Salt installation itself as the installation
process kills any currently running instances of Salt.
:param str source_hash: This tells salt to compare a hash sum of the installer
to the provided hash sum before execution. The value can be formatted as
``hash_algorithm=hash_sum``, or it can be a URI to a file containing the hash
sum.
For a list of supported algorithms, see the `hashlib documentation
<https://docs.python.org/2/library/hashlib.html>`_.
:param str source_hash:
This tells Salt to compare a hash sum of the installer to the provided hash
sum before execution. The value can be formatted as
``<hash_algorithm>=<hash_sum>``, or it can be a URI to a file containing the
hash sum.
Here's an example of source_hash usage:
For a list of supported algorithms, see the `hashlib documentation
<https://docs.python.org/2/library/hashlib.html>`_.
.. code-block:: yaml
Here's an example of source_hash usage:
.. code-block:: yaml
messageanalyzer:
'4.0.7551.0':


@ -3,6 +3,6 @@ msgpack-python>0.3
PyYAML
MarkupSafe
requests>=1.0.0
tornado>=4.2.1
tornado>=4.2.1,<5.0
# Required by Tornado to handle threads stuff.
futures>=2.0


@ -14,6 +14,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import os.path
import errno
import shutil
import tempfile
@ -45,13 +46,14 @@ def store(bank, key, data, cachedir):
Store information in a file.
'''
base = os.path.join(cachedir, os.path.normpath(bank))
if not os.path.isdir(base):
try:
os.makedirs(base)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise SaltCacheError(
'The cache directory, {0}, does not exist and could not be '
'created: {1}'.format(base, exc)
'The cache directory, {0}, could not be created: {1}'.format(
base, exc
)
)
outfile = os.path.join(base, '{0}.p'.format(key))
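The switch from an ``os.path.isdir`` guard to catching ``EEXIST`` closes a race in which another process creates the directory between the check and the ``os.makedirs`` call. The pattern in isolation (an illustrative sketch, not the patched function itself):

.. code-block:: python

    import errno
    import os

    def ensure_dir(path):
        # EAFP: attempt the creation and treat "already exists" as
        # success; any other OSError is a real failure and propagates.
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise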


@ -119,6 +119,7 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di
Creates a master server
'''
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
if hasattr(self.master, 'process_manager'): # IofloMaster has no process manager
# escalate signal to the process manager processes
self.master.process_manager.stop_restarting()
self.master.process_manager.send_signal_to_processes(signum)
@ -151,7 +152,6 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di
os.path.join(self.config['cachedir'], 'jobs'),
os.path.join(self.config['cachedir'], 'proc'),
self.config['sock_dir'],
self.config['key_dir'],
self.config['token_dir'],
self.config['syndic_dir'],
self.config['sqlite_queue_dir'],
@ -166,7 +166,7 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir'], self.config['key_dir']],
pki_dir=self.config['pki_dir'],
)
# Clear out syndics from cachedir
for syndic_file in os.listdir(self.config['syndic_dir']):
@ -234,6 +234,7 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
if hasattr(self.minion, 'stop'):
self.minion.stop(signum)
super(Minion, self)._handle_signals(signum, sigframe)
@ -287,7 +288,7 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
@ -392,7 +393,7 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di
:param exitmsg
'''
self.action_log_info('Shutting down')
if hasattr(self, 'minion'):
if hasattr(self, 'minion') and hasattr(self.minion, 'destroy'):
self.minion.destroy()
super(Minion, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
@ -469,7 +470,7 @@ class ProxyMinion(salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin): #
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
@ -578,7 +579,7 @@ class Syndic(salt.utils.parsers.SyndicOptionParser, DaemonsMixin): # pylint: di
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)


@ -10,6 +10,7 @@ import os
import salt.utils.job
import salt.utils.parsers
import salt.utils.stringutils
import salt.log
from salt.utils.args import yamlify_arg
from salt.utils.verify import verify_log
from salt.exceptions import (
@ -38,6 +39,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
import salt.client
self.parse_args()
if self.config['log_level'] not in ('quiet', ):
# Setup file logging!
self.setup_logfile_logger()
verify_log(self.config)
@ -82,7 +84,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
if 'token' in self.config:
import salt.utils.files
try:
with salt.utils.files.fopen(os.path.join(self.config['key_dir'], '.root_key'), 'r') as fp_:
with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
kwargs['key'] = fp_.readline()
except IOError:
kwargs['token'] = self.config['token']


@ -194,11 +194,11 @@ class LocalClient(object):
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
key_user = key_user.replace('\\', '_')
keyfile = os.path.join(self.opts['key_dir'],
keyfile = os.path.join(self.opts['cachedir'],
'.{0}_key'.format(key_user))
try:
# Make sure all key parent directories are accessible
salt.utils.verify.check_path_traversal(self.opts['key_dir'],
salt.utils.verify.check_path_traversal(self.opts['cachedir'],
key_user,
self.skip_perm_errors)
with salt.utils.files.fopen(keyfile, 'r') as key:


@ -161,17 +161,17 @@ do
py_cmd_path=`"$py_cmd" -c \
'from __future__ import print_function;
import sys; print(sys.executable);'`
cmdpath=$(command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null)
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export $($py_cmd -c \
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])")
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
@ -880,6 +880,7 @@ class Single(object):
# Pre apply changeable defaults
self.minion_opts = {
'grains_cache': True,
'log_file': 'salt-call.log',
}
self.minion_opts.update(opts.get('ssh_minion_opts', {}))
if minion_opts is not None:
@ -889,7 +890,6 @@ class Single(object):
'root_dir': os.path.join(self.thin_dir, 'running_data'),
'id': self.id,
'sock_dir': '/',
'log_file': 'salt-call.log',
'fileserver_list_cache_time': 3,
})
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)


@ -32,7 +32,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeState
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@ -52,9 +52,6 @@ try:
except ImportError:
HAS_LIBCLOUD = False
# Import generic libcloud functions
# from salt.cloud.libcloudfuncs import *
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils.functools import namespaced_function
@ -217,7 +214,6 @@ def create(vm_):
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
rootPw = NodeAuthPassword(vm_['auth'])
location = conn.ex_get_location_by_id(vm_['location'])
images = conn.list_images(location=location)
@ -248,15 +244,13 @@ def create(vm_):
kwargs = {
'name': vm_['name'],
'image': image,
'auth': rootPw,
'ex_description': vm_['description'],
'ex_network_domain': network_domain,
'ex_vlan': vlan,
'ex_is_started': vm_['is_started']
}
event_data = kwargs.copy()
del event_data['auth']
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
@ -267,6 +261,10 @@ def create(vm_):
transport=__opts__['transport']
)
# Initial password (excluded from event payload)
initial_password = NodeAuthPassword(vm_['auth'])
kwargs['auth'] = initial_password
try:
data = conn.create_node(**kwargs)
except Exception as exc:
@ -280,7 +278,7 @@ def create(vm_):
return False
try:
data = salt.utils.cloud.wait_for_ip(
data = __utils__['cloud.wait_for_ip'](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
@ -306,7 +304,7 @@ def create(vm_):
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address %s', ip_address)
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: %s', salt_ip_address)
else:
@ -322,7 +320,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = vm_['auth']
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@ -414,11 +412,13 @@ def create_lb(kwargs=None, call=None):
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@ -427,11 +427,13 @@ def create_lb(kwargs=None, call=None):
name, port, protocol, algorithm, members
)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@ -573,3 +575,46 @@ def get_lb_conn(dd_driver=None):
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
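In effect, ``_to_event_data`` passes msgpack-friendly primitives through untouched, reduces a ``NodeDriver`` to its name to break cyclic references, recurses into lists, and flattens any other object into a dict of its public, non-callable attributes. A rough illustration with a hypothetical object (not a real libcloud type):

.. code-block:: python

    class FakeNode(object):
        # Hypothetical object standing in for a libcloud node/VLAN/etc.
        name = 'web1'
        state = 'running'

        def reboot(self):  # callables are stripped from event data
            pass

    _to_event_data(FakeNode())      # -> {'name': 'web1', 'state': 'running'}
    _to_event_data(['a', 1, None])  # -> ['a', 1, None]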


@ -200,8 +200,7 @@ def get_dependencies():
'''
deps = {
'requests': HAS_REQUESTS,
'm2crypto': HAS_M2,
'pycrypto': HAS_PYCRYPTO
'pycrypto or m2crypto': HAS_M2 or HAS_PYCRYPTO
}
return config.check_driver_dependencies(
__virtualname__,


@ -355,7 +355,7 @@ def _get_ips(node, addr_type='public'):
ret = []
for _, interface in node.addresses.items():
for addr in interface:
if addr_type in ('floating', 'fixed') and addr_type == addr['OS-EXT-IPS:type']:
if addr_type in ('floating', 'fixed') and addr_type == addr.get('OS-EXT-IPS:type'):
ret.append(addr['addr'])
elif addr_type == 'public' and __utils__['cloud.is_public_ip'](addr['addr']):
ret.append(addr['addr'])
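Using ``dict.get`` here matters because not every provider attaches the ``OS-EXT-IPS`` extension attributes to its address entries, and plain indexing raised ``KeyError`` on such clouds. Illustrative:

.. code-block:: python

    addr = {'addr': '10.0.0.5', 'version': 4}  # no 'OS-EXT-IPS:type' key
    addr.get('OS-EXT-IPS:type')  # None, so the comparison is simply False
    # addr['OS-EXT-IPS:type']    # would raise KeyError instead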


@ -565,11 +565,6 @@ def create(vm_):
record = {}
ret = {}
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
# fire creating event
__utils__['cloud.fire_event'](
'event',


@ -197,9 +197,6 @@ VALID_OPTS = {
# The directory used to store public key data
'pki_dir': six.string_types,
# The directory to store authentication keys of a master's local environment.
'key_dir': six.string_types,
# A unique identifier for this daemon
'id': six.string_types,
@ -284,6 +281,7 @@ VALID_OPTS = {
# Location of the files a minion should look for. Set to 'local' to never ask the master.
'file_client': six.string_types,
'local': bool,
# When using a local file_client, this parameter is used to allow the client to connect to
# a master for remote execution.
@ -1050,6 +1048,10 @@ VALID_OPTS = {
# If set, all minion exec module actions will be rerouted through sudo as this user
'sudo_user': six.string_types,
# HTTP connection timeout in seconds. Applied for tornado http fetch functions like cp.get_url
# should be greater than overall download time
'http_connect_timeout': float,
# HTTP request timeout in seconds. Applied for tornado http fetch functions like cp.get_url
# should be greater than overall download time
'http_request_timeout': float,
@ -1246,6 +1248,7 @@ DEFAULT_MINION_OPTS = {
'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR],
},
'file_client': 'remote',
'local': False,
'use_master_when_local': False,
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,
@ -1451,6 +1454,7 @@ DEFAULT_MINION_OPTS = {
'cache_sreqs': True,
'cmd_safe': True,
'sudo_user': '',
'http_connect_timeout': 20.0, # tornado default - 20 seconds
'http_request_timeout': 1 * 60 * 60.0, # 1 hour
'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB
'event_match_type': 'startswith',
@ -1495,7 +1499,6 @@ DEFAULT_MASTER_OPTS = {
'archive_jobs': False,
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'),
'key_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'key'),
'key_cache': '',
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'),
'file_roots': {
@ -1785,6 +1788,7 @@ DEFAULT_MASTER_OPTS = {
'rotate_aes_key': True,
'cache_sreqs': True,
'dummy_pub': False,
'http_connect_timeout': 20.0, # tornado default - 20 seconds
'http_request_timeout': 1 * 60 * 60.0, # 1 hour
'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB
'python2_bin': 'python2',
@ -2310,6 +2314,12 @@ def prepend_root_dir(opts, path_options):
path = tmp_path_root_dir
else:
path = tmp_path_def_root_dir
elif salt.utils.platform.is_windows() and not os.path.splitdrive(path)[0]:
# In windows, os.path.isabs resolves '/' to 'C:\\' or whatever
# the root drive is. This elif prevents the next from being
# hit, so that the root_dir is prefixed in cases where the
# drive is not prefixed on a config option
pass
elif os.path.isabs(path):
# Absolute path (not default or overriden root_dir)
# No prepending required
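The Windows guard is needed because a drive-less path such as ``/srv/salt`` still satisfies ``os.path.isabs`` yet carries no drive prefix, so without this ``elif`` the ``root_dir`` prepend would be skipped. The semantics can be checked with ``ntpath`` on any platform (illustrative):

.. code-block:: python

    import ntpath  # Windows path rules, importable on any platform

    ntpath.isabs('/srv/salt')       # True -- yet no drive letter present
    ntpath.splitdrive('/srv/salt')  # ('', '/srv/salt')
    ntpath.splitdrive('C:\\salt')   # ('C:', '\\salt')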
@ -2497,7 +2507,7 @@ def syndic_config(master_config_path,
opts.update(syndic_opts)
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'autosign_grains_dir'
]
for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
@ -3645,7 +3655,7 @@ def _adjust_log_file_override(overrides, default_log_file):
if overrides.get('log_dir'):
# Adjust log_file if a log_dir override is introduced
if overrides.get('log_file'):
if not os.path.abspath(overrides['log_file']):
if not os.path.isabs(overrides['log_file']):
# Prepend log_dir if log_file is relative
overrides['log_file'] = os.path.join(overrides['log_dir'],
overrides['log_file'])
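This one-word change is a genuine bug fix: ``os.path.abspath`` always returns a non-empty string, so ``not os.path.abspath(...)`` was always ``False`` and a relative ``log_file`` was never joined onto ``log_dir``. ``os.path.isabs`` performs the intended test:

.. code-block:: python

    import os.path

    bool(os.path.abspath('minion.log'))   # True for any input path
    os.path.isabs('minion.log')           # False -- relative, gets log_dir
    os.path.isabs('/var/log/minion.log')  # True  -- left untouched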
@ -3934,7 +3944,7 @@ def apply_master_config(overrides=None, defaults=None):
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir',
'sqlite_queue_dir', 'autosign_grains_dir'
]


@ -121,6 +121,7 @@ class IofloMinion(object):
'''
warn_deprecated()
self.opts = opts
self.restart = False
def tune_in(self, behaviors=None):
'''


@ -186,11 +186,11 @@ def mk_key(opts, user):
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
keyfile = os.path.join(
opts['key_dir'], '.{0}_key'.format(user.replace('\\', '_'))
opts['cachedir'], '.{0}_key'.format(user.replace('\\', '_'))
)
else:
keyfile = os.path.join(
opts['key_dir'], '.{0}_key'.format(user)
opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):


@ -11,7 +11,12 @@ framer minionudpstack be active first start
exit
do salt raet road stack closer per inode ".salt.road.manor."
framer bootstrap be active first join
framer bootstrap be active first setup
frame setup
enter
do salt raet road usher minion setup per inode ".salt.road.manor."
go join
frame join
print Joining...
enter
@ -44,7 +49,7 @@ framer bootstrap be active first join
frame message
print Messaging...
enter
do raet road stack messenger to contents "Minion 1 Hello" code 15 \
do raet road stack messenger with contents "Minion 1 Hello" code 15 \
per inode ".salt.road.manor."
go next


@ -22,6 +22,10 @@ def test():
if not os.path.exists(pkiDirpath):
os.makedirs(pkiDirpath)
keyDirpath = os.path.join('/tmp', 'raet', 'testo', 'key')
if not os.path.exists(keyDirpath):
os.makedirs(keyDirpath)
acceptedDirpath = os.path.join(pkiDirpath, 'accepted')
if not os.path.exists(acceptedDirpath):
os.makedirs(acceptedDirpath)
@ -64,10 +68,12 @@ def test():
client_acl=dict(),
publisher_acl=dict(),
pki_dir=pkiDirpath,
key_dir=keyDirpath,
sock_dir=sockDirpath,
cachedir=cacheDirpath,
open_mode=True,
auto_accept=True,
client_acl_verify=True,
)
master = salt.daemons.flo.IofloMaster(opts=opts)


@ -348,8 +348,8 @@ if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() # run all unittests
runAll() # run all unittests
runSome() # only run some
#runSome() # only run some
#runOne('testParseHostname')


@ -5,6 +5,7 @@ Raet Ioflo Behavior Unittests
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
import importlib
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
@ -40,6 +41,9 @@ class PresenterTestCase(testing.FrameIofloTestCase):
'''
Call super if override so House Framer and Frame are setup correctly
'''
behaviors = ['salt.daemons.flo', 'salt.daemons.test.plan']
for behavior in behaviors:
mod = importlib.import_module(behavior)
super(PresenterTestCase, self).setUp()
def tearDown(self):


@ -7,6 +7,7 @@ from __future__ import absolute_import, print_function, unicode_literals
# pylint: skip-file
# pylint: disable=C0103
import sys
import salt.utils.stringutils
from salt.ext.six.moves import map
if sys.version_info < (2, 7):
import unittest2 as unittest
@ -30,12 +31,15 @@ from raet.road import estating, keeping, stacking
from salt.key import RaetKey
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(unittest.TestCase):
""""""
@ -129,8 +133,9 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
self.assertDictEqual(localkeys,
{'priv': salt.utils.stringutils.to_str(main['prihex']),
'sign': salt.utils.stringutils.to_str(main['sighex'])})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
@ -149,38 +154,37 @@ class BasicTestCase(unittest.TestCase):
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
'rejected': []})
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other1',
'pub': salt.utils.stringutils.to_str(other1['pubhex']),
'verify': salt.utils.stringutils.to_str(other1['verhex'])})
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other2',
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
'verify': salt.utils.stringutils.to_str(other2['verhex'])})
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
self.assertDictEqual(allremotekeys,
{'other1':
{'verify': salt.utils.stringutils.to_str(other1['verhex']),
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex'],},
'pub': salt.utils.stringutils.to_str(other1['pubhex']), },
'other2':
{'verify': other2['verhex'],
{'verify': salt.utils.stringutils.to_str(other2['verhex']),
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
'pub': salt.utils.stringutils.to_str(other2['pubhex']), }
})
def testManualAccept(self):
'''
Basic function of RaetKey in non auto accept mode
@ -199,8 +203,9 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
self.assertDictEqual(localkeys,
{'priv': salt.utils.stringutils.to_str(main['prihex']),
'sign': salt.utils.stringutils.to_str(main['sighex'])})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
@ -219,7 +224,7 @@ class BasicTestCase(unittest.TestCase):
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
'pending': ['other1', 'other2'],
'rejected': []} )
'rejected': []})
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, {})
@ -232,18 +237,20 @@ class BasicTestCase(unittest.TestCase):
'rejected': [],
'pending': ['other1', 'other2']})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
self.assertDictEqual(allremotekeys,
{'other1':
{'verify': salt.utils.stringutils.to_str(other1['verhex']),
'minion_id': 'other1',
'acceptance': 'pending',
'pub': other1['pubhex'],},
'pub': salt.utils.stringutils.to_str(other1['pubhex']),
},
'other2':
{'verify': other2['verhex'],
{'verify': salt.utils.stringutils.to_str(other2['verhex']),
'minion_id': 'other2',
'acceptance': 'pending',
'pub': other2['pubhex'],}
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
}
})
self.mainKeeper.accept_all()
@ -252,35 +259,37 @@ class BasicTestCase(unittest.TestCase):
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
'rejected': []})
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other1',
'pub': salt.utils.stringutils.to_str(other1['pubhex']),
'verify': salt.utils.stringutils.to_str(other1['verhex'])})
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other2',
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
'verify': salt.utils.stringutils.to_str(other2['verhex'])})
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
self.assertDictEqual(allremotekeys,
{'other1':
{'verify': salt.utils.stringutils.to_str(other1['verhex']),
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex'],},
'pub': salt.utils.stringutils.to_str(other1['pubhex']),
},
'other2':
{'verify': other2['verhex'],
{'verify': salt.utils.stringutils.to_str(other2['verhex']),
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
}
})
def testDelete(self):
@ -301,8 +310,9 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadData(name='main', base=self.baseDirpath)
self.mainKeeper.write_local(main['prihex'], main['sighex'])
localkeys = self.mainKeeper.read_local()
self.assertDictEqual(localkeys, {'priv': main['prihex'],
'sign': main['sighex']})
self.assertDictEqual(localkeys,
{'priv': salt.utils.stringutils.to_str(main['prihex']),
'sign': salt.utils.stringutils.to_str(main['sighex'])})
allkeys = self.mainKeeper.all_keys()
self.assertDictEqual(allkeys, {'accepted': [],
'local': [self.localFilepath],
@ -321,35 +331,39 @@ class BasicTestCase(unittest.TestCase):
self.assertDictEqual(allkeys, {'accepted': ['other1', 'other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
'rejected': []})
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other1',
'pub': other1['pubhex'],
'verify': other1['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other1',
'pub': salt.utils.stringutils.to_str(other1['pubhex']),
'verify': salt.utils.stringutils.to_str(other1['verhex']),
})
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other2',
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
'verify': salt.utils.stringutils.to_str(other2['verhex']),
})
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': ['other1', 'other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {'other1':
{'verify': other1['verhex'],
self.assertDictEqual(allremotekeys,
{'other1':
{'verify': salt.utils.stringutils.to_str(other1['verhex']),
'minion_id': 'other1',
'acceptance': 'accepted',
'pub': other1['pubhex']},
'pub': salt.utils.stringutils.to_str(other1['pubhex'])
},
'other2':
{'verify': other2['verhex'],
{'verify': salt.utils.stringutils.to_str(other2['verhex']),
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
}
})
self.mainKeeper.delete_key(match=other1['name'])
@ -358,33 +372,32 @@ class BasicTestCase(unittest.TestCase):
self.assertDictEqual(allkeys, {'accepted': ['other2'],
'local': [self.localFilepath],
'pending': [],
'rejected': []} )
'rejected': []})
remotekeys = self.mainKeeper.read_remote(other1['name'])
self.assertDictEqual(remotekeys, {} )
self.assertDictEqual(remotekeys, {})
remotekeys = self.mainKeeper.read_remote(other2['name'])
self.assertDictEqual(remotekeys, { 'minion_id': 'other2',
'pub': other2['pubhex'],
'verify': other2['verhex']} )
self.assertDictEqual(remotekeys, {'minion_id': 'other2',
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
'verify': salt.utils.stringutils.to_str(other2['verhex'])})
listkeys = self.mainKeeper.list_keys()
self.assertDictEqual(listkeys, {'accepted': [ 'other2'],
self.assertDictEqual(listkeys, {'accepted': ['other2'],
'rejected': [],
'pending': []})
allremotekeys = self.mainKeeper.read_all_remote()
self.assertDictEqual(allremotekeys, {
'other2':
{'verify': other2['verhex'],
self.assertDictEqual(allremotekeys,
{'other2':
{'verify': salt.utils.stringutils.to_str(other2['verhex']),
'minion_id': 'other2',
'acceptance': 'accepted',
'pub': other2['pubhex'],}
'pub': salt.utils.stringutils.to_str(other2['pubhex']),
}
})
def runOne(test):
'''
Unittest Runner
@ -393,6 +406,7 @@ def runOne(test):
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
'''
Unittest runner
@ -407,6 +421,7 @@ def runSome():
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
'''
Unittest runner
@ -416,12 +431,12 @@ def runAll():
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
# console.reinit(verbosity=console.Wordage.concise)
#console.reinit(verbosity=console.Wordage.concise)
runAll() # run all unittests
runAll() #run all unittests
# runSome() #only run some
#runSome()#only run some
#runOne('testDelete')
# runOne('testDelete')


@ -13,10 +13,11 @@ else:
# pylint: enable=blacklisted-import
import os
import stat
import time
import tempfile
import shutil
import socket
import stat
import tempfile
import time
from ioflo.aid.odicting import odict
from ioflo.aid.timing import StoreTimer
@ -29,6 +30,7 @@ from raet.road import estating, stacking
from salt.daemons import salting
import salt.utils.kinds as kinds
import salt.utils.stringutils
def setUpModule():
@ -232,19 +234,20 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
os.path.join('main', 'raet', 'main_master')))
self.assertTrue(main.ha, ("0.0.0.0", raeting.RAET_PORT))
self.assertIs(main.keep.auto, raeting.AutoMode.never.value)
self.assertDictEqual(main.keep.loadLocalData(), {'name': mainData['name'],
self.assertDictEqual(main.keep.loadLocalData(),
{'name': mainData['name'],
'uid': 1,
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
data1 = self.createRoadData(role='remote1',
@ -282,7 +285,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -290,8 +293,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 0,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
},
'remote2_minion':
{'name': data2['name'],
@ -300,7 +303,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -308,8 +311,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data2['role'],
'acceptance': 0,
'verhex': data2['verhex'],
'pubhex': data2['pubhex'],
'verhex': salt.utils.stringutils.to_str(data2['verhex']),
'pubhex': salt.utils.stringutils.to_str(data2['pubhex']),
}
})
@ -362,14 +365,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': otherData['role'],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
})
data3 = self.createRoadData(role='remote3',
@ -405,7 +408,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7534],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data3['kind'],
@ -413,8 +416,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data3['role'],
'acceptance': 0,
'verhex': data3['verhex'],
'pubhex': data3['pubhex'],
'verhex': salt.utils.stringutils.to_str(data3['verhex']),
'pubhex': salt.utils.stringutils.to_str(data3['pubhex']),
},
'remote4_minion':
{
@ -424,7 +427,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7535],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data4['kind'],
@ -432,8 +435,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data4['role'],
'acceptance': 0,
'verhex': data4['verhex'],
'pubhex': data4['pubhex'],
'verhex': salt.utils.stringutils.to_str(data4['verhex']),
'pubhex': salt.utils.stringutils.to_str(data4['pubhex']),
}
})
@ -477,14 +480,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
data1 = self.createRoadData(role='remote1',
@ -520,7 +523,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -528,8 +531,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 1,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
},
'remote2_minion':
{'name': data2['name'],
@ -538,7 +541,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -546,8 +549,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data2['role'],
'acceptance': 1,
'verhex': data2['verhex'],
'pubhex': data2['pubhex'],
'verhex': salt.utils.stringutils.to_str(data2['verhex']),
'pubhex': salt.utils.stringutils.to_str(data2['pubhex']),
}
})
@ -600,14 +603,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': otherData['role'],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
})
data3 = self.createRoadData(role='remote3',
@ -643,7 +646,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7534],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data3['kind'],
@ -651,8 +654,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data3['role'],
'acceptance': 1,
'verhex': data3['verhex'],
'pubhex': data3['pubhex'],
'verhex': salt.utils.stringutils.to_str(data3['verhex']),
'pubhex': salt.utils.stringutils.to_str(data3['pubhex']),
},
'remote4_minion':
{
@ -662,7 +665,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7535],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data4['kind'],
@ -670,8 +673,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data4['role'],
'acceptance': 1,
'verhex': data4['verhex'],
'pubhex': data4['pubhex'],
'verhex': salt.utils.stringutils.to_str(data4['verhex']),
'pubhex': salt.utils.stringutils.to_str(data4['pubhex']),
}
})
@ -715,13 +718,13 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
'role': mainData['role'],
})
@ -759,7 +762,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -767,8 +770,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 1,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
},
'remote2_minion':
{
@ -778,7 +781,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -786,8 +789,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data2['role'],
'acceptance': 1,
'verhex': data2['verhex'],
'pubhex': data2['pubhex'],
'verhex': salt.utils.stringutils.to_str(data2['verhex']),
'pubhex': salt.utils.stringutils.to_str(data2['pubhex']),
}
})
@ -840,14 +843,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': otherData['role'],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
})
data3 = self.createRoadData(role='remote3',
@ -883,7 +886,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7534],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data3['kind'],
@ -891,8 +894,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data3['role'],
'acceptance': 1,
'verhex': data3['verhex'],
'pubhex': data3['pubhex'],
'verhex': salt.utils.stringutils.to_str(data3['verhex']),
'pubhex': salt.utils.stringutils.to_str(data3['pubhex']),
},
'remote4_minion':
{
@ -902,7 +905,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7535],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data4['kind'],
@ -910,8 +913,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data4['role'],
'acceptance': 1,
'verhex': data4['verhex'],
'pubhex': data4['pubhex'],
'verhex': salt.utils.stringutils.to_str(data4['verhex']),
'pubhex': salt.utils.stringutils.to_str(data4['pubhex']),
}
})
@ -955,14 +958,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
# add multiple remotes all with same role
@ -1006,7 +1009,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -1014,8 +1017,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 0,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
},
'primary_caller':
{
@ -1025,7 +1028,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -1033,8 +1036,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 0,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
}
})
@ -1104,14 +1107,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
# add multiple remotes all with same role
@ -1149,7 +1152,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -1157,8 +1160,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 1,
'verhex': data2['verhex'],
'pubhex': data2['pubhex'],
'verhex': salt.utils.stringutils.to_str(data2['verhex']),
'pubhex': salt.utils.stringutils.to_str(data2['pubhex']),
},
'primary_syndic':
{
@ -1168,7 +1171,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -1176,8 +1179,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data2['role'],
'acceptance': 1,
'verhex': data2['verhex'],
'pubhex': data2['pubhex'],
'verhex': salt.utils.stringutils.to_str(data2['verhex']),
'pubhex': salt.utils.stringutils.to_str(data2['pubhex']),
}
})
@ -1248,14 +1251,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
# add multiple remotes all with same role but different keys
@ -1300,7 +1303,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data1['kind'],
@ -1308,8 +1311,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data1['role'],
'acceptance': 1,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
},
'primary_syndic':
{
@ -1319,7 +1322,7 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7533],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'main': False,
'kind': data2['kind'],
@ -1327,8 +1330,8 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'joined': None,
'role': data2['role'],
'acceptance': 1,
'verhex': data1['verhex'],
'pubhex': data1['pubhex'],
'verhex': salt.utils.stringutils.to_str(data1['verhex']),
'pubhex': salt.utils.stringutils.to_str(data1['pubhex']),
}
})
@ -1399,14 +1402,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
opts = self.createOpts(role='other',
@ -1441,14 +1444,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': otherData['role'],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
})
self.join(other, main)
@ -1524,14 +1527,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
opts = self.createOpts(role='other',
@ -1566,14 +1569,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': otherData['role'],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
})
self.join(other, main)
@ -1645,14 +1648,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
opts = self.createOpts(role='other',
@ -1687,13 +1690,13 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'sighex': otherData['sighex'],
'prihex': otherData['prihex'],
'sighex': salt.utils.stringutils.to_str(otherData['sighex']),
'prihex': salt.utils.stringutils.to_str(otherData['prihex']),
'role': otherData['role'],
})
@ -1766,14 +1769,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
opts = self.createOpts(role='primary',
@ -1808,14 +1811,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': other1Data['role'],
'sighex': other1Data['sighex'],
'prihex': other1Data['prihex'],
'sighex': salt.utils.stringutils.to_str(other1Data['sighex']),
'prihex': salt.utils.stringutils.to_str(other1Data['prihex']),
})
self.join(other1, main)
@ -1876,13 +1879,13 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7532],
'sighex': other2Data['sighex'],
'prihex': other2Data['prihex'],
'sighex': salt.utils.stringutils.to_str(other2Data['sighex']),
'prihex': salt.utils.stringutils.to_str(other2Data['prihex']),
'role': other2Data['role'],
})
@ -1936,14 +1939,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7532],
'role': other2Data['role'],
'sighex': other1Data['sighex'],
'prihex': other1Data['prihex'],
'sighex': salt.utils.stringutils.to_str(other1Data['sighex']),
'prihex': salt.utils.stringutils.to_str(other1Data['prihex']),
})
# should join since same role and keys
@ -2021,14 +2024,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7530],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7530],
'role': mainData['role'],
'sighex': mainData['sighex'],
'prihex': mainData['prihex'],
'sighex': salt.utils.stringutils.to_str(mainData['sighex']),
'prihex': salt.utils.stringutils.to_str(mainData['prihex']),
})
opts = self.createOpts(role='primary',
@ -2063,14 +2066,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7531],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7531],
'role': other1Data['role'],
'sighex': other1Data['sighex'],
'prihex': other1Data['prihex'],
'sighex': salt.utils.stringutils.to_str(other1Data['sighex']),
'prihex': salt.utils.stringutils.to_str(other1Data['prihex']),
})
self.join(other1, main)
@ -2130,14 +2133,14 @@ class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
'ha': ['127.0.0.1', 7532],
'iha': None,
'natted': None,
'fqdn': '1.0.0.127.in-addr.arpa',
'fqdn': socket.getfqdn('127.0.0.1'),
'dyned': None,
'sid': 0,
'puid': 1,
'aha': ['0.0.0.0', 7532],
'role': other2Data['role'],
'sighex': other2Data['sighex'],
'prihex': other2Data['prihex'],
'sighex': salt.utils.stringutils.to_str(other2Data['sighex']),
'prihex': salt.utils.stringutils.to_str(other2Data['prihex']),
})
# should join since open mode
@ -2225,8 +2228,8 @@ if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() # run all unittests
runAll() # run all unittests
runSome() # only run some
#runSome() # only run some
#runOne('testBootstrapRoleAuto')
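The recurring change in the hunks above is the Python 3 port of these fixtures: byte-string key material is normalized through ``salt.utils.stringutils.to_str``, and the hard-coded reverse-DNS name is replaced with a resolver lookup. A minimal sketch of the two conversions (key value hypothetical; ``to_str`` is the real Salt helper):

.. code-block:: python

    import socket

    import salt.utils.stringutils

    # On Python 3 the raet/libnacl key material arrives as bytes, while the
    # expected keep data holds native str, so the fixtures convert the same way.
    verhex = b'2a9f1c'  # hypothetical key hex
    assert salt.utils.stringutils.to_str(verhex) == '2a9f1c'

    # getfqdn() asks the resolver rather than assuming the literal zone name
    # '1.0.0.127.in-addr.arpa', which most hosts never resolve to.
    print(socket.getfqdn('127.0.0.1'))  # e.g. 'localhost'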

View File

@ -5,6 +5,7 @@ Raet Ioflo Behavior Unittests
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
import importlib
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
@ -43,6 +44,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
'''
Call super if override so House Framer and Frame are setup correctly
'''
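# Import the ioflo behavior packages first so their behaviors are registered
# (an import side effect) before setUp builds the House and Framers.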
behaviors = ['salt.daemons.flo', 'salt.daemons.test.plan']
for behavior in behaviors:
mod = importlib.import_module(behavior)
super(StatsEventerTestCase, self).setUp()
def tearDown(self):
@ -723,8 +727,8 @@ if __name__ == '__main__' and __package__ is None:
# console.reinit(verbosity=console.Wordage.concise)
#runAll() # run all unittests
runAll() # run all unittests
runSome() # only run some
#runSome() # only run some
#runOne('testMasterLaneStats')

View File

@ -285,7 +285,7 @@ class SaltRenderError(SaltException):
if self.line_num and self.buffer:
# Avoid circular import
import salt.utils.templates
self.context = salt.utils.templates.get_context(
self.context = salt.utils.stringutils.get_context(
self.buffer,
self.line_num,
marker=marker

View File

@ -19,22 +19,22 @@ import logging
import salt.utils.dictupdate
import salt.utils.path
import salt.utils.platform
try:
# The zfs_support grain will only be set to True if this module is supported
# This allows the grain to be set to False on systems that don't support zfs
# _conform_value is only called if zfs_support is set to True
from salt.modules.zfs import _conform_value
except ImportError:
pass
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
import salt.utils.zfs
__virtualname__ = 'zfs'
__salt__ = {
'cmd.run': salt.modules.cmdmod.run,
}
__utils__ = {
'zfs.is_supported': salt.utils.zfs.is_supported,
'zfs.has_feature_flags': salt.utils.zfs.has_feature_flags,
'zfs.zpool_command': salt.utils.zfs.zpool_command,
'zfs.to_size': salt.utils.zfs.to_size,
}
log = logging.getLogger(__name__)
@ -48,45 +48,6 @@ def __virtual__():
return __virtualname__
def _check_retcode(cmd):
'''
Simple internal wrapper for cmdmod.retcode
'''
return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0
def _zfs_support():
'''
Provide information about zfs kernel module
'''
grains = {'zfs_support': False}
# Check for zfs support
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
on_supported_platform = False
if salt.utils.platform.is_sunos() and salt.utils.path.which('zfs'):
on_supported_platform = True
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux():
modinfo = salt.utils.path.which('modinfo')
if modinfo:
on_supported_platform = _check_retcode('{0} zfs'.format(modinfo))
else:
on_supported_platform = _check_retcode('ls /sys/module/zfs')
# NOTE: fallback to zfs-fuse if needed
if not on_supported_platform and salt.utils.path.which('zfs-fuse'):
on_supported_platform = True
# Additional check for the zpool command
if on_supported_platform and salt.utils.path.which('zpool'):
grains['zfs_support'] = True
return grains
def _zfs_pool_data():
'''
Provide grains about zpools
@ -94,12 +55,16 @@ def _zfs_pool_data():
grains = {}
# collect zpool data
zpool_cmd = salt.utils.path.which('zpool')
for zpool in __salt__['cmd.run']('{zpool} list -H -p -o name,size'.format(zpool=zpool_cmd)).splitlines():
zpool_list_cmd = __utils__['zfs.zpool_command'](
'list',
flags=['-H', '-p'],
opts={'-o': 'name,size'},
)
for zpool in __salt__['cmd.run'](zpool_list_cmd).splitlines():
if 'zpool' not in grains:
grains['zpool'] = {}
zpool = zpool.split()
grains['zpool'][zpool[0]] = _conform_value(zpool[1], True)
grains['zpool'][zpool[0]] = __utils__['zfs.to_size'](zpool[1], True)
# return grain data
return grains
@ -110,8 +75,8 @@ def zfs():
Provide grains for zfs/zpool
'''
grains = {}
grains = salt.utils.dictupdate.update(grains, _zfs_support(), merge_lists=True)
grains['zfs_support'] = __utils__['zfs.is_supported']()
grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']()
if grains['zfs_support']:
grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True)
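With the support and feature-flag probes moved into ``salt.utils.zfs``, the grain output on a ZFS-capable minion ends up roughly like the following (pool name and size hypothetical; ``to_size`` renders the raw byte count reported by ``zpool list -p``):

.. code-block:: yaml

    zfs_support: true
    zfs_feature_flags: true
    zpool:
      rpool: 464G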

View File

@ -125,7 +125,7 @@ class KeyCLI(object):
if self.opts['eauth']:
if 'token' in self.opts:
try:
with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_:
with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_:
low['key'] = \
salt.utils.stringutils.to_unicode(fp_.readline())
except IOError:
@ -1082,6 +1082,8 @@ class RaetKey(Key):
pre_path = os.path.join(pre, minion_id)
rej_path = os.path.join(rej, minion_id)
# open mode is turned on, force accept the key
pub = salt.utils.stringutils.to_str(pub)
verify = salt.utils.stringutils.to_str(verify)
keydata = {
'minion_id': minion_id,
'pub': pub,
@ -1148,7 +1150,7 @@ class RaetKey(Key):
verify: <verify>
'''
path = os.path.join(self.opts['pki_dir'], status, minion_id)
with salt.utils.files.fopen(path, 'r') as fp_:
with salt.utils.files.fopen(path, 'rb') as fp_:
keydata = self.serial.loads(fp_.read())
return 'pub: {0}\nverify: {1}'.format(
keydata['pub'],
@ -1158,7 +1160,7 @@ class RaetKey(Key):
'''
Return a sha256 fingerprint for the key
'''
with salt.utils.files.fopen(path, 'r') as fp_:
with salt.utils.files.fopen(path, 'rb') as fp_:
keydata = self.serial.loads(fp_.read())
key = 'pub: {0}\nverify: {1}'.format(
keydata['pub'],
@ -1442,7 +1444,7 @@ class RaetKey(Key):
if os.path.exists(path):
#mode = os.stat(path).st_mode
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
with salt.utils.files.fopen(path, 'w+') as fp_:
with salt.utils.files.fopen(path, 'w+b') as fp_:
fp_.write(self.serial.dumps(keydata))
os.chmod(path, stat.S_IRUSR)
os.umask(c_umask)
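The mode changes in this file ('r' → 'rb', 'w+' → 'w+b') follow from ``self.serial`` being a msgpack-based serializer, which emits and expects bytes on Python 3. A standalone round-trip showing the constraint (plain ``msgpack``, not Salt's payload wrapper; assumes msgpack >= 0.5 for ``raw=False``):

.. code-block:: python

    import msgpack

    keydata = {'minion_id': 'web1', 'pub': 'abc123', 'verify': 'def456'}  # hypothetical
    packed = msgpack.dumps(keydata)            # bytes on Python 3
    with open('/tmp/web1.key', 'w+b') as fp_:  # text mode 'w+' would raise TypeError here
        fp_.write(packed)
    with open('/tmp/web1.key', 'rb') as fp_:
        restored = msgpack.loads(fp_.read(), raw=False)  # decode back to str
    print(restored['minion_id'])  # web1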

View File

@ -17,6 +17,7 @@ import logging.handlers
# Import salt libs
from salt.log.mixins import NewStyleClassMixIn, ExcInfoOnLogLevelFormatMixIn
from salt.ext.six.moves import queue
log = logging.getLogger(__name__)
@ -176,9 +177,9 @@ if sys.version_info < (3, 2):
'''
try:
self.queue.put_nowait(record)
except self.queue.Full:
except queue.Full:
sys.stderr.write('[WARNING ] Message queue is full, '
'unable to write "{0}" to log', record
'unable to write "{0}" to log'.format(record)
)
def prepare(self, record):

View File

@ -120,6 +120,7 @@ __MP_LOGGING_QUEUE = None
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None
__MP_IN_MAINPROCESS = multiprocessing.current_process().name == 'MainProcess'
__MP_MAINPROCESS_ID = None
class __NullLoggingHandler(TemporaryLoggingHandler):
@ -822,6 +823,7 @@ def set_multiprocessing_logging_queue(queue):
def setup_multiprocessing_logging_listener(opts, queue=None):
global __MP_LOGGING_QUEUE_PROCESS
global __MP_LOGGING_LISTENER_CONFIGURED
global __MP_MAINPROCESS_ID
if __MP_IN_MAINPROCESS is False:
# We're not in the MainProcess, return! No logging listener setup shall happen
@ -830,6 +832,11 @@ def setup_multiprocessing_logging_listener(opts, queue=None):
if __MP_LOGGING_LISTENER_CONFIGURED is True:
return
if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid():
# We're not in the MainProcess, return! No logging listener setup shall happen
return
__MP_MAINPROCESS_ID = os.getpid()
__MP_LOGGING_QUEUE_PROCESS = multiprocessing.Process(
target=__process_multiprocessing_logging_queue,
args=(opts, queue or get_multiprocessing_logging_queue(),)
@ -967,6 +974,11 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False):
if __MP_LOGGING_QUEUE_PROCESS is None:
return
if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid():
# We're not in the MainProcess, return! No logging listener setup shall happen
return
if __MP_LOGGING_QUEUE_PROCESS.is_alive():
logging.getLogger(__name__).debug('Stopping the multiprocessing logging queue listener')
try:
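The new ``__MP_MAINPROCESS_ID`` checks exist because ``__MP_IN_MAINPROCESS`` is evaluated at import time and is inherited unchanged by forked children, so a PID comparison is the only reliable ownership test for the listener. A stripped-down sketch of the guard pattern (not Salt's actual implementation):

.. code-block:: python

    import os

    _MAINPROCESS_ID = None

    def setup_listener():
        global _MAINPROCESS_ID
        if _MAINPROCESS_ID is not None and _MAINPROCESS_ID != os.getpid():
            return  # a forked child inherited the module; only the owner proceeds
        _MAINPROCESS_ID = os.getpid()
        # ... start the multiprocessing logging queue listener here ...

    def shutdown_listener():
        if _MAINPROCESS_ID is not None and _MAINPROCESS_ID != os.getpid():
            return  # children must not stop the parent's listener
        # ... join/terminate the listener process here ...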

View File

@ -32,12 +32,12 @@ Connection module for Amazon Cloud Formation
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
from salt.ext import six
import salt.utils.versions
log = logging.getLogger(__name__)
@ -72,7 +72,9 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a stack exists.
CLI example::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.exists mystack region=us-east-1
'''
@ -94,7 +96,9 @@ def describe(name, region=None, key=None, keyid=None, profile=None):
.. versionadded:: 2015.8.0
CLI example::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.describe mystack region=us-east-1
'''
@ -135,7 +139,9 @@ def create(name, template_body=None, template_url=None, parameters=None, notific
'''
Create a CFN stack.
CLI example to create a stack::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.create mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
@ -161,7 +167,9 @@ def update_stack(name, template_body=None, template_url=None, parameters=None, n
.. versionadded:: 2015.8.0
CLI example to update a stack::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.update_stack mystack template_url='https://s3.amazonaws.com/bucket/template.cft' \
region=us-east-1
@ -186,7 +194,9 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete a CFN stack.
CLI example to delete a stack::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.delete mystack region=us-east-1
'''
@ -205,7 +215,9 @@ def get_template(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if attributes are set on a CFN stack.
CLI example::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.get_template mystack
'''
@ -228,7 +240,9 @@ def validate_template(template_body=None, template_url=None, region=None, key=No
.. versionadded:: 2015.8.0
CLI example::
CLI Example:
.. code-block:: bash
salt myminion boto_cfn.validate_template mystack-template
'''

View File

@ -2586,6 +2586,7 @@ def describe_route_tables(route_table_id=None, route_table_name=None,
'instance_id': 'Instance',
'interface_id': 'NetworkInterfaceId',
'nat_gateway_id': 'NatGatewayId',
'vpc_peering_connection_id': 'VpcPeeringConnectionId',
}
assoc_keys = {'id': 'RouteTableAssociationId',
'main': 'Main',

File diff suppressed because it is too large

View File

@ -630,7 +630,6 @@ def _client_wrapper(attr, *args, **kwargs):
)
ret = func(*args, **kwargs)
except docker.errors.APIError as exc:
log.exception('Encountered error running API function %s', attr)
if catch_api_errors:
# Generic handling of Docker API errors
raise CommandExecutionError(

View File

@ -29,7 +29,7 @@ try:
except (NameError, KeyError):
import salt.modules.cmdmod
__salt__ = {
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
'cmd.run_all': salt.modules.cmdmod.run_all
}
@ -95,8 +95,7 @@ def __execute_cmd(command, host=None,
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm return an exit code \'{0}\'.'
.format(cmd['retcode']))
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return False
return True
@ -129,8 +128,7 @@ def __execute_ret(command, host=None,
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm return an exit code \'{0}\'.'
.format(cmd['retcode']))
log.warning('racadm returned an exit code of %s', cmd['retcode'])
else:
fmtlines = []
for l in cmd['stdout'].splitlines():
@ -193,8 +191,7 @@ def system_info(host=None,
module=module)
if cmd['retcode'] != 0:
log.warning('racadm return an exit code \'{0}\'.'
.format(cmd['retcode']))
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return cmd
return __parse_drac(cmd['stdout'])
@ -272,8 +269,7 @@ def network_info(host=None,
module=module)
if cmd['retcode'] != 0:
log.warning('racadm return an exit code \'{0}\'.'
.format(cmd['retcode']))
log.warning('racadm returned an exit code of %s', cmd['retcode'])
cmd['stdout'] = 'Network:\n' + 'Device = ' + module + '\n' + \
cmd['stdout']
@ -395,8 +391,7 @@ def list_users(host=None,
admin_password=admin_password)
if cmd['retcode'] != 0:
log.warning('racadm return an exit code \'{0}\'.'
.format(cmd['retcode']))
log.warning('racadm returned an exit code of %s', cmd['retcode'])
for user in cmd['stdout'].splitlines():
if not user.startswith('cfg'):
@ -444,7 +439,7 @@ def delete_user(username,
admin_password=admin_password)
else:
log.warning('\'{0}\' does not exist'.format(username))
log.warning('User \'%s\' does not exist', username)
return False
@ -485,7 +480,7 @@ def change_password(username, password, uid=None, host=None,
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
else:
log.warning('\'{0}\' does not exist'.format(username))
log.warning('racadm: user \'%s\' does not exist', username)
return False
@ -567,7 +562,7 @@ def create_user(username, password, permissions,
users = list_users()
if username in users:
log.warning('\'{0}\' already exists'.format(username))
log.warning('racadm: user \'%s\' already exists', username)
return False
for idx in six.iterkeys(users):

View File

@ -67,7 +67,7 @@ def _localectl_status():
Parse localectl status into a dict.
:return: dict
'''
if salt.utils.which('localectl') is None:
if salt.utils.path.which('localectl') is None:
raise CommandExecutionError('Unable to find "localectl"')
ret = {}

View File

@ -30,7 +30,7 @@ def __virtual__():
Only work on Mac OS
'''
if salt.utils.platform.is_darwin() \
and _LooseVersion(__grains__['osrelease']) >= '10.9':
and _LooseVersion(__grains__['osrelease']) >= _LooseVersion('10.9'):
return True
return (
False,

salt/modules/purefb.py Normal file
View File

@ -0,0 +1,508 @@
# -*- coding: utf-8 -*-
##
# Copyright 2018 Pure Storage Inc
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Management of Pure Storage FlashBlade
Installation Prerequisites
--------------------------
- You will need the ``purity_fb`` Python package installed in the Python
environment that runs Salt.
.. code-block:: bash
pip install purity_fb
- Configure Pure Storage FlashBlade authentication. Use one of the following
three methods.
1) From the minion config
.. code-block:: yaml
pure_tags:
fb:
san_ip: management vip or hostname for the FlashBlade
api_token: A valid api token for the FlashBlade being managed
2) From environment (PUREFB_IP and PUREFB_API)
3) From the pillar (PUREFB_IP and PUREFB_API)
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purity_fb
:platform: all
.. versionadded:: Fluorine
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
from datetime import datetime
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
# Import 3rd party modules
try:
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix
from purity_fb import rest, NfsRule, ProtocolRule
HAS_PURITY_FB = True
except ImportError:
HAS_PURITY_FB = False
__docformat__ = 'restructuredtext en'
__virtualname__ = 'purefb'
def __virtual__():
'''
Determine whether or not to load this module
'''
if HAS_PURITY_FB:
return __virtualname__
return (False, 'purefb execution module not loaded: purity_fb python library not available.')
def _get_blade():
'''
Get Pure Storage FlashBlade configuration
1) From the minion config
pure_tags:
fb:
san_ip: management vip or hostname for the FlashBlade
api_token: A valid api token for the FlashBlade being managed
2) From environment (PUREFB_IP and PUREFB_API)
3) From the pillar (PUREFB_IP and PUREFB_API)
'''
try:
blade_name = __opts__['pure_tags']['fb'].get('san_ip')
api_token = __opts__['pure_tags']['fb'].get('api_token')
if blade_name and api_token:
blade = PurityFb(blade_name)
blade.disable_verify_ssl()
except (KeyError, NameError, TypeError):
try:
blade_name = os.environ.get('PUREFB_IP')
api_token = os.environ.get('PUREFB_API')
if blade_name:
blade = PurityFb(blade_name)
blade.disable_verify_ssl()
except (ValueError, KeyError, NameError):
try:
api_token = __pillar__['PUREFB_API']
blade = PurityFb(__pillar__['PUREFB_IP'])
blade.disable_verify_ssl()
except (KeyError, NameError):
raise CommandExecutionError('No Pure Storage FlashBlade credentials found.')
try:
blade.login(api_token)
except Exception:
raise CommandExecutionError('Pure Storage FlashBlade authentication failed.')
return blade
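For a quick check of the credential fallback chain, the environment method (option 2) needs no config changes (values hypothetical):

.. code-block:: bash

    export PUREFB_IP=flashblade.example.com
    export PUREFB_API=T-00000000-0000-0000-0000-000000000000
    salt-call purefb.fs_create nfsshare size=100G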
def _get_fs(name, blade):
'''
Private function to
check for the existence of a filesystem
'''
_fs = []
_fs.append(name)
try:
res = blade.file_systems.list_file_systems(names=_fs)
return res.items[0]
except rest.ApiException:
return None
def _get_snapshot(name, suffix, blade):
'''
Return the filesystem snapshot object,
or None if it does not exist
'''
try:
filt = 'source=\'{}\' and suffix=\'{}\''.format(name, suffix)
res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
return res.items[0]
except rest.ApiException:
return None
def _get_deleted_fs(name, blade):
'''
Private function to check
if a file system has already been deleted
'''
try:
_fs = _get_fs(name, blade)
if _fs and _fs.destroyed:
return _fs
except rest.ApiException:
return None
def snap_create(name, suffix=None):
'''
Create a filesystem snapshot on a Pure Storage FlashBlade.
Will return False if the filesystem selected to snapshot does not exist.
.. versionadded:: Fluorine
name : string
name of filesystem to snapshot
suffix : string
if specified, forces the snapshot name suffix. If not specified, defaults to a timestamp.
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_create foo
salt '*' purefb.snap_create foo suffix=bar
'''
blade = _get_blade()
if suffix is None:
suffix = ('snap-' +
six.text_type((datetime.utcnow() -
datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()))
suffix = suffix.replace('.', '')
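# Yields e.g. 'snap-1519800000123456' -- epoch seconds with the decimal point stripped (illustrative value)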
if _get_fs(name, blade) is not None:
try:
source = []
source.append(name)
blade.file_system_snapshots.create_file_system_snapshots(sources=source,
suffix=SnapshotSuffix(suffix))
return True
except rest.ApiException:
return False
else:
return False
def snap_delete(name, suffix=None, eradicate=False):
'''
Delete a filesystem snapshot on a Pure Storage FlashBlade.
Will return False if selected snapshot does not exist.
.. versionadded:: Fluorine
name : string
name of filesystem
suffix : string
name of snapshot
eradicate : boolean
Eradicate snapshot after deletion if True. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_delete foo suffix=snap eradicate=True
'''
blade = _get_blade()
if _get_snapshot(name, suffix, blade) is not None:
try:
snapname = name + '.' + suffix
new_attr = FileSystemSnapshot(destroyed=True)
blade.file_system_snapshots.update_file_system_snapshots(name=snapname,
attributes=new_attr)
except rest.ApiException:
return False
if eradicate is True:
try:
blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
return True
except rest.ApiException:
return False
else:
return True
else:
return False
def snap_eradicate(name, suffix=None):
'''
Eradicate a deleted filesystem snapshot on a Pure Storage FlashBlade.
Will return False if snapshot is not in a deleted state.
.. versionadded:: Fluorine
name : string
name of filesystem
suffix : string
name of snapshot
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_eradicate foo suffix=snap
'''
blade = _get_blade()
if _get_snapshot(name, suffix, blade) is not None:
snapname = name + '.' + suffix
try:
blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
return True
except rest.ApiException:
return False
else:
return False
def fs_create(name, size=None, proto='NFS', nfs_rules='*(rw,no_root_squash)', snapshot=False):
'''
Create a filesystem on a Pure Storage FlashBlade.
Will return False if filesystem already exists.
.. versionadded:: Fluorine
name : string
name of filesystem (truncated to 63 characters)
proto : string
(Optional) Sharing protocol (NFS, CIFS or HTTP). If not specified, the default is NFS
snapshot: boolean
(Optional) Whether snapshots are enabled on the filesystem. Default is False
nfs_rules : string
(Optional) Export rules for NFS. If not specified, the default is *(rw,no_root_squash)
Refer to Pure Storage documentation for formatting rules.
size : string
If specified, the capacity of the filesystem. If not specified, defaults to 32G.
Refer to Pure Storage documentation for formatting rules.
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_create foo proto=CIFS
salt '*' purefb.fs_create foo size=10T
'''
if len(name) > 63:
name = name[0:63]
blade = _get_blade()
if _get_fs(name, blade) is None:
if size is None:
size = __utils__['stringutils.human_to_bytes']('32G')
else:
size = __utils__['stringutils.human_to_bytes'](size)
if proto.lower() == 'nfs':
fs_obj = FileSystem(name=name,
provisioned=size,
fast_remove_directory_enabled=True,
snapshot_directory_enabled=snapshot,
nfs=NfsRule(enabled=True, rules=nfs_rules),
)
elif proto.lower() == 'cifs':
fs_obj = FileSystem(name=name,
provisioned=size,
fast_remove_directory_enabled=True,
snapshot_directory_enabled=snapshot,
smb=ProtocolRule(enabled=True),
)
elif proto.lower() == 'http':
fs_obj = FileSystem(name=name,
provisioned=size,
fast_remove_directory_enabled=True,
snapshot_directory_enabled=snapshot,
http=ProtocolRule(enabled=True),
)
else:
return False
try:
blade.file_systems.create_file_systems(fs_obj)
return True
except rest.ApiException:
return False
else:
return False
def fs_delete(name, eradicate=False):
'''
Delete a share on a Pure Storage FlashBlade.
Will return False if filesystem doesn't exist or is already in a deleted state.
.. versionadded:: Fluorine
name : string
name of filesystem
eradicate : boolean
(Optional) Eradicate filesystem after deletion if True. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_delete foo eradicate=True
'''
blade = _get_blade()
if _get_fs(name, blade) is not None:
try:
blade.file_systems.update_file_systems(name=name,
attributes=FileSystem(nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True)
)
except rest.ApiException:
return False
if eradicate is True:
try:
blade.file_systems.delete_file_systems(name)
return True
except rest.ApiException:
return False
else:
return True
else:
return False
def fs_eradicate(name):
'''
Eradicate a deleted filesystem on a Pure Storage FlashBlade.
Will return False if the filesystem is not in a deleted state.
.. versionadded:: Fluorine
name : string
name of filesystem
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_eradicate foo
'''
blade = _get_blade()
if _get_deleted_fs(name, blade) is not None:
try:
blade.file_systems.delete_file_systems(name)
return True
except rest.ApiException:
return False
else:
return False
def fs_extend(name, size):
'''
Resize an existing filesystem on a Pure Storage FlashBlade.
Will return False if new size is less than or equal to existing size.
.. versionadded:: Fluorine
name : string
name of filesystem
size : string
New capacity of filesystem.
Refer to Pure Storage documentation for formatting rules.
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_extend foo 10T
'''
attr = {}
blade = _get_blade()
_fs = _get_fs(name, blade)
if _fs is not None:
if __utils__['stringutils.human_to_bytes'](size) > _fs.provisioned:
try:
attr['provisioned'] = __utils__['stringutils.human_to_bytes'](size)
n_attr = FileSystem(**attr)
blade.file_systems.update_file_systems(name=name, attributes=n_attr)
return True
except rest.ApiException:
return False
else:
return False
else:
return False
def fs_update(name, rules, snapshot=False):
'''
Update filesystem on a Pure Storage FlashBlade.
Allows changing NFS export rules and enabling/disabling
the snapshotting capability.
.. versionadded:: Fluorine
name : string
name of filesystem
rules : string
NFS export rules for filesystem
Refer to Pure Storage documentation for formatting rules.
snapshot: boolean
(Optional) Enable/Disable snapshots on the filesystem. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_update foo rules='10.234.112.23(ro), 10.234.112.24(rw)' snapshot=True
'''
blade = _get_blade()
attr = {}
_fs = _get_fs(name, blade)
if _fs is not None:
try:
if _fs.nfs.enabled:
attr['nfs'] = NfsRule(rules=rules)
attr['snapshot_directory_enabled'] = snapshot
n_attr = FileSystem(**attr)
blade.file_systems.update_file_systems(name=name, attributes=n_attr)
return True
except rest.ApiException:
return False
else:
return False

View File

@ -34,7 +34,6 @@ from salt.ext.six.moves import range # pylint: disable=W0622,import-error
# Import third party libs
try:
import win32gui
import win32api
import win32con
import pywintypes
@ -45,6 +44,7 @@ except ImportError:
# Import Salt libs
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.win_functions
from salt.exceptions import CommandExecutionError
PY2 = sys.version_info[0] == 2
@ -65,7 +65,7 @@ def __virtual__():
if not HAS_WINDOWS_MODULES:
return (False, 'reg execution module failed to load: '
'One of the following libraries did not load: '
+ 'win32gui, win32con, win32api')
'win32con, win32api, pywintypes')
return __virtualname__
@ -190,11 +190,7 @@ def broadcast_change():
salt '*' reg.broadcast_change
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
_, res = win32gui.SendMessageTimeout(
win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
win32con.SMTO_ABORTIFHUNG, 5000)
return not bool(res)
return salt.utils.win_functions.broadcast_setting_change('Environment')
def list_keys(hive, key=None, use_32bit_registry=False):

View File

@ -927,8 +927,8 @@ def highstate(test=None, queue=False, **kwargs):
.. code-block:: bash
salt '*' state.higstate exclude=bar,baz
salt '*' state.higstate exclude=foo*
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv

View File

@ -2442,7 +2442,7 @@ class _policy_info(object):
elif ord(val) == 1:
return 'Enabled'
else:
return 'Invalid Value'
return 'Invalid Value: {0!r}'.format(val) # pylint: disable=repr-flag-used-in-string
else:
return 'Not Defined'
except TypeError:
@ -5066,7 +5066,10 @@ def get(policy_class=None, return_full_policy_names=True,
class_vals[policy_name] = __salt__['reg.read_value'](_pol['Registry']['Hive'],
_pol['Registry']['Path'],
_pol['Registry']['Value'])['vdata']
log.debug('Value %s found for reg policy %s', class_vals[policy_name], policy_name)
log.debug(
'Value %r found for reg policy %s',
class_vals[policy_name], policy_name
)
elif 'Secedit' in _pol:
# get value from secedit
_ret, _val = _findOptionValueInSeceditFile(_pol['Secedit']['Option'])

View File

@ -18,12 +18,11 @@ import salt.utils.args
import salt.utils.data
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.win_functions
# Import 3rd-party libs
from salt.ext.six.moves import map
try:
from win32con import HWND_BROADCAST, WM_SETTINGCHANGE
from win32api import SendMessage
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
@ -57,7 +56,14 @@ def _normalize_dir(string_):
def rehash():
'''
Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment
variables
variables for new processes.
.. note::
This will only affect new processes that aren't launched by services. To
apply changes to the path to services, the host must be restarted. The
``salt-minion``, if running as a service, will not see changes to the
environment until the system is restarted. See
`MSDN Documentation <https://support.microsoft.com/en-us/help/821761/changes-that-you-make-to-environment-variables-do-not-affect-services>`_
CLI Example:
@ -65,7 +71,7 @@ def rehash():
salt '*' win_path.rehash
'''
return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 'Environment'))
return salt.utils.win_functions.broadcast_setting_change('Environment')
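Both ``reg.broadcast_change`` above and this ``rehash`` now delegate to a single helper in ``salt.utils.win_functions``. A hedged sketch of such a helper, mirroring the pywin32 call removed from ``reg.py`` (the real ``broadcast_setting_change`` may be implemented differently):

.. code-block:: python

    import win32con
    import win32gui

    def broadcast_setting_change(message='Environment'):
        # Notify every top-level window that a setting changed;
        # SMTO_ABORTIFHUNG keeps one hung window from blocking us past 5s.
        _, res = win32gui.SendMessageTimeout(
            win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, message,
            win32con.SMTO_ABORTIFHUNG, 5000)
        return not bool(res)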
def get_path():

View File

@ -581,28 +581,77 @@ def _refresh_db_conditional(saltenv, **kwargs):
def refresh_db(**kwargs):
'''
Fetches metadata files and calls :py:func:`pkg.genrepo
<salt.modules.win_pkg.genrepo>` to compile updated repository metadata.
r'''
Generates the local software metadata database (`winrepo.p`) on the minion.
The database is stored in a serialized format located by default at the
following location:
`C:\salt\var\cache\salt\minion\files\base\win\repo-ng\winrepo.p`
This module performs the following steps to generate the software metadata
database:
- Fetch the package definition files (.sls) from `winrepo_source_dir`
(default `salt://win/repo-ng`) and cache them in
`<cachedir>\files\<saltenv>\<winrepo_source_dir>`
(default: `C:\salt\var\cache\salt\minion\files\base\win\repo-ng`)
- Call :py:func:`pkg.genrepo <salt.modules.win_pkg.genrepo>` to parse the
package definition files and generate the repository metadata database
file (`winrepo.p`)
- Return the report received from
:py:func:`pkg.genrepo <salt.modules.win_pkg.genrepo>`
The default winrepo directory on the master is `/srv/salt/win/repo-ng`. All
files that end with `.sls` in this and all subdirectories will be used to
generate the repository metadata database (`winrepo.p`).
.. note::
- Hidden directories (directories beginning with '`.`', such as
'`.git`') will be ignored.
.. note::
There is no need to call `pkg.refresh_db` every time you work with the
pkg module. Automatic refresh will occur based on the following minion
configuration settings:
- `winrepo_cache_expire_min`
- `winrepo_cache_expire_max`
However, if the package definition files have changed, as would be the
case if you are developing a new package definition, this function
should be called to ensure the minion has the latest information about
packages available to it.
.. warning::
Directories and files fetched from <winrepo_source_dir>
(`/srv/salt/win/repo-ng`) will be processed in alphabetical order. If
two or more software definition files contain the same name, the last
one processed replaces all data from the files processed before it.
For more information see
:ref:`Windows Software Repository <windows-package-manager>`
Kwargs:
saltenv (str): Salt environment. Default: ``base``
verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'
Return a verbose data structure which includes 'success_list', a
list of all sls files and the package names contained within.
Default is 'False'
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
If ``True``, an error will be raised if any repo SLS file fails to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
Returns:
dict: A dictionary containing the results of the database refresh.
.. Warning::
.. note::
A result with a `total: 0` generally means that the files are in the
wrong location on the master. Try running the following command on the
minion: `salt-call -l debug pkg.refresh_db saltenv=base`
.. warning::
When calling this command from a state using `module.run` be sure to
pass `failhard: False`. Otherwise the state will report failure if it
encounters a bad software definition file.
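A minimal state illustrating that warning (state ID hypothetical):

.. code-block:: yaml

    refresh_winrepo:
      module.run:
        - name: pkg.refresh_db
        - failhard: False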
@ -648,10 +697,12 @@ def refresh_db(**kwargs):
)
# Cache repo-ng locally
log.info('Fetching *.sls files from {0}'.format(repo_details.winrepo_source_dir))
__salt__['cp.cache_dir'](
repo_details.winrepo_source_dir,
saltenv,
include_pat='*.sls'
path=repo_details.winrepo_source_dir,
saltenv=saltenv,
include_pat='*.sls',
exclude_pat=r'E@\/\..*?\/' # Exclude all hidden directories (.git)
)
return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard)
@ -754,6 +805,10 @@ def genrepo(**kwargs):
to process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
.. note::
- Hidden directories (directories beginning with '`.`', such as
'`.git`') will be ignored.
Returns:
dict: A dictionary of the results of the command
@ -777,9 +832,16 @@ def genrepo(**kwargs):
repo_details = _get_repo_details(saltenv)
for root, _, files in salt.utils.path.os_walk(repo_details.local_dest, followlinks=False):
# Skip hidden directories (.git)
if re.search(r'[\\/]\..*', root):
log.debug('Skipping files in directory: {0}'.format(root))
continue
short_path = os.path.relpath(root, repo_details.local_dest)
if short_path == '.':
short_path = ''
for name in files:
if name.endswith('.sls'):
total_files_processed += 1
@ -1209,11 +1271,11 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# single files
if cache_dir and installer.startswith('salt:'):
path, _ = os.path.split(installer)
__salt__['cp.cache_dir'](path,
saltenv,
False,
None,
'E@init.sls$')
__salt__['cp.cache_dir'](path=path,
saltenv=saltenv,
include_empty=False,
include_pat=None,
exclude_pat='E@init.sls$')
# Check to see if the cache_file is cached... if passed
if cache_file and cache_file.startswith('salt:'):
@ -1820,7 +1882,7 @@ def get_repo_data(saltenv='base'):
serial = salt.payload.Serial(__opts__)
with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile:
try:
repodata = salt.utils.data.decode(serial.loads(repofile.read()) or {})
repodata = salt.utils.data.decode(serial.loads(repofile.read(), encoding='utf-8') or {})
__context__['winrepo.data'] = repodata
return repodata
except Exception as exc:
@ -1843,7 +1905,7 @@ def _get_name_map(saltenv='base'):
return name_map
for k in name_map:
u_name_map[k.decode('utf-8')] = name_map[k]
u_name_map[k] = name_map[k]
return u_name_map

View File

@ -211,25 +211,29 @@ def _check_versionlock():
)
def _get_repo_options(**kwargs):
def _get_options(**kwargs):
'''
Returns a list of '--enablerepo' and '--disablerepo' options to be used
in the yum command, based on the kwargs.
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
'''
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disableexcludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and not fromrepo:
fromrepo = repo
ret = []
if fromrepo:
log.info('Restricting to repo \'%s\'', fromrepo)
ret.extend(['--disablerepo=*', '--enablerepo=' + fromrepo])
ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)])
else:
if disablerepo:
targets = [disablerepo] \
@ -245,58 +249,30 @@ def _get_repo_options(**kwargs):
else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets])
return ret
if disableexcludes:
log.info('Disabling excludes for \'%s\'', disableexcludes)
ret.append('--disableexcludes={0}'.format(disableexcludes))
def _get_excludes_option(**kwargs):
'''
Returns a list of '--disableexcludes' option to be used in the yum command,
based on the kwargs.
'''
disable_excludes = kwargs.pop('disableexcludes', '')
ret = []
if disable_excludes:
log.info('Disabling excludes for \'%s\'', disable_excludes)
ret.append('--disableexcludes={0}'.format(disable_excludes))
return ret
def _get_branch_option(**kwargs):
'''
Returns a list of '--branch' option to be used in the yum command,
based on the kwargs. This feature requires 'branch' plugin for YUM.
'''
branch = kwargs.pop('branch', '')
ret = []
if branch:
log.info('Adding branch \'%s\'', branch)
ret.append('--branch=\'{0}\''.format(branch))
return ret
ret.append('--branch={0}'.format(branch))
def _get_extra_options(**kwargs):
'''
Returns list of extra options for yum
'''
ret = []
kwargs = salt.utils.args.clean_kwargs(**kwargs)
# Remove already handled options from kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disable_excludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
for key, value in six.iteritems(kwargs):
if get_extra_options:
# sorting here to make order uniform, makes unit testing more reliable
for key in sorted(kwargs):
if key.startswith('__'):
continue
value = kwargs[key]
if isinstance(value, six.string_types):
log.info('Adding extra option --%s=\'%s\'', key, value)
ret.append('--{0}=\'{1}\''.format(key, value))
log.info('Found extra option --%s=%s', key, value)
ret.append('--{0}={1}'.format(key, value))
elif value is True:
log.info('Adding extra option --%s', key)
log.info('Found extra option --%s', key)
ret.append('--{0}'.format(key))
log.info('Adding extra options %s', ret)
if ret:
log.info('Adding extra options: %s', ret)
return ret
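Rough behavior of the consolidated helper (illustrative calls; log output omitted):

.. code-block:: python

    _get_options(fromrepo='epel', disableexcludes='main', branch='stable')
    # -> ['--disablerepo=*', '--enablerepo=epel',
    #     '--disableexcludes=main', '--branch=stable']

    _get_options(get_extra_options=True, setopt='obsoletes=0', nogpgcheck=True)
    # -> ['--nogpgcheck', '--setopt=obsoletes=0']   (extras sorted by key)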
@ -460,8 +436,7 @@ def latest_version(*names, **kwargs):
if len(names) == 0:
return ''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
options = _get_options(**kwargs)
# Refresh before looking for the latest version available
if refresh:
@ -471,8 +446,7 @@ def latest_version(*names, **kwargs):
# Get available versions for specified package(s)
cmd = [_yum(), '--quiet']
cmd.extend(repo_arg)
cmd.extend(exclude_arg)
cmd.extend(options)
cmd.extend(['list', 'available'])
cmd.extend(names)
out = __salt__['cmd.run_all'](cmd,
@ -818,7 +792,7 @@ def list_repo_pkgs(*args, **kwargs):
disablerepo = kwargs.pop('disablerepo', '') or ''
enablerepo = kwargs.pop('enablerepo', '') or ''
repo_arg = _get_repo_options(fromrepo=fromrepo, **kwargs)
repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
if fromrepo and not isinstance(fromrepo, list):
try:
@ -970,15 +944,13 @@ def list_upgrades(refresh=True, **kwargs):
salt '*' pkg.list_upgrades
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(check_update=False, **kwargs)
cmd = [_yum(), '--quiet']
cmd.extend(repo_arg)
cmd.extend(exclude_arg)
cmd.extend(options)
cmd.extend(['list', 'upgrades' if _yum() == 'dnf' else 'updates'])
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
@ -1096,21 +1068,19 @@ def refresh_db(**kwargs):
check_update_ = kwargs.pop('check_update', True)
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
options = _get_options(**kwargs)
clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache']
update_cmd = [_yum(), '--quiet', 'check-update']
clean_cmd = [_yum(), '--quiet', '--assumeyes', 'clean', 'expire-cache']
update_cmd = [_yum(), '--quiet', '--assumeyes', 'check-update']
if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7':
# This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL
if __grains__.get('os_family') == 'RedHat' \
and __grains__.get('osmajorrelease') == 7:
# This feature is disabled because it is not used by Salt and adds a
# lot of extra time to the command with large repos like EPEL
update_cmd.append('--setopt=autocheck_running_kernel=false')
for args in (repo_arg, exclude_arg, branch_arg):
if args:
clean_cmd.extend(args)
update_cmd.extend(args)
clean_cmd.extend(options)
update_cmd.extend(options)
__salt__['cmd.run'](clean_cmd, python_shell=False)
if check_update_:
@ -1162,6 +1132,7 @@ def install(name=None,
reinstall=False,
normalize=True,
update_holds=False,
saltenv='base',
ignore_epoch=False,
**kwargs):
'''
@ -1343,9 +1314,7 @@ def install(name=None,
'version': '<new-version>',
'arch': '<new-arch>'}}}
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
@ -1353,7 +1322,7 @@ def install(name=None,
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, normalize=normalize, **kwargs
name, pkgs, sources, saltenv=saltenv, normalize=normalize
)
except MinionError as exc:
raise CommandExecutionError(exc)
@ -1580,9 +1549,7 @@ def install(name=None,
'''
DRY function to add args common to all yum/dnf commands
'''
for arg in (repo_arg, exclude_arg, branch_arg):
if arg:
cmd.extend(arg)
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
if downloadonly:
@ -1854,10 +1821,7 @@ def upgrade(name=None,
salt '*' pkg.upgrade security=True exclude='kernel*'
'''
repo_arg = _get_repo_options(**kwargs)
exclude_arg = _get_excludes_option(**kwargs)
branch_arg = _get_branch_option(**kwargs)
extra_args = _get_extra_options(**kwargs)
options = _get_options(get_extra_options=True, **kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
@ -1886,9 +1850,7 @@ def upgrade(name=None,
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '--quiet', '-y'])
for args in (repo_arg, exclude_arg, branch_arg, extra_args):
if args:
cmd.extend(args)
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
cmd.append('upgrade')

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -482,15 +482,6 @@ def info_installed(*names, **kwargs):
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in six.iteritems(pkg_nfo):
if isinstance(value, six.string_types):
# Check, if string is encoded in a proper UTF-8
if six.PY3:
value_ = value.encode('UTF-8', 'ignore').decode('UTF-8', 'ignore')
else:
value_ = value.decode('UTF-8', 'ignore').encode('UTF-8', 'ignore')
if value != value_:
value = kwargs.get('errors', 'ignore') == 'ignore' and value_ or 'N/A (invalid UTF-8)'
log.error('Package %s has bad UTF-8 code in %s: %s', pkg_name, key, value)
if key == 'source_rpm':
t_nfo['source'] = value
else:

View File

@ -248,28 +248,6 @@ def _json_dumps(obj, **kwargs):
# - "wheel" (need async api...)
class SaltClientsMixIn(object):
'''
MixIn class to container all of the salt clients that the API needs
'''
# TODO: load this proactively, instead of waiting for a request
__saltclients = None
@property
def saltclients(self):
if SaltClientsMixIn.__saltclients is None:
local_client = salt.client.get_local_client(mopts=self.application.opts)
# TODO: refreshing clients using cachedict
SaltClientsMixIn.__saltclients = {
'local': local_client.run_job_async,
# not the actual client we'll use.. but its what we'll use to get args
'local_async': local_client.run_job_async,
'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
'runner_async': None, # empty, since we use the same client as `runner`
}
return SaltClientsMixIn.__saltclients
AUTH_TOKEN_HEADER = 'X-Auth-Token'
AUTH_COOKIE_NAME = 'session_id'
@ -400,7 +378,7 @@ class EventListener(object):
del self.timeout_map[future]
class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylint: disable=W0223
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
ct_out_map = (
('application/json', _json_dumps),
('application/x-yaml', salt.utils.yaml.safe_dump),
@ -428,6 +406,16 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin
self.application.opts,
)
if not hasattr(self, 'saltclients'):
local_client = salt.client.get_local_client(mopts=self.application.opts)
self.saltclients = {
'local': local_client.run_job_async,
# not the actual client we'll use... but it's what we'll use to get args
'local_async': local_client.run_job_async,
'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
'runner_async': None, # empty, since we use the same client as `runner`
}
@property
def token(self):
'''
@ -759,7 +747,7 @@ class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223
self.write(self.serialize(ret))
class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W0223
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
'''
Main API handler for base "/"
'''

View File

@ -226,6 +226,9 @@ def strip_esc_sequence(txt):
from writing their own terminal manipulation commands
'''
if isinstance(txt, six.string_types):
try:
return txt.replace('\033', '?')
except UnicodeDecodeError:
return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function
else:
return txt
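A quick sketch of the behavior this preserves, assuming a Python 3 str input:

# ESC (\033) is replaced with '?', defusing terminal control sequences
# such as screen clearing before the text is written to the console.
txt = '\033[2J\033[1;1Hboo'
assert txt.replace('\033', '?') == '?[2J?[1;1Hboo'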

View File

@ -71,6 +71,7 @@ class NestDisplay(object):
endc,
suffix)
except UnicodeDecodeError:
try:
return fmt.format(
indent,
color,
@ -78,13 +79,26 @@ class NestDisplay(object):
salt.utils.stringutils.to_unicode(msg),
endc,
suffix)
except UnicodeDecodeError:
# msg contains binary data that can't be decoded
return str(fmt).format( # future lint: disable=blacklisted-function
indent,
color,
prefix,
msg,
endc,
suffix)
def display(self, ret, indent, prefix, out):
'''
Recursively iterate down through data structures to determine output
'''
if isinstance(ret, bytes):
try:
ret = salt.utils.stringutils.to_unicode(ret)
except UnicodeDecodeError:
# ret contains binary data that can't be decoded
pass
if ret is None or ret is True or ret is False:
out.append(
@ -183,4 +197,11 @@ def output(ret, **kwargs):
base_indent = kwargs.get('nested_indent', 0) \
or __opts__.get('nested_indent', 0)
nest = NestDisplay(retcode=retcode)
return '\n'.join(nest.display(ret, base_indent, '', []))
lines = nest.display(ret, base_indent, '', [])
try:
return '\n'.join(lines)
except UnicodeDecodeError:
# output contains binary data that can't be decoded
return str('\n').join( # future lint: disable=blacklisted-function
[salt.utils.stringutils.to_str(x) for x in lines]
)
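A minimal sketch of the decode-or-fall-back pattern used throughout this outputter, assuming a mixed list of text and undecodable binary (on Python 3 the failure mode is TypeError rather than UnicodeDecodeError):

def join_lines(lines):
    try:
        return '\n'.join(lines)
    except (UnicodeDecodeError, TypeError):
        # binary data present that can't be decoded; coerce to str first
        return '\n'.join(str(x) for x in lines)

print(join_lines(['ok', b'\xff']))  # falls back to the str() path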

View File

@ -1,19 +1,37 @@
# -*- coding: utf-8 -*-
#-*- coding: utf-8 -*-
'''
Retrieve EC2 instance data for minions.
Retrieve EC2 instance data for minions, populating ec2_tags and ec2_tags_list
The minion id must be the instance-id retrieved from AWS. As an
option, use_grain can be set to True. This allows the use of an
The minion id must be the AWS instance-id or the value of the tag named by
'tag_key'. For example, set 'tag_key' to 'Name' to have the minion-id matched
against the tag 'Name'. The tag contents must be unique. The value of
'tag_value' can be 'uqdn' or 'asis'; 'uqdn' strips any domain before comparison.
The option use_grain can be set to True. This allows the use of an
instance-id grain instead of the minion-id. Since this is a potential
security risk, the configuration can be further expanded to include
a list of minions that are trusted to only allow the alternate id
of the instances to specific hosts. There is no glob matching at
this time.
The optional 'tag_list_key' indicates which keys should be added to
'ec2_tags_list' and split by 'tag_list_sep' (default `;`). If a tag key is
included in 'tag_list_key', it is removed from ec2_tags. If a tag does not
exist, it is still included as an empty list.
Note: restart the salt-master for changes to take effect.
.. code-block:: yaml
ext_pillar:
- ec2_pillar:
tag_key: 'Name'
tag_value: 'asis'
tag_list_key:
- Role
tag_list_sep: ';'
use_grain: True
minion_ids:
- trusted-minion-1
@ -31,6 +49,8 @@ the instance.
from __future__ import absolute_import, print_function, unicode_literals
import re
import logging
import salt.ext.six as six
from salt.ext.six.moves import range
# Import salt libs
from salt.utils.versions import StrictVersion as _StrictVersion
@ -47,6 +67,9 @@ except ImportError:
# Set up logging
log = logging.getLogger(__name__)
# DEBUG boto is far too verbose
logging.getLogger('boto').setLevel(logging.WARNING)
def __virtual__():
'''
@ -76,64 +99,145 @@ def _get_instance_info():
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
use_grain=False,
minion_ids=None):
minion_ids=None,
tag_key=None,
tag_value='asis',
tag_list_key=None,
tag_list_sep=';'):
'''
Get EC2 tags for the minion and return them as pillar data
'''
valid_tag_value = ['uqdn', 'asis']
log.debug("Querying EC2 tags for minion id %s", minion_id)
# meta-data:instance-id
grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
if not grain_instance_id:
# dynamic:instance-identity:document:instanceId
grain_instance_id = \
__grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
log.error('External pillar %s, instance-id \'%s\' is not valid for '
'\'%s\'', __name__, grain_instance_id, minion_id)
grain_instance_id = None # invalid instance id found, remove it from use.
# If minion_id is not in the format of an AWS EC2 instance, check to see
# if there is a grain named 'instance-id' use that. Because this is a
# security risk, the master config must contain a use_grain: True option
# for this external pillar, which defaults to no
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is None:
if 'instance-id' not in __grains__:
log.debug("Minion-id is not in AWS instance-id formation, and there "
"is no instance-id grain for minion %s", minion_id)
# Check AWS tag restrictions, i.e. letters, spaces, and numbers and + - = . _ : / @
if tag_key and re.match(r'[\w=.:/@-]+$', tag_key) is None:
log.error('External pillar %s, tag_key \'%s\' is not valid',
__name__, tag_key if isinstance(tag_key, six.text_type) else 'non-string')
return {}
if not use_grain:
log.debug("Minion-id is not in AWS instance-id formation, and option "
"not set to use instance-id grain, for minion %s, use_grain "
"is %s", minion_id, use_grain)
if tag_key and tag_value not in valid_tag_value:
log.error('External pillar %s, tag_value \'%s\' is not valid, must be one '
'of %s', __name__, tag_value, ' '.join(valid_tag_value))
return {}
log.debug("use_grain set to %s", use_grain)
if minion_ids is not None and minion_id not in minion_ids:
log.debug("Minion-id is not in AWS instance ID format, and minion_ids "
"is set in the ec2_pillar configuration, but minion %s is "
"not in the list of allowed minions %s", minion_id, minion_ids)
return {}
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', __grains__['instance-id']) is not None:
minion_id = __grains__['instance-id']
log.debug("Minion-id is not in AWS instance ID format, but a grain"
" is, so using %s as the minion ID", minion_id)
if not tag_key:
base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' '
'against instance-id'.format(__name__, minion_id))
else:
log.debug("Nether minion id nor a grain named instance-id is in "
"AWS format, can't query EC2 tags for minion %s", minion_id)
base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' '
'against instance-id or \'{2}\' against \'{3}\''.format(__name__, minion_id, tag_key, tag_value))
log.debug(base_msg)
find_filter = None
find_id = None
if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
find_filter = None
find_id = minion_id
elif tag_key:
if tag_value == 'uqdn':
find_filter = {'tag:{0}'.format(tag_key): minion_id.split('.', 1)[0]}
else:
find_filter = {'tag:{0}'.format(tag_key): minion_id}
if grain_instance_id:
# we have an untrusted grain_instance_id, use it to narrow the search
# even more. Combination will be unique even if uqdn is set.
find_filter.update({'instance-id': grain_instance_id})
# Add this if running state is not dependent on EC2Config
# find_filter.update('instance-state-name': 'running')
# minion-id is not an instance-id and there is no suitable filter, try use_grain if enabled
if not find_filter and not find_id and use_grain:
if not grain_instance_id:
log.debug('Minion-id is not in AWS instance-id format, and there '
'is no instance-id grain for minion %s', minion_id)
return {}
if minion_ids is not None and minion_id not in minion_ids:
log.debug('Minion-id is not in AWS instance ID format, and minion_ids '
'is set in the ec2_pillar configuration, but minion %s is '
'not in the list of allowed minions %s', minion_id, minion_ids)
return {}
find_id = grain_instance_id
if not (find_filter or find_id):
log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against '
'instance-id or \'%s\' against \'%s\', nothing to match against',
__name__, minion_id, tag_key, tag_value)
return {}
m = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if len(m.keys()) < 1:
log.info("%s: not an EC2 instance, skipping", __name__)
return None
myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
if len(myself.keys()) < 1:
log.info("%s: salt master not an EC2 instance, skipping", __name__)
return {}
# Get the Master's instance info, primarily the region
(instance_id, region) = _get_instance_info()
(_, region) = _get_instance_info()
try:
conn = boto.ec2.connect_to_region(region)
except boto.exception as e: # pylint: disable=E0712
log.error("%s: invalid AWS credentials.", __name__)
return None
except boto.exception.AWSConnectionError as exc:
log.error('%s: invalid AWS credentials, %s', __name__, exc)
return {}
except:
raise
if conn is None:
log.error('%s: Could not connect to region %s', __name__, region)
return {}
tags = {}
try:
_tags = conn.get_all_tags(filters={'resource-type': 'instance',
'resource-id': minion_id})
for tag in _tags:
tags[tag.name] = tag.value
except IndexError as e:
log.error("Couldn't retrieve instance information: %s", e)
return None
if find_id:
instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
else:
# filters and max_results cannot be used together.
instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
return {'ec2_tags': tags}
except boto.exception.EC2ResponseError as exc:
log.error('%s failed with \'%s\'', base_msg, exc)
return {}
if not instance_data:
log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
# Find an active instance, i.e. ignore terminated and stopped instances
active_inst = []
for inst in range(0, len(instance_data)):
if instance_data[inst].state not in ['terminated', 'stopped']:
active_inst.append(inst)
valid_inst = len(active_inst)
if not valid_inst:
log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
if valid_inst > 1:
log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
return {}
instance = instance_data[active_inst[0]]
if instance.tags:
ec2_tags = instance.tags
ec2_tags_list = {}
log.debug('External pillar %s, for minion id \'%s\', tags: %s', __name__, minion_id, instance.tags)
if tag_list_key and isinstance(tag_list_key, list):
for item in tag_list_key:
if item in ec2_tags:
ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
del ec2_tags[item] # make sure it is only in ec2_tags_list
else:
ec2_tags_list[item] = [] # always return a result
return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
return {}
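A hypothetical illustration of the tag_list_key handling above (tag names and values invented): with tag_list_key set to ['Role'] and tag_list_sep ';', an instance tagged Name=web1 and Role=db;cache would be split as follows.

ec2_tags = {'Name': 'web1', 'Role': 'db;cache'}
ec2_tags_list = {}
for item in ['Role']:  # tag_list_key
    if item in ec2_tags:
        # split the tag and keep it only in ec2_tags_list
        ec2_tags_list[item] = ec2_tags.pop(item).split(';')
    else:
        ec2_tags_list[item] = []  # missing tags still yield an empty list

assert ec2_tags == {'Name': 'web1'}
assert ec2_tags_list == {'Role': ['db', 'cache']}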

View File

@ -1,182 +1,97 @@
# -*- coding: utf-8 -*-
'''
The ``file_tree`` external pillar allows values from all files in a directory
tree to be imported as Pillar data.
``File_tree`` is an external pillar that allows
values from all files in a directory tree to be imported as Pillar data.
.. note::
Note this is an external pillar, and is subject to the rules and constraints
governing external pillars detailed here: :ref:`external-pillars`.
This is an external pillar and is subject to the :ref:`rules and
constraints <external-pillars>` governing external pillars.
.. versionadded:: 2015.5.0
Example Configuration
---------------------
In this pillar, data is organized by either Minion ID or Nodegroup name. To
setup pillar data for a specific Minion, place it in
``<root_dir>/hosts/<minion_id>``. To setup pillar data for an entire
Nodegroup, place it in ``<root_dir>/nodegroups/<node_group>`` where
``<node_group>`` is the Nodegroup's name.
Example ``file_tree`` Pillar
============================
Master Configuration
--------------------
.. code-block:: yaml
ext_pillar:
- file_tree:
root_dir: /path/to/root/directory
root_dir: /srv/ext_pillar
follow_dir_links: False
keep_newline: True
The ``root_dir`` parameter is required and points to the directory where files
for each host are stored. The ``follow_dir_links`` parameter is optional and
defaults to False. If ``follow_dir_links`` is set to True, this external pillar
will follow symbolic links to other directories.
node_groups:
internal_servers: 'L@bob,stuart,kevin'
.. warning::
Be careful when using ``follow_dir_links``, as a recursive symlink chain
will result in unexpected results.
Pillar Configuration
--------------------
.. versionchanged:: Oxygen
If ``root_dir`` is a relative path, it will be treated as relative to the
:conf_master:`pillar_roots` of the environment specified by
:conf_minion:`pillarenv`. If an environment specifies multiple
roots, this module will search for files relative to all of them, in order,
merging the results.
.. code-block:: bash
If ``keep_newline`` is set to ``True``, then the pillar values for files ending
in newlines will keep that newline. The default behavior is to remove the
end-of-file newline. ``keep_newline`` should be turned on if the pillar data is
intended to be used to deploy a file using ``contents_pillar`` with a
:py:func:`file.managed <salt.states.file.managed>` state.
(salt-master) # tree /srv/ext_pillar
/srv/ext_pillar/
|-- hosts
| |-- bob
| | |-- apache
| | | `-- config.d
| | | |-- 00_important.conf
| | | `-- 20_bob_extra.conf
| | `-- corporate_app
| | `-- settings
| | `-- bob_settings.cfg
| `-- kevin
| |-- apache
| | `-- config.d
| | `-- 00_important.conf
| `-- corporate_app
| `-- settings
| `-- kevin_settings.cfg
`-- nodegroups
`-- internal_servers
`-- corporate_app
`-- settings
`-- common_settings.cfg
.. versionchanged:: 2015.8.4
The ``raw_data`` parameter has been renamed to ``keep_newline``. In earlier
releases, ``raw_data`` must be used. Also, this parameter can now be a list
of globs, allowing for more granular control over which pillar values keep
their end-of-file newline. The globs match paths relative to the
directories named for minion IDs and nodegroups underneath the ``root_dir``
(see the layout examples in the below sections).
Verify Pillar Data
------------------
.. code-block:: yaml
.. code-block:: bash
ext_pillar:
- file_tree:
root_dir: /path/to/root/directory
keep_newline:
- files/testdir/*
(salt-master) # salt bob pillar.items
bob:
----------
apache:
----------
config.d:
----------
00_important.conf:
<important_config important_setting="yes" />
20_bob_extra.conf:
<bob_specific_cfg has_freeze_ray="yes" />
corporate_app:
----------
settings:
----------
common_settings:
// This is the main settings file for the corporate
// internal web app
main_setting: probably
bob_settings:
role: bob
.. note::
In earlier releases, this documentation incorrectly stated that binary
files would not be affected by the ``keep_newline`` configuration. However,
this module does not actually distinguish between binary and text files.
.. versionchanged:: 2017.7.0
Templating/rendering has been added. You can now specify a default render
pipeline and a black- and whitelist of (dis)allowed renderers.
``template`` must be set to ``True`` for templating to happen.
.. code-block:: yaml
ext_pillar:
- file_tree:
root_dir: /path/to/root/directory
render_default: jinja|yaml
renderer_blacklist:
- gpg
renderer_whitelist:
- jinja
- yaml
template: True
Assigning Pillar Data to Individual Hosts
-----------------------------------------
To configure pillar data for each host, this external pillar will recursively
iterate over ``root_dir``/hosts/``id`` (where ``id`` is a minion ID), and
compile pillar data with each subdirectory as a dictionary key and each file
as a value.
For example, the following ``root_dir`` tree:
.. code-block:: text
./hosts/
./hosts/test-host/
./hosts/test-host/files/
./hosts/test-host/files/testdir/
./hosts/test-host/files/testdir/file1.txt
./hosts/test-host/files/testdir/file2.txt
./hosts/test-host/files/another-testdir/
./hosts/test-host/files/another-testdir/symlink-to-file1.txt
will result in the following pillar tree for minion with ID ``test-host``:
.. code-block:: text
test-host:
----------
files:
----------
another-testdir:
----------
symlink-to-file1.txt:
Contents of file #1.
testdir:
----------
file1.txt:
Contents of file #1.
file2.txt:
Contents of file #2.
.. note::
Subdirectories underneath ``root_dir``/hosts/``id`` become nested
dictionaries, as shown above.
Assigning Pillar Data to Entire Nodegroups
------------------------------------------
To assign Pillar data to all minions in a given nodegroup, this external pillar
recursively iterates over ``root_dir``/nodegroups/``nodegroup`` (where
``nodegroup`` is the name of a nodegroup), and like for individual hosts,
compiles pillar data with each subdirectory as a dictionary key and each file
as a value.
.. important::
If the same Pillar key is set for a minion both by nodegroup and by
individual host, then the value set for the individual host will take
precedence.
For example, the following ``root_dir`` tree:
.. code-block:: text
./nodegroups/
./nodegroups/test-group/
./nodegroups/test-group/files/
./nodegroups/test-group/files/testdir/
./nodegroups/test-group/files/testdir/file1.txt
./nodegroups/test-group/files/testdir/file2.txt
./nodegroups/test-group/files/another-testdir/
./nodegroups/test-group/files/another-testdir/symlink-to-file1.txt
will result in the following pillar data for minions in the node group
``test-group``:
.. code-block:: text
test-host:
----------
files:
----------
another-testdir:
----------
symlink-to-file1.txt:
Contents of file #1.
testdir:
----------
file1.txt:
Contents of file #1.
file2.txt:
Contents of file #2.
The leaf data in the example shown is the contents of the pillar files.
'''
from __future__ import absolute_import, print_function, unicode_literals
@ -311,7 +226,130 @@ def ext_pillar(minion_id,
renderer_whitelist=None,
template=False):
'''
Compile pillar data for the specified minion ID
Compile pillar data from the given ``root_dir`` specific to Nodegroup names
and Minion IDs.
If a Minion's ID is not found at ``<root_dir>/hosts/<minion_id>`` or if it
is not included in any Nodegroups named at
``<root_dir>/nodegroups/<node_group>``, no pillar data provided by this
pillar module will be available for that Minion.
.. versionchanged:: 2017.7.0
Templating/rendering has been added. You can now specify a default
render pipeline and a black- and whitelist of (dis)allowed renderers.
:param:`template` must be set to ``True`` for templating to happen.
.. code-block:: yaml
ext_pillar:
- file_tree:
root_dir: /path/to/root/directory
render_default: jinja|yaml
renderer_blacklist:
- gpg
renderer_whitelist:
- jinja
- yaml
template: True
:param minion_id:
The ID of the Minion whose pillar data is to be collected
:param pillar:
Unused by the ``file_tree`` pillar module
:param root_dir:
Filesystem directory used as the root for pillar data (e.g.
``/srv/ext_pillar``)
.. versionchanged:: Oxygen
If ``root_dir`` is a relative path, it will be treated as relative to the
:conf_master:`pillar_roots` of the environment specified by
:conf_minion:`pillarenv`. If an environment specifies multiple
roots, this module will search for files relative to all of them, in order,
merging the results.
:param follow_dir_links:
Follow symbolic links to directories while collecting pillar files.
Defaults to ``False``.
.. warning::
Care should be exercised when enabling this option as it will
follow links that point outside of :param:`root_dir`.
.. warning::
Symbolic links that lead to infinite recursion are not filtered.
:param debug:
Enable debug information at log level ``debug``. Defaults to
``False``. This option may be useful to help debug errors when setting
up the ``file_tree`` pillar module.
:param keep_newline:
Preserve the end-of-file newline in files. Defaults to ``False``.
This option may either be a boolean or a list of file globs (as defined
by the `Python fnmatch package
<https://docs.python.org/library/fnmatch.html>`_) for which end-of-file
newlines are to be kept.
``keep_newline`` should be turned on if the pillar data is intended to
be used to deploy a file using ``contents_pillar`` with a
:py:func:`file.managed <salt.states.file.managed>` state.
.. versionchanged:: 2015.8.4
The ``raw_data`` parameter has been renamed to ``keep_newline``. In
earlier releases, ``raw_data`` must be used. Also, this parameter
can now be a list of globs, allowing for more granular control over
which pillar values keep their end-of-file newline. The globs match
paths relative to the directories named for Minion IDs and
Nodegroup names underneath the :param:`root_dir`.
.. code-block:: yaml
ext_pillar:
- file_tree:
root_dir: /srv/ext_pillar
keep_newline:
- apache/config.d/*
- corporate_app/settings/*
.. note::
In earlier releases, this documentation incorrectly stated that
binary files would not be affected by ``keep_newline``. However,
this module does not actually distinguish between binary and text
files.
:param render_default:
Override Salt's :conf_master:`default global renderer <renderer>` for
the ``file_tree`` pillar.
.. code-block:: yaml
render_default: jinja
:param renderer_blacklist:
Disallow renderers for pillar files.
.. code-block:: yaml
renderer_blacklist:
- json
:param renderer_whitelist:
Allow renderers for pillar files.
.. code-block:: yaml
renderer_whitelist:
- yaml
- jinja
:param template:
Enable templating of pillar files. Defaults to ``False``.
'''
# Not used
del pillar
@ -391,7 +429,7 @@ def _ext_pillar(minion_id,
ngroup_pillar = {}
nodegroups_dir = os.path.join(root_dir, 'nodegroups')
if os.path.exists(nodegroups_dir) and len(__opts__['nodegroups']) > 0:
if os.path.exists(nodegroups_dir) and len(__opts__.get('nodegroups', ())) > 0:
master_ngroups = __opts__['nodegroups']
ext_pillar_dirs = os.listdir(nodegroups_dir)
if len(ext_pillar_dirs) > 0:
@ -419,8 +457,8 @@ def _ext_pillar(minion_id,
else:
if debug is True:
log.debug(
'file_tree: no nodegroups found in file tree directory '
'ext_pillar_dirs, skipping...'
'file_tree: no nodegroups found in file tree directory %s, skipping...',
ext_pillar_dirs
)
else:
if debug is True:
@ -428,7 +466,12 @@ def _ext_pillar(minion_id,
host_dir = os.path.join(root_dir, 'hosts', minion_id)
if not os.path.exists(host_dir):
# No data for host with this ID
if debug is True:
log.debug(
'file_tree: no pillar data for minion %s found in file tree directory %s',
minion_id,
host_dir
)
return ngroup_pillar
if not os.path.isdir(host_dir):

View File

@ -45,7 +45,7 @@ def targets(tgt, tgt_type='glob', **kwargs):
for host, addr in host_addrs.items():
addr = six.text_type(addr)
ret[addr] = copy.deepcopy(__opts__.get('roster_defaults', {}))
ret[host] = copy.deepcopy(__opts__.get('roster_defaults', {}))
for port in ports:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

View File

@ -205,7 +205,7 @@ class Runner(RunnerClient):
if self.opts.get('eauth'):
if 'token' in self.opts:
try:
with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_:
with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_:
low['key'] = salt.utils.stringutils.to_unicode(fp_.readline())
except IOError:
low['token'] = self.opts['token']

View File

@ -43,7 +43,12 @@ def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
def _handle_signals(client, signum, sigframe):
try:
# This raises AttributeError on Python 3.4 and 3.5 if there is no current exception.
# Ref: https://bugs.python.org/issue23003
trace = traceback.format_exc()
except AttributeError:
trace = ''
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):

View File

@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
@ -167,6 +167,23 @@ def _l_tag(name, id_):
return _gen_tag(low)
def _calculate_fake_duration():
'''
Generate a NULL duration for when states do not run
but we want the results to be consistent.
'''
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - \
(datetime.datetime.utcnow() - datetime.datetime.now())
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = (utc_finish_time - utc_start_time)
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0
return start_time, duration
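A short usage sketch: the helper gives skipped states the same timing keys as states that actually ran, so returners and outputters see a uniform schema.

start_time, duration = _calculate_fake_duration()
skipped = {'changes': {},
           'result': True,
           'start_time': start_time,  # local time, e.g. '10:17:20.123456'
           'duration': duration,      # near-zero, in milliseconds
           'comment': 'State was not run'}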
def get_accumulator_dir(cachedir):
'''
Return the directory that accumulator data is stored in, creating it if it
@ -2504,8 +2521,11 @@ class State(object):
run_dict = self.pre
else:
run_dict = running
start_time, duration = _calculate_fake_duration()
run_dict[tag] = {'changes': {},
'result': False,
'duration': duration,
'start_time': start_time,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
@ -2588,9 +2608,12 @@ class State(object):
_cmt = 'One or more requisite failed: {0}'.format(
', '.join(six.text_type(i) for i in failed_requisites)
)
start_time, duration = _calculate_fake_duration()
running[tag] = {
'changes': {},
'result': False,
'duration': duration,
'start_time': start_time,
'comment': _cmt,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']
@ -2606,8 +2629,11 @@ class State(object):
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
start_time, duration = _calculate_fake_duration()
pre_ret = {'changes': {},
'result': True,
'duration': duration,
'start_time': start_time,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
@ -2615,15 +2641,21 @@ class State(object):
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == 'onfail':
start_time, duration = _calculate_fake_duration()
running[tag] = {'changes': {},
'result': True,
'duration': duration,
'start_time': start_time,
'comment': 'State was not run because onfail req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'onchanges':
start_time, duration = _calculate_fake_duration()
running[tag] = {'changes': {},
'result': True,
'duration': duration,
'start_time': start_time,
'comment': 'State was not run because none of the onchanges reqs changed',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}

View File

@ -966,7 +966,7 @@ def extracted(name,
if result['result']:
# Get the path of the file in the minion cache
cached = __salt__['cp.is_cached'](source_match)
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
else:
log.debug(
'failed to download %s',

View File

@ -6628,7 +6628,7 @@ def cached(name,
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match)
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache.

View File

@ -192,9 +192,14 @@ def present(name,
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
try:
vdata_decoded = salt.utils.stringutils.to_unicode(vdata, 'utf-8')
except UnicodeDecodeError:
# vdata contains binary data that can't be decoded
vdata_decoded = vdata
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': salt.utils.stringutils.to_unicode(vdata, 'utf-8')}
'Value': vdata_decoded}
# Check for test option
if __opts__['test']:

File diff suppressed because it is too large

View File

@ -1,13 +1,16 @@
# -*- coding: utf-8 -*-
'''
Management zpool
States for managing zpools
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: zpool
:depends: salt.utils.zfs, salt.modules.zpool
:platform: smartos, illumos, solaris, freebsd, linux
.. versionadded:: 2016.3.0
.. versionchanged:: Fluorine
Big refactor to remove duplicate code, better type conversions and improved
consistency in output.
.. code-block:: yaml
@ -23,12 +26,12 @@ Management zpool
- properties:
comment: salty storage pool
- layout:
mirror-0:
/dev/disk0
/dev/disk1
mirror-1:
/dev/disk2
/dev/disk3
- mirror:
- /dev/disk0
- /dev/disk1
- mirror:
- /dev/disk2
- /dev/disk3
partitionpool:
zpool.present:
@ -73,7 +76,6 @@ import logging
# Import Salt libs
from salt.utils.odict import OrderedDict
from salt.modules.zpool import _conform_value
log = logging.getLogger(__name__)
@ -85,15 +87,83 @@ def __virtual__():
'''
Provides zpool state
'''
if 'zpool.create' in __salt__:
return True
if __grains__['zfs_support']:
return __virtualname__
else:
return (
False,
'{0} state module can only be loaded on illumos, Solaris, SmartOS, FreeBSD, Linux, ...'.format(
__virtualname__
)
)
return (False, "The zpool state cannot be loaded: zfs not supported")
def _layout_to_vdev(layout, device_dir=None):
'''
Turn the layout data into a usable vdev specification.
We need to support two ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
.. code::
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3
'''
vdevs = []
# NOTE: check device_dir exists
if device_dir and not os.path.exists(device_dir):
device_dir = None
# NOTE: handle list of OrderedDicts (new layout)
if isinstance(layout, list):
# NOTE: parse each vdev as a tiny layout and just append
for vdev in layout:
if isinstance(vdev, OrderedDict):
vdevs.extend(_layout_to_vdev(vdev, device_dir))
else:
if device_dir and vdev[0] != '/':
vdev = os.path.join(device_dir, vdev)
vdevs.append(vdev)
# NOTE: handle nested OrderedDict (legacy layout)
# this is also used to parse the nested OrderedDicts
# from the new layout
elif isinstance(layout, OrderedDict):
for vdev in layout:
# NOTE: extract the vdev type and disks in the vdev
vdev_type = vdev.split('-')[0]
vdev_disk = layout[vdev]
# NOTE: skip appending the dummy type 'disk'
if vdev_type != 'disk':
vdevs.append(vdev_type)
# NOTE: ensure the disks are a list (legacy layouts are not)
if not isinstance(vdev_disk, list):
vdev_disk = vdev_disk.split(' ')
# NOTE: also append the actual disks behind the type
# also prepend device_dir to disks if required
for disk in vdev_disk:
if device_dir and disk[0] != '/':
disk = os.path.join(device_dir, disk)
vdevs.append(disk)
# NOTE: we got invalid data for layout
else:
vdevs = None
return vdevs
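A usage sketch of the helper, assuming the device directory exists: both layout styles collapse to the same flat list that can be splatted into zpool.create.

from salt.utils.odict import OrderedDict

new_style = [OrderedDict([('mirror', ['disk0', 'disk1'])]),
             OrderedDict([('mirror', ['disk2', 'disk3'])])]
legacy_style = OrderedDict([('mirror-0', 'disk0 disk1'),
                            ('mirror-1', 'disk2 disk3')])

# both yield: ['mirror', '/dev/disk0', '/dev/disk1',
#              'mirror', '/dev/disk2', '/dev/disk3']
assert _layout_to_vdev(new_style, '/dev') == _layout_to_vdev(legacy_style, '/dev')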
def present(name, properties=None, filesystem_properties=None, layout=None, config=None):
@ -115,13 +185,34 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
The following configuration properties can be toggled in the config parameter.
- import (true) - try to import the pool before creating it if absent
- import_dirs (None) - specify additional locations to scan for devices on import
- device_dir (None, SunOS=/dev/rdsk) - specify device directory to use if not absolute path
- import_dirs (None) - specify additional locations to scan for devices on import (comma-separated)
- device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend to non-absolute device paths
- force (false) - try to force the import or creation
.. note::
Because ID's inside the layout dict must be unique they need to have a suffix.
It is no longer necessary to give a unique name to each top-level vdev; the
old layout format is still supported but no longer recommended.
.. code-block:: yaml
- mirror:
- /tmp/vdisk3
- /tmp/vdisk2
- mirror:
- /tmp/vdisk0
- /tmp/vdisk1
The above yaml will always result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
.. warning::
The legacy format is also still supported but not recommended:
because IDs inside the layout dict must be unique, they need to have a suffix.
.. code-block:: yaml
@ -132,22 +223,16 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
/tmp/vdisk0
/tmp/vdisk1
The above yaml will always result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
.. warning::
Pay attention to the order of your dict!
.. code-block:: yaml
mirror-0:
/tmp/vdisk0
/tmp/vdisk1
/tmp/vdisk2:
- mirror:
- /tmp/vdisk0
- /tmp/vdisk1
- /tmp/vdisk2
The above will result in the following zpool create:
@ -163,60 +248,87 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
'result': None,
'comment': ''}
# config defaults
state_config = config if config else {}
config = {
## config defaults
default_config = {
'import': True,
'import_dirs': None,
'device_dir': None,
'force': False
}
if __grains__['kernel'] == 'SunOS':
config['device_dir'] = '/dev/rdsk'
default_config['device_dir'] = '/dev/dsk'
elif __grains__['kernel'] == 'Linux':
config['device_dir'] = '/dev'
config.update(state_config)
log.debug('zpool.present::%s::config - %s', name, config)
default_config['device_dir'] = '/dev'
# parse layout
if layout:
for root_dev in layout:
if root_dev.count('-') != 1:
continue
layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ')
## merge state config
if config:
default_config.update(config)
config = default_config
log.debug('zpool.present::%s::layout - %s', name, layout)
## ensure properties are zfs values
if properties:
properties = __utils__['zfs.from_auto_dict'](properties)
elif properties is None:
properties = {}
if filesystem_properties:
filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties)
elif filesystem_properties is None:
filesystem_properties = {}
# ensure properties conform to the zfs parsable format
for prop in properties:
properties[prop] = _conform_value(properties[prop], True)
## parse layout
vdevs = _layout_to_vdev(layout, config['device_dir'])
if vdevs:
vdevs.insert(0, name)
# ensure the pool is present
## log configuration
log.debug('zpool.present::%s::config - %s',
name, config)
log.debug('zpool.present::%s::vdevs - %s',
name, vdevs)
log.debug('zpool.present::%s::properties - %s',
name, properties)
log.debug('zpool.present::%s::filesystem_properties - %s',
name, filesystem_properties)
## ensure the pool is present
ret['result'] = False
if __salt__['zpool.exists'](name): # update
## NOTE: don't do anything because this is a test
if __opts__['test']:
ret['result'] = True
if __salt__['zpool.exists'](name):
ret['changes'][name] = 'uptodate'
else:
ret['changes'][name] = 'imported' if config['import'] else 'created'
ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name])
## NOTE: update pool
elif __salt__['zpool.exists'](name):
ret['result'] = True
# retrieve current properties
properties_current = __salt__['zpool.get'](name)[name]
## NOTE: fetch current pool properties
properties_current = __salt__['zpool.get'](name, parsable=True)
# figure out if updates needed
## NOTE: build list of properties to update
properties_update = []
for prop in properties:
## NOTE: skip non-existent properties
if prop not in properties_current:
log.warning('zpool.present::%s::update - unknown property: %s', name, prop)
continue
## NOTE: compare current and wanted value
if properties_current[prop] != properties[prop]:
properties_update.append(prop)
# update properties
## NOTE: update pool properties
for prop in properties_update:
res = __salt__['zpool.set'](name, prop, properties[prop])
# check return
if name in res and prop in res[name] and res[name][prop] == properties[prop]:
if res['set']:
if name not in ret['changes']:
ret['changes'][name] = {}
ret['changes'][name].update(res[name])
ret['changes'][name][prop] = properties[prop]
else:
ret['result'] = False
if ret['comment'] == '':
@ -224,58 +336,47 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
ret['comment'] = '{0} {1}'.format(ret['comment'], prop)
if ret['result']:
ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed'
ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed'
else: # import or create
if config['import']: # try import
log.debug('zpool.present::%s::importing', name)
ret['result'] = __salt__['zpool.import'](
## NOTE: import or create the pool (at least try to anyway)
else:
## NOTE: import pool
if config['import']:
mod_res = __salt__['zpool.import'](
name,
force=config['force'],
dir=config['import_dirs']
dir=config['import_dirs'],
)
ret['result'] = ret['result'].get(name) == 'imported'
ret['result'] = mod_res['imported']
if ret['result']:
ret['changes'][name] = 'imported'
ret['comment'] = 'storage pool {0} was imported'.format(name)
if not ret['result']: # create
if not layout:
ret['comment'] = 'storage pool {0} was not imported, no layout specified for creation'.format(name)
else:
## NOTE: create pool
if not ret['result'] and vdevs:
log.debug('zpool.present::%s::creating', name)
if __opts__['test']:
ret['result'] = True
else:
# construct *vdev parameter for zpool.create
params = []
params.append(name)
for root_dev in layout:
if root_dev.count('-') == 1: # special device
# NOTE: accomidate non existing 'disk' vdev
if root_dev.split('-')[0] != 'disk':
params.append(root_dev.split('-')[0]) # add the type by stripping the ID
for sub_dev in layout[root_dev]: # add all sub devices
if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']):
sub_dev = os.path.join(config['device_dir'], sub_dev)
params.append(sub_dev)
else: # normal device
if '/' not in root_dev and config['device_dir'] and os.path.exists(config['device_dir']):
root_dev = os.path.join(config['device_dir'], root_dev)
params.append(root_dev)
# execute zpool.create
ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties)
if ret['result'].get(name).startswith('created'):
ret['result'] = True
else:
if ret['result'].get(name):
ret['comment'] = ret['result'].get(name)
ret['result'] = False
## NOTE: execute zpool.create
mod_res = __salt__['zpool.create'](
*vdevs,
force=config['force'],
properties=properties,
filesystem_properties=filesystem_properties
)
ret['result'] = mod_res['created']
if ret['result']:
ret['changes'][name] = 'created'
ret['comment'] = 'storage pool {0} was created'.format(name)
elif 'error' in mod_res:
ret['comment'] = mod_res['error']
else:
ret['comment'] = 'could not create storage pool {0}'.format(name)
## NOTE: give up, we cannot import the pool and we do not have a layout to create it
if not ret['result'] and not vdevs:
ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name)
return ret
@ -297,31 +398,36 @@ def absent(name, export=False, force=False):
'result': None,
'comment': ''}
# config defaults
log.debug('zpool.absent::%s::config::force = %s', name, force)
log.debug('zpool.absent::%s::config::export = %s', name, export)
## log configuration
log.debug('zpool.absent::%s::config::force = %s',
name, force)
log.debug('zpool.absent::%s::config::export = %s',
name, export)
# ensure the pool is absent
## ensure the pool is absent
if __salt__['zpool.exists'](name): # looks like we need to do some work
mod_res = {}
ret['result'] = False
if export: # try to export the zpool
# NOTE: handle test
if __opts__['test']:
ret['result'] = True
else:
ret['result'] = __salt__['zpool.export'](name, force=force)
ret['result'] = ret['result'].get(name) == 'exported'
else: # try to destroy the zpool
if __opts__['test']:
ret['result'] = True
# NOTE: try to export the pool
elif export:
mod_res = __salt__['zpool.export'](name, force=force)
ret['result'] = mod_res['exported']
# NOTE: try to destroy the pool
else:
ret['result'] = __salt__['zpool.destroy'](name, force=force)
ret['result'] = ret['result'].get(name) == 'destroyed'
mod_res = __salt__['zpool.destroy'](name, force=force)
ret['result'] = mod_res['destroyed']
if ret['result']: # update the changes and comment
ret['changes'][name] = 'exported' if export else 'destroyed'
ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name])
elif 'error' in mod_res:
ret['comment'] = mod_res['error']
else: # we are looking good
ret['result'] = True

View File

@ -558,6 +558,11 @@ class IPCMessagePublisher(object):
io_loop=self.io_loop
)
self.streams.add(stream)
def discard_after_closed():
self.streams.discard(stream)
stream.set_close_callback(discard_after_closed)
except Exception as exc:
log.error('IPC streaming error: %s', exc)
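The close callback added above keeps self.streams from accumulating dead connections. A minimal sketch of the same pattern (not Salt's actual class; `stream` is expected to be a tornado IOStream):

class Publisher(object):
    def __init__(self):
        self.streams = set()

    def handle_connection(self, stream):
        self.streams.add(stream)
        # bind `stream` via a default argument so each callback
        # discards its own stream, not the last one accepted
        stream.set_close_callback(lambda s=stream: self.streams.discard(s))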

View File

@ -420,6 +420,10 @@ class AESReqServerMixin(object):
log.debug('Host key change detected in open mode.')
with salt.utils.files.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
elif not load['pub']:
log.error('Public key is empty: %s', load['id'])
return {'enc': 'clear',
'load': {'ret': False}}
pub = None

View File

@ -1740,15 +1740,15 @@ def dns_check(addr, port, safe=False, ipv6=None):
def get_context(template, line, num_lines=5, marker=None):
# Late import to avoid circular import.
import salt.utils.versions
import salt.utils.templates
import salt.utils.stringutils
salt.utils.versions.warn_until(
'Neon',
'Use of \'salt.utils.get_context\' detected. This function '
'has been moved to \'salt.utils.templates.get_context\' as of '
'has been moved to \'salt.utils.stringutils.get_context\' as of '
'Salt Oxygen. This warning will be removed in Salt Neon.',
stacklevel=3
)
return salt.utils.templates.get_context(template, line, num_lines, marker)
return salt.utils.stringutils.get_context(template, line, num_lines, marker)
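A usage sketch for the relocated helper, based on the signature shown above (template, line, num_lines=5, marker=None):

import salt.utils.stringutils

template = '\n'.join('line {0}'.format(i) for i in range(1, 11))
# show a few lines of context around line 5 and mark the offending line
print(salt.utils.stringutils.get_context(template, 5, num_lines=2,
                                         marker='    <----'))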
def get_master_key(key_user, opts, skip_perm_errors=False):

View File

@ -269,7 +269,13 @@ def shlex_split(s, **kwargs):
Only split if variable is a string
'''
if isinstance(s, six.string_types):
return shlex.split(s, **kwargs)
# On PY2, shlex.split will fail with unicode types if there are
# non-ascii characters in the string. So, we need to make sure we
# invoke it with a str type, and then decode the resulting string back
# to unicode to return it.
return salt.utils.data.decode(
shlex.split(salt.utils.stringutils.to_str(s), **kwargs)
)
else:
return s
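An illustration of the workaround, assuming a unicode command line with non-ASCII characters (a no-op round trip on Python 3):

import shlex

import salt.utils.data
import salt.utils.stringutils

cmd = 'echo "héllo wörld"'
parts = salt.utils.data.decode(
    shlex.split(salt.utils.stringutils.to_str(cmd))
)
# parts == ['echo', 'héllo wörld']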

View File

@ -114,28 +114,27 @@ def _post_processing(kwargs, skip_translate, invalid):
actual_volumes.sort()
if kwargs.get('port_bindings') is not None \
and (skip_translate is True or
all(x not in skip_translate
for x in ('port_bindings', 'expose', 'ports'))):
and all(x not in skip_translate
for x in ('port_bindings', 'expose', 'ports')):
# Make sure that all ports defined in "port_bindings" are included in
# the "ports" param.
auto_ports = list(kwargs['port_bindings'])
if auto_ports:
actual_ports = []
# Sort list to make unit tests more reliable
for port in auto_ports:
if port in actual_ports:
ports_to_bind = list(kwargs['port_bindings'])
if ports_to_bind:
ports_to_open = set(kwargs.get('ports', []))
ports_to_open.update([helpers.get_port_def(x) for x in ports_to_bind])
kwargs['ports'] = list(ports_to_open)
if 'ports' in kwargs \
and all(x not in skip_translate for x in ('expose', 'ports')):
# TCP ports should only be passed as the port number. Normalize the
# input so a port definition of 80/tcp becomes just 80 instead of
# (80, 'tcp').
for index, _ in enumerate(kwargs['ports']):
try:
if kwargs['ports'][index][1] == 'tcp':
kwargs['ports'][index] = kwargs['ports'][index][0]
except TypeError:
continue
if isinstance(port, six.integer_types):
actual_ports.append((port, 'tcp'))
else:
port, proto = port.split('/')
actual_ports.append((int(port), proto))
actual_ports.sort()
actual_ports = [
port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports
]
kwargs.setdefault('ports', actual_ports)
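A hypothetical walk-through of the normalization loop above: bare TCP tuples collapse to port numbers, other protocols keep their tuple form, and plain integers trigger the TypeError branch and are left alone.

ports = [(80, 'tcp'), (53, 'udp'), 8888]
for index, _ in enumerate(ports):
    try:
        if ports[index][1] == 'tcp':
            ports[index] = ports[index][0]
    except TypeError:
        continue  # bare ints are not subscriptable
assert ports == [80, (53, 'udp'), 8888]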
# Functions below must match names of docker-py arguments
@ -552,13 +551,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument
raise SaltInvocationError(exc.__str__())
new_ports.update([helpers.get_port_def(x, proto)
for x in range(range_start, range_end + 1)])
ordered_new_ports = [
port if proto == 'tcp' else (port, proto) for (port, proto) in sorted(
[(new_port, 'tcp') if isinstance(new_port, six.integer_types) else new_port
for new_port in new_ports]
)
]
return ordered_new_ports
return list(new_ports)
def privileged(val, **kwargs): # pylint: disable=unused-argument

View File

@ -7,7 +7,6 @@ Classes which provide the shared base for GitFS, git_pillar, and winrepo
from __future__ import absolute_import, print_function, unicode_literals
import copy
import contextlib
import distutils
import errno
import fnmatch
import glob
@ -90,9 +89,9 @@ log = logging.getLogger(__name__)
try:
import git
import gitdb
HAS_GITPYTHON = True
GITPYTHON_VERSION = _LooseVersion(git.__version__)
except ImportError:
HAS_GITPYTHON = False
GITPYTHON_VERSION = None
try:
# Squelch warning on cent7 due to them upgrading cffi
@ -100,7 +99,31 @@ try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import pygit2
HAS_PYGIT2 = True
PYGIT2_VERSION = _LooseVersion(pygit2.__version__)
LIBGIT2_VERSION = _LooseVersion(pygit2.LIBGIT2_VERSION)
# Work around upstream bug where bytestrings were being decoded using the
# default encoding (which is usually ascii on Python 2). This was fixed
# on 2 Feb 2018, so releases up to and including 0.26.2 need a workaround.
if PYGIT2_VERSION <= _LooseVersion('0.26.2'):
try:
import pygit2.ffi
import pygit2.remote
except ImportError:
# If we couldn't import these, then we're using an old enough
# version where ffi isn't in use and this workaround would be
# useless.
pass
else:
def __maybe_string(ptr):
if not ptr:
return None
return pygit2.ffi.string(ptr).decode('utf-8')
pygit2.remote.maybe_string = __maybe_string
# Older pygit2 releases did not raise a specific exception class, this
# try/except makes Salt's exception catching work on any supported release.
try:
GitError = pygit2.errors.GitError
except AttributeError:
@ -111,16 +134,17 @@ except Exception as exc:
# to rebuild itself against the newer cffi). Therefore, we simply will
# catch a generic exception, and log the exception if it is anything other
# than an ImportError.
HAS_PYGIT2 = False
PYGIT2_VERSION = None
LIBGIT2_VERSION = None
if not isinstance(exc, ImportError):
log.exception('Failed to import pygit2')
# pylint: enable=import-error
# Minimum versions for backend providers
GITPYTHON_MINVER = '0.3'
PYGIT2_MINVER = '0.20.3'
LIBGIT2_MINVER = '0.20.0'
GITPYTHON_MINVER = _LooseVersion('0.3')
PYGIT2_MINVER = _LooseVersion('0.20.3')
LIBGIT2_MINVER = _LooseVersion('0.20.0')
def enforce_types(key, val):
@ -1841,10 +1865,7 @@ class Pygit2(GitProvider):
'''
Assign attributes for pygit2 callbacks
'''
# pygit2 radically changed fetching in 0.23.2
pygit2_version = pygit2.__version__
if distutils.version.LooseVersion(pygit2_version) >= \
distutils.version.LooseVersion('0.23.2'):
if PYGIT2_VERSION >= _LooseVersion('0.23.2'):
self.remotecallbacks = pygit2.RemoteCallbacks(
credentials=self.credentials)
if not self.ssl_verify:
@ -1859,7 +1880,7 @@ class Pygit2(GitProvider):
'pygit2 does not support disabling the SSL certificate '
'check in versions prior to 0.23.2 (installed: {0}). '
'Fetches for self-signed certificates will fail.'.format(
pygit2_version
PYGIT2_VERSION
)
)
@ -2435,10 +2456,10 @@ class GitBase(object):
Check if GitPython is available and at a compatible version (>= 0.3.0)
'''
def _recommend():
if HAS_PYGIT2 and 'pygit2' in self.git_providers:
if PYGIT2_VERSION and 'pygit2' in self.git_providers:
log.error(_RECOMMEND_PYGIT2, self.role, self.role)
if not HAS_GITPYTHON:
if not GITPYTHON_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, is GitPython '
@ -2449,18 +2470,14 @@ class GitBase(object):
elif 'gitpython' not in self.git_providers:
return False
# pylint: disable=no-member
gitver = _LooseVersion(git.__version__)
minver = _LooseVersion(GITPYTHON_MINVER)
# pylint: enable=no-member
errors = []
if gitver < minver:
if GITPYTHON_VERSION < GITPYTHON_MINVER:
errors.append(
'{0} is configured, but the GitPython version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
GITPYTHON_MINVER,
git.__version__
GITPYTHON_VERSION
)
)
if not salt.utils.path.which('git'):
@ -2486,10 +2503,10 @@ class GitBase(object):
Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0.
'''
def _recommend():
if HAS_GITPYTHON and 'gitpython' in self.git_providers:
if GITPYTHON_VERSION and 'gitpython' in self.git_providers:
log.error(_RECOMMEND_GITPYTHON, self.role, self.role)
if not HAS_PYGIT2:
if not PYGIT2_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, are pygit2 '
@ -2500,31 +2517,23 @@ class GitBase(object):
elif 'pygit2' not in self.git_providers:
return False
# pylint: disable=no-member
pygit2ver = _LooseVersion(pygit2.__version__)
pygit2_minver = _LooseVersion(PYGIT2_MINVER)
libgit2ver = _LooseVersion(pygit2.LIBGIT2_VERSION)
libgit2_minver = _LooseVersion(LIBGIT2_MINVER)
# pylint: enable=no-member
errors = []
if pygit2ver < pygit2_minver:
if PYGIT2_VERSION < PYGIT2_MINVER:
errors.append(
'{0} is configured, but the pygit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
PYGIT2_MINVER,
pygit2.__version__
PYGIT2_VERSION
)
)
if libgit2ver < libgit2_minver:
if LIBGIT2_VERSION < LIBGIT2_MINVER:
errors.append(
'{0} is configured, but the libgit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
LIBGIT2_MINVER,
pygit2.LIBGIT2_VERSION
LIBGIT2_VERSION
)
)
if not salt.utils.path.which('git'):
@ -2749,15 +2758,20 @@ class GitFS(GitBase):
return fnd
salt.fileserver.wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
try:
with salt.utils.files.fopen(blobshadest, 'r') as fp_:
sha = salt.utils.stringutils.to_unicode(fp_.read())
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
with salt.utils.files.fopen(lk_fn, 'w'):
pass
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
@ -2829,17 +2843,24 @@ class GitFS(GitBase):
load['saltenv'],
'{0}.hash.{1}'.format(relpath,
self.opts['hash_type']))
if not os.path.isfile(hashdest):
if not os.path.exists(os.path.dirname(hashdest)):
try:
with salt.utils.files.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
try:
os.makedirs(os.path.dirname(hashdest))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
ret['hsum'] = salt.utils.hashutils.get_hash(path, self.opts['hash_type'])
with salt.utils.files.fopen(hashdest, 'w+') as fp_:
fp_.write(ret['hsum'])
return ret
else:
with salt.utils.files.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
def _file_lists(self, load, form):
'''

View File

@ -491,6 +491,7 @@ def query(url,
req_kwargs['ca_certs'] = ca_bundle
max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body'])
connect_timeout = opts.get('http_connect_timeout', salt.config.DEFAULT_MINION_OPTS['http_connect_timeout'])
timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout'])
client_argspec = None
@ -532,6 +533,7 @@ def query(url,
allow_nonstandard_methods=True,
streaming_callback=streaming_callback,
header_callback=header_callback,
connect_timeout=connect_timeout,
request_timeout=timeout,
proxy_host=proxy_host,
proxy_port=proxy_port,
@ -567,7 +569,7 @@ def query(url,
not isinstance(result_text, six.text_type):
result_text = result_text.decode(res_params['charset'])
ret['body'] = result_text
if 'Set-Cookie' in result_headers.keys() and cookies is not None:
if 'Set-Cookie' in result_headers and cookies is not None:
result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
for item in result_cookies:
sess_cookies.set_cookie(item)
@ -897,12 +899,10 @@ def parse_cookie_header(header):
for cookie in cookies:
name = None
value = None
for item in cookie:
for item in list(cookie):
if item in attribs:
continue
name = item
value = cookie[item]
del cookie[name]
value = cookie.pop(item)
# cookielib.Cookie() requires an epoch
if 'expires' in cookie:
@ -910,7 +910,7 @@ def parse_cookie_header(header):
# Fill in missing required fields
for req in reqd:
if req not in cookie.keys():
if req not in cookie:
cookie[req] = ''
if cookie['version'] == '':
cookie['version'] = 0
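A usage sketch based on the call site earlier in this file (header value invented): the parsed cookies can be fed straight into a CookieJar.

from salt.utils.http import parse_cookie_header

header = 'session_id=abc123; Path=/; HttpOnly'
for cookie in parse_cookie_header(header):
    print(cookie.name, cookie.value)  # e.g. session_id abc123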

View File

@ -850,6 +850,24 @@ class SerializerExtension(Extension, object):
value = six.text_type(value)
try:
return salt.utils.data.decode(salt.utils.yaml.safe_load(value))
except salt.utils.yaml.YAMLError as exc:
msg = 'Encountered error loading yaml: '
try:
# Reported line is off by one, add 1 to correct it
line = exc.problem_mark.line + 1
buf = exc.problem_mark.buffer
problem = exc.problem
except AttributeError:
# No context information available in the exception, fall back
# to the stringified version of the exception.
msg += six.text_type(exc)
else:
msg += '{0}\n'.format(problem)
msg += salt.utils.stringutils.get_context(
buf,
line,
marker=' <======================')
raise TemplateRuntimeError(msg)
except AttributeError:
raise TemplateRuntimeError(
'Unable to load yaml from {0}'.format(value))
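# Minimal PyYAML sketch (assumes plain PyYAML; salt not required) of the
# exception attributes the handler above relies on: MarkedYAMLError carries
# 'problem' plus a 'problem_mark' whose line is 0-based and whose buffer
# holds the original input used to build the context snippet.
import yaml
try:
    yaml.safe_load('foo: [1, 2')
except yaml.YAMLError as exc:
    print(exc.problem)                # e.g. "expected ',' or ']' ..."
    print(exc.problem_mark.line + 1)  # reported line is off by one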

View File

@ -47,6 +47,8 @@ import salt.exceptions
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
logger = logging.getLogger(__name__)
def _sorted(mixins_or_funcs):
return sorted(
@ -64,8 +66,8 @@ class MixInMeta(type):
instance = super(MixInMeta, mcs).__new__(mcs, name, bases, attrs)
if not hasattr(instance, '_mixin_setup'):
raise RuntimeError(
'Don\'t subclass {0} in {1} if you\'re not going to use it '
'as a salt parser mix-in.'.format(mcs.__name__, name)
"Don't subclass {0} in {1} if you're not going "
"to use it as a salt parser mix-in.".format(mcs.__name__, name)
)
return instance
@ -206,7 +208,7 @@ class OptionParser(optparse.OptionParser, object):
try:
process_option_func()
except Exception as err: # pylint: disable=broad-except
logging.getLogger(__name__).exception(err)
logger.exception(err)
self.error(
'Error while processing {0}: {1}'.format(
process_option_func, traceback.format_exc(err)
@ -218,7 +220,7 @@ class OptionParser(optparse.OptionParser, object):
try:
mixin_after_parsed_func(self)
except Exception as err: # pylint: disable=broad-except
logging.getLogger(__name__).exception(err)
logger.exception(err)
self.error(
'Error while processing {0}: {1}'.format(
mixin_after_parsed_func, traceback.format_exc(err)
@ -226,7 +228,7 @@ class OptionParser(optparse.OptionParser, object):
)
if self.config.get('conf_file', None) is not None: # pylint: disable=no-member
logging.getLogger(__name__).debug(
logger.debug(
'Configuration file path: %s',
self.config['conf_file'] # pylint: disable=no-member
)
@ -259,12 +261,10 @@ class OptionParser(optparse.OptionParser, object):
try:
mixin_before_exit_func(self)
except Exception as err: # pylint: disable=broad-except
logger = logging.getLogger(__name__)
logger.exception(err)
logger.error(
'Error while processing %s: %s',
mixin_before_exit_func, traceback.format_exc(err)
)
logger.error('Error while processing %s: %s',
six.text_type(mixin_before_exit_func),
traceback.format_exc(err))
if self._setup_mp_logging_listener_ is True:
# Stop logging through the queue
log.shutdown_multiprocessing_logging()
@ -399,17 +399,13 @@ class SaltfileMixIn(six.with_metaclass(MixInMeta, object)):
return
if not os.path.isfile(self.options.saltfile):
self.error(
'\'{0}\' file does not exist.\n'.format(self.options.saltfile)
)
self.error("'{0}' file does not exist.\n".format(self.options.saltfile))
# Make sure we have an absolute path
self.options.saltfile = os.path.abspath(self.options.saltfile)
# Make sure we let the user know that we will be loading a Saltfile
logging.getLogger(__name__).info(
'Loading Saltfile from \'%s\'', self.options.saltfile
)
logger.info("Loading Saltfile from '%s'", six.text_type(self.options.saltfile))
try:
saltfile_config = config._read_conf_file(saltfile)
@ -488,8 +484,7 @@ class HardCrashMixin(six.with_metaclass(MixInMeta, object)):
hard_crash = os.environ.get('SALT_HARD_CRASH', False)
self.add_option(
'--hard-crash', action='store_true', default=hard_crash,
help=('Raise any original exception rather than exiting gracefully. '
'Default: %default.')
help='Raise any original exception rather than exiting gracefully. Default: %default.'
)
@ -500,9 +495,8 @@ class NoParseMixin(six.with_metaclass(MixInMeta, object)):
no_parse = os.environ.get('SALT_NO_PARSE', '')
self.add_option(
'--no-parse', default=no_parse,
help=('Comma-separated list of named CLI arguments (i.e. '
'argname=value) which should not be parsed as Python '
'data types'),
help='Comma-separated list of named CLI arguments (i.e. argname=value) '
'which should not be parsed as Python data types',
metavar='argname1,argname2,...',
)
@ -527,11 +521,10 @@ class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)):
config_dir = os.environ.get(self._default_config_dir_env_var_, None)
if not config_dir:
config_dir = self._default_config_dir_
logging.getLogger(__name__).debug('SYSPATHS setup as: %s', syspaths.CONFIG_DIR)
logger.debug('SYSPATHS setup as: %s', six.text_type(syspaths.CONFIG_DIR))
self.add_option(
'-c', '--config-dir', default=config_dir,
help=('Pass in an alternative configuration directory. Default: '
'\'%default\'.')
help="Pass in an alternative configuration directory. Default: '%default'."
)
def process_config_dir(self):
@ -539,7 +532,7 @@ class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)):
if not os.path.isdir(self.options.config_dir):
# No logging is configured yet
sys.stderr.write(
'WARNING: CONFIG \'{0}\' directory does not exist.\n'.format(
"WARNING: CONFIG '{0}' directory does not exist.\n".format(
self.options.config_dir
)
)
@ -717,7 +710,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# Remove it from config so it inherits from log_file
self.config.pop(self._logfile_config_setting_name_)
if self.config['verify_env']:
if self.config['verify_env'] and self.config['log_level'] not in ('quiet', ):
# Verify the logfile if it was explicitly set but do not try to
# verify the default
if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
@ -794,12 +787,11 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
logfile_basename = os.path.basename(
self._default_logging_logfile_
)
logging.getLogger(__name__).debug(
'The user \'%s\' is not allowed to write to \'%s\'. '
'The log file will be stored in '
'\'~/.salt/\'%s\'.log\'',
current_user, logfile, logfile_basename
)
logger.debug("The user '%s' is not allowed to write to '%s'. "
"The log file will be stored in '~/.salt/'%s'.log'",
six.text_type(current_user),
six.text_type(logfile),
six.text_type(logfile_basename))
logfile = os.path.join(
user_salt_dir, '{0}.log'.format(logfile_basename)
)
@ -814,10 +806,10 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# Not supported on platforms other than Windows.
# Other platforms may use an external tool such as 'logrotate'
if log_rotate_max_bytes != 0:
logging.getLogger(__name__).warning('\'log_rotate_max_bytes\' is only supported on Windows')
logger.warning("'log_rotate_max_bytes' is only supported on Windows")
log_rotate_max_bytes = 0
if log_rotate_backup_count != 0:
logging.getLogger(__name__).warning('\'log_rotate_backup_count\' is only supported on Windows')
logger.warning("'log_rotate_backup_count' is only supported on Windows")
log_rotate_backup_count = 0
# Save the settings back to the configuration
@ -962,22 +954,23 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)):
default=os.path.join(
syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name())
),
help=('Specify the location of the pidfile. Default: \'%default\'.')
help="Specify the location of the pidfile. Default: '%default'."
)
def _mixin_before_exit(self):
if hasattr(self, 'config') and self.config.get('pidfile', ''):
if hasattr(self, 'config') and self.config.get('pidfile'):
# We've loaded and merged options into the configuration, it's safe
# to query about the pidfile
if self.check_pidfile():
try:
os.unlink(self.config['pidfile'])
except OSError as err:
logging.getLogger(__name__).info(
'PIDfile could not be deleted: {0}'.format(
self.config['pidfile']
)
)
# Log error only when running salt-master as a root user.
# Otherwise this can be ignored, since salt-master is able to
# overwrite the PIDfile on the next start.
if not os.getuid():
logger.info('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile']))
logger.debug(six.text_type(err))
def set_pidfile(self):
from salt.utils.process import set_pidfile
@ -2739,31 +2732,31 @@ class SaltCallOptionParser(six.with_metaclass(OptionParserMeta,
role = opts.get('id')
if not role:
emsg = ("Missing role required to setup RAET SaltCaller.")
logging.getLogger(__name__).error(emsg + "\n")
emsg = "Missing role required to setup RAET SaltCaller."
logger.error(emsg)
raise ValueError(emsg)
kind = opts.get('__role') # application kind 'master', 'minion', etc
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for RAET SaltCaller.".format(kind))
logging.getLogger(__name__).error(emsg + "\n")
emsg = "Invalid application kind = '{0}' for RAET SaltCaller.".format(six.text_type(kind))
logger.error(emsg)
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
kinds.APPL_KIND_NAMES[kinds.applKinds.caller], ]:
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]:
lanename = "{0}_{1}".format(role, kind)
else:
emsg = ("Unsupported application kind '{0}' for RAET SaltCaller.".format(kind))
logging.getLogger(__name__).error(emsg + '\n')
emsg = "Unsupported application kind '{0}' for RAET SaltCaller.".format(six.text_type(kind))
logger.error(emsg)
raise ValueError(emsg)
if kind == kinds.APPL_KIND_NAMES[kinds.applKinds.minion]: # minion check
try:
from raet.lane.yarding import Yard # pylint: disable=3rd-party-module-not-gated
ha, dirpath = Yard.computeHa(dirpath, lanename, yardname) # pylint: disable=invalid-name
if (os.path.exists(ha) and
not os.path.isfile(ha) and
not os.path.isdir(ha)): # minion manor yard
if os.path.exists(ha) and not os.path.isfile(ha) and not os.path.isdir(ha): # minion manor yard
return True
except ImportError as ex:
logger.error("Error while importing Yard: %s", ex)
return False
def process_module_dirs(self):
@ -2822,13 +2815,13 @@ class SaltRunOptionParser(six.with_metaclass(OptionParserMeta,
'--async',
default=False,
action='store_true',
help=('Start the runner operation and immediately return control.')
help='Start the runner operation and immediately return control.'
)
self.add_option(
'--skip-grains',
default=False,
action='store_true',
help=('Do not load grains.')
help='Do not load grains.'
)
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format.'
@ -2946,14 +2939,13 @@ class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta,
'-v', '--verbose',
default=False,
action='store_true',
help=('Turn on command verbosity, display jid.')
help='Turn on command verbosity, display jid.'
)
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help=('Return the data from minions as a group after they '
'all return.')
help='Return the data from minions as a group after they all return.'
)
self.add_option(
'-w', '--wipe',

View File

@ -28,7 +28,8 @@ def _load_libcrypto():
Load OpenSSL libcrypto
'''
if sys.platform.startswith('win'):
return cdll.LoadLibrary('libeay32')
# cdll.LoadLibrary on windows requires an 'str' argument
return cdll.LoadLibrary(str('libeay32')) # future lint: disable=blacklisted-function
elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():
return cdll.LoadLibrary(glob.glob(os.path.join(
os.path.dirname(sys.executable),

View File

@ -5,6 +5,7 @@ Functions for manipulating or otherwise processing strings
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import errno
import fnmatch
import logging
@ -203,7 +204,7 @@ def is_binary(data):
@jinja_filter('random_str')
def random(size=32):
key = os.urandom(size)
return key.encode('base64').replace('\n', '')[:size]
return to_unicode(base64.b64encode(key).replace(b'\n', b'')[:size])
@jinja_filter('contains_whitespace')
@ -429,3 +430,38 @@ def print_cli(msg, retries=10, step=0.01):
else:
raise
break
def get_context(template, line, num_lines=5, marker=None):
'''
Returns debugging context around a line in a given string
Returns: string
'''
template_lines = template.splitlines()
num_template_lines = len(template_lines)
# In test mode, a single-line template would return a nonsensical line
# number like 357. Do this sanity check and, if the given line is obviously
# wrong, just return the entire template
if line > num_template_lines:
return template
context_start = max(0, line - num_lines - 1)  # subtract 1 for 0-based indexing
context_end = min(num_template_lines, line + num_lines)
error_line_in_context = line - context_start - 1  # subtract 1 for 0-based index
buf = []
if context_start > 0:
buf.append('[...]')
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append('[...]')
if marker:
buf[error_line_in_context] += marker
return '---\n{0}\n---'.format('\n'.join(buf))
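# Hypothetical usage (example data invented): mark line 12 of a 20-line
# template; truncated ends of the context window are bracketed with '[...]'.
example_template = '\n'.join('line {0}'.format(i) for i in range(1, 21))
print(get_context(example_template, 12, marker=' <=='))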

View File

@ -95,41 +95,6 @@ class AliasedModule(object):
return getattr(self.wrapped, name)
def get_context(template, line, num_lines=5, marker=None):
'''
Returns debugging context around a line in a given string
Returns:: string
'''
template_lines = template.splitlines()
num_template_lines = len(template_lines)
# in test, a single line template would return a crazy line number like,
# 357. do this sanity check and if the given line is obviously wrong, just
# return the entire template
if line > num_template_lines:
return template
context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing
context_end = min(num_template_lines, line + num_lines)
error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx
buf = []
if context_start > 0:
buf.append('[...]')
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append('[...]')
if marker:
buf[error_line_in_context] += marker
return '---\n{0}\n---'.format('\n'.join(buf))
def wrap_tmpl_func(render_str):
def render_tmpl(tmplsrc,
@ -202,11 +167,9 @@ def wrap_tmpl_func(render_str):
tmplsrc.close()
try:
output = render_str(tmplstr, context, tmplpath)
if six.PY2:
output = output.encode(SLS_ENCODING)
if salt.utils.platform.is_windows():
newline = False
if salt.utils.stringutils.to_unicode(output).endswith(('\n', os.linesep)):
if salt.utils.stringutils.to_unicode(output, encoding=SLS_ENCODING).endswith(('\n', os.linesep)):
newline = True
# Write out with Windows newlines
output = os.linesep.join(output.splitlines())
@ -223,9 +186,7 @@ def wrap_tmpl_func(render_str):
if to_str: # then render as string
return dict(result=True, data=output)
with tempfile.NamedTemporaryFile('wb', delete=False, prefix=salt.utils.files.TEMPFILE_PREFIX) as outf:
if six.PY3:
output = output.encode(SLS_ENCODING)
outf.write(output)
outf.write(salt.utils.stringutils.to_bytes(output, encoding=SLS_ENCODING))
# Note: If nothing is replaced or added by the rendering
# function, then the contents of the output file will
# be exactly the same as the input.
@ -315,7 +276,7 @@ def _get_jinja_error(trace, context=None):
out = '\n{0}\n'.format(msg.splitlines()[0])
with salt.utils.files.fopen(template_path) as fp_:
template_contents = salt.utils.stringutils.to_unicode(fp_.read())
out += get_context(
out += salt.utils.stringutils.get_context(
template_contents,
line,
marker=' <======================')
@ -417,15 +378,6 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
template = jinja_env.from_string(tmplstr)
template.globals.update(decoded_context)
output = template.render(**decoded_context)
except jinja2.exceptions.TemplateSyntaxError as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)
if not line:
tmplstr = ''
raise SaltRenderError(
'Jinja syntax error: {0}{1}'.format(exc, out),
line,
tmplstr)
except jinja2.exceptions.UndefinedError as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
out = _get_jinja_error(trace, context=decoded_context)[1]
@ -436,6 +388,16 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
'Jinja variable {0}{1}'.format(
exc, out),
buf=tmplstr)
except (jinja2.exceptions.TemplateRuntimeError,
jinja2.exceptions.TemplateSyntaxError) as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)
if not line:
tmplstr = ''
raise SaltRenderError(
'Jinja syntax error: {0}{1}'.format(exc, out),
line,
tmplstr)
except (SaltInvocationError, CommandExecutionError) as exc:
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=decoded_context)

View File

@ -98,7 +98,7 @@ def _get_vault_connection():
Get the connection details for calling Vault, from local configuration if
it exists, or from the master otherwise
'''
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
def _use_local_config():
log.debug('Using Vault connection details from local config')
try:
if __opts__['vault']['auth']['method'] == 'approle':
@ -123,6 +123,11 @@ def _get_vault_connection():
except KeyError as err:
errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message)
raise salt.exceptions.CommandExecutionError(errmsg)
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
return _use_local_config()
elif any((__opts__['local'], __opts__['file_client'] == 'local', __opts__['master_type'] == 'disable')):
return _use_local_config()
else:
log.debug('Contacting master for Vault connection details')
return _get_token_and_url_from_master()
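# Decision sketch with a hypothetical opts dict: a masterless minion
# (local / file_client / master_type) now also takes the local-config path
# added above instead of asking a master for a token.
opts = {'local': True, 'file_client': 'local', 'master_type': 'disable'}
use_local = any((opts['local'],
                 opts['file_client'] == 'local',
                 opts['master_type'] == 'disable'))
print(use_local)  # True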

View File

@ -31,7 +31,6 @@ import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.user
import salt.utils.versions
log = logging.getLogger(__name__)
@ -204,28 +203,16 @@ def verify_env(
permissive=False,
pki_dir='',
skip_extra=False,
root_dir=ROOT_DIR,
sensitive_dirs=None):
root_dir=ROOT_DIR):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if pki_dir:
salt.utils.versions.warn_until(
'Neon',
'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated '
'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be '
'removed in Salt Neon.'
)
sensitive_dirs = sensitive_dirs or []
sensitive_dirs.append(list(pki_dir))
if salt.utils.platform.is_windows():
return win_verify_env(root_dir,
dirs,
permissive=permissive,
skip_extra=skip_extra,
sensitive_dirs=sensitive_dirs)
skip_extra=skip_extra)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
@ -300,11 +287,10 @@ def verify_env(
# to read in what it needs to integrate.
#
# If the permissions aren't correct, default to the more secure 700.
# If acls are enabled, the sensitive_dirs (i.e. pki_dir, key_dir) needs to
# remain readable, this is still secure because the private keys are still
# only readable by the user running the master
sensitive_dirs = sensitive_dirs or []
if dir_ in sensitive_dirs:
# If acls are enabled, the pki_dir needs to remain readable, this
# is still secure because the private keys are still only readable
# by the user running the master
if dir_ == pki_dir:
smode = stat.S_IMODE(mode.st_mode)
if smode != 448 and smode != 488:
if os.access(dir_, os.W_OK):
@ -555,22 +541,11 @@ def win_verify_env(
dirs,
permissive=False,
pki_dir='',
skip_extra=False,
sensitive_dirs=None):
skip_extra=False):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if pki_dir:
salt.utils.versions.warn_until(
'Neon',
'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated '
'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be '
'removed in Salt Neon.'
)
sensitive_dirs = sensitive_dirs or []
sensitive_dirs.append(list(pki_dir))
import salt.utils.win_functions
import salt.utils.win_dacl
import salt.utils.path
@ -647,9 +622,8 @@ def win_verify_env(
sys.stderr.write(msg.format(dir_, err))
sys.exit(err.errno)
# The senitive_dirs (i.e. pki_dir, key_dir) gets its own permissions
sensitive_dirs = sensitive_dirs or []
if dir_ in sensitive_dirs:
# The PKI dir gets its own permissions
if dir_ == pki_dir:
try:
# Make Administrators group the owner
salt.utils.win_dacl.set_owner(path, 'S-1-5-32-544')

View File

@ -5,6 +5,8 @@ missing functions in other modules
'''
from __future__ import absolute_import, print_function, unicode_literals
import platform
import re
import ctypes
# Import Salt Libs
from salt.exceptions import CommandExecutionError
@ -16,6 +18,7 @@ try:
import win32api
import win32net
import win32security
from win32con import HWND_BROADCAST, WM_SETTINGCHANGE, SMTO_ABORTIFHUNG
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
@ -168,3 +171,122 @@ def enable_ctrl_logoff_handler():
lambda event: True if event == ctrl_logoff_event else False,
1
)
def escape_argument(arg):
'''
Escape the argument for the cmd.exe shell.
See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
First we escape the quote chars to produce an argument suitable for
CommandLineToArgvW. We don't need to do this for simple arguments.
Args:
arg (str): a single command line argument to escape for the cmd.exe shell
Returns:
str: an escaped string suitable to be passed as a program argument to the cmd.exe shell
'''
if not arg or re.search(r'(["\s])', arg):
arg = '"' + arg.replace('"', r'\"') + '"'
return escape_for_cmd_exe(arg)
def escape_for_cmd_exe(arg):
'''
Escape an argument string to be suitable to be passed to
cmd.exe on Windows
This method takes an argument that is expected to already be properly
escaped for the receiving program to parse correctly. This argument
will be further escaped so that it passes through the interpolation
performed by cmd.exe unchanged.
Any meta-characters will be escaped, removing the ability to e.g. use
redirects or variables.
Args:
arg (str): a single command line argument to escape for cmd.exe
Returns:
str: an escaped string suitable to be passed as a program argument to cmd.exe
'''
meta_chars = '()%!^"<>&|'
meta_re = re.compile('(' + '|'.join(re.escape(char) for char in list(meta_chars)) + ')')
meta_map = {char: "^{0}".format(char) for char in meta_chars}
def escape_meta_chars(m):
char = m.group(1)
return meta_map[char]
return meta_re.sub(escape_meta_chars, arg)
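# Illustrative usage (assumed, not part of this commit): quote an argument
# containing whitespace, then caret-escape the cmd.exe metacharacters so it
# survives one round of cmd.exe interpolation.
print(escape_argument('C:\\Program Files\\foo'))  # ^"C:\Program Files\foo^"
print(escape_for_cmd_exe('100%'))                 # 100^%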
def broadcast_setting_change(message='Environment'):
'''
Send a WM_SETTINGCHANGE Broadcast to all Windows
Args:
message (str):
A string value representing the portion of the system that has been
updated and needs to be refreshed. Default is ``Environment``. These
are some common values:
- "Environment" : to effect a change in the environment variables
- "intl" : to effect a change in locale settings
- "Policy" : to effect a change in Group Policy Settings
- a leaf node in the registry
- the name of a section in the ``Win.ini`` file
See lParam within msdn docs for
`WM_SETTINGCHANGE <https://msdn.microsoft.com/en-us/library/ms725497%28VS.85%29.aspx>`_
for more information on Broadcasting Messages.
See GWL_WNDPROC within msdn docs for
`SetWindowLong <https://msdn.microsoft.com/en-us/library/windows/desktop/ms633591(v=vs.85).aspx>`_
for information on how to retrieve those messages.
.. note::
This will only affect new processes that aren't launched by services. To
apply changes to the path or registry to services, the host must be
restarted. The ``salt-minion``, if running as a service, will not see
changes to the environment until the system is restarted. Services
inherit their environment from ``services.exe`` which does not respond
to messaging events. See
`MSDN Documentation <https://support.microsoft.com/en-us/help/821761/changes-that-you-make-to-environment-variables-do-not-affect-services>`_
for more information.
CLI Example:
.. code-block:: python
import salt.utils.win_functions
salt.utils.win_functions.broadcast_setting_change('Environment')
'''
# Listening for the messages sent by this broadcast would involve working
# with the SetWindowLong function. This can be accessed via win32gui or
# through ctypes. You can find examples of how to do this by searching for
# `Accessing GWL_WNDPROC` on the internet. Here are some examples of how
# this might work:
#
# # using win32gui
# import win32con
# import win32gui
# old_function = win32gui.SetWindowLong(window_handle, win32con.GWL_WNDPROC, new_function)
#
# # using ctypes
# import ctypes
# import win32con
# from ctypes import c_long, c_int
# user32 = ctypes.WinDLL('user32', use_last_error=True)
# WndProcType = ctypes.WINFUNCTYPE(c_int, c_long, c_int, c_int)
# new_function = WndProcType
# old_function = user32.SetWindowLongW(window_handle, win32con.GWL_WNDPROC, new_function)
broadcast_message = ctypes.create_unicode_buffer(message)
user32 = ctypes.WinDLL('user32', use_last_error=True)
result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0,
broadcast_message, SMTO_ABORTIFHUNG,
5000, 0)
return result == 1

View File

@ -14,7 +14,8 @@ except (ImportError, ValueError):
HAS_WIN32 = False
if HAS_WIN32:
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32 = ctypes.WinDLL(str('kernel32'), # future lint: disable=blacklisted-function
use_last_error=True)
# Although utils are often directly imported, it is also possible to use the

View File

@ -46,8 +46,8 @@ def __virtual__():
if HAS_WIN32:
# ctypes definitions
kernel32 = ctypes.WinDLL('kernel32')
advapi32 = ctypes.WinDLL('advapi32')
kernel32 = ctypes.WinDLL(str('kernel32')) # future lint: disable=blacklisted-function
advapi32 = ctypes.WinDLL(str('advapi32')) # future lint: disable=blacklisted-function
INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
INVALID_DWORD_VALUE = wintypes.DWORD(-1).value # ~WinAPI

View File

@ -5,10 +5,9 @@ Custom YAML loading in Salt
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import re
import warnings
# Import third party libs
import re
import yaml # pylint: disable=blacklisted-import
from yaml.nodes import MappingNode, SequenceNode
from yaml.constructor import ConstructorError
@ -18,6 +17,8 @@ try:
except Exception:
pass
import salt.utils.stringutils
__all__ = ['SaltYamlSafeLoader', 'load', 'safe_load']
@ -30,7 +31,7 @@ warnings.simplefilter('always', category=DuplicateKeyWarning)
# with code integrated from https://gist.github.com/844388
class SaltYamlSafeLoader(yaml.SafeLoader, object):
class SaltYamlSafeLoader(yaml.SafeLoader):
'''
Create a custom YAML loader that uses the custom constructor. This allows
for the YAML loading defaults to be manipulated based on needs within salt
@ -46,6 +47,9 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object):
self.add_constructor(
'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
self.add_constructor(
'tag:yaml.org,2002:str',
type(self).construct_yaml_str)
self.add_constructor(
'tag:yaml.org,2002:python/unicode',
type(self).construct_unicode)
@ -76,18 +80,25 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object):
self.flatten_mapping(node)
context = 'while constructing a mapping'
mapping = self.dictclass()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError:
err = ('While constructing a mapping {0} found unacceptable '
'key {1}').format(node.start_mark, key_node.start_mark)
raise ConstructorError(err)
raise ConstructorError(
context,
node.start_mark,
"found unacceptable key {0}".format(key_node.value),
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError('Conflicting ID \'{0}\''.format(key))
raise ConstructorError(
context,
node.start_mark,
"found conflicting ID '{0}'".format(key),
key_node.start_mark)
mapping[key] = value
return mapping
@ -112,6 +123,10 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object):
node.value = eval(node.value, {}, {}) # pylint: disable=W0123
return super(SaltYamlSafeLoader, self).construct_scalar(node)
def construct_yaml_str(self, node):
value = self.construct_scalar(node)
return salt.utils.stringutils.to_unicode(value)
def flatten_mapping(self, node):
merge = []
index = 0

714
salt/utils/zfs.py Normal file
View File

@ -0,0 +1,714 @@
# -*- coding: utf-8 -*-
'''
Utility functions for zfs
These functions are for dealing with type conversion and basic execution
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils.stringutils, salt.ext, salt.modules.cmdmod
:platform: illumos,freebsd,linux
.. versionadded:: Fluorine
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import math
import logging
from numbers import Number
# Import salt libs
from salt.utils.decorators import memoize as real_memoize
from salt.utils.odict import OrderedDict
from salt.utils.stringutils import to_num as str_to_num
import salt.modules.cmdmod
# Import 3rd-party libs
from salt.ext.six.moves import zip
# Size conversion data
re_zfs_size = re.compile(r'^(\d+|\d+(?=\d*)\.\d+)([KkMmGgTtPpEe][Bb]?)$')
zfs_size = ['K', 'M', 'G', 'T', 'P', 'E']
log = logging.getLogger(__name__)
def _check_retcode(cmd):
'''
Simple internal wrapper for cmdmod.retcode
'''
return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0
def _exec(**kwargs):
'''
Simple internal wrapper for cmdmod.run
'''
if 'ignore_retcode' not in kwargs:
kwargs['ignore_retcode'] = True
if 'output_loglevel' not in kwargs:
kwargs['output_loglevel'] = 'quiet'
return salt.modules.cmdmod.run_all(**kwargs)
def _merge_last(values, merge_after, merge_with=' '):
'''
Merge all values after X into the last value
'''
if len(values) > merge_after:
values = values[0:(merge_after-1)] + [merge_with.join(values[(merge_after-1):])]
return values
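# Behavior sketch with invented data: with merge_after=3, every field from
# the third onward collapses into one space-joined value, so property
# values containing spaces survive the earlier whitespace split.
print(_merge_last(['compression', 'yes', 'on', '|', 'off'], 3))
# ['compression', 'yes', 'on | off']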
def _property_normalize_name(name):
'''
Normalizes property names
'''
if '@' in name:
name = name[:name.index('@')+1]
return name
def _property_detect_type(name, values):
'''
Detect the datatype of a property
'''
value_type = 'str'
if values.startswith('on | off'):
value_type = 'bool'
elif values.startswith('yes | no'):
value_type = 'bool_alt'
elif values in ['<size>', '<size> | none']:
value_type = 'size'
elif values in ['<count>', '<count> | none', '<guid>']:
value_type = 'numeric'
elif name in ['sharenfs', 'sharesmb', 'canmount']:
value_type = 'bool'
elif name in ['version', 'copies']:
value_type = 'numeric'
return value_type
def _property_create_dict(header, data):
'''
Create a property dict
'''
prop = dict(zip(header, _merge_last(data, len(header))))
prop['name'] = _property_normalize_name(prop['property'])
prop['type'] = _property_detect_type(prop['name'], prop['values'])
prop['edit'] = from_bool(prop['edit'])
if 'inherit' in prop:
prop['inherit'] = from_bool(prop['inherit'])
del prop['property']
return prop
def _property_parse_cmd(cmd, alias=None):
'''
Parse output of zpool/zfs get command
'''
if not alias:
alias = {}
properties = {}
# NOTE: append get to command
if cmd[-3:] != 'get':
cmd += ' get'
# NOTE: parse output
prop_hdr = []
for prop_data in _exec(cmd=cmd)['stderr'].split('\n'):
# NOTE: make the line data more manageable
prop_data = prop_data.lower().split()
# NOTE: skip empty lines
if not prop_data:
continue
# NOTE: parse header
elif prop_data[0] == 'property':
prop_hdr = prop_data
continue
# NOTE: skip lines after data
elif not prop_hdr or prop_data[1] not in ['no', 'yes']:
continue
# NOTE: create property dict
prop = _property_create_dict(prop_hdr, prop_data)
# NOTE: add property to dict
properties[prop['name']] = prop
if prop['name'] in alias:
properties[alias[prop['name']]] = prop
# NOTE: cleanup some duplicate data
del prop['name']
return properties
def _auto(direction, name, value, source='auto', convert_to_human=True):
'''
Internal magic for from_auto and to_auto
'''
# NOTE: check direction
if direction not in ['to', 'from']:
return value
# NOTE: collect property data
props = property_data_zpool()
if source == 'zfs':
props = property_data_zfs()
elif source == 'auto':
props.update(property_data_zfs())
# NOTE: figure out the conversion type
value_type = props[name]['type'] if name in props else 'str'
# NOTE: convert
if value_type == 'size' and direction == 'to':
return globals()['{}_{}'.format(direction, value_type)](value, convert_to_human)
return globals()['{}_{}'.format(direction, value_type)](value)
@real_memoize
def _zfs_cmd():
'''
Return the path of the zfs binary if present
'''
# Get the path to the zfs binary.
return salt.utils.path.which('zfs')
@real_memoize
def _zpool_cmd():
'''
Return the path of the zpool binary if present
'''
# Get the path to the zpool binary.
return salt.utils.path.which('zpool')
def _command(source, command, flags=None, opts=None,
property_name=None, property_value=None,
filesystem_properties=None, pool_properties=None,
target=None):
'''
Build and properly escape a zfs command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')); you do not need to do so
yourself first.
'''
# NOTE: start with the zfs binary and command
cmd = []
cmd.append(_zpool_cmd() if source == 'zpool' else _zfs_cmd())
cmd.append(command)
# NOTE: append flags if we have any
if flags is None:
flags = []
for flag in flags:
cmd.append(flag)
# NOTE: append options
# we pass through 'sorted' to guarantee the same order
if opts is None:
opts = {}
for opt in sorted(opts):
if not isinstance(opts[opt], list):
opts[opt] = [opts[opt]]
for val in opts[opt]:
cmd.append(opt)
cmd.append(to_str(val))
# NOTE: append filesystem properties (really just options with a key/value)
# we pass through 'sorted' to guarantee the same order
if filesystem_properties is None:
filesystem_properties = {}
for fsopt in sorted(filesystem_properties):
cmd.append('-O' if source == 'zpool' else '-o')
cmd.append('{key}={val}'.format(
key=fsopt,
val=to_auto(fsopt, filesystem_properties[fsopt], source='zfs', convert_to_human=False),
))
# NOTE: append pool properties (really just options with a key/value)
# we pass through 'sorted' to guarantee the same order
if pool_properties is None:
pool_properties = {}
for fsopt in sorted(pool_properties):
cmd.append('-o')
cmd.append('{key}={val}'.format(
key=fsopt,
val=to_auto(fsopt, pool_properties[fsopt], source='zpool', convert_to_human=False),
))
# NOTE: append property and value
# the set command takes a key=value pair, we need to support this
if property_name is not None:
if property_value is not None:
if not isinstance(property_name, list):
property_name = [property_name]
if not isinstance(property_value, list):
property_value = [property_value]
for key, val in zip(property_name, property_value):
cmd.append('{key}={val}'.format(
key=key,
val=to_auto(key, val, source=source, convert_to_human=False),
))
else:
cmd.append(property_name)
# NOTE: append the target(s)
if target is not None:
if not isinstance(target, list):
target = [target]
for tgt in target:
# NOTE: skip None list items
# we do not want to skip False and 0!
if tgt is None:
continue
cmd.append(to_str(tgt))
return ' '.join(cmd)
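# Hypothetical example: building a 'zfs set' invocation. The property value
# is normalized via to_auto() (True -> 'on' for the bool-typed 'compression'
# property) and the target is quoted via to_str(). Probing property types
# requires a host with ZFS installed; the leading path depends on where the
# zfs binary lives on that host.
print(_command('zfs', 'set',
               property_name='compression',
               property_value=True,
               target='data/my dataset'))
# e.g. /sbin/zfs set compression=on "data/my dataset"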
@real_memoize
def is_supported():
'''
Check the system for ZFS support
'''
# Check for supported platforms
# NOTE: ZFS on Windows is in development
# NOTE: ZFS on NetBSD is in development
on_supported_platform = False
if salt.utils.platform.is_sunos():
on_supported_platform = True
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
on_supported_platform = True
elif salt.utils.platform.is_darwin() and \
os.path.exists('/Library/Extensions/zfs.kext') and \
os.path.exists('/dev/zfs'):
on_supported_platform = True
# Additional check for the zpool command
return (_zpool_cmd() and on_supported_platform) is True
@real_memoize
def has_feature_flags():
'''
Check if zpool-features is available
'''
# get man location
man = salt.utils.path.which('man')
return _check_retcode('{man} zpool-features'.format(
man=man
)) if man else False
@real_memoize
def property_data_zpool():
'''
Return a dict of zpool properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zpool get' with some supplemental
data that is hardcoded. There is no better way to get this information
aside from reading the code.
'''
# NOTE: man page also mentions a few short forms
property_data = _property_parse_cmd(_zpool_cmd(), {
'allocated': 'alloc',
'autoexpand': 'expand',
'autoreplace': 'replace',
'listsnapshots': 'listsnaps',
'fragmentation': 'frag',
})
# NOTE: zpool status/iostat has a few extra fields
zpool_size_extra = [
'capacity-alloc', 'capacity-free',
'operations-read', 'operations-write',
'bandwith-read', 'bandwith-write',
'read', 'write',
]
zpool_numeric_extra = [
'cksum', 'cap',
]
for prop in zpool_size_extra:
property_data[prop] = {
'edit': False,
'type': 'size',
'values': '<size>',
}
for prop in zpool_numeric_extra:
property_data[prop] = {
'edit': False,
'type': 'numeric',
'values': '<count>',
}
return property_data
@real_memoize
def property_data_zfs():
'''
Return a dict of zfs properties
.. note::
Each property will have an entry with the following info:
- edit : boolean - is this property editable after pool creation
- inherit : boolean - is this property inheritable
- type : str - either bool, bool_alt, size, numeric, or string
- values : str - list of possible values
.. warning::
This data is probed from the output of 'zfs get' with some supplemental
data that is hardcoded. There is no better way to get this information
aside from reading the code.
'''
return _property_parse_cmd(_zfs_cmd(), {
'available': 'avail',
'logicalreferenced': 'lrefer.',
'logicalused': 'lused.',
'referenced': 'refer',
'volblocksize': 'volblock',
'compression': 'compress',
'readonly': 'rdonly',
'recordsize': 'recsize',
'refreservation': 'refreserv',
'reservation': 'reserv',
})
def from_numeric(value):
'''
Convert zfs numeric to python int
'''
if value == 'none':
value = None
elif value:
value = str_to_num(value)
return value
def to_numeric(value):
'''
Convert python int to zfs numeric
'''
value = from_numeric(value)
if value is None:
value = 'none'
return value
def from_bool(value):
'''
Convert zfs bool to python bool
'''
if value in ['on', 'yes']:
value = True
elif value in ['off', 'no']:
value = False
elif value == 'none':
value = None
return value
def from_bool_alt(value):
'''
Convert zfs bool_alt to python bool
'''
return from_bool(value)
def to_bool(value):
'''
Convert python bool to zfs on/off bool
'''
value = from_bool(value)
if isinstance(value, bool):
value = 'on' if value else 'off'
elif value is None:
value = 'none'
return value
def to_bool_alt(value):
'''
Convert python to zfs yes/no value
'''
value = from_bool_alt(value)
if isinstance(value, bool):
value = 'yes' if value else 'no'
elif value is None:
value = 'none'
return value
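# Round-trip sketch: python booleans map onto the two zfs boolean styles,
# and None stands in for the zfs 'none' value in both directions.
print(to_bool(True), to_bool(False), to_bool(None))              # on off none
print(to_bool_alt(True), to_bool_alt('off'), to_bool_alt(None))  # yes no none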
def from_size(value):
'''
Convert zfs size (human readable) to python int (bytes)
'''
match_size = re_zfs_size.match(str(value))
if match_size:
v_unit = match_size.group(2).upper()[0]
v_size = float(match_size.group(1))
v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
value = v_size * v_multiplier
if int(value) == value:
value = int(value)
elif value is not None:
value = str(value)
return from_numeric(value)
def to_size(value, convert_to_human=True):
'''
Convert python int (bytes) to zfs size
NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
'''
value = from_size(value)
if value is None:
value = 'none'
if isinstance(value, Number) and value > 1024 and convert_to_human:
v_power = int(math.floor(math.log(value, 1024)))
v_multiplier = math.pow(1024, v_power)
# NOTE: zfs is a bit odd in how it does the rounding;
# see the libzfs implementation linked above
v_size_float = float(value) / v_multiplier
if v_size_float == int(v_size_float):
value = "{:.0f}{}".format(
v_size_float,
zfs_size[v_power-1],
)
else:
for v_precision in ["{:.2f}{}", "{:.1f}{}", "{:.0f}{}"]:
v_size = v_precision.format(
v_size_float,
zfs_size[v_power-1],
)
if len(v_size) <= 5:
value = v_size
break
return value
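# Conversion sketch: human-readable sizes parse to bytes and render back
# using the libzfs-style rounding implemented above.
print(from_size('1.50K'))                     # 1536
print(to_size(1536))                          # 1.50K
print(to_size(1536, convert_to_human=False))  # 1536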
def from_str(value):
'''
Decode zfs safe string (used for name, path, ...)
'''
if value == 'none':
value = None
if value:
value = str(value)
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
value = value.replace('\\"', '"')
return value
def to_str(value):
'''
Encode zfs safe string (used for name, path, ...)
'''
value = from_str(value)
if value:
value = value.replace('"', '\\"')
if ' ' in value:
value = '"' + value + '"'
elif value is None:
value = 'none'
return value
def from_auto(name, value, source='auto'):
'''
Convert zfs value to python value
'''
return _auto('from', name, value, source)
def to_auto(name, value, source='auto', convert_to_human=True):
'''
Convert python value to zfs value
'''
return _auto('to', name, value, source, convert_to_human)
def from_auto_dict(values, source='auto'):
'''
Pass an entire dictionary to from_auto
.. note::
The key will be passed as the name
'''
for name, value in values.items():
values[name] = from_auto(name, value, source)
return values
def to_auto_dict(values, source='auto', convert_to_human=True):
'''
Pass an entire dictionary to to_auto
.. note::
The key will be passed as the name
'''
for name, value in values.items():
values[name] = to_auto(name, value, source, convert_to_human)
return values
def is_snapshot(name):
'''
Check if name is a valid snapshot name
'''
return from_str(name).count('@') == 1
def is_bookmark(name):
'''
Check if name is a valid bookmark name
'''
return from_str(name).count('#') == 1
def is_dataset(name):
'''
Check if name is a valid filesystem or volume name
'''
return not is_snapshot(name) and not is_bookmark(name)
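# Name classification sketch: exactly one '@' marks a snapshot, exactly one
# '#' marks a bookmark, and anything else is a filesystem/volume dataset.
print(is_snapshot('rpool/data@backup'))  # True
print(is_bookmark('rpool/data#mark'))    # True
print(is_dataset('rpool/data'))          # True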
def zfs_command(command, flags=None, opts=None, property_name=None, property_value=None,
filesystem_properties=None, target=None):
'''
Build and properly escape a zfs command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')); you do not need to do so
yourself first.
'''
return _command(
'zfs',
command=command,
flags=flags,
opts=opts,
property_name=property_name,
property_value=property_value,
filesystem_properties=filesystem_properties,
pool_properties=None,
target=target,
)
def zpool_command(command, flags=None, opts=None, property_name=None, property_value=None,
filesystem_properties=None, pool_properties=None, target=None):
'''
Build and properly escape a zpool command
.. note::
Input is not considered safe and will be passed through
to_auto(from_auto('input_here')); you do not need to do so
yourself first.
'''
return _command(
'zpool',
command=command,
flags=flags,
opts=opts,
property_name=property_name,
property_value=property_value,
filesystem_properties=filesystem_properties,
pool_properties=pool_properties,
target=target,
)
def parse_command_result(res, label=None):
'''
Parse the result of a zpool/zfs command
.. note::
Output on failure is rather predictable.
- retcode > 0
- each 'error' is a line on stderr
- an optional 'Usage:' block under those with hints
We simply check those and return an OrderedDict where
we set label = True|False and error = error_messages
'''
ret = OrderedDict()
if label:
ret[label] = res['retcode'] == 0
if res['retcode'] != 0:
ret['error'] = []
for error in res['stderr'].splitlines():
if error.lower().startswith('usage:'):
break
if error.lower().startswith("use '-f'"):
error = error.replace('-f', 'force=True')
if error.lower().startswith("use '-r'"):
error = error.replace('-r', 'recursive=True')
ret['error'].append(error)
if ret['error']:
ret['error'] = "\n".join(ret['error'])
else:
del ret['error']
return ret
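# Hypothetical failure payload (not real zfs output): stderr lines become
# the 'error' string, and "use '-f'" hints are rewritten to the salt-style
# 'force=True' keyword by the loop above.
res = {'retcode': 1,
       'stderr': "cannot destroy 'rpool/data': dataset is busy\n"
                 "use '-f' to force"}
print(parse_command_result(res, label='destroyed'))
# OrderedDict([('destroyed', False),
#              ('error', "cannot destroy 'rpool/data': dataset is busy\n"
#                        "use 'force=True' to force")])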
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

View File

@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
'''
Integration tests for the Dimension Data cloud provider
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import random
import string
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.paths import FILES
from tests.support.helpers import expensiveTest
# Import Salt Libs
from salt.config import cloud_providers_config
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
def _random_name(size=6):
    '''
    Generates a random cloud instance name
    '''
    return 'cloud-test-' + ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for x in range(size)
    )
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = _random_name()
PROVIDER_NAME = 'dimensiondata'
class DimensionDataTest(ShellCase):
'''
Integration tests for the Dimension Data cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
'''
super(DimensionDataTest, self).setUp()
# check if appropriate cloud provider and profile files are present
profile_str = 'dimensiondata-config'
providers = self.run_cloud('--list-providers')
if profile_str + ':' not in providers:
self.skipTest(
'Configuration file for {0} was not found. Check {0}.conf files '
'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
.format(PROVIDER_NAME)
)
# check if user_id, key, and region are present
config = cloud_providers_config(
os.path.join(
FILES,
'conf',
'cloud.providers.d',
PROVIDER_NAME + '.conf'
)
)
user_id = config[profile_str][PROVIDER_NAME]['user_id']
key = config[profile_str][PROVIDER_NAME]['key']
region = config[profile_str][PROVIDER_NAME]['region']
if user_id == '' or key == '' or region == '':
self.skipTest(
'A user Id, password, and a region '
'must be provided to run these tests. Check '
'tests/integration/files/conf/cloud.providers.d/{0}.conf'
.format(PROVIDER_NAME)
)
def test_list_images(self):
'''
Tests the return of running the --list-images command for the dimensiondata cloud provider
'''
image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
self.assertIn(
'Ubuntu 14.04 2 CPU',
[i.strip() for i in image_list]
)
def test_list_locations(self):
'''
Tests the return of running the --list-locations command for the dimensiondata cloud provider
'''
_list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME))
self.assertIn(
'Australia - Melbourne MCP2',
[i.strip() for i in _list_locations]
)
def test_list_sizes(self):
'''
Tests the return of running the --list-sizes command for the dimensiondata cloud provider
'''
_list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME))
self.assertIn(
'default',
[i.strip() for i in _list_sizes]
)
def test_instance(self):
'''
Test creating an instance on Dimension Data's cloud
'''
# check if instance with salt installed returned
try:
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud('-p dimensiondata-test {0}'.format(INSTANCE_NAME), timeout=500)]
)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
raise
# delete the instance
try:
self.assertIn(
'True',
[i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)]
)
except AssertionError:
raise
# Final clean-up of created instance, in case something went wrong.
# This was originally in a tearDown function, but that didn't make sense
# to run for each test when not all tests create instances.
if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)

View File

@ -36,7 +36,7 @@ class GCETest(ShellCase):
provider = 'gce'
providers = self.run_cloud('--list-providers')
# Create the cloud instance name to be used throughout the tests
self.INSTANCE_NAME = generate_random_name('cloud-test-')
self.INSTANCE_NAME = generate_random_name('cloud-test-').lower()
if profile_str not in providers:
self.skipTest(

View File

@ -45,7 +45,7 @@ def __has_required_azure():
else:
version = LooseVersion(azure.common.__version__)
if REQUIRED_AZURE <= version:
if LooseVersion(REQUIRED_AZURE) <= version:
return True
return False

View File

@ -0,0 +1,11 @@
dimensiondata-test:
provider: dimensiondata-config
image: 42816eb2-9846-4483-95c3-7d7fbddebf2c
size: default
location: AU10
is_started: yes
description: 'Salt Ubuntu test'
network_domain: ''
vlan: ''
ssh_interface: private_ips
auth: ''

View File

@ -0,0 +1,5 @@
dimensiondata-config:
driver: dimensiondata
user_id: ''
key: ''
region: 'dd-au'

View File

@ -0,0 +1 @@


View File

@ -0,0 +1,5 @@
test_non_base_env:
archive.extracted:
- name: {{ pillar['issue45893.name'] }}
- source: salt://issue45893/custom.tar.gz
- keep: False

View File

@ -144,7 +144,7 @@ class CMDModuleTest(ModuleCase):
'''
self.assertEqual(self.run_function('cmd.run',
['bad_command --foo']).rstrip(),
'ERROR: This shell command is not permitted: "bad_command --foo"')
'ERROR: The shell command "bad_command --foo" is not permitted')
def test_script(self):
'''

View File

@ -1379,6 +1379,20 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
@ -1450,6 +1464,18 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# listen tests
def test_listen_requisite(self):

View File

@ -87,24 +87,27 @@ from tests.support.unit import skipIf
# Import Salt libs
import salt.utils.path
import salt.utils.platform
from salt.utils.gitfs import GITPYTHON_MINVER, PYGIT2_MINVER
from salt.utils.versions import LooseVersion
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES as VIRTUALENV_NAMES
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
from salt.utils.gitfs import (
GITPYTHON_VERSION,
GITPYTHON_MINVER,
PYGIT2_VERSION,
PYGIT2_MINVER,
LIBGIT2_VERSION,
LIBGIT2_MINVER
)
# Check for requisite components
try:
import git
HAS_GITPYTHON = \
LooseVersion(git.__version__) >= LooseVersion(GITPYTHON_MINVER)
HAS_GITPYTHON = GITPYTHON_VERSION >= GITPYTHON_MINVER
except ImportError:
HAS_GITPYTHON = False
try:
import pygit2
HAS_PYGIT2 = \
LooseVersion(pygit2.__version__) >= LooseVersion(PYGIT2_MINVER)
except ImportError:
HAS_PYGIT2 = PYGIT2_VERSION >= PYGIT2_MINVER \
and LIBGIT2_VERSION >= LIBGIT2_MINVER
except AttributeError:
HAS_PYGIT2 = False
HAS_SSHD = bool(salt.utils.path.which('sshd'))
@ -419,7 +422,7 @@ class TestGitPythonAuthenticatedHTTP(TestGitPythonHTTP, GitPythonMixin):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(_windows_or_mac(), 'minion is windows or mac')
@skip_if_not_root
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER))
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER))
@skipIf(not HAS_SSHD, 'sshd not present')
class TestPygit2SSH(GitPillarSSHTestBase):
'''
@ -433,12 +436,6 @@ class TestPygit2SSH(GitPillarSSHTestBase):
username = USERNAME
passphrase = PASSWORD
def setUp(self):
super(TestPygit2SSH, self).setUp()
if self.is_el7(): # pylint: disable=E1120
self.skipTest(
'skipped until EPEL7 fixes pygit2/libgit2 version mismatch')
@requires_system_grains
def test_single_source(self, grains):
'''
@ -1199,19 +1196,13 @@ class TestPygit2SSH(GitPillarSSHTestBase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(_windows_or_mac(), 'minion is windows or mac')
@skip_if_not_root
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER))
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER))
@skipIf(not HAS_NGINX, 'nginx not present')
@skipIf(not HAS_VIRTUALENV, 'virtualenv not present')
class TestPygit2HTTP(GitPillarHTTPTestBase):
'''
Test git_pillar with pygit2 using SSH authentication
'''
def setUp(self):
super(TestPygit2HTTP, self).setUp()
if self.is_el7(): # pylint: disable=E1120
self.skipTest(
'skipped until EPEL7 fixes pygit2/libgit2 version mismatch')
def test_single_source(self):
'''
Test using a single ext_pillar repo
@ -1452,7 +1443,7 @@ class TestPygit2HTTP(GitPillarHTTPTestBase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(_windows_or_mac(), 'minion is windows or mac')
@skip_if_not_root
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} required'.format(PYGIT2_MINVER))
@skipIf(not HAS_PYGIT2, 'pygit2 >= {0} and libgit2 >= {1} required'.format(PYGIT2_MINVER, LIBGIT2_MINVER))
@skipIf(not HAS_NGINX, 'nginx not present')
@skipIf(not HAS_VIRTUALENV, 'virtualenv not present')
class TestPygit2AuthenticatedHTTP(GitPillarHTTPTestBase):
@ -1465,12 +1456,6 @@ class TestPygit2AuthenticatedHTTP(GitPillarHTTPTestBase):
user = USERNAME
password = PASSWORD
def setUp(self):
super(TestPygit2AuthenticatedHTTP, self).setUp()
if self.is_el7(): # pylint: disable=E1120
self.skipTest(
'skipped until EPEL7 fixes pygit2/libgit2 version mismatch')
def test_single_source(self):
'''
Test using a single ext_pillar repo

Some files were not shown because too many files have changed in this diff