Mirror of https://github.com/valitydev/salt.git, synced 2024-11-08 17:33:54 +00:00

Merge branch 'develop' into docker-present-pass-kwargs

Commit 49677b4854
.github/stale.yml (vendored): 4 changes
@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
# 1100 is approximately 3 years
daysUntilStale: 1100
# 1060 is approximately 2 years and 11 months
daysUntilStale: 1060

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
@@ -4,9 +4,14 @@
      "name": "ryan-lane",
      "files": ["salt/**/*boto*.py"],
      "skipTeamPrs": false
    },
    {
      "name": "tkwilliams",
      "files": ["salt/**/*boto*.py"],
      "skipTeamPrs": false
    }
  ],
  "skipTitle": "Merge forward",
  "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"]
  "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh", "basepi"]
}
README.rst: 12 changes
@@ -67,10 +67,11 @@ Engage SaltStack
`SaltConf`_, **User Groups and Meetups** - SaltStack has a vibrant and `global
community`_ of customers, users, developers and enthusiasts. Connect with other
Salted folks in your area of the world, or join `SaltConf16`_, the SaltStack
annual user conference, April 19-21 in Salt Lake City. Please let us know if
you would like to start a user group or if we should add your existing
SaltStack user group to this list by emailing: info@saltstack.com
Salted folks in your area of the world, or join `SaltConf`_, the SaltStack
annual user conference held in Salt Lake City. Please visit the `SaltConf`_ site
for details of our next conference. Also, please let us know if you would like
to start a user group or if we should add your existing SaltStack user group to
this list by emailing: info@saltstack.com

**SaltStack Training** - Get access to proprietary `SaltStack education
offerings`_ through instructor-led training offered on-site, virtually or at
@@ -89,9 +90,8 @@ services`_ offerings.
* LinkedIn Group - `<https://www.linkedin.com/groups/4877160>`_
* Google+ - `<https://plus.google.com/b/112856352920437801867/+SaltStackInc/posts>`_

.. _SaltConf: http://www.youtube.com/user/saltstack
.. _global community: http://www.meetup.com/pro/saltstack/
.. _SaltConf16: http://saltconf.com/
.. _SaltConf: http://saltconf.com/
.. _SaltStack education offerings: http://saltstack.com/training/
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
.. _SaltStack professional services: http://saltstack.com/services/
@@ -59,15 +59,14 @@

# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", etc.
#extension_modules: <no default>
# "states", "returners", "engines", "utils", etc.
#extension_modules: /var/cache/salt/master/extmods

# Directory for custom modules. This directory can contain subdirectories for
# each of Salt's module types such as "runners", "output", "wheel", "modules",
# "states", "returners", "engines", etc.
# "states", "returners", "engines", "utils", etc.
# Like 'extension_modules' but can take an array of paths
#module_dirs: <no default>
# - /var/cache/salt/minion/extmods
#module_dirs: []

# Verify and set permissions on configuration directories at startup:
#verify_env: True
@@ -183,8 +183,8 @@ The directory to store the pki authentication keys.

Directory for custom modules. This directory can contain subdirectories for
each of Salt's module types such as ``runners``, ``output``, ``wheel``,
``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to
:conf_master:`root_dir`.
``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc.
This path is appended to :conf_master:`root_dir`.

.. code-block:: yaml
@@ -3787,7 +3787,7 @@ they were created by a different master.
Default: ``True``

Normally, when processing :ref:`git_pillar remotes
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
<git-pillar-configuration>`, if more than one repo under the same ``git``
section in the ``ext_pillar`` configuration refers to the same pillar
environment, then each repo in a given environment will have access to the
other repos' files to be referenced in their top files. However, it may be
@@ -451,7 +451,7 @@ For example:
    '''
    Only load if git exists on the system
    '''
    if salt.utils.which('git') is None:
    if salt.utils.path.which('git') is None:
        return (False,
                'The git execution module cannot be loaded: git unavailable.')
    else:
@@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f
    for chunk in chunks:
        # The state runtime uses "tags" to track completed jobs, it may
        # look familiar with the _|-
        tag = salt.utils.gen_state_tag(chunk)
        tag = __utils__['state.gen_tag'](chunk)
        if tag in running:
            # Already ran the pkg state, skip aggregation
            continue
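For context, the documentation this hunk touches builds toward a complete
``mod_aggregate`` function; below is a minimal sketch of one, using the same
``__utils__['state.gen_tag']`` call as above. The pkgs-merging details are
simplified from the real pkg state, not taken from this commit:

.. code-block:: python

    def mod_aggregate(low, chunks, running):
        '''
        Simplified sketch of a pkg-style mod_aggregate; the real pkg state
        performs additional filtering before merging.
        '''
        if low.get('fun') not in ('installed', 'removed'):
            return low
        pkgs = []
        for chunk in chunks:
            tag = __utils__['state.gen_tag'](chunk)
            if tag in running:
                # Already ran the pkg state, skip aggregation
                continue
            if chunk.get('state') == 'pkg' and chunk.get('fun') == low['fun']:
                # Merge the other chunk's package into this run and mark it
                # as handled by the aggregate
                pkgs.append(chunk['name'])
                chunk['__agg__'] = True
        if pkgs:
            low.setdefault('pkgs', []).extend(pkgs)
        return low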
@@ -15,9 +15,7 @@ More information about Azure is located at `http://www.windowsazure.com/

Dependencies
============
* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc6
* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
* The python-requests library, for Python < 2.7.9.
* Azure Cli ```pip install 'azure-cli>=2.0.12'```
* A Microsoft Azure account
* `Salt <https://github.com/saltstack/salt>`_
@@ -260,6 +260,13 @@ The Salt development team will back-port bug fixes made to ``develop`` to the
current release branch if the contributor cannot create the pull request
against that branch.

Release Branches
----------------

For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases.

Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well.
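To make the naming rule concrete, here is a sketch of the flow for a
hypothetical v2017.7.2 release. The commands are illustrative only, not the
release team's actual tooling:

.. code-block:: bash

    # Release branch name is the tag name minus the leading "v"
    git checkout -b 2017.7.2 2017.7    # create the release branch
    # ...apply the fixes chosen for this release to 2017.7.2...
    git tag v2017.7.2                  # tag the release from the branch
    git checkout 2017.7
    git merge 2017.7.2                 # merge the fixes forward into the parent branch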

Keeping Salt Forks in Sync
==========================
@@ -106,7 +106,7 @@ bringing with it the ability to access authenticated repositories.

Using the new features will require updates to the git ext_pillar
configuration, further details can be found in the :ref:`pillar.git_pillar
<git-pillar-2015-8-0-and-later>` docs.
<git-pillar-configuration>` docs.

.. _pygit2: https://github.com/libgit2/pygit2
|
@ -617,6 +617,11 @@ Profitbricks Cloud Updated Dependency
|
||||
The minimum version of the ``profitbrick`` python package for the ``profitbricks``
|
||||
cloud driver has changed from 3.0.0 to 3.1.0.
|
||||
|
||||
Azure Cloud Updated Dependency
|
||||
------------------------------
|
||||
|
||||
The azure sdk used for the ``azurearm`` cloud driver now depends on ``azure-cli>=2.0.12``
|
||||
|
||||
Module Deprecations
|
||||
===================
|
||||
|
||||
@ -708,6 +713,13 @@ during blackout. This release adds support for using this feature in the grains
|
||||
as well, by using special grains keys ``minion_blackout`` and
|
||||
``minion_blackout_whitelist``.
|
||||
|
||||
Pillar Deprecations
|
||||
-------------------
|
||||
|
||||
The legacy configuration for ``git_pillar`` has been removed. Please use the new
|
||||
configuration for ``git_pillar``, which is documented in the external pillar module
|
||||
for :mod:`git_pillar <salt.pillar.git_pillar>`.
|
||||
|
||||
Utils Deprecations
|
||||
==================
|
||||
|
||||
|
@@ -1110,15 +1110,8 @@ Using Git as an External Pillar Source
The git external pillar (a.k.a. git_pillar) has been rewritten for the 2015.8.0
release. This rewrite brings with it pygit2_ support (allowing for access to
authenticated repositories), as well as more granular support for per-remote
configuration.

To make use of the new features, changes to the git ext_pillar configuration
must be made. The new configuration schema is detailed :ref:`here
<git-pillar-2015-8-0-and-later>`.

For Salt releases before 2015.8.0, click :ref:`here <git-pillar-pre-2015-8-0>`
for documentation.

configuration. This configuration schema is detailed :ref:`here
<git-pillar-configuration>`.
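As a quick orientation for the schema being linked to, a minimal sketch of a
post-rewrite git_pillar configuration follows; the repository URL is
hypothetical:

.. code-block:: yaml

    ext_pillar:
      - git:
        # one or more '<branch> <url>' remotes, each accepting per-remote options
        - master https://example.com/org/pillar.git
        - dev https://example.com/org/pillar.git:
          - env: dev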

.. _faq-gitfs-bug:
@@ -91,8 +91,8 @@ Also you could even write your utility modules in object oriented fashion:

# -*- coding: utf-8 -*-
'''
My utils module
---------------
My OOP-style utils module
-------------------------

This module contains common functions for use in my other custom types.
'''
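A sketch of how such a module might continue past the docstring; the class and
helper names are illustrative, not part of this commit:

.. code-block:: python

    class MyUtil(object):
        '''
        Illustrative helper class; other custom modules obtain instances
        of it through the __utils__ dunder.
        '''
        def __init__(self, prefix):
            self.prefix = prefix

        def tag(self, name):
            return '{0}/{1}'.format(self.prefix, name)


    def get_util(prefix):
        # Called from execution or state modules as
        # __utils__['mymodule.get_util']('some-prefix')
        return MyUtil(prefix)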
@@ -31,6 +31,7 @@ import salt.transport.client
import salt.utils
import salt.utils.files
import salt.utils.minions
import salt.utils.versions
import salt.payload

log = logging.getLogger(__name__)
@@ -215,8 +216,9 @@ class LoadAuth(object):
            acl_ret = self.__get_acl(load)
            tdata['auth_list'] = acl_ret

        if 'groups' in load:
            tdata['groups'] = load['groups']
        groups = self.get_groups(load)
        if groups:
            tdata['groups'] = groups

        return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)

@@ -333,7 +335,7 @@ class LoadAuth(object):
            return False
        return True

    def get_auth_list(self, load):
    def get_auth_list(self, load, token=None):
        '''
        Retrieve access list for the user specified in load.
        The list is built by eauth module or from master eauth configuration.
@@ -341,30 +343,37 @@ class LoadAuth(object):
        list if the user has no rights to execute anything on this master and returns non-empty list
        if user is allowed to execute particular functions.
        '''
        # Get auth list from token
        if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
            return token['auth_list']
        # Get acl from eauth module.
        auth_list = self.__get_acl(load)
        if auth_list is not None:
            return auth_list

        if load['eauth'] not in self.opts['external_auth']:
        eauth = token['eauth'] if token else load['eauth']
        if eauth not in self.opts['external_auth']:
            # No matching module is allowed in config
            log.warning('Authorization failure occurred.')
            return None

        name = self.load_name(load)  # The username we are attempting to auth with
        groups = self.get_groups(load)  # The groups this user belongs to
        eauth_config = self.opts['external_auth'][load['eauth']]
        if groups is None or groups is False:
        if token:
            name = token['name']
            groups = token.get('groups')
        else:
            name = self.load_name(load)  # The username we are attempting to auth with
            groups = self.get_groups(load)  # The groups this user belongs to
        eauth_config = self.opts['external_auth'][eauth]
        if not groups:
            groups = []
        group_perm_keys = [item for item in eauth_config if item.endswith('%')]  # The configured auth groups

        # First we need to know if the user is allowed to proceed via any of their group memberships.
        group_auth_match = False
        for group_config in group_perm_keys:
            group_config = group_config.rstrip('%')
            for group in groups:
                if group == group_config:
                    group_auth_match = True
            if group_config.rstrip('%') in groups:
                group_auth_match = True
                break
        # If a group_auth_match is set it means only that we have a
        # user which matches at least one or more of the groups defined
        # in the configuration file.
@@ -410,6 +419,13 @@ class Authorize(object):
    The authorization engine used by EAUTH
    '''
    def __init__(self, opts, load, loadauth=None):
        salt.utils.versions.warn_until(
            'Neon',
            'The \'Authorize\' class has been deprecated. Please use the '
            '\'LoadAuth\', \'Resolver\', or \'AuthUser\' classes instead. '
            'Support for the \'Authorize\' class will be removed in Salt '
            '{version}.'
        )
        self.opts = salt.config.master_config(opts['conf_file'])
        self.load = load
        self.ckminions = salt.utils.minions.CkMinions(opts)
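The net effect of threading ``token`` through ``get_auth_list`` is a fixed
lookup order that is easy to miss in the diff; a simplified sketch with
stand-in helpers, not the full method:

.. code-block:: python

    def resolve_auth_list(opts, load, token, eauth_acl, config_acl):
        # 1. An ACL cached inside the token wins when keep_acl_in_token is set
        if token and opts['keep_acl_in_token'] and 'auth_list' in token:
            return token['auth_list']
        # 2. Next, an ACL computed by the eauth module itself (eauth_acl
        #    stands in for self.__get_acl(load))
        if eauth_acl is not None:
            return eauth_acl
        # 3. Finally the external_auth section of the master config, keyed by
        #    the backend recorded in the token when one is present
        eauth = token['eauth'] if token else load['eauth']
        if eauth not in opts['external_auth']:
            return None
        return config_acl  # stands in for the group/user permission walk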
@@ -378,7 +378,7 @@ def groups(username, **kwargs):
    search_results = bind.search_s(search_base,
                                   ldap.SCOPE_SUBTREE,
                                   search_string,
                                   [_config('accountattributename'), 'cn'])
                                   [_config('accountattributename'), 'cn', _config('groupattribute')])
    for _, entry in search_results:
        if username in entry[_config('accountattributename')]:
            group_list.append(entry['cn'][0])
@@ -390,7 +390,7 @@ def groups(username, **kwargs):

    # Only test user auth on first call for job.
    # 'show_jid' only exists on first payload so we can use that for the conditional.
    if 'show_jid' in kwargs and not _bind(username, kwargs['password'],
    if 'show_jid' in kwargs and not _bind(username, kwargs.get('password'),
                                          anonymous=_config('auth_by_group_membership_only', mandatory=False) and
                                          _config('anonymous', mandatory=False)):
        log.error('LDAP username and password do not match')
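The ``_config()`` keys referenced in this hunk map onto the master's
``auth.ldap.*`` settings; an illustrative sketch of the relevant ones follows
(the values are examples, not defaults):

.. code-block:: yaml

    auth.ldap.accountattributename: memberUid
    auth.ldap.groupattribute: memberOf
    auth.ldap.auth_by_group_membership_only: True
    auth.ldap.anonymous: False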
@@ -59,7 +59,7 @@ class Beacon(object):

            if 'enabled' in current_beacon_config:
                if not current_beacon_config['enabled']:
                    log.trace('Beacon {0} disabled'.format(mod))
                    log.trace('Beacon %s disabled', mod)
                    continue
                else:
                    # remove 'enabled' item before processing the beacon
@@ -68,7 +68,7 @@ class Beacon(object):
                else:
                    self._remove_list_item(config[mod], 'enabled')

            log.trace('Beacon processing: {0}'.format(mod))
            log.trace('Beacon processing: %s', mod)
            fun_str = '{0}.beacon'.format(mod)
            validate_str = '{0}.validate'.format(mod)
            if fun_str in self.beacons:
@@ -77,10 +77,10 @@ class Beacon(object):
                if interval:
                    b_config = self._trim_config(b_config, mod, 'interval')
                    if not self._process_interval(mod, interval):
                        log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
                        log.trace('Skipping beacon %s. Interval not reached.', mod)
                        continue
                if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
                    log.trace('Evaluating if beacon {0} should be skipped due to a state run.'.format(mod))
                    log.trace('Evaluating if beacon %s should be skipped due to a state run.', mod)
                    b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
                    is_running = False
                    running_jobs = salt.utils.minion.running(self.opts)
@@ -90,10 +90,10 @@ class Beacon(object):
                    if is_running:
                        close_str = '{0}.close'.format(mod)
                        if close_str in self.beacons:
                            log.info('Closing beacon {0}. State run in progress.'.format(mod))
                            log.info('Closing beacon %s. State run in progress.', mod)
                            self.beacons[close_str](b_config[mod])
                        else:
                            log.info('Skipping beacon {0}. State run in progress.'.format(mod))
                            log.info('Skipping beacon %s. State run in progress.', mod)
                        continue
                # Update __grains__ on the beacon
                self.beacons[fun_str].__globals__['__grains__'] = grains
@@ -120,7 +120,7 @@ class Beacon(object):
                if runonce:
                    self.disable_beacon(mod)
            else:
                log.warning('Unable to process beacon {0}'.format(mod))
                log.warning('Unable to process beacon %s', mod)
        return ret

    def _trim_config(self, b_config, mod, key):
@@ -149,19 +149,19 @@ class Beacon(object):
        Process beacons with intervals
        Return True if a beacon should be run on this loop
        '''
        log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
        log.trace('Processing interval %s for beacon mod %s', interval, mod)
        loop_interval = self.opts['loop_interval']
        if mod in self.interval_map:
            log.trace('Processing interval in map')
            counter = self.interval_map[mod]
            log.trace('Interval counter: {0}'.format(counter))
            log.trace('Interval counter: %s', counter)
            if counter * loop_interval >= interval:
                self.interval_map[mod] = 1
                return True
            else:
                self.interval_map[mod] += 1
        else:
            log.trace('Interval process inserting mod: {0}'.format(mod))
            log.trace('Interval process inserting mod: %s', mod)
            self.interval_map[mod] = 1
        return False

@@ -214,6 +214,45 @@ class Beacon(object):

        return True

    def list_available_beacons(self):
        '''
        List the available beacons
        '''
        _beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
                    for _beacon in self.beacons if '.beacon' in _beacon]

        # Fire the complete event back along with the list of beacons
        evt = salt.utils.event.get_event('minion', opts=self.opts)
        evt.fire_event({'complete': True, 'beacons': _beacons},
                       tag='/salt/minion/minion_beacons_list_available_complete')

        return True

    def validate_beacon(self, name, beacon_data):
        '''
        Return available beacon functions
        '''
        validate_str = '{}.validate'.format(name)
        # Run the validate function if it's available,
        # otherwise there is a warning about it being missing
        if validate_str in self.beacons:
            if 'enabled' in beacon_data:
                del beacon_data['enabled']
            valid, vcomment = self.beacons[validate_str](beacon_data)
        else:
            log.info('Beacon %s does not have a validate'
                     ' function, skipping validation.', name)
            valid = True

        # Fire the complete event back along with the list of beacons
        evt = salt.utils.event.get_event('minion', opts=self.opts)
        evt.fire_event({'complete': True,
                        'vcomment': vcomment,
                        'valid': valid},
                       tag='/salt/minion/minion_beacon_validation_complete')

        return True

    def add_beacon(self, name, beacon_data):
        '''
        Add a beacon item
@@ -224,9 +263,9 @@ class Beacon(object):

        if name in self.opts['beacons']:
            log.info('Updating settings for beacon '
                     'item: {0}'.format(name))
                     'item: %s', name)
        else:
            log.info('Added new beacon item {0}'.format(name))
            log.info('Added new beacon item %s', name)
        self.opts['beacons'].update(data)

        # Fire the complete event back along with updated list of beacons
@@ -245,7 +284,7 @@ class Beacon(object):
        data[name] = beacon_data

        log.info('Updating settings for beacon '
                 'item: {0}'.format(name))
                 'item: %s', name)
        self.opts['beacons'].update(data)

        # Fire the complete event back along with updated list of beacons
@@ -261,7 +300,7 @@ class Beacon(object):
        '''

        if name in self.opts['beacons']:
            log.info('Deleting beacon item {0}'.format(name))
            log.info('Deleting beacon item %s', name)
            del self.opts['beacons'][name]

            # Fire the complete event back along with updated list of beacons
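The new ``list_available_beacons`` and ``validate_beacon`` methods report
their results over the minion event bus rather than as return values. A sketch
of picking up the completion event, reusing the tag string and the
``salt.utils.event.get_event`` call visible in the code above:

.. code-block:: python

    import salt.config
    import salt.utils.event

    opts = salt.config.minion_config('/etc/salt/minion')
    bus = salt.utils.event.get_event('minion', opts=opts)

    # Block for up to 30 seconds waiting for the listing fired above
    data = bus.get_event(
        wait=30,
        tag='/salt/minion/minion_beacons_list_available_complete')
    if data and data.get('complete'):
        print(data['beacons'])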
salt/cache/consul.py (vendored): 9 changes
@@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store.

.. versionadded:: 2016.11.2

:depends: python-consul >= 0.2.0

It is up to the system administrator to set up and configure the Consul
infrastructure. All that is needed for this plugin is a working Consul agent
with read-write access to the key-value store.
@@ -81,8 +83,11 @@ def __virtual__():
        'verify': __opts__.get('consul.verify', True),
    }

    global api
    api = consul.Consul(**consul_kwargs)
    try:
        global api
        api = consul.Consul(**consul_kwargs)
    except AttributeError:
        return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed")

    return __virtualname__
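For context, the plugin is driven entirely by configuration; a hedged sketch
of the relevant settings (``consul.verify`` is the key visible in the hunk
above, the other keys are assumed connection options of the plugin):

.. code-block:: yaml

    cache: consul
    consul.host: 127.0.0.1
    consul.port: 8500
    consul.verify: True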
@@ -20,6 +20,7 @@ import sys
import salt.client
import salt.output
import salt.utils
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.itertools
import salt.utils.minions
@@ -112,7 +113,7 @@ class SaltCP(object):
            err = 'The referenced file, {0} is not available.'.format(fn_)
            sys.stderr.write(err + '\n')
            sys.exit(42)
        with salt.utils.fopen(fn_, 'r') as fp_:
        with salt.utils.files.fopen(fn_, 'r') as fp_:
            data = fp_.read()
        return {fn_: data}
@@ -16,7 +16,8 @@ import copy as pycopy
# Import Salt libs
import salt.exceptions
import salt.minion
import salt.utils
import salt.utils  # Can be removed once daemonize, get_specific_user, format_call are moved
import salt.utils.args
import salt.utils.doc
import salt.utils.error
import salt.utils.event
@@ -25,6 +26,7 @@ import salt.utils.job
import salt.utils.lazy
import salt.utils.platform
import salt.utils.process
import salt.utils.state
import salt.utils.versions
import salt.transport
import salt.log.setup
@@ -396,7 +398,7 @@ class SyncClientMixin(object):
                data[u'success'] = True
                if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
                    # some functions can return boolean values
                    data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
                    data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
            except (Exception, SystemExit) as ex:
                if isinstance(ex, salt.exceptions.NotImplemented):
                    data[u'return'] = str(ex)
@@ -1049,8 +1049,7 @@ class Single(object):
            opts_pkg[u'id'],
            opts_pkg.get(u'environment', u'base')
        )
        pillar_dirs = {}
        pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        pillar_data = pillar.compile_pillar()

        # TODO: cache minion opts in datap in master.py
        data = {u'opts': opts_pkg,
@@ -5,7 +5,7 @@ correct cloud modules
'''

# Import python libs
from __future__ import absolute_import, print_function, generators
from __future__ import absolute_import, print_function, generators, unicode_literals
import os
import copy
import glob
@@ -65,6 +65,7 @@ import salt.config as config
import salt.utils
import salt.utils.cloud
import salt.utils.files
from salt.utils.versions import LooseVersion
from salt.ext import six
import salt.version
from salt.exceptions import (
@@ -79,7 +80,6 @@ HAS_LIBS = False
try:
    import salt.utils.msazure
    from salt.utils.msazure import object_to_dict
    import azure.storage
    from azure.common.credentials import (
        UserPassCredentials,
        ServicePrincipalCredentials,
@@ -115,7 +115,9 @@ try:
    from azure.mgmt.storage import StorageManagementClient
    from azure.mgmt.web import WebSiteManagementClient
    from msrestazure.azure_exceptions import CloudError
    HAS_LIBS = True
    from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount
    from azure.cli import core
    HAS_LIBS = LooseVersion(core.__version__) >= LooseVersion("2.0.12")
except ImportError:
    pass
# pylint: enable=wrong-import-position,wrong-import-order
@@ -1728,7 +1730,7 @@ def list_containers(call=None, kwargs=None):  # pylint: disable=unused-argument
    if not storconn:
        storconn = get_conn(StorageManagementClient)

    storageaccount = azure.storage.CloudStorageAccount(
    storageaccount = CloudStorageAccount(
        config.get_cloud_config_value(
            'storage_account',
            get_configured_provider(), __opts__, search_global=False
@@ -1769,7 +1771,7 @@ def list_blobs(call=None, kwargs=None):  # pylint: disable=unused-argument
            'A container must be specified'
        )

    storageaccount = azure.storage.CloudStorageAccount(
    storageaccount = CloudStorageAccount(
        config.get_cloud_config_value(
            'storage_account',
            get_configured_provider(), __opts__, search_global=False
@@ -1809,7 +1811,7 @@ def delete_blob(call=None, kwargs=None):  # pylint: disable=unused-argument
            'A blob must be specified'
        )

    storageaccount = azure.storage.CloudStorageAccount(
    storageaccount = CloudStorageAccount(
        config.get_cloud_config_value(
            'storage_account',
            get_configured_provider(), __opts__, search_global=False
@@ -87,7 +87,6 @@ import pprint
import time

# Import salt libs
import salt.utils
import salt.config as config
from salt.exceptions import (
    SaltCloudConfigError,
@@ -96,6 +95,7 @@ from salt.exceptions import (
    SaltCloudExecutionTimeout,
    SaltCloudSystemExit
)
import salt.utils.files

# Import salt.cloud libs
import salt.utils.cloud
@@ -805,7 +805,7 @@ def load_public_key(vm_):
            )
        )

    with salt.utils.fopen(public_key_filename, 'r') as public_key:
    with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
        key = public_key.read().replace('\n', '')

    return key
@@ -24,7 +24,6 @@ import logging
# Import salt libs
from salt.exceptions import SaltCloudSystemExit
import salt.config as config
import salt.utils.cloud as cloud

# Import Third Party Libs
try:
@@ -136,7 +135,7 @@ def create(vm_info):
    )

    log.debug("Going to fire event: starting create")
    cloud.fire_event(
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_info['name']),
@@ -151,7 +150,7 @@ def create(vm_info):
        'clone_from': vm_info['clonefrom']
    }

    cloud.fire_event(
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_info['name']),
@@ -174,10 +173,10 @@ def create(vm_info):
    vm_info['key_filename'] = key_filename
    vm_info['ssh_host'] = ip

    res = cloud.bootstrap(vm_info, __opts__)
    res = __utils__['cloud.bootstrap'](vm_info)
    vm_result.update(res)

    cloud.fire_event(
    __utils__['cloud.fire_event'](
        'event',
        'created machine',
        'salt/cloud/{0}/created'.format(vm_info['name']),
@@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
        "private_ips",
        "public_ips",
    ]
    return cloud.list_nodes_select(
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full('function'), attributes, call,
    )

@@ -278,7 +277,7 @@ def list_nodes_select(call=None):
    """
    Return a list of the VMs that are on the provider, with select fields
    """
    return cloud.list_nodes_select(
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full('function'), __opts__['query.selection'], call,
    )

@@ -306,7 +305,7 @@ def destroy(name, call=None):
    if not vb_machine_exists(name):
        return "{0} doesn't exist and can't be deleted".format(name)

    cloud.fire_event(
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
@@ -317,7 +316,7 @@ def destroy(name, call=None):

    vb_destroy_machine(name)

    cloud.fire_event(
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
@@ -32,6 +32,8 @@ import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.minions
@@ -64,44 +66,19 @@ def init_git_pillar(opts):
    ret = []
    for opts_dict in [x for x in opts.get('ext_pillar', [])]:
        if 'git' in opts_dict:
            if isinstance(opts_dict['git'], six.string_types):
                # Legacy git pillar code
                try:
                    import git
                except ImportError:
                    return ret
                parts = opts_dict['git'].strip().split()
                try:
                    br = parts[0]
                    loc = parts[1]
                except IndexError:
                    log.critical(
                        'Unable to extract external pillar data: {0}'
                        .format(opts_dict['git'])
                    )
            try:
                pillar = salt.utils.gitfs.GitPillar(opts)
                pillar.init_remotes(
                    opts_dict['git'],
                    git_pillar.PER_REMOTE_OVERRIDES,
                    git_pillar.PER_REMOTE_ONLY
                )
                ret.append(pillar)
            except FileserverConfigError:
                if opts.get('git_pillar_verify_config', True):
                    raise
                else:
                    ret.append(
                        git_pillar._LegacyGitPillar(
                            br,
                            loc,
                            opts
                        )
                    )
            else:
                # New git_pillar code
                try:
                    pillar = salt.utils.gitfs.GitPillar(opts)
                    pillar.init_remotes(
                        opts_dict['git'],
                        git_pillar.PER_REMOTE_OVERRIDES,
                        git_pillar.PER_REMOTE_ONLY
                    )
                    ret.append(pillar)
                except FileserverConfigError:
                    if opts.get('git_pillar_verify_config', True):
                        raise
                    else:
                        log.critical('Could not initialize git_pillar')
                    log.critical('Could not initialize git_pillar')
    return ret
@@ -705,8 +682,7 @@ class RemoteFuncs(object):
            load.get('ext'),
            self.mminion.functions,
            pillar_override=load.get('pillar_override', {}))
        pillar_dirs = {}
        data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        data = pillar.compile_pillar()
        if self.opts.get('minion_data_cache', False):
            self.cache.store('minions/{0}'.format(load['id']),
                             'data',
@@ -1043,12 +1019,7 @@ class LocalFuncs(object):
                return dict(error=dict(name=err_name,
                                       message='Authentication failure of type "token" occurred.'))
            username = token['name']
            if self.opts['keep_acl_in_token'] and 'auth_list' in token:
                auth_list = token['auth_list']
            else:
                load['eauth'] = token['eauth']
                load['username'] = username
                auth_list = self.loadauth.get_auth_list(load)
            auth_list = self.loadauth.get_auth_list(load, token)
        else:
            auth_type = 'eauth'
            err_name = 'EauthAuthenticationError'
@@ -1090,12 +1061,7 @@ class LocalFuncs(object):
                return dict(error=dict(name=err_name,
                                       message='Authentication failure of type "token" occurred.'))
            username = token['name']
            if self.opts['keep_acl_in_token'] and 'auth_list' in token:
                auth_list = token['auth_list']
            else:
                load['eauth'] = token['eauth']
                load['username'] = username
                auth_list = self.loadauth.get_auth_list(load)
            auth_list = self.loadauth.get_auth_list(load, token)
        elif 'eauth' in load:
            auth_type = 'eauth'
            err_name = 'EauthAuthenticationError'
@@ -1205,12 +1171,7 @@ class LocalFuncs(object):
            return ''

        # Get acl from eauth module.
        if self.opts['keep_acl_in_token'] and 'auth_list' in token:
            auth_list = token['auth_list']
        else:
            extra['eauth'] = token['eauth']
            extra['username'] = token['name']
            auth_list = self.loadauth.get_auth_list(extra)
        auth_list = self.loadauth.get_auth_list(extra, token)

        # Authorize the request
        if not self.ckminions.auth_check(
@@ -543,7 +543,7 @@ def _virtual(osdata):
        command = 'system_profiler'
        args = ['SPDisplaysDataType']
    elif osdata['kernel'] == 'SunOS':
        virtinfo = salt.utils.which('virtinfo')
        virtinfo = salt.utils.path.which('virtinfo')
        if virtinfo:
            try:
                ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo))
@@ -805,6 +805,8 @@ def _virtual(osdata):
                grains['virtual_subtype'] = 'ovirt'
            elif 'Google' in output:
                grains['virtual'] = 'gce'
            elif 'BHYVE' in output:
                grains['virtual'] = 'bhyve'
        except IOError:
            pass
    elif osdata['kernel'] == 'FreeBSD':
@@ -148,7 +148,7 @@ def _linux_disks():


def _windows_disks():
    wmic = salt.utils.which('wmic')
    wmic = salt.utils.path.which('wmic')

    namespace = r'\\root\microsoft\windows\storage'
    path = 'MSFT_PhysicalDisk'
@@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`.
from __future__ import absolute_import

# Import python libs
import json
import os
import socket

@@ -47,16 +48,30 @@ def _search(prefix="latest/"):
    Recursively look up all grains in the metadata server
    '''
    ret = {}
    for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'):
    linedata = http.query(os.path.join(HOST, prefix))
    if 'body' not in linedata:
        return ret
    for line in linedata['body'].split('\n'):
        if line.endswith('/'):
            ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
        elif prefix == 'latest/':
            # (gtmanfred) The first level should have a forward slash since
            # they have stuff underneath. This will not be doubled up though,
            # because lines ending with a slash are checked first.
            ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
        elif line.endswith(('dynamic', 'meta-data')):
            ret[line] = _search(prefix=os.path.join(prefix, line))
        elif '=' in line:
            key, value = line.split('=')
            ret[value] = _search(prefix=os.path.join(prefix, key))
        else:
            ret[line] = http.query(os.path.join(HOST, prefix, line))['body']
            retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
            # (gtmanfred) This try except block is slightly faster than
            # checking if the string starts with a curly brace
            try:
                ret[line] = json.loads(retdata)
            except ValueError:
                ret[line] = retdata
    return ret
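Per the module docstring quoted in the first hunk header above, these grains
are opt-in; enabling them is a single minion setting:

.. code-block:: yaml

    # /etc/salt/minion
    metadata_server_grains: True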
@@ -447,8 +447,8 @@ def optional_args(proxy=None):
        device2:
          True
    '''
    opt_args = _get_device_grain('optional_args', proxy=proxy)
    if _FORBIDDEN_OPT_ARGS:
    opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
    if opt_args and _FORBIDDEN_OPT_ARGS:
        for arg in _FORBIDDEN_OPT_ARGS:
            opt_args.pop(arg, None)
    return {'optional_args': opt_args}
@@ -315,7 +315,7 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
        '''
        try:
            for pillar in self.git_pillar:
                pillar.update()
                pillar.fetch_remotes()
        except Exception as exc:
            log.error(u'Exception caught while updating git_pillar',
                      exc_info=True)
@@ -471,18 +471,18 @@ class Master(SMaster):
                pass

        if self.opts.get(u'git_pillar_verify_config', True):
            non_legacy_git_pillars = [
            git_pillars = [
                x for x in self.opts.get(u'ext_pillar', [])
                if u'git' in x
                and not isinstance(x[u'git'], six.string_types)
            ]
            if non_legacy_git_pillars:
            if git_pillars:
                try:
                    new_opts = copy.deepcopy(self.opts)
                    from salt.pillar.git_pillar \
                        import PER_REMOTE_OVERRIDES as per_remote_overrides, \
                        PER_REMOTE_ONLY as per_remote_only
                    for repo in non_legacy_git_pillars:
                    for repo in git_pillars:
                        new_opts[u'ext_pillar'] = [repo]
                        try:
                            git_pillar = salt.utils.gitfs.GitPillar(new_opts)
@@ -1304,7 +1304,6 @@ class AESFuncs(object):
            return False
        load[u'grains'][u'id'] = load[u'id']

        pillar_dirs = {}
        pillar = salt.pillar.get_pillar(
            self.opts,
            load[u'grains'],
@@ -1313,7 +1312,7 @@ class AESFuncs(object):
            ext=load.get(u'ext'),
            pillar_override=load.get(u'pillar_override', {}),
            pillarenv=load.get(u'pillarenv'))
        data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
        data = pillar.compile_pillar()
        self.fs_.update_opts()
        if self.opts.get(u'minion_data_cache', False):
            self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']),
@@ -1677,12 +1676,7 @@ class ClearFuncs(object):
                    message=u'Authentication failure of type "token" occurred.'))

            # Authorize
            if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
                auth_list = token[u'auth_list']
            else:
                clear_load[u'eauth'] = token[u'eauth']
                clear_load[u'username'] = token[u'name']
                auth_list = self.loadauth.get_auth_list(clear_load)
            auth_list = self.loadauth.get_auth_list(clear_load, token)

            if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
                return dict(error=dict(name=u'TokenAuthenticationError',
@@ -1745,12 +1739,7 @@ class ClearFuncs(object):
                    message=u'Authentication failure of type "token" occurred.'))

            # Authorize
            if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
                auth_list = token[u'auth_list']
            else:
                clear_load[u'eauth'] = token[u'eauth']
                clear_load[u'username'] = token[u'name']
                auth_list = self.loadauth.get_auth_list(clear_load)
            auth_list = self.loadauth.get_auth_list(clear_load, token)
            if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
                return dict(error=dict(name=u'TokenAuthenticationError',
                    message=(u'Authentication failure of type "token" occurred for '
@@ -1867,12 +1856,7 @@ class ClearFuncs(object):
            return u''

        # Get acl
        if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
            auth_list = token[u'auth_list']
        else:
            extra[u'eauth'] = token[u'eauth']
            extra[u'username'] = token[u'name']
            auth_list = self.loadauth.get_auth_list(extra)
        auth_list = self.loadauth.get_auth_list(extra, token)

        # Authorize the request
        if not self.ckminions.auth_check(
@@ -1930,6 +1930,10 @@ class Minion(MinionBase):
            self.beacons.disable_beacon(name)
        elif func == u'list':
            self.beacons.list_beacons()
        elif func == u'list_available':
            self.beacons.list_available_beacons()
        elif func == u'validate_beacon':
            self.beacons.validate_beacon(name, beacon_data)

    def environ_setenv(self, tag, data):
        '''
@@ -447,11 +447,15 @@ def config(name, config, edit=True):
        salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
    '''

    configs = []
    for entry in config:
        key = next(six.iterkeys(entry))
        configs = _parse_config(entry[key], key)
        if edit:
            with salt.utils.files.fopen(name, 'w') as configfile:
                configfile.write('# This file is managed by Salt.\n')
                configfile.write(configs)
        return configs
        configs.append(_parse_config(entry[key], key))

    # Python auto-correct line endings
    configstext = "\n".join(configs)
    if edit:
        with salt.utils.files.fopen(name, 'w') as configfile:
            configfile.write('# This file is managed by Salt.\n')
            configfile.write(configstext)
    return configstext
salt/modules/aptly.py (new file): 498 lines
@ -0,0 +1,498 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Aptly Debian repository manager.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
'''
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
||||
# Import salt libs
|
||||
from salt.exceptions import SaltInvocationError
|
||||
import salt.utils.path
|
||||
import salt.utils.stringutils as stringutils
|
||||
|
||||
_DEFAULT_CONFIG_PATH = '/etc/aptly.conf'
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'aptly'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only works on systems with the aptly binary in the system path.
|
||||
'''
|
||||
if salt.utils.path.which('aptly'):
|
||||
return __virtualname__
|
||||
return (False, 'The aptly binaries required cannot be found or are not installed.')
|
||||
|
||||
|
||||
def _cmd_run(cmd):
|
||||
'''
|
||||
Run the aptly command.
|
||||
|
||||
:return: The string output of the command.
|
||||
:rtype: str
|
||||
'''
|
||||
cmd.insert(0, 'aptly')
|
||||
cmd_ret = __salt__['cmd.run_all'](cmd, ignore_retcode=True)
|
||||
|
||||
if cmd_ret['retcode'] != 0:
|
||||
_LOG.debug('Unable to execute command: %s\nError: %s', cmd,
|
||||
cmd_ret['stderr'])
|
||||
|
||||
return cmd_ret['stdout']
|
||||
|
||||
|
||||
def _format_repo_args(comment=None, component=None, distribution=None,
|
||||
uploaders_file=None, saltenv='base'):
|
||||
'''
|
||||
Format the common arguments for creating or editing a repository.
|
||||
|
||||
:param str comment: The description of the repository.
|
||||
:param str component: The default component to use when publishing.
|
||||
:param str distribution: The default distribution to use when publishing.
|
||||
:param str uploaders_file: The repository upload restrictions config.
|
||||
:param str saltenv: The environment the file resides in.
|
||||
|
||||
:return: A list of the arguments formatted as aptly arguments.
|
||||
:rtype: list
|
||||
'''
|
||||
ret = list()
|
||||
cached_uploaders_path = None
|
||||
settings = {'comment': comment, 'component': component,
|
||||
'distribution': distribution}
|
||||
|
||||
if uploaders_file:
|
||||
cached_uploaders_path = __salt__['cp.cache_file'](uploaders_file, saltenv)
|
||||
|
||||
if not cached_uploaders_path:
|
||||
_LOG.error('Unable to get cached copy of file: %s', uploaders_file)
|
||||
return False
|
||||
|
||||
for setting in settings:
|
||||
if settings[setting] is not None:
|
||||
ret.append('-{}={}'.format(setting, settings[setting]))
|
||||
|
||||
if cached_uploaders_path:
|
||||
ret.append('-uploaders-file={}'.format(cached_uploaders_path))
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def _validate_config(config_path):
|
||||
'''
|
||||
Validate that the configuration file exists and is readable.
|
||||
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
|
||||
:return: None
|
||||
:rtype: None
|
||||
'''
|
||||
_LOG.debug('Checking configuration file: %s', config_path)
|
||||
|
||||
if not os.path.isfile(config_path):
|
||||
message = 'Unable to get configuration file: {}'.format(config_path)
|
||||
_LOG.error(message)
|
||||
raise SaltInvocationError(message)
|
||||
|
||||
|
||||
def get_config(config_path=_DEFAULT_CONFIG_PATH):
|
||||
'''
|
||||
Get the configuration data.
|
||||
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
|
||||
:return: A dictionary containing the configuration data.
|
||||
:rtype: dict
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' aptly.get_config
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
cmd = ['config', 'show', '-config={}'.format(config_path)]
|
||||
|
||||
cmd_ret = _cmd_run(cmd)
|
||||
|
||||
return json.loads(cmd_ret)
|
||||
|
||||
|
||||
def list_repos(config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
|
||||
'''
|
||||
List all of the repos.
|
||||
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
:param bool with_packages: Return a list of packages in the repo.
|
||||
|
||||
:return: A dictionary of the repositories.
|
||||
:rtype: dict
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' aptly.list_repos
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
ret = dict()
|
||||
cmd = ['repo', 'list', '-config={}'.format(config_path), '-raw=true']
|
||||
|
||||
cmd_ret = _cmd_run(cmd)
|
||||
repos = [line.strip() for line in cmd_ret.splitlines()]
|
||||
|
||||
_LOG.debug('Found repositories: %s', len(repos))
|
||||
|
||||
for name in repos:
|
||||
ret[name] = get_repo(name=name, config_path=config_path,
|
||||
with_packages=with_packages)
|
||||
return ret
|
||||
|
||||
|
||||
def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
|
||||
'''
|
||||
Get the details of the repository.
|
||||
|
||||
:param str name: The name of the repository.
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
:param bool with_packages: Return a list of packages in the repo.
|
||||
|
||||
:return: A dictionary containing information about the repository.
|
||||
:rtype: dict
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
ret = dict()
|
||||
cmd = ['repo', 'show', '-config={}'.format(config_path),
|
||||
'-with-packages={}'.format(str(with_packages).lower()),
|
||||
name]
|
||||
|
||||
cmd_ret = _cmd_run(cmd)
|
||||
|
||||
for line in cmd_ret.splitlines():
|
||||
try:
|
||||
# Extract the settings and their values, and attempt to format
|
||||
# them to match their equivalent setting names.
|
||||
items = line.split(':')
|
||||
key = items[0].lower().replace('default', '').strip()
|
||||
key = ' '.join(key.split()).replace(' ', '_')
|
||||
ret[key] = stringutils.to_none(stringutils.to_num(items[1].strip()))
|
||||
except (AttributeError, IndexError):
|
||||
# If the line doesn't have the separator or is otherwise invalid, skip it.
|
||||
_LOG.debug('Skipping line: %s', line)
|
||||
|
||||
if ret:
|
||||
_LOG.debug('Found repository: %s', name)
|
||||
else:
|
||||
_LOG.debug('Unable to find repository: %s', name)
|
||||
return ret
|
||||
|
||||
|
||||
def new_repo(name, config_path=_DEFAULT_CONFIG_PATH, comment=None, component=None,
|
||||
distribution=None, uploaders_file=None, from_snapshot=None,
|
||||
saltenv='base'):
|
||||
'''
|
||||
Create the new repository.
|
||||
|
||||
:param str name: The name of the repository.
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
:param str comment: The description of the repository.
|
||||
:param str component: The default component to use when publishing.
|
||||
:param str distribution: The default distribution to use when publishing.
|
||||
:param str uploaders_file: The repository upload restrictions config.
|
||||
:param str from_snapshot: The snapshot to initialize the repository contents from.
|
||||
:param str saltenv: The environment the file resides in.
|
||||
|
||||
:return: A boolean representing whether all changes succeeded.
|
||||
:rtype: bool
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' aptly.new_repo name="test-repo" comment="Test main repo" component="main" distribution="trusty"
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
current_repo = __salt__['aptly.get_repo'](name=name)
|
||||
|
||||
if current_repo:
|
||||
_LOG.debug('Repository already exists: %s', name)
|
||||
return True
|
||||
|
||||
cmd = ['repo', 'create', '-config={}'.format(config_path)]
|
||||
repo_params = _format_repo_args(comment=comment, component=component,
|
||||
distribution=distribution,
|
||||
uploaders_file=uploaders_file, saltenv=saltenv)
|
||||
cmd.extend(repo_params)
|
||||
cmd.append(name)
|
||||
|
||||
if from_snapshot:
|
||||
cmd.extend(['from', 'snapshot', from_snapshot])
|
||||
|
||||
_cmd_run(cmd)
|
||||
repo = __salt__['aptly.get_repo'](name=name)
|
||||
|
||||
if repo:
|
||||
_LOG.debug('Created repo: %s', name)
|
||||
return True
|
||||
_LOG.error('Unable to create repo: %s', name)
|
||||
return False
|
||||
|
||||
|
||||
def set_repo(name, config_path=_DEFAULT_CONFIG_PATH, comment=None, component=None,
|
||||
distribution=None, uploaders_file=None, saltenv='base'):
|
||||
'''
|
||||
Configure the repository settings.
|
||||
|
||||
:param str name: The name of the repository.
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
:param str comment: The description of the repository.
|
||||
:param str component: The default component to use when publishing.
|
||||
:param str distribution: The default distribution to use when publishing.
|
||||
:param str uploaders_file: The repository upload restrictions config.
|
||||
:param str from_snapshot: The snapshot to initialize the repository contents from.
|
||||
:param str saltenv: The environment the file resides in.
|
||||
|
||||
:return: A boolean representing whether all changes succeeded.
|
||||
:rtype: bool
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' aptly.set_repo name="test-repo" comment="Test universe repo" component="universe" distribution="xenial"
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
failed_settings = dict()
|
||||
|
||||
# Only check for settings that were passed in and skip the rest.
|
||||
settings = {'comment': comment, 'component': component,
|
||||
'distribution': distribution}
|
||||
|
||||
for setting in list(settings):
|
||||
if settings[setting] is None:
|
||||
settings.pop(setting, None)
|
||||
|
||||
current_settings = __salt__['aptly.get_repo'](name=name)
|
||||
|
||||
if not current_settings:
|
||||
_LOG.error('Unable to get repo: %s', name)
|
||||
return False
|
||||
|
||||
# Discard any additional settings that get_repo gives
|
||||
# us that are not present in the provided arguments.
|
||||
for current_setting in list(current_settings):
|
||||
if current_setting not in settings:
|
||||
current_settings.pop(current_setting, None)
|
||||
|
||||
# Check the existing repo settings to see if they already have the desired values.
|
||||
if settings == current_settings:
|
||||
_LOG.debug('Settings already have the desired values for repository: %s', name)
|
||||
return True
|
||||
|
||||
cmd = ['repo', 'edit', '-config={}'.format(config_path)]
|
||||
|
||||
repo_params = _format_repo_args(comment=comment, component=component,
|
||||
distribution=distribution,
|
||||
uploaders_file=uploaders_file, saltenv=saltenv)
|
||||
cmd.extend(repo_params)
|
||||
cmd.append(name)
|
||||
|
||||
_cmd_run(cmd)
|
||||
new_settings = __salt__['aptly.get_repo'](name=name)
|
||||
|
||||
# Check the new repo settings to see if they have the desired values.
|
||||
for setting in settings:
|
||||
if settings[setting] != new_settings[setting]:
|
||||
failed_settings.update({setting: settings[setting]})
|
||||
|
||||
if failed_settings:
|
||||
_LOG.error('Unable to change settings for the repository: %s', name)
|
||||
return False
|
||||
_LOG.debug('Settings successfully changed to the desired values for repository: %s', name)
|
||||
return True
|
||||
|
||||
|
||||
def delete_repo(name, config_path=_DEFAULT_CONFIG_PATH, force=False):
|
||||
'''
|
||||
Remove the repository.
|
||||
|
||||
:param str name: The name of the repository.
|
||||
:param str config_path: The path to the configuration file for the aptly instance.
|
||||
:param bool force: Whether to remove the repository even if it is used as the source
|
||||
of an existing snapshot.
|
||||
|
||||
:return: A boolean representing whether all changes succeeded.
|
||||
:rtype: bool
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' aptly.delete_repo name="test-repo"
|
||||
'''
|
||||
_validate_config(config_path)
|
||||
|
||||
current_repo = __salt__['aptly.get_repo'](name=name)
|
||||
|
||||
if not current_repo:
|
||||
        _LOG.debug('Repository already absent: %s', name)
        return True

    cmd = ['repo', 'drop', '-config={}'.format(config_path),
           '-force={}'.format(str(force).lower()), name]

    _cmd_run(cmd)
    repo = __salt__['aptly.get_repo'](name=name)

    if repo:
        _LOG.error('Unable to remove repo: %s', name)
        return False
    _LOG.debug('Removed repo: %s', name)
    return True


def list_mirrors(config_path=_DEFAULT_CONFIG_PATH):
    '''
    Get a list of all the mirrors.

    :param str config_path: The path to the configuration file for the aptly instance.

    :return: A list of the mirror names.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' aptly.list_mirrors
    '''
    _validate_config(config_path)

    cmd = ['mirror', 'list', '-config={}'.format(config_path), '-raw=true']

    cmd_ret = _cmd_run(cmd)
    ret = [line.strip() for line in cmd_ret.splitlines()]

    _LOG.debug('Found mirrors: %s', len(ret))
    return ret


def list_published(config_path=_DEFAULT_CONFIG_PATH):
    '''
    Get a list of all the published repositories.

    :param str config_path: The path to the configuration file for the aptly instance.

    :return: A list of the published repository names.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' aptly.list_published
    '''
    _validate_config(config_path)

    cmd = ['publish', 'list', '-config={}'.format(config_path), '-raw=true']

    cmd_ret = _cmd_run(cmd)
    ret = [line.strip() for line in cmd_ret.splitlines()]

    _LOG.debug('Found published repositories: %s', len(ret))
    return ret


def list_snapshots(config_path=_DEFAULT_CONFIG_PATH, sort_by_time=False):
    '''
    Get a list of all the snapshots.

    :param str config_path: The path to the configuration file for the aptly instance.
    :param bool sort_by_time: Whether to sort by creation time instead of by name.

    :return: A list of the snapshot names.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' aptly.list_snapshots
    '''
    _validate_config(config_path)

    cmd = ['snapshot', 'list', '-config={}'.format(config_path), '-raw=true']

    if sort_by_time:
        cmd.append('-sort=time')
    else:
        cmd.append('-sort=name')

    cmd_ret = _cmd_run(cmd)
    ret = [line.strip() for line in cmd_ret.splitlines()]

    _LOG.debug('Found snapshots: %s', len(ret))
    return ret


def cleanup_db(config_path=_DEFAULT_CONFIG_PATH, dry_run=False):
    '''
    Remove data regarding unreferenced packages and delete files in the package pool that
    are no longer being used by packages.

    :param bool dry_run: Report potential changes without making any changes.

    :return: A dictionary of the package keys and files that were removed.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' aptly.cleanup_db
    '''
    _validate_config(config_path)

    ret = {'deleted_keys': list(),
           'deleted_files': list()}

    cmd = ['db', 'cleanup', '-config={}'.format(config_path),
           '-dry-run={}'.format(str(dry_run).lower()),
           '-verbose=true']

    cmd_ret = _cmd_run(cmd)

    type_pattern = r'^List\s+[\w\s]+(?P<package_type>(file|key)s)[\w\s]+:$'
    list_pattern = r'^\s+-\s+(?P<package>.*)$'
    current_block = None

    for line in cmd_ret.splitlines():
        if current_block:
            match = re.search(list_pattern, line)
            if match:
                package_type = 'deleted_{}'.format(current_block)
                ret[package_type].append(match.group('package'))
            else:
                current_block = None
        # Intentionally not using an else here, in case of a situation where
        # the next list header might be bordered by the previous list.
        if not current_block:
            match = re.search(type_pattern, line)
            if match:
                current_block = match.group('package_type')

    _LOG.debug('Package keys identified for deletion: %s', len(ret['deleted_keys']))
    _LOG.debug('Package files identified for deletion: %s', len(ret['deleted_files']))
    return ret
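The two-pattern scan in cleanup_db is easiest to follow on concrete input. A minimal
standalone sketch of the same parsing loop, using invented sample output (the real
text emitted by ``aptly db cleanup -verbose`` may differ):

    import re

    SAMPLE = '\n'.join([
        'List of package keys to delete:',
        ' - Pamd64 nginx 1.10.3 abcdef',
        'List of orphaned files to delete:',
        ' - ab/cd/nginx_1.10.3_amd64.deb',
    ])

    type_pattern = r'^List\s+[\w\s]+(?P<package_type>(file|key)s)[\w\s]+:$'
    list_pattern = r'^\s+-\s+(?P<package>.*)$'

    ret = {'deleted_keys': [], 'deleted_files': []}
    current_block = None
    for line in SAMPLE.splitlines():
        if current_block:
            # Inside a block: collect "- item" lines until the block ends.
            match = re.search(list_pattern, line)
            if match:
                ret['deleted_{}'.format(current_block)].append(match.group('package'))
            else:
                current_block = None
        if not current_block:
            # Outside a block: look for the next "List ... keys/files ...:" header.
            match = re.search(type_pattern, line)
            if match:
                current_block = match.group('package_type')

    print(ret)  # {'deleted_keys': ['Pamd64 nginx 1.10.3 abcdef'], 'deleted_files': ['ab/cd/nginx_1.10.3_amd64.deb']}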
@ -143,19 +143,6 @@ def _reconstruct_ppa_name(owner_name, ppa_name):
    return 'ppa:{0}/{1}'.format(owner_name, ppa_name)


def _get_repo(**kwargs):
    '''
    Check the kwargs for either 'fromrepo' or 'repo' and return the value.
    'fromrepo' takes precedence over 'repo'.
    '''
    for key in ('fromrepo', 'repo'):
        try:
            return kwargs[key]
        except KeyError:
            pass
    return ''


def _check_apt():
    '''
    Abort if python-apt is not installed
@ -250,18 +237,11 @@ def latest_version(*names, **kwargs):
    '''
    refresh = salt.utils.is_true(kwargs.pop('refresh', True))
    show_installed = salt.utils.is_true(kwargs.pop('show_installed', False))

    if 'repo' in kwargs:
        # Remember to kill _get_repo() too when removing this warning.
        salt.utils.versions.warn_until(
            'Hydrogen',
            'The \'repo\' argument to apt.latest_version is deprecated, and '
            'will be removed in Salt {version}. Please use \'fromrepo\' '
            'instead.'
        )
        raise SaltInvocationError(
            'The \'repo\' argument is invalid, use \'fromrepo\' instead'
        )
    fromrepo = _get_repo(**kwargs)
    kwargs.pop('fromrepo', None)
    kwargs.pop('repo', None)
    fromrepo = kwargs.pop('fromrepo', None)
    cache_valid_time = kwargs.pop('cache_valid_time', 0)

    if len(names) == 0:
@ -1453,9 +1433,10 @@ def _get_upgradable(dist_upgrade=True, **kwargs):
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    fromrepo = _get_repo(**kwargs)
    if fromrepo:
        cmd.extend(['-o', 'APT::Default-Release={0}'.format(fromrepo)])
    try:
        cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])])
    except KeyError:
        pass

    call = __salt__['cmd.run_all'](cmd,
                                   python_shell=False,
@ -2147,44 +2128,44 @@ def mod_repo(repo, saltenv='base', **kwargs):

    The following options are available to modify a repo definition:

    architectures
        a comma separated list of supported architectures, e.g. ``amd64``
        If this option is not set, all architectures (configured in the
        system) will be used.
    architectures
        A comma-separated list of supported architectures, e.g. ``amd64`` If
        this option is not set, all architectures (configured in the system)
        will be used.

    comps
        a comma separated list of components for the repo, e.g. ``main``
    comps
        A comma separated list of components for the repo, e.g. ``main``

    file
        a file name to be used
    file
        A file name to be used

    keyserver
        keyserver to get gpg key from
    keyserver
        Keyserver to get gpg key from

    keyid
        key id to load with the keyserver argument
    keyid
        Key ID to load with the ``keyserver`` argument

    key_url
        URL to a GPG key to add to the APT GPG keyring
    key_url
        URL to a GPG key to add to the APT GPG keyring

    key_text
        GPG key in string form to add to the APT GPG keyring
    key_text
        GPG key in string form to add to the APT GPG keyring

    consolidate
        if ``True``, will attempt to de-dup and consolidate sources
    consolidate : False
        If ``True``, will attempt to de-duplicate and consolidate sources

    comments
        Sometimes you want to supply additional information, but not as
        enabled configuration. All comments provided here will be joined
        into a single string and appended to the repo configuration with a
        comment marker (#) before it.
    comments
        Sometimes you want to supply additional information, but not as
        enabled configuration. All comments provided here will be joined
        into a single string and appended to the repo configuration with a
        comment marker (#) before it.

        .. versionadded:: 2015.8.9
        .. versionadded:: 2015.8.9

    .. note:: Due to the way keys are stored for APT, there is a known issue
              where the key won't be updated unless another change is made
              at the same time. Keys should be properly added on initial
              configuration.
    .. note::
        Due to the way keys are stored for APT, there is a known issue where
        the key won't be updated unless another change is made at the same
        time. Keys should be properly added on initial configuration.

    CLI Examples:

@ -2193,6 +2174,17 @@ def mod_repo(repo, saltenv='base', **kwargs):

        salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri
        salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
    '''
    if 'refresh_db' in kwargs:
        salt.utils.versions.warn_until(
            'Neon',
            'The \'refresh_db\' argument to \'pkg.mod_repo\' has been '
            'renamed to \'refresh\'. Support for using \'refresh_db\' will be '
            'removed in the Neon release of Salt.'
        )
        refresh = kwargs['refresh_db']
    else:
        refresh = kwargs.get('refresh', True)

    _check_apt()
    # to ensure no one sets some key values that _shouldn't_ be changed on the
    # object itself, this is just a white-list of "ok" to set properties
@ -2225,7 +2217,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
            )
        )
        # explicit refresh when a repo is modified.
        if kwargs.get('refresh_db', True):
        if refresh:
            refresh_db()
        return {repo: out}
    else:
@ -2429,7 +2421,7 @@ def mod_repo(repo, saltenv='base', **kwargs):
            setattr(mod_source, key, kwargs[key])
    sources.save()
    # on changes, explicitly refresh
    if kwargs.get('refresh_db', True):
    if refresh:
        refresh_db()
    return {
        repo: {
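The refresh_db-to-refresh rename above follows the usual argument-rename shim: honor
the deprecated keyword (with a warning) until the named release, otherwise fall back
to the new one. A generic sketch of that pattern using only the standard library
(the helper name _pop_renamed is invented for illustration; the real code uses
salt.utils.versions.warn_until):

    import warnings

    def _pop_renamed(kwargs, old, new, default=True):
        # Prefer the deprecated keyword when supplied, but warn about it.
        if old in kwargs:
            warnings.warn('{0} has been renamed to {1}'.format(old, new),
                          DeprecationWarning)
            return kwargs.pop(old)
        return kwargs.get(new, default)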
@ -202,45 +202,48 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio
    has_classifier = classifier is not None and classifier != ""

    if snapshot_version is None:
        snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers)

        if packaging not in snapshot_version_metadata['snapshot_versions']:
            error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata.
                              artifactory_url: {artifactory_url}
                              repository: {repository}
                              group_id: {group_id}
                              artifact_id: {artifact_id}
                              packaging: {packaging}
                              classifier: {classifier}
                              version: {version}'''.format(
                artifactory_url=artifactory_url,
                repository=repository,
                group_id=group_id,
                artifact_id=artifact_id,
                packaging=packaging,
                classifier=classifier,
                version=version)
            raise ArtifactoryError(error_message)

        if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']:
            error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata.
                              artifactory_url: {artifactory_url}
                              repository: {repository}
                              group_id: {group_id}
                              artifact_id: {artifact_id}
                              packaging: {packaging}
                              classifier: {classifier}
                              version: {version}'''.format(
                artifactory_url=artifactory_url,
                repository=repository,
                group_id=group_id,
                artifact_id=artifact_id,
                packaging=packaging,
                classifier=classifier,
                version=version)
            raise ArtifactoryError(error_message)

        snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging]
        try:
            snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers)
            if packaging not in snapshot_version_metadata['snapshot_versions']:
                error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata.
                                  artifactory_url: {artifactory_url}
                                  repository: {repository}
                                  group_id: {group_id}
                                  artifact_id: {artifact_id}
                                  packaging: {packaging}
                                  classifier: {classifier}
                                  version: {version}'''.format(
                    artifactory_url=artifactory_url,
                    repository=repository,
                    group_id=group_id,
                    artifact_id=artifact_id,
                    packaging=packaging,
                    classifier=classifier,
                    version=version)
                raise ArtifactoryError(error_message)

            if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']:
                error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata.
                                  artifactory_url: {artifactory_url}
                                  repository: {repository}
                                  group_id: {group_id}
                                  artifact_id: {artifact_id}
                                  packaging: {packaging}
                                  classifier: {classifier}
                                  version: {version}'''.format(
                    artifactory_url=artifactory_url,
                    repository=repository,
                    group_id=group_id,
                    artifact_id=artifact_id,
                    packaging=packaging,
                    classifier=classifier,
                    version=version)
                raise ArtifactoryError(error_message)

            snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging]
        except CommandExecutionError as err:
            log.error('Could not fetch maven-metadata.xml. Assuming snapshot_version=%s.', version)
            snapshot_version = version

    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
@ -200,7 +200,7 @@ def execute(context=None, lens=None, commands=(), load_path=None):
            method = METHOD_MAP[cmd]
            nargs = arg_map[method]

            parts = salt.utils.args.shlex_split(arg, posix=False)
            parts = salt.utils.args.shlex_split(arg)

            if len(parts) not in nargs:
                err = '{0} takes {1} args: {2}'.format(method, nargs, parts)
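What the dropped posix=False flag changes is quote handling. An illustration with
the standard library shlex, which salt.utils.args.shlex_split wraps:

    import shlex

    cmd = 'set /files/etc/hosts/01/alias[1] "host1"'
    print(shlex.split(cmd))               # posix mode (new default) strips quotes:
                                          # ['set', '/files/etc/hosts/01/alias[1]', 'host1']
    print(shlex.split(cmd, posix=False))  # non-posix mode keeps them:
                                          # ['set', '/files/etc/hosts/01/alias[1]', '"host1"']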
@ -14,6 +14,7 @@ import os
import yaml

# Import Salt libs
import salt.ext.six as six
import salt.utils.event
import salt.utils.files
from salt.ext.six.moves import map
@ -69,6 +70,47 @@ def list_(return_yaml=True):
        return {'beacons': {}}


def list_available(return_yaml=True):
    '''
    List the beacons currently available on the minion

    :param return_yaml: Whether to return YAML formatted output, default True
    :return: List of currently available Beacons.

    CLI Example:

    .. code-block:: bash

        salt '*' beacons.list_available

    '''
    beacons = None

    try:
        eventer = salt.utils.event.get_event('minion', opts=__opts__)
        res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons')
        if res:
            event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_available_complete', wait=30)
            if event_ret and event_ret['complete']:
                beacons = event_ret['beacons']
    except KeyError:
        # Effectively a no-op, since we can't really return without an event system
        ret = {}
        ret['result'] = False
        ret['comment'] = 'Event module not available. Beacon list_available failed.'
        return ret

    if beacons:
        if return_yaml:
            tmp = {'beacons': beacons}
            yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
            return yaml_out
        else:
            return beacons
    else:
        return {'beacons': {}}
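list_available, and the reworked add/modify paths below, all share one
fire-and-wait pattern: publish a request on the minion event bus, then block for
the matching completion tag. A condensed sketch of just that pattern (__opts__
and __salt__ are the usual Salt module dunders and are assumed to be injected):

    import salt.utils.event

    def _fire_and_wait(func, reply_tag, timeout=30):
        # Fire a request at the beacon manager and wait for its reply event.
        eventer = salt.utils.event.get_event('minion', opts=__opts__)
        if __salt__['event.fire']({'func': func}, 'manage_beacons'):
            return eventer.get_event(tag=reply_tag, wait=timeout)
        return None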
def add(name, beacon_data, **kwargs):
    '''
    Add a beacon on the minion
@ -95,37 +137,34 @@ def add(name, beacon_data, **kwargs):
        ret['result'] = True
        ret['comment'] = 'Beacon: {0} would be added.'.format(name)
    else:
        # Attempt to load the beacon module so we have access to the validate function
        try:
            beacon_module = __import__('salt.beacons.' + name, fromlist=['validate'])
            log.debug('Successfully imported beacon.')
        except ImportError:
            ret['comment'] = 'Beacon {0} does not exist'.format(name)
            return ret

        # Attempt to validate
        if hasattr(beacon_module, 'validate'):
            _beacon_data = beacon_data
            if 'enabled' in _beacon_data:
                del _beacon_data['enabled']
            valid, vcomment = beacon_module.validate(_beacon_data)
        else:
            log.info('Beacon {0} does not have a validate'
                     ' function, skipping validation.'.format(name))
            valid = True

        if not valid:
            ret['result'] = False
            ret['comment'] = ('Beacon {0} configuration invalid, '
                              'not adding.\n{1}'.format(name, vcomment))
            return ret

        try:
            # Attempt to load the beacon module so we have access to the validate function
            eventer = salt.utils.event.get_event('minion', opts=__opts__)
            res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'add'}, 'manage_beacons')
            res = __salt__['event.fire']({'name': name,
                                          'beacon_data': beacon_data,
                                          'func': 'validate_beacon'},
                                         'manage_beacons')
            if res:
                event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30)
                valid = event_ret['valid']
                vcomment = event_ret['vcomment']

            if not valid:
                ret['result'] = False
                ret['comment'] = ('Beacon {0} configuration invalid, '
                                  'not adding.\n{1}'.format(name, vcomment))
                return ret

        except KeyError:
            # Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Beacon add failed.'

        try:
            res = __salt__['event.fire']({'name': name,
                                          'beacon_data': beacon_data,
                                          'func': 'add'}, 'manage_beacons')
            if res:
                event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_add_complete', wait=30)
                log.debug('=== event_ret {} ==='.format(event_ret))
                if event_ret and event_ret['complete']:
                    beacons = event_ret['beacons']
                    if name in beacons and beacons[name] == beacon_data:
@ -165,29 +204,32 @@ def modify(name, beacon_data, **kwargs):
        ret['result'] = True
        ret['comment'] = 'Beacon: {0} would be added.'.format(name)
    else:
        # Attempt to load the beacon module so we have access to the validate function
        try:
            beacon_module = __import__('salt.beacons.' + name, fromlist=['validate'])
            log.debug('Successfully imported beacon.')
        except ImportError:
            ret['comment'] = 'Beacon {0} does not exist'.format(name)
            return ret
        # Attempt to load the beacon module so we have access to the validate function
        eventer = salt.utils.event.get_event('minion', opts=__opts__)
        res = __salt__['event.fire']({'name': name,
                                      'beacon_data': beacon_data,
                                      'func': 'validate_beacon'},
                                     'manage_beacons')
        if res:
            event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30)
            valid = event_ret['valid']
            vcomment = event_ret['vcomment']

        # Attempt to validate
        if hasattr(beacon_module, 'validate'):
            _beacon_data = beacon_data
            if 'enabled' in _beacon_data:
                del _beacon_data['enabled']
            valid, vcomment = beacon_module.validate(_beacon_data)
        else:
            log.info('Beacon {0} does not have a validate'
                     ' function, skipping validation.'.format(name))
            valid = True
        if not valid:
            ret['result'] = False
            ret['comment'] = ('Beacon {0} configuration invalid, '
                              'not adding.\n{1}'.format(name, vcomment))
            return ret

        except KeyError:
            # Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Beacon modify failed.'

        if not valid:
            ret['result'] = False
            ret['comment'] = ('Beacon {0} configuration invalid, '
                              'not adding.\n{1}'.format(name, vcomment))
                              'not modifying.\n{1}'.format(name, vcomment))
            return ret

    _current = current_beacons[name]
@ -197,10 +239,14 @@ def modify(name, beacon_data, **kwargs):
        ret['comment'] = 'Job {0} in correct state'.format(name)
        return ret

    _current_lines = ['{0}:{1}\n'.format(key, value)
                      for (key, value) in sorted(_current.items())]
    _new_lines = ['{0}:{1}\n'.format(key, value)
                  for (key, value) in sorted(_new.items())]
    _current_lines = []
    for _item in _current:
        _current_lines.extend(['{0}:{1}\n'.format(key, value)
                               for (key, value) in six.iteritems(_item)])
    _new_lines = []
    for _item in _new:
        _new_lines.extend(['{0}:{1}\n'.format(key, value)
                           for (key, value) in six.iteritems(_item)])
    _diff = difflib.unified_diff(_current_lines, _new_lines)

    ret['changes'] = {}
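For reference, difflib.unified_diff consumes two sequences of newline-terminated
strings like the ones built above and yields diff lines lazily. A tiny runnable
illustration (the beacon data is invented):

    import difflib

    old = ['interval:5\n', 'disk_usage:90%\n']
    new = ['interval:10\n', 'disk_usage:90%\n']
    print(''.join(difflib.unified_diff(old, new)))
    # @@ -1,2 +1,2 @@
    # -interval:5
    # +interval:10
    #  disk_usage:90%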
@ -1164,7 +1164,7 @@ def delete_pool_member(hostname, username, password, name, member):

    CLI Example::

        salt '*' bigip.delete_node bigip admin admin my-pool 10.2.2.2:80
        salt '*' bigip.delete_pool_member bigip admin admin my-pool 10.2.2.2:80
    '''

    #build session
@ -135,6 +135,7 @@ def create_file_system(name,
                       key=None,
                       profile=None,
                       region=None,
                       creation_token=None,
                       **kwargs):
    '''
    Creates a new, empty file system.
@ -146,6 +147,10 @@ def create_file_system(name,
        (string) - The PerformanceMode of the file system. Can be either
        generalPurpose or maxIO

    creation_token
        (string) - A unique name to be used as reference when creating an EFS.
        This will ensure idempotency. Set to name if not specified otherwise

    returns
        (dict) - A dict of the data for the elastic file system

@ -155,10 +160,11 @@ def create_file_system(name,

        salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose
    '''
    import os
    import base64
    creation_token = base64.b64encode(os.urandom(46), ['-', '_'])
    tags = [{"Key": "Name", "Value": name}]

    if creation_token is None:
        creation_token = name

    tags = {"Key": "Name", "Value": name}

    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)

@ -372,6 +378,7 @@ def get_file_systems(filesystemid=None,
                     key=None,
                     profile=None,
                     region=None,
                     creation_token=None,
                     **kwargs):
    '''
    Get all EFS properties or a specific instance property
@ -380,6 +387,12 @@ def get_file_systems(filesystemid=None,
    filesystemid
        (string) - ID of the file system to retrieve properties

    creation_token
        (string) - A unique token that identifies an EFS.
        If the filesystem was created via create_file_system, this would
        either be explicitly passed in or set to name.
        You can limit your search with this.

    returns
        (list[dict]) - list of all elastic file system properties

@ -393,9 +406,16 @@ def get_file_systems(filesystemid=None,
    result = None
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)

    if filesystemid:
    if filesystemid and creation_token:
        response = client.describe_file_systems(FileSystemId=filesystemid,
                                                CreationToken=creation_token)
        result = response["FileSystems"]
    elif filesystemid:
        response = client.describe_file_systems(FileSystemId=filesystemid)
        result = response["FileSystems"]
    elif creation_token:
        response = client.describe_file_systems(CreationToken=creation_token)
        result = response["FileSystems"]
    else:
        response = client.describe_file_systems()
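The four-way branch above can also be read as keyword filtering: pass only the
parameters the caller supplied. A compact equivalent sketch (FileSystemId and
CreationToken are real describe_file_systems parameters in the boto3 EFS API):

    params = {}
    if filesystemid:
        params['FileSystemId'] = filesystemid
    if creation_token:
        params['CreationToken'] = creation_token
    response = client.describe_file_systems(**params)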
@ -49,6 +49,7 @@ from __future__ import absolute_import
# Import Python libs
import logging
import json
import time

log = logging.getLogger(__name__)
@ -160,49 +161,60 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
        salt myminion boto_elb.exists myelb region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    wait = 60
    orig_wait = wait

    try:
        lb = conn.get_all_load_balancers(load_balancer_names=[name])
        lb = lb[0]
        ret = {}
        ret['availability_zones'] = lb.availability_zones
        listeners = []
        for _listener in lb.listeners:
            listener_dict = {}
            listener_dict['elb_port'] = _listener.load_balancer_port
            listener_dict['elb_protocol'] = _listener.protocol
            listener_dict['instance_port'] = _listener.instance_port
            listener_dict['instance_protocol'] = _listener.instance_protocol
            listener_dict['policies'] = _listener.policy_names
            if _listener.ssl_certificate_id:
                listener_dict['certificate'] = _listener.ssl_certificate_id
            listeners.append(listener_dict)
        ret['listeners'] = listeners
        backends = []
        for _backend in lb.backends:
            bs_dict = {}
            bs_dict['instance_port'] = _backend.instance_port
            bs_dict['policies'] = [p.policy_name for p in _backend.policies]
            backends.append(bs_dict)
        ret['backends'] = backends
        ret['subnets'] = lb.subnets
        ret['security_groups'] = lb.security_groups
        ret['scheme'] = lb.scheme
        ret['dns_name'] = lb.dns_name
        ret['tags'] = _get_all_tags(conn, name)
        lb_policy_lists = [
            lb.policies.app_cookie_stickiness_policies,
            lb.policies.lb_cookie_stickiness_policies,
            lb.policies.other_policies
        ]
        policies = []
        for policy_list in lb_policy_lists:
            policies += [p.policy_name for p in policy_list]
        ret['policies'] = policies
        return ret
    except boto.exception.BotoServerError as error:
        log.debug(error)
        return {}
    while True:
        try:
            lb = conn.get_all_load_balancers(load_balancer_names=[name])
            lb = lb[0]
            ret = {}
            ret['availability_zones'] = lb.availability_zones
            listeners = []
            for _listener in lb.listeners:
                listener_dict = {}
                listener_dict['elb_port'] = _listener.load_balancer_port
                listener_dict['elb_protocol'] = _listener.protocol
                listener_dict['instance_port'] = _listener.instance_port
                listener_dict['instance_protocol'] = _listener.instance_protocol
                listener_dict['policies'] = _listener.policy_names
                if _listener.ssl_certificate_id:
                    listener_dict['certificate'] = _listener.ssl_certificate_id
                listeners.append(listener_dict)
            ret['listeners'] = listeners
            backends = []
            for _backend in lb.backends:
                bs_dict = {}
                bs_dict['instance_port'] = _backend.instance_port
                bs_dict['policies'] = [p.policy_name for p in _backend.policies]
                backends.append(bs_dict)
            ret['backends'] = backends
            ret['subnets'] = lb.subnets
            ret['security_groups'] = lb.security_groups
            ret['scheme'] = lb.scheme
            ret['dns_name'] = lb.dns_name
            ret['tags'] = _get_all_tags(conn, name)
            lb_policy_lists = [
                lb.policies.app_cookie_stickiness_policies,
                lb.policies.lb_cookie_stickiness_policies,
                lb.policies.other_policies
            ]
            policies = []
            for policy_list in lb_policy_lists:
                policies += [p.policy_name for p in policy_list]
            ret['policies'] = policies
            return ret
        except boto.exception.BotoServerError as error:
            if getattr(error, 'error_code', '') == 'Throttling':
                if wait > 0:
                    sleep = wait if wait % 5 == wait else 5
                    log.info('Throttled by AWS API, will retry in 5 seconds.')
                    time.sleep(sleep)
                    wait -= sleep
                    continue
            log.error('API still throttling us after {0} seconds!'.format(orig_wait))
            log.error(error)
            return {}
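The retry loop above spends a fixed 60-second throttling budget: sleep in short
slices while AWS keeps answering with Throttling errors, then give up. A generic
variant of the same idea, isolated into a helper (purely an illustrative sketch;
is_throttled stands in for the SDK-specific error check, and this version
re-raises instead of returning an empty dict):

    import time

    def call_with_throttle_budget(call, is_throttled, wait=60, step=5):
        # Retry a throttled call until the wait budget is exhausted.
        while True:
            try:
                return call()
            except Exception as exc:
                if is_throttled(exc) and wait > 0:
                    time.sleep(step)
                    wait -= step
                    continue
                raise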
def listener_dict_to_tuple(listener):

@ -72,11 +72,20 @@ def __virtual__():
    return True


def create_target_group(name, protocol, port, vpc_id,
                        region=None, key=None, keyid=None, profile=None,
                        health_check_protocol='HTTP', health_check_port='traffic-port',
                        health_check_path='/', health_check_interval_seconds=30,
                        health_check_timeout_seconds=5, healthy_threshold_count=5,
def create_target_group(name,
                        protocol,
                        port,
                        vpc_id,
                        region=None,
                        key=None,
                        keyid=None,
                        profile=None,
                        health_check_protocol='HTTP',
                        health_check_port='traffic-port',
                        health_check_path='/',
                        health_check_interval_seconds=30,
                        health_check_timeout_seconds=5,
                        healthy_threshold_count=5,
                        unhealthy_threshold_count=2):
    '''
    Create target group if not present.
@ -125,36 +134,40 @@ def create_target_group(name, protocol, port, vpc_id,
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if target_group_exists(name, region, key, keyid, profile):
        return True
    else:
        try:
            lb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
                                          VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
                                          HealthCheckPort=health_check_port,
                                          HealthCheckPath=health_check_path,
                                          HealthCheckIntervalSeconds=health_check_interval_seconds,
                                          HealthCheckTimeoutSeconds=health_check_timeout_seconds,
                                          HealthyThresholdCount=healthy_threshold_count,
                                          UnhealthyThresholdCount=unhealthy_threshold_count)
            if lb:
                log.info('Created ALB {0}: {1}'.format(name,
                                                       lb['TargetGroups'][0]['TargetGroupArn']))
                return True
            else:
                log.error('Failed to create ALB {0}'.format(name))
                return False
        except ClientError as error:
            log.debug(error)
            log.error('Failed to create ALB {0}: {1}: {2}'.format(name,
                                                                  error.response['Error']['Code'],
                                                                  error.response['Error']['Message']))

    try:
        alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
                                       VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
                                       HealthCheckPort=health_check_port,
                                       HealthCheckPath=health_check_path,
                                       HealthCheckIntervalSeconds=health_check_interval_seconds,
                                       HealthCheckTimeoutSeconds=health_check_timeout_seconds,
                                       HealthyThresholdCount=healthy_threshold_count,
                                       UnhealthyThresholdCount=unhealthy_threshold_count)
        if alb:
            log.info('Created ALB {0}: {1}'.format(name,
                                                   alb['TargetGroups'][0]['TargetGroupArn']))
            return True
        else:
            log.error('Failed to create ALB {0}'.format(name))
            return False
    except ClientError as error:
        log.debug(error)
        log.error('Failed to create ALB {0}: {1}: {2}'.format(name,
                                                              error.response['Error']['Code'],
                                                              error.response['Error']['Message']))


def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
def delete_target_group(name,
                        region=None,
                        key=None,
                        keyid=None,
                        profile=None):
    '''
    Delete target group.

    name
        (string) - The Amazon Resource Name (ARN) of the resource.
        (string) - Target Group Name or Amazon Resource Name (ARN).

    returns
        (bool) - True on success, False on failure.
@ -167,9 +180,20 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not target_group_exists(name, region, key, keyid, profile):
        return True

    try:
        conn.delete_target_group(TargetGroupArn=name)
        log.info('Deleted target group {0}'.format(name))
        if name.startswith('arn:aws:elasticloadbalancing'):
            conn.delete_target_group(TargetGroupArn=name)
            log.info('Deleted target group {0}'.format(name))
        else:
            tg_info = conn.describe_target_groups(Names=[name])
            if len(tg_info['TargetGroups']) != 1:
                return False
            arn = tg_info['TargetGroups'][0]['TargetGroupArn']
            conn.delete_target_group(TargetGroupArn=arn)
            log.info('Deleted target group {0} ARN {1}'.format(name, arn))
        return True
    except ClientError as error:
        log.debug(error)
@ -177,7 +201,11 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    return False


def target_group_exists(name, region=None, key=None, keyid=None, profile=None):
def target_group_exists(name,
                        region=None,
                        key=None,
                        keyid=None,
                        profile=None):
    '''
    Check to see if a target group exists.

@ -200,11 +228,16 @@ def target_group_exists(name, region=None, key=None, keyid=None, profile=None):
        log.warning('The target group does not exist in region {0}'.format(region))
        return False
    except ClientError as error:
        log.warning(error)
        log.warning('target_group_exists check for {0} returned: {1}'.format(name, error))
        return False


def describe_target_health(name, targets=None, region=None, key=None, keyid=None, profile=None):
def describe_target_health(name,
                           targets=None,
                           region=None,
                           key=None,
                           keyid=None,
                           profile=None):
    '''
    Get the current health check status for targets in a target group.

@ -234,8 +267,12 @@ def describe_target_health(name, targets=None, region=None, key=None, keyid=None
    return {}


def register_targets(name, targets, region=None, key=None, keyid=None,
                     profile=None):
def register_targets(name,
                     targets,
                     region=None,
                     key=None,
                     keyid=None,
                     profile=None):
    '''
    Register targets to a target group of an ALB. ``targets`` is either an
    instance id string or a list of instance ids.
@ -264,15 +301,18 @@ def register_targets(name, targets, region=None, key=None, keyid=None,
        registered_targets = conn.register_targets(TargetGroupArn=name, Targets=targetsdict)
        if registered_targets:
            return True
        else:
            return False
        return False
    except ClientError as error:
        log.warning(error)
        return False


def deregister_targets(name, targets, region=None, key=None, keyid=None,
                       profile=None):
def deregister_targets(name,
                       targets,
                       region=None,
                       key=None,
                       keyid=None,
                       profile=None):
    '''
    Deregister targets from a target group of an ALB. ``targets`` is either an
    instance id string or a list of instance ids.
@ -301,8 +341,7 @@ def deregister_targets(name, targets, region=None, key=None, keyid=None,
        registered_targets = conn.deregister_targets(TargetGroupArn=name, Targets=targetsdict)
        if registered_targets:
            return True
        else:
            return False
        return False
    except ClientError as error:
        log.warning(error)
        return False
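The new delete_target_group above accepts either a friendly name or a full ARN.
That resolution step is worth seeing on its own; a minimal sketch built on the
same real elbv2 client calls (describe_target_groups, delete_target_group):

    def _resolve_target_group_arn(conn, name):
        # ARNs pass through untouched; names are looked up first.
        if name.startswith('arn:aws:elasticloadbalancing'):
            return name
        groups = conn.describe_target_groups(Names=[name])['TargetGroups']
        return groups[0]['TargetGroupArn'] if len(groups) == 1 else None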
@ -2403,33 +2403,39 @@ def has_exec(cmd):
    return which(cmd) is not None


def exec_code(lang, code, cwd=None):
def exec_code(lang, code, cwd=None, args=None, **kwargs):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. The stdout will be returned.

    All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.exec_code ruby 'puts "cheese"'
        salt '*' cmd.exec_code ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
    '''
    return exec_code_all(lang, code, cwd)['stdout']
    return exec_code_all(lang, code, cwd, args, **kwargs)['stdout']


def exec_code_all(lang, code, cwd=None):
def exec_code_all(lang, code, cwd=None, args=None, **kwargs):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. All cmd artifacts (stdout, stderr, retcode, pid)
    will be returned.

    All parameters from :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` except python_shell can be used.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.exec_code_all ruby 'puts "cheese"'
        salt '*' cmd.exec_code_all ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}'
    '''
    powershell = lang.lower().startswith("powershell")

@ -2446,7 +2452,12 @@ def exec_code_all(lang, code, cwd=None):
    else:
        cmd = [lang, codefile]

    ret = run_all(cmd, cwd=cwd, python_shell=False)
    if isinstance(args, six.string_types):
        cmd.append(args)
    elif isinstance(args, list):
        cmd += args

    ret = run_all(cmd, cwd=cwd, python_shell=False, **kwargs)
    os.remove(codefile)
    return ret
@ -80,7 +80,7 @@ def recv(files, dest):
        return 'Destination unavailable'

    try:
        with salt.utils.fopen(final, 'w+') as fp_:
        with salt.utils.files.fopen(final, 'w+') as fp_:
            fp_.write(data)
        ret[final] = True
    except IOError:
@ -23,6 +23,10 @@ import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError

__func_alias__ = {
    'format_': 'format'
}

log = logging.getLogger(__name__)

HAS_HDPARM = salt.utils.path.which('hdparm') is not None
@ -234,6 +234,7 @@ except ImportError:
# pylint: enable=import-error

HAS_NSENTER = bool(salt.utils.path.which('nsenter'))
HUB_PREFIX = 'docker.io/'

# Set up logging
log = logging.getLogger(__name__)
@ -902,9 +903,14 @@ def compare_container(first, second, ignore=None):
                continue
            val1 = result1[conf_dict][item]
            val2 = result2[conf_dict].get(item)
            if item in ('OomKillDisable',):
            if item in ('OomKillDisable',) or (val1 is None or val2 is None):
                if bool(val1) != bool(val2):
                    ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
            elif item == 'Image':
                image1 = inspect_image(val1)['Id']
                image2 = inspect_image(val2)['Id']
                if image1 != image2:
                    ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
            else:
                if item == 'Links':
                    val1 = _scrub_links(val1, first)
@ -920,9 +926,14 @@ def compare_container(first, second, ignore=None):
                continue
            val1 = result1[conf_dict].get(item)
            val2 = result2[conf_dict][item]
            if item in ('OomKillDisable',):
            if item in ('OomKillDisable',) or (val1 is None or val2 is None):
                if bool(val1) != bool(val2):
                    ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
            elif item == 'Image':
                image1 = inspect_image(val1)['Id']
                image2 = inspect_image(val2)['Id']
                if image1 != image2:
                    ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
            else:
                if item == 'Links':
                    val1 = _scrub_links(val1, first)
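The extra None guard switches the comparison to truthiness whenever either side
of a setting is unset, so None and False no longer register as a spurious config
change. In plain terms:

    val1, val2 = None, False
    bool(val1) != bool(val2)   # False -> treated as "no difference"

    val1, val2 = None, True
    bool(val1) != bool(val2)   # True  -> recorded as an old/new change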
@ -1479,6 +1490,43 @@ def list_tags():
    return sorted(ret)


def resolve_tag(name, tags=None):
    '''
    .. versionadded:: 2017.7.2,Oxygen

    Given an image tag, check the locally-pulled tags (using
    :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`) and return
    the matching tag. This helps disambiguate differences on some platforms
    where images from the Docker Hub are prefixed with ``docker.io/``. If an
    image name with no tag is passed, a tag of ``latest`` is assumed.

    If the specified image is not pulled locally, this function will return
    ``False``.

    tags
        An optional Python list of tags to check against. If passed, then
        :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>` will not
        be run to get a list of tags. This is useful when resolving a number of
        tags at the same time.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.resolve_tag busybox
        salt myminion docker.resolve_tag busybox:latest
    '''
    tag_name = ':'.join(salt.utils.docker.get_repo_tag(name))
    if tags is None:
        tags = list_tags()
    if tag_name in tags:
        return tag_name
    full_name = HUB_PREFIX + tag_name
    if not name.startswith(HUB_PREFIX) and full_name in tags:
        return full_name
    return False
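A behavior sketch for resolve_tag, with an invented local tag list (on platforms
that prefix Hub images, the docker.io/ form is what list_tags reports):

    tags = ['docker.io/busybox:latest', 'myregistry/app:1.0']

    resolve_tag('busybox', tags=tags)             # 'docker.io/busybox:latest'
    resolve_tag('myregistry/app:1.0', tags=tags)  # 'myregistry/app:1.0'
    resolve_tag('alpine', tags=tags)              # False (not pulled locally)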
def logs(name):
    '''
    Returns the logs for the container. Equivalent to running the ``docker
@ -3885,8 +3933,9 @@ def save(name,
        saved_path = salt.utils.files.mkstemp()
    else:
        saved_path = path

    cmd = ['docker', 'save', '-o', saved_path, inspect_image(name)['Id']]
    # use the image name if it's valid; if not, fall back to the image id
    image_to_save = name if name in inspect_image(name)['RepoTags'] else inspect_image(name)['Id']
    cmd = ['docker', 'save', '-o', saved_path, image_to_save]
    time_started = time.time()
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
@ -4033,7 +4082,10 @@ def networks(names=None, ids=None):

def create_network(name,
                   driver=None,
                   driver_opts=None):
                   driver_opts=None,
                   gateway=None,
                   ip_range=None,
                   subnet=None):
    '''
    Create a new network

@ -4046,16 +4098,46 @@ def create_network(name,
    driver_opts
        Options for the network driver.

    gateway
        IPv4 or IPv6 gateway for the master subnet

    ip_range
        Allocate container IP from a sub-range within the subnet

    subnet
        Subnet in CIDR format that represents a network segment

    CLI Example:

    .. code-block:: bash

        salt myminion docker.create_network web_network driver=bridge
        salt myminion docker.create_network macvlan_network \
            driver=macvlan \
            driver_opts="{'parent':'eth0'}" \
            gateway=172.20.0.1 \
            subnet=172.20.0.0/24
    '''
    # If any settings which need to be set via the IPAM config are specified, create the IPAM config data structure
    # with these values set.
    if gateway or ip_range or subnet:
        ipam = {
            'Config': [{
                'Gateway': gateway,
                'IPRange': ip_range,
                'Subnet': subnet
            }],
            'Driver': 'default',
            'Options': {}
        }
    else:
        ipam = None

    response = _client_wrapper('create_network',
                               name,
                               driver=driver,
                               options=driver_opts,
                               ipam=ipam,
                               check_duplicate=True)

    _clear_context()
@ -5376,7 +5458,7 @@ def sls(name, mods=None, saltenv='base', **kwargs):
    )
    if not isinstance(ret, dict):
        __context__['retcode'] = 1
    elif not salt.utils.check_state_result(ret):
    elif not __utils__['state.check_result'](ret):
        __context__['retcode'] = 2
    else:
        __context__['retcode'] = 0
@ -5450,7 +5532,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
    # Now execute the state into the container
    ret = sls(id_, mods, saltenv, **kwargs)
    # fail if the state was not successful
    if not dryrun and not salt.utils.check_state_result(ret):
    if not dryrun and not __utils__['state.check_result'](ret):
        raise CommandExecutionError(ret)
    if dryrun is False:
        ret = commit(id_, name)
@ -24,6 +24,8 @@ import salt.utils.kickstart
import salt.syspaths
from salt.exceptions import SaltInvocationError

# Import 3rd-party libs
from salt.ext import six

log = logging.getLogger(__name__)

@ -325,6 +327,8 @@ def _bootstrap_yum(
    '''
    if pkgs is None:
        pkgs = []
    elif isinstance(pkgs, six.string_types):
        pkgs = pkgs.split(',')

    default_pkgs = ('yum', 'centos-release', 'iputils')
    for pkg in default_pkgs:
@ -333,6 +337,8 @@ def _bootstrap_yum(

    if exclude_pkgs is None:
        exclude_pkgs = []
    elif isinstance(exclude_pkgs, six.string_types):
        exclude_pkgs = exclude_pkgs.split(',')

    for pkg in exclude_pkgs:
        pkgs.remove(pkg)
@ -393,15 +399,27 @@ def _bootstrap_deb(
    if repo_url is None:
        repo_url = 'http://ftp.debian.org/debian/'

    if not salt.utils.which('debootstrap'):
        log.error('Required tool debootstrap is not installed.')
        return False

    if isinstance(pkgs, (list, tuple)):
        pkgs = ','.join(pkgs)
    if isinstance(exclude_pkgs, (list, tuple)):
        exclude_pkgs = ','.join(exclude_pkgs)

    deb_args = [
        'debootstrap',
        '--foreign',
        '--arch',
        _cmd_quote(arch),
        '--include',
    ] + pkgs + [
        '--exclude',
    ] + exclude_pkgs + [
        _cmd_quote(arch)]

    if pkgs:
        deb_args += ['--include', _cmd_quote(pkgs)]
    if exclude_pkgs:
        deb_args += ['--exclude', _cmd_quote(exclude_pkgs)]

    deb_args += [
        _cmd_quote(flavor),
        _cmd_quote(root),
        _cmd_quote(repo_url),
@ -469,6 +487,8 @@ def _bootstrap_pacman(

    if pkgs is None:
        pkgs = []
    elif isinstance(pkgs, six.string_types):
        pkgs = pkgs.split(',')

    default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub')
    for pkg in default_pkgs:
@ -477,6 +497,8 @@ def _bootstrap_pacman(

    if exclude_pkgs is None:
        exclude_pkgs = []
    elif isinstance(exclude_pkgs, six.string_types):
        exclude_pkgs = exclude_pkgs.split(',')

    for pkg in exclude_pkgs:
        pkgs.remove(pkg)
@ -31,6 +31,7 @@ from salt.modules.inspectlib.entities import (AllowedDir, IgnoredDir, Package,
import salt.utils  # Can be removed when reinit_crypto is moved
import salt.utils.files
import salt.utils.fsutils
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError

@ -312,7 +313,7 @@ class Inspector(EnvLoader):
                continue
            if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK):
                continue
            if os.path.islink(obj):
            if salt.utils.path.islink(obj):
                links.append(obj)
            elif os.path.isdir(obj):
                dirs.append(obj)
@ -17,11 +17,14 @@
# Import python libs
from __future__ import absolute_import
import os
import grp
import pwd
from xml.dom import minidom
import platform
import socket
try:
    import grp
    import pwd
except ImportError:
    pass

# Import salt libs
import salt.utils.files
@ -1463,6 +1463,8 @@ def _parser():
        add_arg('--or-mark', dest='or-mark', action='append')
        add_arg('--xor-mark', dest='xor-mark', action='append')
        add_arg('--set-mark', dest='set-mark', action='append')
        add_arg('--nfmask', dest='nfmask', action='append')
        add_arg('--ctmask', dest='ctmask', action='append')
        ## CONNSECMARK
        add_arg('--save', dest='save', action='append')
        add_arg('--restore', dest='restore', action='append')
@ -159,7 +159,7 @@ def vgdisplay(vgname=''):
    return ret


def lvdisplay(lvname=''):
def lvdisplay(lvname='', quiet=False):
    '''
    Return information about the logical volume(s)

@ -174,7 +174,10 @@ def lvdisplay(lvname=''):
    cmd = ['lvdisplay', '-c']
    if lvname:
        cmd.append(lvname)
    cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if quiet:
        cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet')
    else:
        cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)

    if cmd_ret['retcode'] != 0:
        return {}
@ -41,7 +41,11 @@ def list_exports(exports='/etc/exports'):
        if line.startswith('#'):
            continue
        comps = line.split()
        ret[comps[0]] = []

        # Handle the case where the same path is given twice
        if not comps[0] in ret:
            ret[comps[0]] = []

        newshares = []
        for perm in comps[1:]:
            if perm.startswith('/'):
@ -49,7 +53,10 @@ def list_exports(exports='/etc/exports'):
                continue
            permcomps = perm.split('(')
            permcomps[1] = permcomps[1].replace(')', '')
            hosts = permcomps[0].split(',')
            hosts = permcomps[0]
            if type(hosts) is not str:
                # Lists, etc would silently mangle /etc/exports
                raise TypeError('hosts argument must be a string')
            options = permcomps[1].split(',')
            ret[comps[0]].append({'hosts': hosts, 'options': options})
        for share in newshares:
@ -73,6 +80,31 @@ def del_export(exports='/etc/exports', path=None):
    return edict


def add_export(exports='/etc/exports', path=None, hosts=None, options=None):
    '''
    Add an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw']
    '''
    if options is None:
        options = []
    if type(hosts) is not str:
        # Lists, etc would silently mangle /etc/exports
        raise TypeError('hosts argument must be a string')
    edict = list_exports(exports)
    if path not in edict:
        edict[path] = []
    new = {'hosts': hosts, 'options': options}
    edict[path].append(new)
    _write_exports(exports, edict)

    return new


def _write_exports(exports, edict):
    '''
    Write an exports file to disk
@ -90,7 +122,29 @@ def _write_exports(exports, edict):
        for export in edict:
            line = export
            for perms in edict[export]:
                hosts = ','.join(perms['hosts'])
                hosts = perms['hosts']
                options = ','.join(perms['options'])
                line += ' {0}({1})'.format(hosts, options)
            efh.write('{0}\n'.format(line))


def reload_exports():
    '''
    Trigger a reload of the exports file to apply changes

    CLI Example:

    .. code-block:: bash

        salt '*' nfs3.reload_exports
    '''
    ret = {}

    command = 'exportfs -r'

    output = __salt__['cmd.run_all'](command)
    ret['stdout'] = output['stdout']
    ret['stderr'] = output['stderr']
    ret['result'] = not output['retcode']

    return ret
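Taken together, add_export and _write_exports round-trip a standard exports
entry. For example, the documented call

    add_export(path='/srv/test', hosts='127.0.0.1', options=['rw'])

ends up written to /etc/exports as the line:

    /srv/test 127.0.0.1(rw)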
@ -539,6 +539,7 @@ def install(name=None,
        cmd.append('pacman')

    errors = []
    targets = []
    if pkg_type == 'file':
        cmd.extend(['-U', '--noprogressbar', '--noconfirm'])
        cmd.extend(pkg_params)
@ -549,7 +550,6 @@ def install(name=None,
        if sysupgrade:
            cmd.append('-u')
        cmd.extend(['--noprogressbar', '--noconfirm', '--needed'])
        targets = []
        wildcards = []
        for param, version_num in six.iteritems(pkg_params):
            if version_num is None:
@ -58,6 +58,7 @@ log = logging.getLogger(__name__)
# Import salt libs
from salt.exceptions import CommandExecutionError
import salt.utils.files
import salt.utils.path

# Function alias to not shadow built-ins.
__func_alias__ = {
@ -95,7 +96,7 @@ def __virtual__():
        global __virtualname__
        __virtualname__ = 'service'
        return __virtualname__
    if salt.utils.which('sv'):
    if salt.utils.path.which('sv'):
        return __virtualname__
    return (False, 'Runit not available. Please install sv')
602
salt/modules/saltcheck.py
Normal file
@ -0,0 +1,602 @@
# -*- coding: utf-8 -*-
'''
A module for testing the logic of states and highstates

Saltcheck provides unittest-like functionality requiring only the knowledge of salt module execution and yaml.

In order to run state and highstate saltcheck tests, a sub-folder of a state must be created and named "saltcheck-tests".

Tests for a state should be created in files ending in *.tst and placed in the saltcheck-tests folder.

Multiple tests can be created in a file.
Multiple *.tst files can be created in the saltcheck-tests folder.
The "id" of a test works in the same manner as in salt state files.
They should be unique and descriptive.

Example file system layout:
    /srv/salt/apache/
        init.sls
        config.sls
        saltcheck-tests/
            pkg_and_mods.tst
            config.tst


Saltcheck Test Syntax:

Unique-ID:
  module_and_function:
  args:
  kwargs:
  assertion:
  expected-return:


Example test 1:

echo-test-hello:
  module_and_function: test.echo
  args:
    - "hello"
  kwargs:
  assertion: assertEqual
  expected-return: 'hello'
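Example test 2 (hypothetical: the test id, path, and pattern are invented, and it
assumes the file.search execution module is available on the minion):

grep-motd-welcome:
  module_and_function: file.search
  args:
    - /etc/motd
    - "welcome"
  kwargs:
  assertion: assertTrue
  expected-return: True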

:codeauthor: William Cannon <william.cannon@gmail.com>
:maturity: new
'''
from __future__ import absolute_import
import logging
import os
import time
import yaml
try:
    import salt.utils
    import salt.client
    import salt.exceptions
except ImportError:
    pass

log = logging.getLogger(__name__)

__virtualname__ = 'saltcheck'


def __virtual__():
    '''
    Check dependencies - may be useful in future
    '''
    return __virtualname__


def update_master_cache():
    '''
    Updates the master cache onto the minion - transfers all saltcheck-tests
    Should be done one time before running tests, and if tests are updated
    Can be automated by setting "auto_update_master_cache: True" in minion config

    CLI Example:
        salt '*' saltcheck.update_master_cache
    '''
    __salt__['cp.cache_master']()
    return True
def run_test(**kwargs):
    '''
    Execute one saltcheck test and return result

    :param keyword arg test:

    CLI Example::
        salt '*' saltcheck.run_test
            test='{"module_and_function": "test.echo",
                   "assertion": "assertEqual",
                   "expected-return": "This works!",
                   "args":["This works!"] }'
    '''
    # salt converts the string to a dictionary auto-magically
    scheck = SaltCheck()
    test = kwargs.get('test', None)
    if test and isinstance(test, dict):
        return scheck.run_test(test)
    else:
        return "Test must be a dictionary"
def run_state_tests(state):
    '''
    Execute all tests for a salt state and return results
    Nested states will also be tested

    :param str state: the name of a user defined state

    CLI Example::
        salt '*' saltcheck.run_state_tests postfix
    '''
    scheck = SaltCheck()
    paths = scheck.get_state_search_path_list()
    stl = StateTestLoader(search_paths=paths)
    results = {}
    sls_list = _get_state_sls(state)
    for state_name in sls_list:
        mypath = stl.convert_sls_to_path(state_name)
        stl.add_test_files_for_sls(mypath)
        stl.load_test_suite()
        results_dict = {}
        for key, value in stl.test_dict.items():
            result = scheck.run_test(value)
            results_dict[key] = result
        results[state_name] = results_dict
    passed = 0
    failed = 0
    missing_tests = 0
    for state in results:
        if len(results[state].items()) == 0:
            missing_tests = missing_tests + 1
        else:
            for dummy, val in results[state].items():
                log.info("dummy={}, val={}".format(dummy, val))
                if val.startswith('Pass'):
                    passed = passed + 1
                if val.startswith('Fail'):
                    failed = failed + 1
    out_list = []
    for key, value in results.items():
        out_list.append({key: value})
    out_list.sort()
    out_list.append({"TEST RESULTS": {'Passed': passed, 'Failed': failed, 'Missing Tests': missing_tests}})
    return out_list
def run_highstate_tests():
    '''
    Execute all tests for a salt highstate and return results

    CLI Example::
        salt '*' saltcheck.run_highstate_tests
    '''
    scheck = SaltCheck()
    paths = scheck.get_state_search_path_list()
    stl = StateTestLoader(search_paths=paths)
    results = {}
    sls_list = _get_top_states()
    all_states = []
    for top_state in sls_list:
        sls_list = _get_state_sls(top_state)
        for state in sls_list:
            if state not in all_states:
                all_states.append(state)

    for state_name in all_states:
        mypath = stl.convert_sls_to_path(state_name)
        stl.add_test_files_for_sls(mypath)
        stl.load_test_suite()
        results_dict = {}
        for key, value in stl.test_dict.items():
            result = scheck.run_test(value)
            results_dict[key] = result
        results[state_name] = results_dict
    passed = 0
    failed = 0
    missing_tests = 0
    for state in results:
        if len(results[state].items()) == 0:
            missing_tests = missing_tests + 1
        else:
            for dummy, val in results[state].items():
                log.info("dummy={}, val={}".format(dummy, val))
                if val.startswith('Pass'):
                    passed = passed + 1
                if val.startswith('Fail'):
                    failed = failed + 1
    out_list = []
    for key, value in results.items():
        out_list.append({key: value})
    out_list.sort()
    out_list.append({"TEST RESULTS": {'Passed': passed, 'Failed': failed, 'Missing Tests': missing_tests}})
    return out_list
def _is_valid_module(module):
    '''Return whether the module is available on the minion'''
    modules = __salt__['sys.list_modules']()
    return bool(module in modules)


def _get_auto_update_cache_value():
    '''return the config value of auto_update_master_cache'''
    return __salt__['config.get']('auto_update_master_cache')


def _is_valid_function(module_name, function):
    '''Determine if a function is valid for a module'''
    try:
        functions = __salt__['sys.list_functions'](module_name)
    except salt.exceptions.SaltException:
        functions = ["unable to look up functions"]
    return "{0}.{1}".format(module_name, function) in functions


def _get_top_states():
    ''' equivalent to a salt cli: salt web state.show_top'''
    alt_states = []
    try:
        returned = __salt__['state.show_top']()
        for i in returned['base']:
            alt_states.append(i)
    except Exception:
        raise
    # log.info("top states: {}".format(alt_states))
    return alt_states


def _get_state_sls(state):
    ''' equivalent to a salt cli: salt web state.show_low_sls STATE'''
    sls_list_state = []
    try:
        returned = __salt__['state.show_low_sls'](state)
        for i in returned:
            if i['__sls__'] not in sls_list_state:
                sls_list_state.append(i['__sls__'])
    except Exception:
        raise
    return sls_list_state
class SaltCheck(object):
|
||||
'''
|
||||
This class implements the saltcheck
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
# self.sls_list_top = []
|
||||
self.sls_list_state = []
|
||||
self.modules = []
|
||||
self.results_dict = {}
|
||||
self.results_dict_summary = {}
|
||||
self.assertions_list = '''assertEqual assertNotEqual
|
||||
assertTrue assertFalse
|
||||
assertIn assertNotIn
|
||||
assertGreater
|
||||
assertGreaterEqual
|
||||
assertLess assertLessEqual'''.split()
|
||||
self.auto_update_master_cache = _get_auto_update_cache_value
|
||||
# self.salt_lc = salt.client.Caller(mopts=__opts__)
|
||||
self.salt_lc = salt.client.Caller()
|
||||
if self.auto_update_master_cache:
|
||||
update_master_cache()
|
||||
|
||||
def __is_valid_test(self, test_dict):
|
||||
'''Determine if a test contains:
|
||||
a test name,
|
||||
a valid module and function,
|
||||
a valid assertion,
|
||||
an expected return value'''
|
||||
tots = 0 # need total of >= 6 to be a valid test
|
||||
m_and_f = test_dict.get('module_and_function', None)
|
||||
assertion = test_dict.get('assertion', None)
|
||||
expected_return = test_dict.get('expected-return', None)
|
||||
log.info("__is_valid_test has test: {}".format(test_dict))
|
||||
if m_and_f:
|
||||
tots += 1
|
||||
module, function = m_and_f.split('.')
|
||||
if _is_valid_module(module):
|
||||
tots += 1
|
||||
if _is_valid_function(module, function):
|
||||
tots += 1
|
||||
log.info("__is_valid_test has valid m_and_f")
|
||||
if assertion:
|
||||
tots += 1
|
||||
if assertion in self.assertions_list:
|
||||
tots += 1
|
||||
log.info("__is_valid_test has valid_assertion")
|
||||
if expected_return:
|
||||
tots += 1
|
||||
log.info("__is_valid_test has valid_expected_return")
|
||||
log.info("__is_valid_test score: {}".format(tots))
|
||||
return tots >= 6
|
||||
|
||||
def call_salt_command(self,
|
||||
fun,
|
||||
args,
|
||||
kwargs):
|
||||
'''Generic call of salt Caller command'''
|
||||
value = False
|
||||
try:
|
||||
if args and kwargs:
|
||||
value = self.salt_lc.cmd(fun, *args, **kwargs)
|
||||
elif args and not kwargs:
|
||||
value = self.salt_lc.cmd(fun, *args)
|
||||
elif not args and kwargs:
|
||||
value = self.salt_lc.cmd(fun, **kwargs)
|
||||
else:
|
||||
value = self.salt_lc.cmd(fun)
|
||||
except salt.exceptions.SaltException:
|
||||
raise
|
||||
except Exception:
|
||||
raise
|
||||
return value
|
||||
|
||||
def run_test(self, test_dict):
|
||||
'''Run a single saltcheck test'''
|
||||
if self.__is_valid_test(test_dict):
|
||||
mod_and_func = test_dict['module_and_function']
|
||||
args = test_dict.get('args', None)
|
||||
kwargs = test_dict.get('kwargs', None)
|
||||
assertion = test_dict['assertion']
|
||||
expected_return = test_dict['expected-return']
|
||||
actual_return = self.call_salt_command(mod_and_func, args, kwargs)
|
||||
if assertion != "assertIn":
|
||||
expected_return = self.cast_expected_to_returned_type(expected_return, actual_return)
|
||||
if assertion == "assertEqual":
|
||||
value = self.__assert_equal(expected_return, actual_return)
|
||||
elif assertion == "assertNotEqual":
|
||||
value = self.__assert_not_equal(expected_return, actual_return)
|
||||
elif assertion == "assertTrue":
|
||||
value = self.__assert_true(expected_return)
|
||||
elif assertion == "assertFalse":
|
||||
value = self.__assert_false(expected_return)
|
||||
elif assertion == "assertIn":
|
||||
value = self.__assert_in(expected_return, actual_return)
|
||||
elif assertion == "assertNotIn":
|
||||
value = self.__assert_not_in(expected_return, actual_return)
|
||||
elif assertion == "assertGreater":
|
||||
value = self.__assert_greater(expected_return, actual_return)
|
||||
elif assertion == "assertGreaterEqual":
|
||||
value = self.__assert_greater_equal(expected_return, actual_return)
|
||||
elif assertion == "assertLess":
|
||||
value = self.__assert_less(expected_return, actual_return)
|
||||
elif assertion == "assertLessEqual":
|
||||
value = self.__assert_less_equal(expected_return, actual_return)
|
||||
else:
|
||||
value = "Fail - bas assertion"
|
||||
else:
|
||||
return "Fail - invalid test"
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def cast_expected_to_returned_type(expected, returned):
|
||||
'''
|
||||
Determine the type of variable returned
|
||||
Cast the expected to the type of variable returned
|
||||
'''
|
||||
ret_type = type(returned)
|
||||
new_expected = expected
|
||||
if expected == "False" and ret_type == bool:
|
||||
expected = False
|
||||
try:
|
||||
new_expected = ret_type(expected)
|
||||
except ValueError:
|
||||
log.info("Unable to cast expected into type of returned")
|
||||
log.info("returned = {}".format(returned))
|
||||
log.info("type of returned = {}".format(type(returned)))
|
||||
log.info("expected = {}".format(expected))
|
||||
log.info("type of expected = {}".format(type(expected)))
|
||||
return new_expected
|
||||
|
||||
@staticmethod
|
||||
def __assert_equal(expected, returned):
|
||||
'''
|
||||
Test if two objects are equal
|
||||
'''
|
||||
result = "Pass"
|
||||
|
||||
try:
|
||||
assert (expected == returned), "{0} is not equal to {1}".format(expected, returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_not_equal(expected, returned):
|
||||
'''
|
||||
Test if two objects are not equal
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected != returned), "{0} is equal to {1}".format(expected, returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_true(returned):
|
||||
'''
|
||||
Test if an boolean is True
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (returned is True), "{0} not True".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_false(returned):
|
||||
'''
|
||||
Test if an boolean is False
|
||||
'''
|
||||
result = "Pass"
|
||||
if isinstance(returned, str):
|
||||
try:
|
||||
returned = bool(returned)
|
||||
except ValueError:
|
||||
raise
|
||||
try:
|
||||
assert (returned is False), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_in(expected, returned):
|
||||
'''
|
||||
Test if a value is in the list of returned values
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected in returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_not_in(expected, returned):
|
||||
'''
|
||||
Test if a value is not in the list of returned values
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected not in returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_greater(expected, returned):
|
||||
'''
|
||||
Test if a value is greater than the returned value
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected > returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_greater_equal(expected, returned):
|
||||
'''
|
||||
Test if a value is greater than or equal to the returned value
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected >= returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_less(expected, returned):
|
||||
'''
|
||||
Test if a value is less than the returned value
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected < returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def __assert_less_equal(expected, returned):
|
||||
'''
|
||||
Test if a value is less than or equal to the returned value
|
||||
'''
|
||||
result = "Pass"
|
||||
try:
|
||||
assert (expected <= returned), "{0} not False".format(returned)
|
||||
except AssertionError as err:
|
||||
result = "Fail: " + str(err)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def get_state_search_path_list():
|
||||
'''For the state file system, return a
|
||||
list of paths to search for states'''
|
||||
# state cache should be updated before running this method
|
||||
search_list = []
|
||||
cachedir = __opts__.get('cachedir', None)
|
||||
environment = __opts__['environment']
|
||||
if environment:
|
||||
path = cachedir + os.sep + "files" + os.sep + environment
|
||||
search_list.append(path)
|
||||
path = cachedir + os.sep + "files" + os.sep + "base"
|
||||
search_list.append(path)
|
||||
return search_list
|
||||
|
||||
|
||||
class StateTestLoader(object):
|
||||
'''
|
||||
Class loads in test files for a state
|
||||
e.g. state_dir/saltcheck-tests/[1.tst, 2.tst, 3.tst]
|
||||
'''
|
||||
|
||||
def __init__(self, search_paths):
|
||||
self.search_paths = search_paths
|
||||
self.path_type = None
|
||||
self.test_files = [] # list of file paths
|
||||
self.test_dict = {}
|
||||
|
||||
def load_test_suite(self):
|
||||
'''load tests either from one file, or a set of files'''
|
||||
self.test_dict = {}
|
||||
for myfile in self.test_files:
|
||||
self.load_file(myfile)
|
||||
self.test_files = []
|
||||
|
||||
def load_file(self, filepath):
|
||||
'''
|
||||
loads in one test file
|
||||
'''
|
||||
try:
|
||||
with salt.utils.files.fopen(filepath, 'r') as myfile:
|
||||
# with open(filepath, 'r') as myfile:
|
||||
contents_yaml = yaml.load(myfile)
|
||||
for key, value in contents_yaml.items():
|
||||
self.test_dict[key] = value
|
||||
except:
|
||||
raise
|
||||
return
|
||||
|
||||
def gather_files(self, filepath):
|
||||
'''gather files for a test suite'''
|
||||
self.test_files = []
|
||||
log.info("gather_files: {}".format(time.time()))
|
||||
filepath = filepath + os.sep + 'saltcheck-tests'
|
||||
rootdir = filepath
|
||||
# for dirname, subdirlist, filelist in os.walk(rootdir):
|
||||
for dirname, dummy, filelist in os.walk(rootdir):
|
||||
for fname in filelist:
|
||||
if fname.endswith('.tst'):
|
||||
start_path = dirname + os.sep + fname
|
||||
full_path = os.path.abspath(start_path)
|
||||
self.test_files.append(full_path)
|
||||
return
|
||||
|
||||
@staticmethod
|
||||
def convert_sls_to_paths(sls_list):
|
||||
'''Converting sls to paths'''
|
||||
new_sls_list = []
|
||||
for sls in sls_list:
|
||||
sls = sls.replace(".", os.sep)
|
||||
new_sls_list.append(sls)
|
||||
return new_sls_list
|
||||
|
||||
@staticmethod
|
||||
def convert_sls_to_path(sls):
|
||||
'''Converting sls to paths'''
|
||||
sls = sls.replace(".", os.sep)
|
||||
return sls
|
||||
|
||||
def add_test_files_for_sls(self, sls_path):
|
||||
'''Adding test files'''
|
||||
for path in self.search_paths:
|
||||
full_path = path + os.sep + sls_path
|
||||
rootdir = full_path
|
||||
if os.path.isdir(full_path):
|
||||
log.info("searching path= {}".format(full_path))
|
||||
# for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True):
|
||||
for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True):
|
||||
if "saltcheck-tests" in subdirlist:
|
||||
self.gather_files(dirname)
|
||||
log.info("test_files list: {}".format(self.test_files))
|
||||
log.info("found subdir match in = {}".format(dirname))
|
||||
else:
|
||||
log.info("did not find subdir match in = {}".format(dirname))
|
||||
del subdirlist[:]
|
||||
else:
|
||||
log.info("path is not a directory= {}".format(full_path))
|
||||
return
|
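The loader above feeds every entry of ``test_dict`` to ``run_test``, and ``__is_valid_test`` scores the five fields a test must carry. A minimal sketch of one ``*.tst`` entry, parsed the same way ``load_file`` parses it (the test name ``echo_check`` and its values are illustrative assumptions, not taken from this commit):

.. code-block:: python

    import textwrap
    import yaml

    # Hypothetical saltcheck test file (e.g. apache/saltcheck-tests/init.tst);
    # the test name and values are made up for illustration.
    SAMPLE_TST = textwrap.dedent('''\
        echo_check:
          module_and_function: test.echo
          args:
            - "hello"
          kwargs:
          assertion: assertEqual
          expected-return: "hello"
    ''')

    test_dict = yaml.safe_load(SAMPLE_TST)
    for name, test in test_dict.items():
        # These are the fields __is_valid_test scores before run_test executes.
        print(name, test['module_and_function'], test['assertion'])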
@@ -2,12 +2,15 @@
'''
Utility functions for use with or in SLS files
'''

# Import Python libs
from __future__ import absolute_import

# Import Salt libs
import salt.exceptions
import salt.loader
import salt.template
import salt.utils
import salt.utils.args
import salt.utils.dictupdate


@@ -172,7 +175,7 @@ def serialize(serializer, obj, **mod_kwargs):

        {% set json_string = salt.slsutil.serialize('json',
            {'foo': 'Foo!'}) %}
    '''
    kwargs = salt.utils.clean_kwargs(**mod_kwargs)
    kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    return _get_serialize_fn(serializer, 'serialize')(obj, **kwargs)


@@ -196,6 +199,6 @@ def deserialize(serializer, stream_or_string, **mod_kwargs):

        {% set python_object = salt.slsutil.deserialize('json',
            '{"foo": "Foo!"}') %}
    '''
    kwargs = salt.utils.clean_kwargs(**mod_kwargs)
    kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    return _get_serialize_fn(serializer, 'deserialize')(stream_or_string,
                                                        **kwargs)
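Both hunks above swap ``salt.utils.clean_kwargs`` for its new home in ``salt.utils.args``. Its job at these call sites is to strip the bookkeeping keyword arguments Salt injects before the remaining kwargs reach a serializer. A stand-in sketch written from the call sites above, not from the Salt source:

.. code-block:: python

    def clean_kwargs(**kwargs):
        # Minimal stand-in for salt.utils.args.clean_kwargs: keep only the
        # caller-supplied keyword arguments, dropping Salt-injected ones,
        # which by convention start with a double underscore.
        return {k: v for k, v in kwargs.items() if not k.startswith('__')}

    print(clean_kwargs(indent=2, __pub_fun='slsutil.serialize'))
    # -> {'indent': 2}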
@@ -98,8 +98,7 @@ def _set_retcode(ret, highstate=None):
    if isinstance(ret, list):
        __context__['retcode'] = 1
        return
    if not salt.utils.check_state_result(ret, highstate=highstate):
    if not __utils__['state.check_result'](ret, highstate=highstate):
        __context__['retcode'] = 2


@@ -273,12 +272,12 @@ def _get_opts(**kwargs):
        else:
            opts['environment'] = kwargs['saltenv']

    if 'pillarenv' in kwargs:
        pillarenv = kwargs['pillarenv']
    if 'pillarenv' in kwargs or opts.get('pillarenv_from_saltenv', False):
        pillarenv = kwargs.get('pillarenv') or kwargs.get('saltenv')
        if pillarenv is not None and not isinstance(pillarenv, six.string_types):
            opts['pillarenv'] = str(kwargs['pillarenv'])
            opts['pillarenv'] = str(pillarenv)
        else:
            opts['pillarenv'] = kwargs['pillarenv']
            opts['pillarenv'] = pillarenv

    return opts


@@ -316,7 +315,7 @@ def low(data, queue=False, **kwargs):
    ret = st_.call(data)
    if isinstance(ret, list):
        __context__['retcode'] = 1
    if salt.utils.check_state_result(ret):
    if __utils__['state.check_result'](ret):
        __context__['retcode'] = 2
    return ret


@@ -1463,6 +1462,17 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
    saltenv
        Specify a salt fileserver environment to be used when applying states

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. code-block:: bash

            salt '*' state.show_low_sls test pillar='{"foo": "bar"}'

        .. note::
            Values passed this way will override Pillar values set via
            ``pillar_roots`` or an external Pillar source.

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
@@ -1497,12 +1507,26 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
    # the 'base' saltenv if none is configured and none was passed.
    if opts['environment'] is None:
        opts['environment'] = 'base'

    pillar_override = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None \
            and pillar_override is not None \
            and not isinstance(pillar_override, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary, unless pillar_enc '
            'is specified.'
        )

    try:
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
        __context__['retcode'] = 5
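The ``_get_opts`` hunk above wires in the ``pillarenv_from_saltenv`` option: when it is enabled and no explicit ``pillarenv`` kwarg arrives, the Pillar environment now follows ``saltenv``. A condensed sketch of just that precedence (it folds the string-coercion branch into one line):

.. code-block:: python

    def resolve_pillarenv(kwargs, opts):
        # An explicit pillarenv kwarg wins; otherwise pillarenv_from_saltenv
        # lets the saltenv kwarg double as the Pillar environment.
        if 'pillarenv' in kwargs or opts.get('pillarenv_from_saltenv', False):
            pillarenv = kwargs.get('pillarenv') or kwargs.get('saltenv')
            return None if pillarenv is None else str(pillarenv)
        return opts.get('pillarenv')

    assert resolve_pillarenv({'saltenv': 'dev'}, {'pillarenv_from_saltenv': True}) == 'dev'
    assert resolve_pillarenv({'pillarenv': 'prod', 'saltenv': 'dev'}, {}) == 'prod'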
@@ -15,6 +15,7 @@ import random
# Import Salt libs
import salt
import salt.utils
import salt.utils.args
import salt.utils.hashutils
import salt.utils.platform
import salt.version
@@ -323,7 +324,7 @@ def arg_clean(*args, **kwargs):

        salt '*' test.arg_clean 1 "two" 3.1 txt="hello" wow='{a: 1, b: "hello"}'
    '''
    return dict(args=args, kwargs=salt.utils.clean_kwargs(**kwargs))
    return dict(args=args, kwargs=salt.utils.args.clean_kwargs(**kwargs))


def fib(num):

@@ -837,6 +837,9 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
    # IIS 7.5 and earlier have different syntax for associating a certificate with a site
    # Modify IP spec to IIS 7.5 format
    iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
    # win 2008 uses the following format: ip!port and not ip!port!
    if iis7path.endswith("!"):
        iis7path = iis7path[:-1]

    ps_cmd = ['New-Item',
              '-Path', "'{0}'".format(iis7path),
@@ -1255,6 +1258,9 @@ def set_container_setting(name, container, settings):
        salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
            settings="{'managedPipeLineMode': 'Integrated'}"
    '''

    identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
    identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
    ps_cmd = list()
    container_path = r"IIS:\{0}\{1}".format(container, name)

@@ -1281,6 +1287,10 @@ def set_container_setting(name, container, settings):
            except ValueError:
                value = "'{0}'".format(settings[setting])

        # Map to numeric to support server 2008
        if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
            value = identityType_map2numeric[settings[setting]]

        ps_cmd.extend(['Set-ItemProperty',
                       '-Path', "'{0}'".format(container_path),
                       '-Name', "'{0}'".format(setting),
@@ -1300,6 +1310,10 @@ def set_container_setting(name, container, settings):
    failed_settings = dict()

    for setting in settings:
        # map identity type from numeric to string for comparing
        if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
            settings[setting] = identityType_map2string[settings[setting]]

        if str(settings[setting]) != str(new_settings[setting]):
            failed_settings[setting] = settings[setting]

@@ -2834,7 +2834,8 @@ def _findOptionValueInSeceditFile(option):
        _reader = codecs.open(_tfile, 'r', encoding='utf-16')
        _secdata = _reader.readlines()
        _reader.close()
        _ret = __salt__['file.remove'](_tfile)
        if __salt__['file.file_exists'](_tfile):
            _ret = __salt__['file.remove'](_tfile)
        for _line in _secdata:
            if _line.startswith(option):
                return True, _line.split('=')[1].strip()
@@ -2855,16 +2856,20 @@ def _importSeceditConfig(infdata):
        _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
                                      'salt-secedit-config-{0}.inf'.format(_d))
        # make sure our temp files don't already exist
        _ret = __salt__['file.remove'](_tSdbfile)
        _ret = __salt__['file.remove'](_tInfFile)
        if __salt__['file.file_exists'](_tSdbfile):
            _ret = __salt__['file.remove'](_tSdbfile)
        if __salt__['file.file_exists'](_tInfFile):
            _ret = __salt__['file.remove'](_tInfFile)
        # add the inf data to the file, win_file sure could use the write() function
        _ret = __salt__['file.touch'](_tInfFile)
        _ret = __salt__['file.append'](_tInfFile, infdata)
        # run secedit to make the change
        _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile))
        # cleanup our temp files
        _ret = __salt__['file.remove'](_tSdbfile)
        _ret = __salt__['file.remove'](_tInfFile)
        if __salt__['file.file_exists'](_tSdbfile):
            _ret = __salt__['file.remove'](_tSdbfile)
        if __salt__['file.file_exists'](_tInfFile):
            _ret = __salt__['file.remove'](_tInfFile)
        return True
    except Exception as e:
        log.debug('error occurred while trying to import secedit data')
@@ -4173,8 +4178,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
    existing_data = ''
    base_policy_settings = {}
    policy_data = _policy_info()
    #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]
    #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]')
    policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]'
    try:
        if admx_policy_definitions is None or adml_policy_resources is None:
@@ -4205,8 +4208,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
                this_valuename = None
                if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled':
                    log.debug('time to disable {0}'.format(admPolicy))
                    #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace})
                    this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace})
                    this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
                    if this_policy:
                        this_policy = this_policy[0]
                        if 'class' in this_policy.attrib:
@@ -4317,7 +4319,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
                        log.error(msg.format(this_policy.attrib))
                else:
                    log.debug('time to enable and set the policy "{0}"'.format(admPolicy))
                    #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace})
                    this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
                    log.debug('found this_policy == {0}'.format(this_policy))
                    if this_policy:
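The paired ``xpath`` lines in the last two hunks above are a real bug fix, not a cleanup: the search template only defines a ``{0}`` placeholder, so the old call's leading ``'ns1'`` argument was what got substituted and the policy id never reached the query. A standalone illustration (``SomePolicy`` is a made-up id):

.. code-block:: python

    policy_search_xpath = '//ns1:*[@id = "{0}" or @name = "{0}"]'

    # Old call: the extra 'ns1' argument fills {0}; the policy id is ignored.
    print(policy_search_xpath.format('ns1', 'SomePolicy'))
    # //ns1:*[@id = "ns1" or @name = "ns1"]

    # Fixed call: the policy id lands in the query.
    print(policy_search_xpath.format('SomePolicy'))
    # //ns1:*[@id = "SomePolicy" or @name = "SomePolicy"]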
@@ -977,7 +977,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
        # Version is ignored
        salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3

    If passed with a comma seperated list in the ``name`` parameter, the
    If passed with a comma separated list in the ``name`` parameter, the
    version will apply to all packages in the list.

    CLI Example:
@@ -1286,7 +1286,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
    use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))

    # Build cmd and arguments
    # cmd and arguments must be seperated for use with the task scheduler
    # cmd and arguments must be separated for use with the task scheduler
    if use_msiexec:
        cmd = msiexec
        arguments = ['/i', cached_pkg]
@@ -1328,7 +1328,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):

    # Run Scheduled Task
    # Special handling for installing salt
    if pkg_name in ['salt-minion', 'salt-minion-py3']:
    if re.search(r'salt[\s_.-]*minion',
                 pkg_name,
                 flags=re.IGNORECASE + re.UNICODE) is not None:
        ret[pkg_name] = {'install status': 'task started'}
        if not __salt__['task.run'](name='update-salt-software'):
            log.error('Failed to install {0}'.format(pkg_name))
@@ -1360,7 +1362,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
    else:

        # Combine cmd and arguments
        cmd = [cmd].extend(arguments)
        cmd = [cmd]
        cmd.extend(arguments)

        # Launch the command
        result = __salt__['cmd.run_all'](cmd,
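The final hunk above fixes a classic Python slip: ``list.extend`` mutates in place and returns ``None``, so the old one-liner bound ``cmd`` to ``None`` before it ever reached ``cmd.run_all``. A minimal demonstration with made-up values:

.. code-block:: python

    arguments = ['/i', 'C:\\cache\\pkg.msi']  # hypothetical installer arguments

    # The buggy form: extend() returns None, so the freshly built list is lost.
    cmd = ['msiexec'].extend(arguments)
    print(cmd)  # None

    # The fixed form keeps the list, then extends it in place.
    cmd = ['msiexec']
    cmd.extend(arguments)
    print(cmd)  # ['msiexec', '/i', 'C:\\cache\\pkg.msi']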
@@ -170,7 +170,11 @@ def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
        if key not in blacklist_keys:
            cert_info[key.lower()] = item[key]

        cert_info['dnsnames'] = [name.get('Unicode') for name in item.get('DnsNameList', {})]
        names = item.get('DnsNameList', None)
        if isinstance(names, list):
            cert_info['dnsnames'] = [name.get('Unicode') for name in names]
        else:
            cert_info['dnsnames'] = []
        ret[item['Thumbprint']] = cert_info
    return ret
@@ -2834,7 +2834,7 @@ def _parse_repo_file(filename):

    # Try to extract leading comments
    headers = ''
    with salt.utils.fopen(filename, 'r') as rawfile:
    with salt.utils.files.fopen(filename, 'r') as rawfile:
        for line in rawfile:
            if line.strip().startswith('#'):
                headers += '{0}\n'.format(line.strip())
@@ -20,7 +20,7 @@ try:
except ImportError as exc:
    cpy_error = exc

__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy'
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_cherrypy'

logger = logging.getLogger(__virtualname__)
cpy_min = '3.2.2'
@@ -10,7 +10,7 @@ import os
import salt.auth
from salt.utils.versions import StrictVersion as _StrictVersion

__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado'
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_tornado'

logger = logging.getLogger(__virtualname__)
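Both netapi hunks above replace a hard-coded ``'/'`` with ``os.sep`` when deriving ``__virtualname__`` from the module's path. On Windows the literal slash never matches an absolute path, so ``rsplit('/')`` yields a single-element list and the ``[-2]`` index raises. A quick illustration (paths are hypothetical):

.. code-block:: python

    import os

    win_path = 'C:\\salt\\netapi\\rest_cherrypy\\app.py'
    try:
        win_path.rsplit('/')[-2]
    except IndexError:
        print('a literal "/" never splits a Windows path')

    # os.sep picks the platform's separator; on a POSIX host this prints
    # the parent directory name, matching the fixed code's intent.
    posix_path = '/srv/salt/netapi/rest_cherrypy/app.py'
    print(posix_path.rsplit(os.sep)[-2])  # 'rest_cherrypy' on POSIX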
@@ -108,7 +108,7 @@ import pprint
import textwrap

# Import salt libs
import salt.utils
import salt.utils.color
import salt.utils.stringutils
import salt.output
from salt.utils.locales import sdecode
@@ -158,7 +158,7 @@ def output(data, **kwargs):  # pylint: disable=unused-argument
def _format_host(host, data):
    host = sdecode(host)

    colors = salt.utils.get_colors(
    colors = salt.utils.color.get_colors(
        __opts__.get('color'),
        __opts__.get('color_theme'))
    tabular = __opts__.get('state_tabular', False)
@@ -8,9 +8,9 @@ The ``salt-key`` command makes use of this outputter to format its output.
from __future__ import absolute_import

# Import salt libs
import salt.utils
import salt.output
from salt.utils.locales import sdecode
import salt.utils.color


def output(data, **kwargs):  # pylint: disable=unused-argument
@@ -18,7 +18,7 @@ def output(data, **kwargs):  # pylint: disable=unused-argument
    Read in the dict structure generated by the salt key API methods and
    print the structure.
    '''
    color = salt.utils.get_colors(
    color = salt.utils.color.get_colors(
        __opts__.get('color'),
        __opts__.get('color_theme'))
    strip_colors = __opts__.get('strip_colors', True)
|
||||
|
||||
# Import salt libs
|
||||
import salt.output
|
||||
import salt.utils.color
|
||||
import salt.utils.locales
|
||||
import salt.utils.odict
|
||||
from salt.utils import get_colors
|
||||
from salt.ext.six import string_types
|
||||
|
||||
|
||||
@ -41,7 +41,7 @@ class NestDisplay(object):
|
||||
'''
|
||||
def __init__(self):
|
||||
self.__dict__.update(
|
||||
get_colors(
|
||||
salt.utils.color.get_colors(
|
||||
__opts__.get('color'),
|
||||
__opts__.get('color_theme')
|
||||
)
|
||||
|
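Every outputter hunk in this stretch is the same mechanical move: ``get_colors`` now lives in ``salt.utils.color``. Third-party outputters that must straddle both layouts can tolerate either import path; a sketch of that shim (an assumption on my part, not something this commit adds):

.. code-block:: python

    try:
        # Post-reorganization location used by the hunks above.
        from salt.utils.color import get_colors
    except ImportError:
        # Older releases kept it directly in salt.utils.
        from salt.utils import get_colors

    colors = get_colors(True)  # positional 'use' flag, as at the call sites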
@@ -15,7 +15,7 @@ Example output::
from __future__ import absolute_import

# Import salt libs
import salt.utils
import salt.utils.color

# Import 3rd-party libs
from salt.ext import six
@@ -26,7 +26,7 @@ class NestDisplay(object):
    Create generator for nested output
    '''
    def __init__(self):
        self.colors = salt.utils.get_colors(
        self.colors = salt.utils.color.get_colors(
            __opts__.get(u'color'),
            __opts__.get(u'color_theme'))


@@ -11,7 +11,7 @@ and should not be called directly.
from __future__ import absolute_import

# Import Salt libs
import salt.utils
import salt.utils.color

# Import 3rd-party libs
from salt.ext import six
@@ -27,7 +27,7 @@ def output(data, **kwargs):  # pylint: disable=unused-argument
    '''
    Format the data for printing stage information from the overstate system
    '''
    colors = salt.utils.get_colors(
    colors = salt.utils.color.get_colors(
        __opts__.get('color'),
        __opts__.get('color_theme'))
    ostr = ''

@@ -42,12 +42,10 @@ from functools import reduce  # pylint: disable=redefined-builtin

# Import salt libs
import salt.output
import salt.utils.locales
from salt.ext.six import string_types
from salt.utils import get_colors
from salt.ext.six.moves import map  # pylint: disable=redefined-builtin
from salt.ext.six.moves import zip  # pylint: disable=redefined-builtin

from salt.ext.six.moves import map, zip  # pylint: disable=redefined-builtin
import salt.utils.color
import salt.utils.locales

__virtualname__ = 'table'

@@ -78,7 +76,7 @@ class TableDisplay(object):
                 width=50,        # column max width
                 wrapfunc=None):  # function wrapper
        self.__dict__.update(
            get_colors(
            salt.utils.color.get_colors(
                __opts__.get('color'),
                __opts__.get('color_theme')
            )
@@ -233,7 +233,7 @@ class PillarCache(object):
                              functions=self.functions,
                              pillar_override=self.pillar_override,
                              pillarenv=self.pillarenv)
        return fresh_pillar.compile_pillar()  # FIXME We are not yet passing pillar_dirs in here
        return fresh_pillar.compile_pillar()

    def compile_pillar(self, *args, **kwargs):  # Will likely just be pillar_dirs
        log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
@@ -763,7 +763,7 @@ class Pillar(object):

        return pillar, errors

    def _external_pillar_data(self, pillar, val, pillar_dirs, key):
    def _external_pillar_data(self, pillar, val, key):
        '''
        Builds actual pillar data structure and updates the ``pillar`` variable
        '''
@@ -772,26 +772,16 @@ class Pillar(object):
        if isinstance(val, dict):
            ext = self.ext_pillars[key](self.minion_id, pillar, **val)
        elif isinstance(val, list):
            if key == 'git':
                ext = self.ext_pillars[key](self.minion_id,
                                            val,
                                            pillar_dirs)
            else:
                ext = self.ext_pillars[key](self.minion_id,
                                            pillar,
                                            *val)
            ext = self.ext_pillars[key](self.minion_id,
                                        pillar,
                                        *val)
        else:
            if key == 'git':
                ext = self.ext_pillars[key](self.minion_id,
                                            val,
                                            pillar_dirs)
            else:
                ext = self.ext_pillars[key](self.minion_id,
                                            pillar,
                                            val)
            ext = self.ext_pillars[key](self.minion_id,
                                        pillar,
                                        val)
        return ext

    def ext_pillar(self, pillar, pillar_dirs, errors=None):
    def ext_pillar(self, pillar, errors=None):
        '''
        Render the external pillar data
        '''
@@ -843,9 +833,8 @@ class Pillar(object):
                continue
            try:
                ext = self._external_pillar_data(pillar,
                                                 val,
                                                 pillar_dirs,
                                                 key)
                                                 val,
                                                 key)
            except Exception as exc:
                errors.append(
                    'Failed to load ext_pillar {0}: {1}'.format(
@@ -867,16 +856,14 @@ class Pillar(object):
                ext = None
        return pillar, errors

    def compile_pillar(self, ext=True, pillar_dirs=None):
    def compile_pillar(self, ext=True):
        '''
        Render the pillar data and return
        '''
        top, top_errors = self.get_top()
        if ext:
            if self.opts.get('ext_pillar_first', False):
                self.opts['pillar'], errors = self.ext_pillar(
                    self.pillar_override,
                    pillar_dirs)
                self.opts['pillar'], errors = self.ext_pillar(self.pillar_override)
                self.rend = salt.loader.render(self.opts, self.functions)
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches, errors=errors)
@@ -888,8 +875,7 @@ class Pillar(object):
            else:
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches)
                pillar, errors = self.ext_pillar(
                    pillar, pillar_dirs, errors=errors)
                pillar, errors = self.ext_pillar(pillar, errors=errors)
        else:
            matches = self.top_matches(top)
            pillar, errors = self.render_pillar(matches)
@@ -984,6 +970,6 @@ class Pillar(object):
# ext_pillar etc.
class AsyncPillar(Pillar):
    @tornado.gen.coroutine
    def compile_pillar(self, ext=True, pillar_dirs=None):
        ret = super(AsyncPillar, self).compile_pillar(ext=ext, pillar_dirs=pillar_dirs)
    def compile_pillar(self, ext=True):
        ret = super(AsyncPillar, self).compile_pillar(ext=ext)
        raise tornado.gen.Return(ret)
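With the ``git`` special case gone, ``_external_pillar_data`` above dispatches purely on the configured value's type. A condensed sketch of the resulting call shapes, with ``ext_func`` standing in for ``self.ext_pillars[key]``:

.. code-block:: python

    def call_ext_pillar(ext_func, minion_id, pillar, val):
        # dict config -> keyword arguments; list config -> positional
        # arguments; anything else is passed through as a single value.
        if isinstance(val, dict):
            return ext_func(minion_id, pillar, **val)
        if isinstance(val, list):
            return ext_func(minion_id, pillar, *val)
        return ext_func(minion_id, pillar, val)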
@ -3,12 +3,6 @@
|
||||
Use a git repository as a Pillar source
|
||||
---------------------------------------
|
||||
|
||||
.. note::
|
||||
This external pillar has been rewritten for the :ref:`2015.8.0
|
||||
<release-2015-8-0>` release. The old method of configuring this
|
||||
external pillar will be maintained for a couple releases, allowing time for
|
||||
configurations to be updated to reflect the new usage.
|
||||
|
||||
This external pillar allows for a Pillar top file and Pillar SLS files to be
|
||||
sourced from a git repository.
|
||||
|
||||
@ -41,8 +35,7 @@ the repo's URL. Configuration details can be found below.
|
||||
- bar
|
||||
|
||||
Additionally, while git_pillar allows for the branch/tag to be overridden
|
||||
(see :ref:`here <git-pillar-env-remap>`, or :ref:`here
|
||||
<git-pillar-env-remap-legacy>` for Salt releases before 2015.8.0), keep in
|
||||
(see :ref:`here <git-pillar-env-remap>`), keep in
|
||||
mind that the top file must reference the actual environment name. It is
|
||||
common practice to make the environment in a git_pillar top file match the
|
||||
branch/tag name, but when remapping, the environment of course no longer
|
||||
@ -51,113 +44,10 @@ the repo's URL. Configuration details can be found below.
|
||||
common misconfiguration that may be to blame, and is a good first step in
|
||||
troubleshooting.
|
||||
|
||||
.. _git-pillar-pre-2015-8-0:
|
||||
.. _git-pillar-configuration:
|
||||
|
||||
Configuring git_pillar for Salt releases before 2015.8.0
|
||||
========================================================
|
||||
|
||||
.. note::
|
||||
This legacy configuration for git_pillar will no longer be supported as of
|
||||
the **Oxygen** release of Salt.
|
||||
|
||||
For Salt releases earlier than :ref:`2015.8.0 <release-2015-8-0>`,
|
||||
GitPython is the only supported provider for git_pillar. Individual
|
||||
repositories can be configured under the :conf_master:`ext_pillar`
|
||||
configuration parameter like so:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: master https://gitserver/git-pillar.git root=subdirectory
|
||||
|
||||
The repository is specified in the format ``<branch> <repo_url>``, with an
|
||||
optional ``root`` parameter (added in the :ref:`2014.7.0
|
||||
<release-2014-7-0>` release) which allows the pillar SLS files to be
|
||||
served up from a subdirectory (similar to :conf_master:`gitfs_root` in gitfs).
|
||||
|
||||
To use more than one branch from the same repo, multiple lines must be
|
||||
specified under :conf_master:`ext_pillar`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: master https://gitserver/git-pillar.git
|
||||
- git: dev https://gitserver/git-pillar.git
|
||||
|
||||
.. _git-pillar-env-remap-legacy:
|
||||
|
||||
To remap a specific branch to a specific Pillar environment, use the format
|
||||
``<branch>:<env>``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: develop:dev https://gitserver/git-pillar.git
|
||||
- git: master:prod https://gitserver/git-pillar.git
|
||||
|
||||
In this case, the ``develop`` branch would need its own ``top.sls`` with a
|
||||
``dev`` section in it, like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
dev:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
The ``master`` branch would need its own ``top.sls`` with a ``prod`` section in
|
||||
it:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
prod:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
If ``__env__`` is specified as the branch name, then git_pillar will first look
|
||||
at the minion's :conf_minion:`environment` option. If unset, it will fall back
|
||||
to using branch specified by the master's :conf_master:`gitfs_base`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: __env__ https://gitserver/git-pillar.git root=pillar
|
||||
|
||||
The corresponding Pillar top file would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
{{saltenv}}:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
.. note::
|
||||
This feature was unintentionally omitted when git_pillar was rewritten for
|
||||
the 2015.8.0 release. It was added again in the 2016.3.4 release, but it
|
||||
has changed slightly in that release. On Salt masters running 2015.8.0
|
||||
through 2016.3.3, this feature can only be accessed using the legacy config
|
||||
described above. For 2016.3.4 and later, refer to explanation of the
|
||||
``__env__`` parameter in the below section.
|
||||
|
||||
Versions 2016.3.0 through 2016.3.4 incorrectly check the *master's*
|
||||
``environment`` config option (instead of the minion's) before falling back
|
||||
to :conf_master:`gitfs_base`. This has been fixed in the 2016.3.5 and
|
||||
2016.11.1 releases (2016.11.0 contains the incorrect behavior).
|
||||
|
||||
Additionally, in releases before 2016.11.0, both ``{{env}}`` and
|
||||
``{{saltenv}}`` could be used as a placeholder for the environment.
|
||||
Starting in 2016.11.0, ``{{env}}`` is no longer supported.
|
||||
|
||||
.. _git-pillar-2015-8-0-and-later:
|
||||
|
||||
Configuring git_pillar for Salt releases 2015.8.0 and later
|
||||
===========================================================
|
||||
|
||||
.. note::
|
||||
In version 2015.8.0, the method of configuring git external pillars has
|
||||
changed, and now more closely resembles that of the :ref:`Git Fileserver
|
||||
Backend <tutorial-gitfs>`. If Salt detects the old configuration schema, it
|
||||
will use the pre-2015.8.0 code to compile the external pillar. A warning
|
||||
will also be logged.
|
||||
Configuring git_pillar for Salt
|
||||
===============================
|
||||
|
||||
Beginning with Salt version 2015.8.0, pygit2_ is now supported in addition to
|
||||
GitPython_. The requirements for GitPython_ and pygit2_ are the same as for
|
||||
@ -258,32 +148,6 @@ The corresponding Pillar top file would look like this:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
.. note::
|
||||
This feature was unintentionally omitted when git_pillar was rewritten for
|
||||
the 2015.8.0 release. It was added again in the 2016.3.4 release, but it
|
||||
has changed slightly in that release. The fallback value replaced by
|
||||
``{{env}}`` is :conf_master: is :conf_master:`git_pillar_base`, while the
|
||||
legacy config's version of this feature replaces ``{{env}}`` with
|
||||
:conf_master:`gitfs_base`.
|
||||
|
||||
On Salt masters running 2015.8.0 through 2016.3.3, this feature can only be
|
||||
accessed using the legacy config in the previous section of this page.
|
||||
|
||||
The same issue which affected the behavior of the minion's
|
||||
:conf_minion:`environment` config value using the legacy configuration
|
||||
syntax (see the documentation in the pre-2015.8.0 section above for the
|
||||
legacy support of this feature) also affects the new-style git_pillar
|
||||
syntax in version 2016.3.4. This has been corrected in version 2016.3.5 and
|
||||
2016.11.1 (2016.11.0 contains the incorrect behavior).
|
||||
|
||||
2016.3.4 incorrectly checks the *master's* ``environment`` config option
|
||||
(instead of the minion's) before falling back to the master's
|
||||
:conf_master:`git_pillar_base`.
|
||||
|
||||
Additionally, in releases before 2016.11.0, both ``{{env}}`` and
|
||||
``{{saltenv}}`` could be used as a placeholder for the environment.
|
||||
Starting in 2016.11.0, ``{{env}}`` is no longer supported.
|
||||
|
||||
With the addition of pygit2_ support, git_pillar can now interact with
|
||||
authenticated remotes. Authentication works just like in gitfs (as outlined in
|
||||
the :ref:`Git Fileserver Backend Walkthrough <gitfs-authentication>`), only
|
||||
@ -469,8 +333,6 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import copy
|
||||
import logging
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.gitfs
|
||||
@ -482,13 +344,6 @@ from salt.pillar import Pillar
|
||||
|
||||
# Import third party libs
|
||||
from salt.ext import six
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import git
|
||||
HAS_GITPYTHON = True
|
||||
except ImportError:
|
||||
HAS_GITPYTHON = False
|
||||
# pylint: enable=import-error
|
||||
|
||||
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
|
||||
PER_REMOTE_ONLY = ('name', 'mountpoint')
|
||||
@ -509,339 +364,89 @@ def __virtual__():
|
||||
# No git external pillars were configured
|
||||
return False
|
||||
|
||||
for ext_pillar in git_ext_pillars:
|
||||
if isinstance(ext_pillar['git'], six.string_types):
|
||||
# Verification of legacy git pillar configuration
|
||||
if not HAS_GITPYTHON:
|
||||
log.error(
|
||||
'Git-based ext_pillar is enabled in configuration but '
|
||||
'could not be loaded, is GitPython installed?'
|
||||
)
|
||||
return False
|
||||
if not git.__version__ > '0.3.0':
|
||||
return False
|
||||
return __virtualname__
|
||||
else:
|
||||
# Verification of new git pillar configuration
|
||||
try:
|
||||
salt.utils.gitfs.GitPillar(__opts__)
|
||||
# Initialization of the GitPillar object did not fail, so we
|
||||
# know we have valid configuration syntax and that a valid
|
||||
# provider was detected.
|
||||
return __virtualname__
|
||||
except FileserverConfigError:
|
||||
pass
|
||||
return False
|
||||
try:
|
||||
salt.utils.gitfs.GitPillar(__opts__)
|
||||
# Initialization of the GitPillar object did not fail, so we
|
||||
# know we have valid configuration syntax and that a valid
|
||||
# provider was detected.
|
||||
return __virtualname__
|
||||
except FileserverConfigError:
|
||||
return False
|
||||
|
||||
|
||||
def ext_pillar(minion_id, repo, pillar_dirs):
|
||||
def ext_pillar(minion_id, repo):
|
||||
'''
|
||||
Checkout the ext_pillar sources and compile the resulting pillar SLS
|
||||
'''
|
||||
if isinstance(repo, six.string_types):
|
||||
return _legacy_git_pillar(minion_id, repo, pillar_dirs)
|
||||
else:
|
||||
opts = copy.deepcopy(__opts__)
|
||||
opts['pillar_roots'] = {}
|
||||
opts['__git_pillar'] = True
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
if __opts__.get('__role') == 'minion':
|
||||
# If masterless, fetch the remotes. We'll need to remove this once
|
||||
# we make the minion daemon able to run standalone.
|
||||
pillar.fetch_remotes()
|
||||
pillar.checkout()
|
||||
ret = {}
|
||||
merge_strategy = __opts__.get(
|
||||
'pillar_source_merging_strategy',
|
||||
'smart'
|
||||
)
|
||||
merge_lists = __opts__.get(
|
||||
'pillar_merge_lists',
|
||||
False
|
||||
)
|
||||
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
|
||||
# If pillarenv is set, only grab pillars with that match pillarenv
|
||||
if opts['pillarenv'] and env != opts['pillarenv']:
|
||||
log.debug(
|
||||
'env \'%s\' for pillar dir \'%s\' does not match '
|
||||
'pillarenv \'%s\', skipping',
|
||||
env, pillar_dir, opts['pillarenv']
|
||||
)
|
||||
continue
|
||||
if pillar_dir in pillar.pillar_linked_dirs:
|
||||
log.debug(
|
||||
'git_pillar is skipping processing on %s as it is a '
|
||||
'mounted repo', pillar_dir
|
||||
)
|
||||
continue
|
||||
else:
|
||||
log.debug(
|
||||
'git_pillar is processing pillar SLS from %s for pillar '
|
||||
'env \'%s\'', pillar_dir, env
|
||||
)
|
||||
|
||||
if env == '__env__':
|
||||
env = opts.get('pillarenv') \
|
||||
or opts.get('environment') \
|
||||
or opts.get('git_pillar_base')
|
||||
log.debug('__env__ maps to %s', env)
|
||||
|
||||
pillar_roots = [pillar_dir]
|
||||
|
||||
if __opts__['git_pillar_includes']:
|
||||
# Add the rest of the pillar_dirs in this environment to the
|
||||
# list, excluding the current pillar_dir being processed. This
|
||||
# is because it was already specified above as the first in the
|
||||
# list, so that its top file is sourced from the correct
|
||||
# location and not from another git_pillar remote.
|
||||
pillar_roots.extend(
|
||||
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
|
||||
if env == e and d != pillar_dir]
|
||||
)
|
||||
|
||||
opts['pillar_roots'] = {env: pillar_roots}
|
||||
|
||||
local_pillar = Pillar(opts, __grains__, minion_id, env)
|
||||
ret = salt.utils.dictupdate.merge(
|
||||
ret,
|
||||
local_pillar.compile_pillar(ext=False),
|
||||
strategy=merge_strategy,
|
||||
merge_lists=merge_lists
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
# Legacy git_pillar code
|
||||
class _LegacyGitPillar(object):
|
||||
'''
|
||||
Deal with the remote git repository for Pillar
|
||||
'''
|
||||
|
||||
def __init__(self, branch, repo_location, opts):
|
||||
'''
|
||||
Try to initialize the Git repo object
|
||||
'''
|
||||
self.branch = self.map_branch(branch, opts)
|
||||
self.rp_location = repo_location
|
||||
self.opts = opts
|
||||
self._envs = set()
|
||||
self.working_dir = ''
|
||||
self.repo = None
|
||||
|
||||
hash_type = getattr(hashlib, opts['hash_type'])
|
||||
hash_str = '{0} {1}'.format(self.branch, self.rp_location)
|
||||
repo_hash = hash_type(salt.utils.stringutils.to_bytes(hash_str)).hexdigest()
|
||||
rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash)
|
||||
|
||||
if not os.path.isdir(rp_):
|
||||
os.makedirs(rp_)
|
||||
try:
|
||||
self.repo = git.Repo.init(rp_)
|
||||
except (git.exc.NoSuchPathError,
|
||||
git.exc.InvalidGitRepositoryError) as exc:
|
||||
log.error(
|
||||
'GitPython exception caught while initializing the repo: %s. '
|
||||
'Maybe the git CLI program is not available.', exc
|
||||
)
|
||||
except Exception as exc:
|
||||
log.exception('Undefined exception in git pillar. '
|
||||
'This may be a bug should be reported to the '
|
||||
'SaltStack developers.')
|
||||
|
||||
# Git directory we are working on
|
||||
# Should be the same as self.repo.working_dir
|
||||
self.working_dir = rp_
|
||||
|
||||
if isinstance(self.repo, git.Repo):
|
||||
if not self.repo.remotes:
|
||||
try:
|
||||
self.repo.create_remote('origin', self.rp_location)
|
||||
# ignore git ssl verification if requested
|
||||
if self.opts.get('pillar_gitfs_ssl_verify', True):
|
||||
self.repo.git.config('http.sslVerify', 'true')
|
||||
else:
|
||||
self.repo.git.config('http.sslVerify', 'false')
|
||||
except os.error:
|
||||
# This exception occurs when two processes are
|
||||
# trying to write to the git config at once, go
|
||||
# ahead and pass over it since this is the only
|
||||
# write.
|
||||
# This should place a lock down.
|
||||
pass
|
||||
else:
|
||||
if self.repo.remotes.origin.url != self.rp_location:
|
||||
self.repo.remotes.origin.config_writer.set(
|
||||
'url', self.rp_location)
|
||||
|
||||
def map_branch(self, branch, opts=None):
|
||||
opts = __opts__ if opts is None else opts
|
||||
if branch == '__env__':
|
||||
branch = opts.get('environment') or 'base'
|
||||
if branch == 'base':
|
||||
branch = opts.get('gitfs_base') or 'master'
|
||||
elif ':' in branch:
|
||||
branch = branch.split(':', 1)[0]
|
||||
return branch
|
||||
|
||||
def update(self):
|
||||
'''
|
||||
Ensure you are following the latest changes on the remote
|
||||
|
||||
Return boolean whether it worked
|
||||
'''
|
||||
try:
|
||||
log.debug('Legacy git_pillar: Updating \'%s\'', self.rp_location)
|
||||
self.repo.git.fetch()
|
||||
except git.exc.GitCommandError as exc:
|
||||
log.error(
|
||||
'Unable to fetch the latest changes from remote %s: %s',
|
||||
self.rp_location, exc
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
checkout_ref = 'origin/{0}'.format(self.branch)
|
||||
log.debug('Legacy git_pillar: Checking out %s for \'%s\'',
|
||||
checkout_ref, self.rp_location)
|
||||
self.repo.git.checkout(checkout_ref)
|
||||
except git.exc.GitCommandError as exc:
|
||||
log.error(
|
||||
'Legacy git_pillar: Failed to checkout %s for \'%s\': %s',
|
||||
checkout_ref, self.rp_location, exc
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def envs(self):
|
||||
'''
|
||||
Return a list of refs that can be used as environments
|
||||
'''
|
||||
if isinstance(self.repo, git.Repo):
|
||||
remote = self.repo.remote()
|
||||
for ref in self.repo.refs:
|
||||
parted = ref.name.partition('/')
|
||||
short = parted[2] if parted[2] else parted[0]
|
||||
if isinstance(ref, git.Head):
|
||||
if short == 'master':
|
||||
short = 'base'
|
||||
if ref not in remote.stale_refs:
|
||||
self._envs.add(short)
|
||||
elif isinstance(ref, git.Tag):
|
||||
self._envs.add(short)
|
||||
|
||||
return list(self._envs)
|
||||
|
||||
|
||||
def _legacy_git_pillar(minion_id, repo_string, pillar_dirs):
|
||||
'''
|
||||
Support pre-Beryllium config schema
|
||||
'''
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'The git ext_pillar configuration is deprecated. Please refer to the '
|
||||
'documentation at '
|
||||
'https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.git_pillar.html '
|
||||
'for more information. This configuration will no longer be supported '
|
||||
'as of the Oxygen release of Salt.'
|
||||
)
|
||||
if pillar_dirs is None:
|
||||
return
|
||||
# split the branch, repo name and optional extra (key=val) parameters.
|
||||
options = repo_string.strip().split()
|
||||
branch_env = options[0]
|
||||
repo_location = options[1]
|
||||
root = ''
|
||||
|
||||
for extraopt in options[2:]:
|
||||
# Support multiple key=val attributes as custom parameters.
|
||||
DELIM = '='
|
||||
if DELIM not in extraopt:
|
||||
log.error(
|
||||
'Legacy git_pillar: Incorrectly formatted extra parameter '
|
||||
'\'%s\' within \'%s\' missing \'%s\')',
|
||||
extraopt, repo_string, DELIM
|
||||
)
|
||||
key, val = _extract_key_val(extraopt, DELIM)
|
||||
if key == 'root':
|
||||
root = val
|
||||
else:
|
||||
log.error(
|
||||
'Legacy git_pillar: Unrecognized extra parameter \'%s\' '
|
||||
'in \'%s\'',
|
||||
key, repo_string
|
||||
)
|
||||
|
||||
# environment is "different" from the branch
|
||||
cfg_branch, _, environment = branch_env.partition(':')
|
||||
|
||||
gitpil = _LegacyGitPillar(cfg_branch, repo_location, __opts__)
|
||||
branch = gitpil.branch
|
||||
|
||||
if environment == '':
|
||||
if branch == 'master':
|
||||
environment = 'base'
|
||||
else:
|
||||
environment = branch
|
||||
|
||||
# normpath is needed to remove appended '/' if root is empty string.
|
||||
pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))
|
||||
log.debug(
|
||||
'Legacy git_pillar: pillar_dir for \'%s\' is \'%s\'',
|
||||
repo_string, pillar_dir
|
||||
)
|
||||
log.debug(
|
||||
'Legacy git_pillar: branch for \'%s\' is \'%s\'',
|
||||
repo_string, branch
|
||||
)
|
||||
|
||||
pillar_dirs.setdefault(pillar_dir, {})
|
||||
|
||||
if cfg_branch == '__env__' and branch not in ['master', 'base']:
|
||||
gitpil.update()
|
||||
elif pillar_dirs[pillar_dir].get(branch, False):
|
||||
log.debug(
|
||||
'Already processed pillar_dir \'%s\' for \'%s\'',
|
||||
pillar_dir, repo_string
|
||||
)
|
||||
return {} # we've already seen this combo
|
||||
|
||||
pillar_dirs[pillar_dir].setdefault(branch, True)
|
||||
|
||||
# Don't recurse forever-- the Pillar object will re-call the ext_pillar
|
||||
# function
|
||||
if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
|
||||
return {}
|
||||
|
||||
opts = copy.deepcopy(__opts__)
|
||||
|
||||
opts['pillar_roots'][environment] = [pillar_dir]
|
||||
opts['pillar_roots'] = {}
|
||||
opts['__git_pillar'] = True
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
if __opts__.get('__role') == 'minion':
|
||||
# If masterless, fetch the remotes. We'll need to remove this once
|
||||
# we make the minion daemon able to run standalone.
|
||||
pillar.fetch_remotes()
|
||||
pillar.checkout()
|
||||
ret = {}
|
||||
merge_strategy = __opts__.get(
|
||||
'pillar_source_merging_strategy',
|
||||
'smart'
|
||||
)
|
||||
merge_lists = __opts__.get(
|
||||
'pillar_merge_lists',
|
||||
False
|
||||
)
|
||||
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
|
||||
# If pillarenv is set, only grab pillars with that match pillarenv
|
||||
if opts['pillarenv'] and env != opts['pillarenv']:
|
||||
log.debug(
|
||||
'env \'%s\' for pillar dir \'%s\' does not match '
|
||||
'pillarenv \'%s\', skipping',
|
||||
env, pillar_dir, opts['pillarenv']
|
||||
)
|
||||
continue
|
||||
if pillar_dir in pillar.pillar_linked_dirs:
|
||||
log.debug(
|
||||
'git_pillar is skipping processing on %s as it is a '
|
||||
'mounted repo', pillar_dir
|
||||
)
|
||||
continue
|
||||
else:
|
||||
log.debug(
|
||||
'git_pillar is processing pillar SLS from %s for pillar '
|
||||
'env \'%s\'', pillar_dir, env
|
||||
)
|
||||
|
||||
pil = Pillar(opts, __grains__, minion_id, branch)
|
||||
if env == '__env__':
|
||||
env = opts.get('pillarenv') \
|
||||
or opts.get('environment') \
|
||||
or opts.get('git_pillar_base')
|
||||
log.debug('__env__ maps to %s', env)
|
||||
|
||||
return pil.compile_pillar(ext=False)
|
||||
pillar_roots = [pillar_dir]
|
||||
|
||||
if __opts__['git_pillar_includes']:
|
||||
# Add the rest of the pillar_dirs in this environment to the
|
||||
# list, excluding the current pillar_dir being processed. This
|
||||
# is because it was already specified above as the first in the
|
||||
# list, so that its top file is sourced from the correct
|
||||
# location and not from another git_pillar remote.
|
||||
pillar_roots.extend(
|
||||
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
|
||||
if env == e and d != pillar_dir]
|
||||
)
|
||||
|
||||
def _update(branch, repo_location):
|
||||
'''
|
||||
Ensure you are following the latest changes on the remote
|
||||
opts['pillar_roots'] = {env: pillar_roots}
|
||||
|
||||
return boolean whether it worked
|
||||
'''
|
||||
gitpil = _LegacyGitPillar(branch, repo_location, __opts__)
|
||||
|
||||
return gitpil.update()
|
||||
|
||||
|
||||
def _envs(branch, repo_location):
|
||||
'''
|
||||
Return a list of refs that can be used as environments
|
||||
'''
|
||||
gitpil = _LegacyGitPillar(branch, repo_location, __opts__)
|
||||
|
||||
return gitpil.envs()
|
||||
local_pillar = Pillar(opts, __grains__, minion_id, env)
|
||||
ret = salt.utils.dictupdate.merge(
|
||||
ret,
|
||||
local_pillar.compile_pillar(ext=False),
|
||||
strategy=merge_strategy,
|
||||
merge_lists=merge_lists
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
def _extract_key_val(kv, delimiter='='):
|
||||
|
@@ -11,7 +11,6 @@ import logging
import salt.pillar.git_pillar
import salt.utils.gitfs
from salt.exceptions import SaltRunnerError
from salt.ext import six

log = logging.getLogger(__name__)

@@ -21,18 +20,13 @@ def update(branch=None, repo=None):
.. versionadded:: 2014.1.0

.. versionchanged:: 2015.8.4
This runner function now supports the :ref:`new git_pillar
configuration schema <git-pillar-2015-8-0-and-later>` introduced in
This runner function now supports the :ref:`git_pillar
configuration schema <git-pillar-configuration>` introduced in
2015.8.0. Additionally, the branch and repo can now be omitted to
update all git_pillar remotes. The return data has also changed. For
releases 2015.8.3 and earlier, there is no value returned. Starting
with 2015.8.4, the return data is a dictionary. If using the :ref:`old
git_pillar configuration schema <git-pillar-pre-2015-8-0>`, then the
dictionary values will be ``True`` if the update completed without
error, and ``False`` if an error occurred. If using the :ref:`new
git_pillar configuration schema <git-pillar-2015-8-0-and-later>`, the
values will be ``True`` only if new commits were fetched, and ``False``
if there were errors or no new commits were fetched.
update all git_pillar remotes. The return data has also changed to
a dictionary. The values will be ``True`` only if new commits were
fetched, and ``False`` if there were errors or no new commits were
fetched.

Fetch one or all configured git_pillar remotes.

@@ -56,7 +50,7 @@ def update(branch=None, repo=None):

# Update specific branch and repo
salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git'
# Update all repos (2015.8.4 and later)
# Update all repos
salt-run git_pillar.update
# Run with debug logging
salt-run git_pillar.update -l debug
@@ -67,47 +61,30 @@ def update(branch=None, repo=None):
if pillar_type != 'git':
continue
pillar_conf = ext_pillar[pillar_type]
if isinstance(pillar_conf, six.string_types):
parts = pillar_conf.split()
if len(parts) >= 2:
desired_branch, desired_repo = parts[:2]
# Skip this remote if it doesn't match the search criteria
if branch is not None:
if branch != desired_branch:
continue
if repo is not None:
if repo != desired_repo:
continue
ret[pillar_conf] = salt.pillar.git_pillar._LegacyGitPillar(
parts[0],
parts[1],
__opts__).update()

else:
pillar = salt.utils.gitfs.GitPillar(__opts__)
pillar.init_remotes(pillar_conf,
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
salt.pillar.git_pillar.PER_REMOTE_ONLY)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
if branch != remote.branch:
continue
if repo is not None:
if repo != remote.url:
continue
try:
result = remote.fetch()
except Exception as exc:
log.error(
'Exception \'{0}\' caught while fetching git_pillar '
'remote \'{1}\''.format(exc, remote.id),
exc_info_on_loglevel=logging.DEBUG
)
result = False
finally:
remote.clear_lock()
ret[remote.id] = result
pillar = salt.utils.gitfs.GitPillar(__opts__)
pillar.init_remotes(pillar_conf,
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
salt.pillar.git_pillar.PER_REMOTE_ONLY)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
if branch != remote.branch:
continue
if repo is not None:
if repo != remote.url:
continue
try:
result = remote.fetch()
except Exception as exc:
log.error(
'Exception \'{0}\' caught while fetching git_pillar '
'remote \'{1}\''.format(exc, remote.id),
exc_info_on_loglevel=logging.DEBUG
)
result = False
finally:
remote.clear_lock()
ret[remote.id] = result
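With the legacy string-config branch removed, the runner always returns a dictionary keyed by remote ID. A hypothetical return value (remote IDs invented for illustration) would have this shape:

.. code-block:: python

    # Hypothetical output of `salt-run git_pillar.update`; keys are remote
    # IDs and values are True only when new commits were fetched.
    ret = {
        'master https://foo.com/bar.git': True,   # new commits fetched
        'dev https://foo.com/baz.git': False,     # up to date, or errored
    }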

if not ret:
if branch is not None or repo is not None:
@@ -529,7 +529,7 @@ def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):

def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2017.7.2
.. versionadded:: Oxygen

Sync eauth token modules from ``salt://_tokens`` to the master

@@ -81,7 +81,7 @@ def orchestrate(mods,
pillar_enc=pillar_enc,
orchestration_jid=orchestration_jid)
ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
res = salt.utils.check_state_result(ret['data'])
res = __utils__['state.check_result'](ret['data'])
if res:
ret['retcode'] = 0
else:
@@ -1312,7 +1312,7 @@ def _tags_present(name, tags, region, key, keyid, profile):
tags_to_add = tags
tags_to_update = {}
tags_to_remove = []
if lb['tags']:
if lb.get('tags'):
for _tag in lb['tags']:
if _tag not in tags.keys():
if _tag not in tags_to_remove:
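The ``lb.get('tags')`` change simply guards against a missing key. The surrounding logic partitions current versus desired tags into add/update/remove sets; a minimal standalone sketch of that partitioning (assumed shapes, not the module's exact code):

.. code-block:: python

    def diff_tags(current, desired):
        """Partition tag changes; `current` and `desired` are plain dicts."""
        to_add = {k: v for k, v in desired.items() if k not in current}
        to_update = {k: v for k, v in desired.items()
                     if k in current and current[k] != v}
        to_remove = [k for k in current if k not in desired]
        return to_add, to_update, to_remove

    assert diff_tags({'env': 'qa', 'team': 'ops'}, {'env': 'prod'}) == \
        ({}, {'env': 'prod'}, ['team'])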
@@ -50,7 +50,152 @@ def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_elbv2' if 'boto_elbv2.target_group_exists' in __salt__ else False
if 'boto_elbv2.target_group_exists' in __salt__:
return 'boto_elbv2'
return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found")


def create_target_group(name, protocol, port, vpc_id,
region=None, key=None, keyid=None, profile=None,
health_check_protocol='HTTP', health_check_port='traffic-port',
health_check_path='/', health_check_interval_seconds=30,
health_check_timeout_seconds=5, healthy_threshold_count=5,
unhealthy_threshold_count=2, **kwargs):

'''
.. versionadded:: 2017.11.0

Create target group if not present.

name
(string) - The name of the target group.
protocol
(string) - The protocol to use for routing traffic to the targets
port
(int) - The port on which the targets receive traffic. This port is used unless
you specify a port override when registering the target.
vpc_id
(string) - The identifier of the virtual private cloud (VPC).
health_check_protocol
(string) - The protocol the load balancer uses when performing health checks on
targets. The default is the HTTP protocol.
health_check_port
(string) - The port the load balancer uses when performing health checks on
targets. The default is 'traffic-port', which indicates the port on which each
target receives traffic from the load balancer.
health_check_path
(string) - The ping path that is the destination on the targets for health
checks. The default is /.
health_check_interval_seconds
(integer) - The approximate amount of time, in seconds, between health checks
of an individual target. The default is 30 seconds.
health_check_timeout_seconds
(integer) - The amount of time, in seconds, during which no response from a
target means a failed health check. The default is 5 seconds.
healthy_threshold_count
(integer) - The number of consecutive health check successes required before
considering an unhealthy target healthy. The default is 5.
unhealthy_threshold_count
(integer) - The number of consecutive health check failures required before
considering a target unhealthy. The default is 2.

returns
(bool) - True on success, False on failure.

Usage example:
.. code-block:: yaml

create-target:
boto_elbv2.create_target_group:
- name: myALB
- protocol: https
- port: 443
- vpc_id: myVPC
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile):
ret['result'] = True
ret['comment'] = 'Target Group {0} already exists'.format(name)
return ret

if __opts__['test']:
ret['comment'] = 'Target Group {0} will be created'.format(name)
return ret

state = __salt__['boto_elbv2.create_target_group'](name,
protocol,
port,
vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
health_check_protocol=health_check_protocol,
health_check_port=health_check_port,
health_check_path=health_check_path,
health_check_interval_seconds=health_check_interval_seconds,
health_check_timeout_seconds=health_check_timeout_seconds,
healthy_threshold_count=healthy_threshold_count,
unhealthy_threshold_count=unhealthy_threshold_count,
**kwargs)

if state:
ret['changes']['target_group'] = name
ret['result'] = True
ret['comment'] = 'Target Group {0} created'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Target Group {0} creation failed'.format(name)
return ret


def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
'''
Delete target group.

name
(string) - The Amazon Resource Name (ARN) of the resource.

returns
(bool) - True on success, False on failure.

Usage example:

.. code-block:: yaml

check-target:
boto_elbv2.delete_target_group:
- name: myALB
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

if not __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile):
ret['result'] = True
ret['comment'] = 'Target Group {0} does not exist'.format(name)
return ret

if __opts__['test']:
ret['comment'] = 'Target Group {0} will be deleted'.format(name)
return ret

state = __salt__['boto_elbv2.delete_target_group'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)

if state:
ret['result'] = True
ret['changes']['target_group'] = name
ret['comment'] = 'Target Group {0} deleted'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Target Group {0} deletion failed'.format(name)
return ret

def targets_registered(name, targets, region=None, key=None, keyid=None,
@@ -77,10 +222,13 @@ def targets_registered(name, targets, region=None, key=None, keyid=None,
- instance-id2
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile)

if tg:
health = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile)
if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile):
health = __salt__['boto_elbv2.describe_target_health'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)
failure = False
changes = False
newhealth_mock = copy.copy(health)
@@ -99,10 +247,10 @@ def targets_registered(name, targets, region=None, key=None, keyid=None,
else:
state = __salt__['boto_elbv2.register_targets'](name,
targets,
region,
key,
keyid,
profile)
region=region,
key=key,
keyid=keyid,
profile=profile)
if state:
changes = True
ret['result'] = True
@@ -119,7 +267,11 @@ def targets_registered(name, targets, region=None, key=None, keyid=None,
ret['changes']['new'] = newhealth_mock
else:
ret['comment'] = 'Target Group {0} has been changed'.format(name)
newhealth = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile)
newhealth = __salt__['boto_elbv2.describe_target_health'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)
ret['changes']['new'] = newhealth
return ret
else:
@@ -128,7 +280,7 @@ def targets_registered(name, targets, region=None, key=None, keyid=None,


def targets_deregistered(name, targets, region=None, key=None, keyid=None,
profile=None, **kwargs):
profile=None, **kwargs):
'''
Remove targets from an Application Load Balancer target group.

@@ -150,9 +302,12 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None,
- instance-id2
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile)
if tg:
health = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile)
if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile):
health = __salt__['boto_elbv2.describe_target_health'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)
failure = False
changes = False
newhealth_mock = copy.copy(health)
@@ -168,11 +323,11 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None,
newhealth_mock.update({target: "draining"})
else:
state = __salt__['boto_elbv2.deregister_targets'](name,
targets,
region,
key,
keyid,
profile)
targets,
region=region,
key=key,
keyid=keyid,
profile=profile)
if state:
changes = True
ret['result'] = True
@@ -189,7 +344,11 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None,
ret['changes']['new'] = newhealth_mock
else:
ret['comment'] = 'Target Group {0} has been changed'.format(name)
newhealth = __salt__['boto_elbv2.describe_target_health'](name, region, key, keyid, profile)
newhealth = __salt__['boto_elbv2.describe_target_health'](name,
region=region,
key=key,
keyid=keyid,
profile=profile)
ret['changes']['new'] = newhealth
return ret
else:
@@ -137,13 +137,14 @@ def present(name,
.. versionadded:: 2016.11.0

sls
Allow for building images with ``dockerng.sls_build`` by specifying the
SLS files to build with. This can be a list or comma-separated string.
Allow for building of an image with :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>` by specifying the SLS files with
which to build. This can be a list or comma-separated string.

.. code-block:: yaml

myuser/myimage:mytag:
dockerng.image_present:
docker_image.present:
- sls:
- webapp1
- webapp2
@@ -153,12 +154,14 @@ def present(name,
.. versionadded: 2017.7.0

base
Base image with which to start ``dockerng.sls_build``
Base image with which to start :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>`

.. versionadded: 2017.7.0

saltenv
environment from which to pull sls files for ``dockerng.sls_build``.
Environment from which to pull SLS files for :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>`

.. versionadded: 2017.7.0
'''
@@ -171,11 +174,14 @@ def present(name,
ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.'
return ret

# Ensure that we have repo:tag notation
image = ':'.join(salt.utils.docker.get_repo_tag(name))
all_tags = __salt__['docker.list_tags']()
resolved_tag = __salt__['docker.resolve_tag'](image)

if image in all_tags:
if resolved_tag is False:
# Specified image is not present
image_info = None
else:
# Specified image is present
if not force:
ret['result'] = True
ret['comment'] = 'Image \'{0}\' already present'.format(name)
@@ -187,8 +193,6 @@ def present(name,
ret['comment'] = \
'Unable to get info for image \'{0}\': {1}'.format(name, exc)
return ret
else:
image_info = None

if build or sls:
action = 'built'
@@ -199,7 +203,7 @@ def present(name,

if __opts__['test']:
ret['result'] = None
if (image in all_tags and force) or image not in all_tags:
if (resolved_tag is not False and force) or resolved_tag is False:
ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action)
return ret

@@ -230,10 +234,10 @@ def present(name,
if isinstance(sls, list):
sls = ','.join(sls)
try:
image_update = __salt__['dockerng.sls_build'](name=image,
base=base,
mods=sls,
saltenv=saltenv)
image_update = __salt__['docker.sls_build'](name=image,
base=base,
mods=sls,
saltenv=saltenv)
except Exception as exc:
ret['comment'] = (
'Encountered error using sls {0} for building {1}: {2}'
@@ -263,10 +267,8 @@ def present(name,
client_timeout=client_timeout
)
except Exception as exc:
ret['comment'] = (
'Encountered error pulling {0}: {1}'
.format(image, exc)
)
ret['comment'] = \
'Encountered error pulling {0}: {1}'.format(image, exc)
return ret
if (image_info is not None and image_info['Id'][:12] == image_update
.get('Layers', {})
@@ -278,7 +280,7 @@ def present(name,
# Only add to the changes dict if layers were pulled
ret['changes'] = image_update

ret['result'] = image in __salt__['docker.list_tags']()
ret['result'] = bool(__salt__['docker.resolve_tag'](image))

if not ret['result']:
# This shouldn't happen, failure to pull should be caught above
@@ -356,23 +358,16 @@ def absent(name=None, images=None, force=False):
ret['comment'] = 'One of \'name\' and \'images\' must be provided'
return ret
elif images is not None:
targets = []
for target in images:
try:
targets.append(':'.join(salt.utils.docker.get_repo_tag(target)))
except TypeError:
# Don't stomp on images with unicode characters in Python 2,
# only force image to be a str if it wasn't already (which is
# very unlikely).
targets.append(':'.join(salt.utils.docker.get_repo_tag(str(target))))
targets = images
elif name:
try:
targets = [':'.join(salt.utils.docker.get_repo_tag(name))]
except TypeError:
targets = [':'.join(salt.utils.docker.get_repo_tag(str(name)))]
targets = [name]

pre_tags = __salt__['docker.list_tags']()
to_delete = [x for x in targets if x in pre_tags]
to_delete = []
for target in targets:
resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags)
if resolved_tag is not False:
to_delete.append(resolved_tag)
log.debug('targets = {0}'.format(targets))
log.debug('to_delete = {0}'.format(to_delete))

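The switch from ``image in all_tags`` to ``docker.resolve_tag`` means a bare image name can now match its implied ``:latest`` tag instead of silently failing the membership test. A rough sketch of what such resolution does (assumed behavior, not the dockermod implementation):

.. code-block:: python

    def resolve_tag(name, tags):
        """Return the matching repo:tag from `tags`, or False if absent."""
        if ':' not in name.rsplit('/', 1)[-1]:
            name += ':latest'          # bare names imply the latest tag
        return name if name in tags else False

    tags = ['myuser/myimage:mytag', 'busybox:latest']
    assert resolve_tag('busybox', tags) == 'busybox:latest'
    assert resolve_tag('missing:1.0', tags) is False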
@@ -34,6 +34,7 @@ from __future__ import absolute_import
import logging

# Import salt libs
from salt.ext import six
import salt.utils

# Enable proper logging
@@ -56,6 +57,9 @@ def __virtual__():
def present(name,
driver=None,
driver_opts=None,
gateway=None,
ip_range=None,
subnet=None,
containers=None):
'''
Ensure that a network is present.
@@ -69,9 +73,18 @@ def present(name,
driver_opts
Options for the network driver.

gateway
IPv4 or IPv6 gateway for the master subnet

ip_range
Allocate container IP from a sub-range within the subnet

containers:
List of container names that should be part of this network

subnet:
Subnet in CIDR format that represents a network segment

Usage Examples:

.. code-block:: yaml
@@ -91,6 +104,18 @@ def present(name,
- cont1
- cont2


.. code-block:: yaml

network_baz:
docker_network.present:
- name: baz
- driver_opts:
- parent: eth0
- gateway: "172.20.0.1"
- ip_range: "172.20.0.128/25"
- subnet: "172.20.0.0/24"

'''
ret = {'name': name,
'changes': {},
@@ -100,10 +125,10 @@ def present(name,
if salt.utils.is_dictlist(driver_opts):
driver_opts = salt.utils.repack_dictlist(driver_opts)

if containers is None:
containers = []
# map containers to container's Ids.
containers = [__salt__['docker.inspect_container'](c)['Id'] for c in containers]
# If any containers are specified, get details of each one; we need the Id and Name fields later
if containers is not None:
containers = [__salt__['docker.inspect_container'](c) for c in containers]

networks = __salt__['docker.networks'](names=[name])
log.trace(
'docker_network.present: current networks: {0}'.format(networks)
@@ -118,24 +143,99 @@ def present(name,
network = network_iter
break

if network is not None:
if all(c in network['Containers'] for c in containers):
ret['result'] = True
ret['comment'] = 'Network \'{0}\' already exists.'.format(name)
return ret
result = True
for container in containers:
if container not in network['Containers']:
try:
ret['changes']['connected'] = __salt__['docker.connect_container_to_network'](
container, name)
except Exception as exc:
ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
container, name, exc))
result = False
ret['result'] = result
# We might disconnect containers in the process of recreating the network; we'll need to keep track of these
# containers so we can reconnect them later.
containers_disconnected = {}

# If the network already exists
if network is not None:
log.debug('Network \'{0}\' already exists'.format(name))

# Set the comment now to say that it already exists; if we need to recreate the network with new config we'll
# update the comment later.
ret['comment'] = 'Network \'{0}\' already exists'.format(name)

# Update network details with result from network inspect, which will contain details of any containers
# attached to the network.
network = __salt__['docker.inspect_network'](network_id=network['Id'])

log.trace('Details of \'{0}\' network: {1}'.format(name, network))

# For the IPAM and driver config options which can be passed, check that if they are passed, they match the
# current configuration.
original_config = {}
new_config = {}

if driver and driver != network['Driver']:
new_config['driver'] = driver
original_config['driver'] = network['Driver']

if driver_opts and driver_opts != network['Options']:
new_config['driver_opts'] = driver_opts
original_config['driver_opts'] = network['Options']

# Multiple IPAM configs is probably not that common, so for now we'll only worry about the simple case where
# there's a single IPAM config. If there's more than one (or none at all) then we'll bail out.
if len(network['IPAM']['Config']) != 1:
ret['comment'] = ('docker_network.present only supports Docker networks with a single IPAM config, '
'network \'{0}\' has {1}'.format(name, len(network['IPAM']['Config'])))
return ret

ipam = network['IPAM']['Config'][0]

if gateway and gateway != ipam['Gateway']:
new_config['gateway'] = gateway
original_config['gateway'] = ipam['Gateway']

if subnet and subnet != ipam['Subnet']:
new_config['subnet'] = subnet
original_config['subnet'] = ipam['Subnet']

if ip_range:
# IPRange isn't always configured so check it's even set before attempting to compare it.
if 'IPRange' in ipam and ip_range != ipam['IPRange']:
new_config['ip_range'] = ip_range
original_config['ip_range'] = ipam['IPRange']
elif 'IPRange' not in ipam:
new_config['ip_range'] = ip_range
original_config['ip_range'] = ''

if new_config != original_config:
log.debug('New config is different to current;\nnew: {0}\ncurrent: {1}'.format(new_config, original_config))

if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Network {0} will be recreated with new config'.format(name)
return ret

remove_result = _remove_network(name, network['Containers'])
if not remove_result['result']:
return remove_result

# We've removed the network, so there are now no containers attached to it.
if network['Containers']:
containers_disconnected = network['Containers']
network['Containers'] = []

try:
__salt__['docker.create_network'](
name,
driver=driver,
driver_opts=driver_opts,
gateway=gateway,
ip_range=ip_range,
subnet=subnet)
except Exception as exc:
ret['comment'] = ('Failed to replace network \'{0}\': {1}'
.format(name, exc))
return ret

ret['changes']['updated'] = {name: {'old': original_config, 'new': new_config}}
ret['comment'] = 'Network \'{0}\' was replaced with updated config'.format(name)

# If the network does not yet exist, we create it
else:
log.debug('The network \'{0}\' will be created'.format(name))
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The network \'{0}\' will be created'.format(name))
@@ -144,22 +244,78 @@ def present(name,
ret['changes']['created'] = __salt__['docker.create_network'](
name,
driver=driver,
driver_opts=driver_opts)
driver_opts=driver_opts,
gateway=gateway,
ip_range=ip_range,
subnet=subnet)

except Exception as exc:
ret['comment'] = ('Failed to create network \'{0}\': {1}'
.format(name, exc))
else:
result = True
for container in containers:
try:
ret['changes']['connected'] = __salt__['docker.connect_container_to_network'](
container, name)
except Exception as exc:
ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
container, name, exc))
result = False
ret['result'] = result
return ret

# Finally, figure out the list of containers which should now be connected.
containers_to_connect = {}
# If no containers were specified in the state but we have disconnected some in the process of recreating the
# network, we should reconnect those containers.
if containers is None and containers_disconnected:
containers_to_connect = containers_disconnected
# If containers were specified in the state, regardless of what we've disconnected, we should now just connect
# the containers specified.
elif containers:
for container in containers:
containers_to_connect[container['Id']] = container

if network is None:
network = {'Containers': {}}

# At this point, if all the containers we want connected are already connected to the network, we can set our
# result and finish.
if all(c in network['Containers'] for c in containers_to_connect):
ret['result'] = True
return ret

# If we've not exited by this point it's because we have containers which we need to connect to the network.
result = True
reconnected_containers = []
connected_containers = []
for container_id, container in six.iteritems(containers_to_connect):
if container_id not in network['Containers']:
try:
connect_result = __salt__['docker.connect_container_to_network'](container_id, name)
log.trace(
'docker.connect_container_to_network({0}, {1}) result: {2}'.
format(container, name, connect_result)
)
# If this container was one we disconnected earlier, add it to the reconnected list.
if container_id in containers_disconnected:
reconnected_containers.append(container['Name'])
# Otherwise add it to the connected list.
else:
connected_containers.append(container['Name'])

except Exception as exc:
ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
container['Name'], name, exc))
result = False

# If we populated any of our container lists then add them to our list of changes.
if connected_containers:
ret['changes']['connected'] = connected_containers
if reconnected_containers:
ret['changes']['reconnected'] = reconnected_containers

# Figure out if we removed any containers as a result of replacing the network and then not re-connecting the
# containers, because they weren't specified in the state.
disconnected_containers = []
for container_id, container in six.iteritems(containers_disconnected):
if container_id not in containers_to_connect:
disconnected_containers.append(container['Name'])

if disconnected_containers:
ret['changes']['disconnected'] = disconnected_containers

ret['result'] = result
return ret


@@ -207,16 +363,32 @@ def absent(name, driver=None):
ret['comment'] = ('The network \'{0}\' will be removed'.format(name))
return ret

for container in networks[0]['Containers']:
return _remove_network(network=name, containers=networks[0]['Containers'])


def _remove_network(network, containers=None):
'''
Remove network, removing any specified containers from it beforehand
'''

ret = {'name': network,
'changes': {},
'result': False,
'comment': ''}

if containers is None:
containers = []
for container in containers:
try:
ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, name)
ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, network)
except Exception as exc:
ret['comment'] = ('Failed to disconnect container \'{0}\' to network \'{1}\' {2}'.format(
container, name, exc))
ret['comment'] = ('Failed to disconnect container \'{0}\' from network \'{1}\' {2}'.format(
container, network, exc))
try:
ret['changes']['removed'] = __salt__['docker.remove_network'](name)
ret['changes']['removed'] = __salt__['docker.remove_network'](network)
ret['result'] = True
except Exception as exc:
ret['comment'] = ('Failed to remove network \'{0}\': {1}'
.format(name, exc))
.format(network, exc))

return ret
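The recreate-on-change logic above reduces to comparing the requested driver/IPAM options against the inspected network and rebuilding only when they diverge. A condensed sketch of that comparison (a simplified model; field names follow the inspect output shown above):

.. code-block:: python

    def config_diff(requested, actual):
        """Collect only the requested keys that differ from the live network."""
        new_config, original_config = {}, {}
        for key, current in actual.items():
            wanted = requested.get(key)
            if wanted and wanted != current:
                new_config[key] = wanted
                original_config[key] = current
        return new_config, original_config

    new, old = config_diff(
        {'subnet': '172.20.0.0/24', 'gateway': '172.20.0.1'},
        {'subnet': '172.20.0.0/16', 'gateway': '172.20.0.1'})
    assert new == {'subnet': '172.20.0.0/24'}
    assert old == {'subnet': '172.20.0.0/16'}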
@@ -627,7 +627,11 @@ def _clean_dir(root, keep, exclude_pat):
while True:
fn_ = os.path.dirname(fn_)
real_keep.add(fn_)
if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\\\'])]:
if fn_ in [
os.sep,
''.join([os.path.splitdrive(fn_)[0], os.sep]),
''.join([os.path.splitdrive(fn_)[0], os.sep, os.sep])
]:
break

def _delete_not_kept(nfn):
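The new termination check stops the upward directory walk at the filesystem root on both POSIX and Windows by building the root candidates from ``os.path.splitdrive`` and ``os.sep``, rather than hard-coding ``'/'`` and a backslash pair. A small sketch of the same idea (pinned to ``ntpath`` so it behaves the same on any platform):

.. code-block:: python

    import ntpath

    def is_root(path, splitdrive=ntpath.splitdrive, sep='\\'):
        """True when `path` is a root such as 'C:\\' (or '/' with posixpath)."""
        drive = splitdrive(path)[0]
        return path in (sep, drive + sep, drive + sep + sep)

    assert is_root('C:\\')
    assert not is_root('C:\\Users')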
@@ -1407,7 +1411,8 @@ def symlink(
return ret


def absent(name):
def absent(name,
**kwargs):
'''
Make sure that the named file or directory is absent. If it exists, it will
be deleted. This will work to reverse any of the functions in the file
@@ -1469,7 +1474,8 @@ def absent(name):
return ret


def exists(name):
def exists(name,
**kwargs):
'''
Verify that the named file or directory is present or exists.
Ensures pre-requisites outside of Salt's purview
@@ -1495,7 +1501,8 @@ def exists(name):
return ret


def missing(name):
def missing(name,
**kwargs):
'''
Verify that the named file or directory is missing, this returns True only
if the named file is missing but does not remove the file if it is present.
@@ -1303,6 +1303,23 @@ def latest(name,
'if it does not already exist).',
comments
)
remote_tags = set([
x.replace('refs/tags/', '') for x in __salt__['git.ls_remote'](
cwd=target,
remote=remote,
opts="--tags",
user=user,
password=password,
identity=identity,
saltenv=__env__,
ignore_retcode=True,
).keys() if '^{}' not in x
])
if set(all_local_tags) != remote_tags:
has_remote_rev = False
ret['changes']['new_tags'] = list(remote_tags.symmetric_difference(
all_local_tags
))

if not has_remote_rev:
try:
@@ -2238,13 +2255,18 @@ def detached(name,

local_commit_id = _get_local_rev_and_branch(target, user, password)[0]

if remote_rev_type is 'hash' \
and __salt__['git.describe'](target,
rev,
user=user,
password=password):
# The rev is a hash and it exists locally so skip to checkout
hash_exists_locally = True
if remote_rev_type is 'hash':
try:
__salt__['git.describe'](target,
rev,
user=user,
password=password,
ignore_retcode=True)
except CommandExecutionError:
hash_exists_locally = False
else:
# The rev is a hash and it exists locally so skip to checkout
hash_exists_locally = True
else:
# Check that remote is present and set to correct url
remotes = __salt__['git.remotes'](target,
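Rewriting the ``git.describe`` call as try/except means a nonexistent hash no longer bubbles up as a state failure, and ``ignore_retcode=True`` keeps the failed lookup out of the error logs. The same probe-then-fallback pattern in miniature (a hypothetical helper shelling out to git, for illustration only):

.. code-block:: python

    import subprocess

    def hash_exists_locally(repo_dir, rev):
        """Probe a repo for `rev`, treating a nonzero exit as 'not present'."""
        try:
            subprocess.run(
                ['git', '-C', repo_dir, 'describe', '--always', rev],
                check=True, capture_output=True)
        except subprocess.CalledProcessError:
            return False
        return True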
@@ -42,7 +42,7 @@ import sys
from salt.ext import six

# Import Salt libs
import salt.utils
import salt.utils.platform
import salt.utils.win_functions


@@ -61,7 +61,7 @@ def _changes(name,

# User and Domain names are not case sensitive in Windows. Let's make them
# all lower case so we can compare properly
if salt.utils.is_windows():
if salt.utils.platform.is_windows():
if lgrp['members']:
lgrp['members'] = [user.lower() for user in lgrp['members']]
if members:
@@ -194,7 +194,6 @@ at some point be deprecated in favor of a more generic ``firewall`` state.
from __future__ import absolute_import

# Import salt libs
import salt.utils
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS


@@ -810,7 +809,7 @@ def mod_aggregate(low, chunks, running):
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = salt.utils.gen_state_tag(chunk)
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the iptables state, skip aggregation
continue
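Both ``mod_aggregate`` hunks swap the old helper for ``__utils__['state.gen_tag']``; the tag itself is just the four low-data fields joined with ``_|-``, exactly as the ``gen_state_tag`` body further down shows. For example:

.. code-block:: python

    low = {'state': 'pkg', '__id__': 'install_nginx',
           'name': 'nginx', 'fun': 'installed'}
    tag = '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
    assert tag == 'pkg_|-install_nginx_|-nginx_|-installed'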
@@ -268,7 +268,7 @@ def lv_present(name,
else:
lvpath = '/dev/{0}/{1}'.format(vgname, name)

if __salt__['lvm.lvdisplay'](lvpath):
if __salt__['lvm.lvdisplay'](lvpath, quiet=True):
ret['comment'] = 'Logical Volume {0} already present'.format(name)
elif __opts__['test']:
ret['comment'] = 'Logical Volume {0} is set to be created'.format(name)
@@ -75,7 +75,8 @@ def mounted(name,
extra_mount_invisible_keys=None,
extra_mount_ignore_fs_keys=None,
extra_mount_translate_options=None,
hidden_opts=None):
hidden_opts=None,
**kwargs):
'''
Verify that a device is mounted

@@ -698,7 +699,8 @@ def unmounted(name,
device=None,
config='/etc/fstab',
persist=False,
user=None):
user=None,
**kwargs):
'''
.. versionadded:: 0.17.0

@@ -92,6 +92,13 @@ def managed(name,
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).

compliance_report: ``False``
Return the compliance report in the comment.
The structured compliance report object can, however, be found in the
``pchanges`` field of the output (not displayed on the CLI).

.. versionadded:: 2017.7.3

test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
@@ -140,6 +147,7 @@ def managed(name,
debug = kwargs.get('debug', False) or __opts__.get('debug', False)
commit = kwargs.get('commit', True) or __opts__.get('commit', True)
replace = kwargs.get('replace', False) or __opts__.get('replace', False)
return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False)
profiles = kwargs.get('profiles', [])
temp_file = __salt__['temp.file']()
log.debug('Creating temp file: {0}'.format(temp_file))
@@ -180,7 +188,13 @@ def managed(name,
log.debug('Loaded config result:')
log.debug(loaded_changes)
__salt__['file.remove'](temp_file)
return salt.utils.napalm.loaded_ret(ret, loaded_changes, test, debug)
loaded_changes['compliance_report'] = compliance_report
return salt.utils.napalm.loaded_ret(ret,
loaded_changes,
test,
debug,
opts=__opts__,
compliance_report=return_compliance_report)
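The new ``compliance_report`` flag is read from either the state kwargs or the master opts, and the structured report is attached to the load result before it is summarized. Schematically (a hedged sketch of the flow with an invented report shape, not the napalm util itself):

.. code-block:: python

    def attach_report(loaded_changes, kwargs, opts):
        """Mirror the kwarg-or-opts lookup used for the other flags above."""
        want_report = kwargs.get('compliance_report', False) \
            or opts.get('compliance_report', False)
        if want_report:
            # Hypothetical report shape; it stays in `pchanges`, so the
            # CLI output remains compact.
            loaded_changes['compliance_report'] = {'complies': True,
                                                   'diff': {}}
        return loaded_changes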


def configured(name,
@@ -81,7 +81,6 @@ import os
import re

# Import Salt libs
import salt.utils  # Can be removed once gen_state_tag is moved
import salt.utils.pkg
import salt.utils.platform
import salt.utils.versions
@@ -3071,7 +3070,7 @@ def mod_aggregate(low, chunks, running):
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = salt.utils.gen_state_tag(chunk)
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
@@ -97,6 +97,7 @@ import salt.utils
import salt.utils.files
import salt.utils.pkg.deb
import salt.utils.pkg.rpm
import salt.utils.versions


def __virtual__():
@@ -132,7 +133,7 @@ def managed(name, ppa=None, **kwargs):

disabled : False
Included to reduce confusion due to APT's use of the ``disabled``
argument. If this is passed for a yum/dnf/zypper-based distro, then the
argument. If this is passed for a YUM/DNF/Zypper-based distro, then the
reverse will be passed as ``enabled``. For example passing
``disabled=True`` will assume ``enabled=False``.

@@ -151,7 +152,7 @@ def managed(name, ppa=None, **kwargs):
enabled configuration. Anything supplied for this list will be saved
in the repo configuration with a comment marker (#) in front.

Additional configuration values seen in yum repo files, such as ``gpgkey`` or
Additional configuration values seen in repo files, such as ``gpgkey`` or
``gpgcheck``, will be used directly as key-value pairs. For example:

.. code-block:: yaml
@@ -258,29 +259,45 @@ def managed(name, ppa=None, **kwargs):

Use either ``keyid``/``keyserver`` or ``key_url``, but not both.

consolidate
If set to true, this will consolidate all sources definitions to
the sources.list file, cleanup the now unused files, consolidate
components (e.g. main) for the same URI, type, and architecture
to a single line, and finally remove comments from the sources.list
file. The consolidate will run every time the state is processed. The
option only needs to be set on one repo managed by salt to take effect.
consolidate : False
If set to ``True``, this will consolidate all sources definitions to the
sources.list file, cleanup the now unused files, consolidate components
(e.g. main) for the same URI, type, and architecture to a single line,
and finally remove comments from the sources.list file. The consolidate
will run every time the state is processed. The option only needs to be
set on one repo managed by salt to take effect.

clean_file
If set to true, empty the file before configuring the repo; dangerous
if multiple sources are configured in one file.
clean_file : False
If set to ``True``, empty the file before configuring the repo

.. note::
Use with care. This can be dangerous if multiple sources are
configured in the same file.

.. versionadded:: 2015.8.0

refresh_db
If set to false this will skip refreshing the apt package database on
Debian-based systems.
refresh : True
If set to ``False`` this will skip refreshing the apt package database
on Debian-based systems.

refresh_db : True
.. deprecated:: Oxygen
Use ``refresh`` instead.

require_in
Set this to a list of pkg.installed or pkg.latest to trigger the
running of apt-get update prior to attempting to install these
packages. Setting a require in the pkg will not work for this.
packages. Setting a require in the pkg state will not work for this.
'''
if 'refresh_db' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'refresh_db\' argument to \'pkg.mod_repo\' has been '
'renamed to \'refresh\'. Support for using \'refresh_db\' will be '
'removed in the Neon release of Salt.'
)
kwargs['refresh'] = kwargs.pop('refresh_db')
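The ``refresh_db`` to ``refresh`` rename follows Salt's usual deprecation dance: warn until the named release, then forward the old kwarg to the new one. The pattern in isolation (a generic sketch using the stdlib, not ``warn_until`` itself):

.. code-block:: python

    import warnings

    def handle_renamed_kwarg(kwargs, old='refresh_db', new='refresh',
                             removed_in='Neon'):
        """Warn about `old` and forward its value to `new`."""
        if old in kwargs:
            warnings.warn(
                '\'{0}\' has been renamed to \'{1}\' and will be removed '
                'in the {2} release.'.format(old, new, removed_in),
                DeprecationWarning)
            kwargs[new] = kwargs.pop(old)
        return kwargs

    assert handle_renamed_kwarg({'refresh_db': False}) == {'refresh': False}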

ret = {'name': name,
'changes': {},
'result': None,
@@ -30,7 +30,6 @@ import time

# Import salt libs
import salt.syspaths
import salt.utils  # Can be removed once check_state_result is moved
import salt.utils.event
import salt.utils.versions
from salt.ext import six
@@ -83,7 +82,8 @@ def state(name,
batch=None,
queue=False,
subset=None,
orchestration_jid=None):
orchestration_jid=None,
**kwargs):
'''
Invoke a state run on a given target

@@ -341,7 +341,7 @@ def state(name,
except KeyError:
m_state = False
if m_state:
m_state = salt.utils.check_state_result(m_ret, recurse=True)
m_state = __utils__['state.check_result'](m_ret, recurse=True)

if not m_state:
if minion not in fail_minions:
@@ -481,7 +481,6 @@ def container_setting(name, container, settings=None):
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.

Example of usage for the ``AppPools`` container:

.. code-block:: yaml
@@ -510,6 +509,8 @@ def container_setting(name, container, settings=None):
logFile.period: Daily
limits.maxUrlSegments: 32
'''

identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'}
ret = {'name': name,
'changes': {},
'comment': str(),
@@ -529,6 +530,10 @@ def container_setting(name, container, settings=None):
container=container,
settings=settings.keys())
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]

if str(settings[setting]) != str(current_settings[setting]):
ret_settings['changes'][setting] = {'old': current_settings[setting],
'new': settings[setting]}
@@ -541,8 +546,8 @@ def container_setting(name, container, settings=None):
ret['changes'] = ret_settings
return ret

__salt__['win_iis.set_container_setting'](name=name, container=container,
settings=settings)
__salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings)

new_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())
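Mapping ``processModel.identityType`` from its numeric form to the string form before the comparison avoids a spurious diff when IIS reports ``4`` but the SLS says ``ApplicationPoolIdentity``. The normalization in isolation:

.. code-block:: python

    identity_map = {0: 'LocalSystem', 1: 'LocalService',
                    2: 'NetworkService', 3: 'SpecificUser',
                    4: 'ApplicationPoolIdentity'}

    settings = {'processModel.identityType': 4,
                'managedPipelineMode': 'Integrated'}
    key = 'processModel.identityType'
    if settings[key] in identity_map:
        settings[key] = identity_map[settings[key]]
    assert settings[key] == 'ApplicationPoolIdentity'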
@ -1,6 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Some of the utils used by salt
|
||||
|
||||
NOTE: The dev team is working on splitting up this file for the Oxygen release.
|
||||
Please do not add any new functions to this file. New functions should be
|
||||
organized in other files under salt/utils/. Please consult the dev team if you
|
||||
are unsure where a new function should go.
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
@ -14,9 +19,7 @@ import fnmatch
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import numbers
|
||||
import os
|
||||
import posixpath
|
||||
import random
|
||||
import re
|
||||
import shlex
|
||||
@ -24,10 +27,8 @@ import shutil
|
||||
import socket
|
||||
import sys
|
||||
import pstats
|
||||
import tempfile
|
||||
import time
|
||||
import types
|
||||
import warnings
|
||||
import string
|
||||
import subprocess
|
||||
import getpass
|
||||
@ -35,7 +36,6 @@ import getpass
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=no-name-in-module
|
||||
# pylint: disable=redefined-builtin
|
||||
from salt.ext.six.moves import range
|
||||
from salt.ext.six.moves import zip
|
||||
@ -121,7 +121,6 @@ import salt.utils.dictupdate
|
||||
import salt.utils.versions
|
||||
import salt.version
|
||||
from salt.utils.decorators.jinja import jinja_filter
|
||||
from salt.textformat import TextFormat
|
||||
from salt.exceptions import (
|
||||
CommandExecutionError, SaltClientError,
|
||||
CommandNotFoundError, SaltSystemExit,
|
||||
@ -130,84 +129,6 @@ from salt.exceptions import (
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
_empty = object()
|
||||
|
||||
|
||||
def get_color_theme(theme):
|
||||
'''
|
||||
Return the color theme to use
|
||||
'''
|
||||
# Keep the heavy lifting out of the module space
|
||||
import yaml
|
||||
if not os.path.isfile(theme):
|
||||
log.warning('The named theme {0} if not available'.format(theme))
|
||||
|
||||
# Late import to avoid circular import.
|
||||
import salt.utils.files
|
||||
try:
|
||||
with salt.utils.files.fopen(theme, 'rb') as fp_:
|
||||
colors = yaml.safe_load(fp_.read())
|
||||
ret = {}
|
||||
for color in colors:
|
||||
ret[color] = '\033[{0}m'.format(colors[color])
|
||||
if not isinstance(colors, dict):
|
||||
log.warning('The theme file {0} is not a dict'.format(theme))
|
||||
return {}
|
||||
return ret
|
||||
except Exception:
|
||||
log.warning('Failed to read the color theme {0}'.format(theme))
|
||||
return {}
|
||||
|
||||
|
||||
def get_colors(use=True, theme=None):
|
||||
'''
|
||||
Return the colors as an easy to use dict. Pass `False` to deactivate all
|
||||
colors by setting them to empty strings. Pass a string containing only the
|
||||
name of a single color to be used in place of all colors. Examples:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
colors = get_colors() # enable all colors
|
||||
no_colors = get_colors(False) # disable all colors
|
||||
red_colors = get_colors('RED') # set all colors to red
|
||||
'''
|
||||
|
||||
colors = {
|
||||
'BLACK': TextFormat('black'),
|
||||
'DARK_GRAY': TextFormat('bold', 'black'),
|
||||
'RED': TextFormat('red'),
|
||||
'LIGHT_RED': TextFormat('bold', 'red'),
|
||||
'GREEN': TextFormat('green'),
|
||||
'LIGHT_GREEN': TextFormat('bold', 'green'),
|
||||
'YELLOW': TextFormat('yellow'),
|
||||
'LIGHT_YELLOW': TextFormat('bold', 'yellow'),
|
||||
'BLUE': TextFormat('blue'),
|
||||
'LIGHT_BLUE': TextFormat('bold', 'blue'),
|
||||
'MAGENTA': TextFormat('magenta'),
|
||||
'LIGHT_MAGENTA': TextFormat('bold', 'magenta'),
|
||||
'CYAN': TextFormat('cyan'),
|
||||
'LIGHT_CYAN': TextFormat('bold', 'cyan'),
|
||||
'LIGHT_GRAY': TextFormat('white'),
|
||||
'WHITE': TextFormat('bold', 'white'),
|
||||
'DEFAULT_COLOR': TextFormat('default'),
|
||||
'ENDC': TextFormat('reset'),
|
||||
}
|
||||
if theme:
|
||||
colors.update(get_color_theme(theme))
|
||||
|
||||
if not use:
|
||||
for color in colors:
|
||||
colors[color] = ''
|
||||
if isinstance(use, six.string_types):
|
||||
# Try to set all of the colors to the passed color
|
||||
if use in colors:
|
||||
for color in colors:
|
||||
# except for color reset
|
||||
if color == 'ENDC':
|
||||
continue
|
||||
colors[color] = colors[use]
|
||||
|
||||
return colors
|
||||
|
||||
|
||||
def get_context(template, line, num_lines=5, marker=None):
|
||||
@ -1449,148 +1370,6 @@ def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
|
||||
return ret
|
||||
|
||||
|
||||
def gen_state_tag(low):
|
||||
'''
|
||||
Generate the running dict tag string from the low data structure
|
||||
'''
|
||||
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
|
||||
|
||||
|
||||
def search_onfail_requisites(sid, highstate):
|
||||
"""
|
||||
For a particular low chunk, search relevant onfail related
|
||||
states
|
||||
"""
|
||||
onfails = []
|
||||
if '_|-' in sid:
|
||||
st = salt.state.split_low_tag(sid)
|
||||
else:
|
||||
st = {'__id__': sid}
|
||||
for fstate, fchunks in six.iteritems(highstate):
|
||||
if fstate == st['__id__']:
|
||||
continue
|
||||
else:
|
||||
for mod_, fchunk in six.iteritems(fchunks):
|
||||
if (
|
||||
not isinstance(mod_, six.string_types) or
|
||||
mod_.startswith('__')
|
||||
):
|
||||
continue
|
||||
else:
|
||||
if not isinstance(fchunk, list):
|
||||
continue
|
||||
else:
|
||||
# bydefault onfail will fail, but you can
|
||||
# set onfail_stop: False to prevent the highstate
|
||||
# to stop if you handle it
|
||||
onfail_handled = False
|
||||
for fdata in fchunk:
|
||||
if not isinstance(fdata, dict):
|
||||
continue
|
||||
onfail_handled = (fdata.get('onfail_stop', True)
|
||||
is False)
|
||||
if onfail_handled:
|
||||
break
|
||||
if not onfail_handled:
|
||||
continue
|
||||
for fdata in fchunk:
|
||||
if not isinstance(fdata, dict):
|
||||
continue
|
||||
for knob, fvalue in six.iteritems(fdata):
|
||||
if knob != 'onfail':
|
||||
continue
|
||||
for freqs in fvalue:
|
||||
for fmod, fid in six.iteritems(freqs):
|
||||
if not (
|
||||
fid == st['__id__'] and
|
||||
fmod == st.get('state', fmod)
|
||||
):
|
||||
continue
|
||||
onfails.append((fstate, mod_, fchunk))
|
||||
return onfails
|
||||
|
||||
|
||||
def check_onfail_requisites(state_id, state_result, running, highstate):
|
||||
'''
|
||||
When a state fail and is part of a highstate, check
|
||||
if there is onfail requisites.
|
||||
When we find onfail requisites, we will consider the state failed
|
||||
only if at least one of those onfail requisites also failed
|
||||
|
||||
Returns:
|
||||
|
||||
True: if onfail handlers suceeded
|
||||
False: if one on those handler failed
|
||||
None: if the state does not have onfail requisites
|
||||
|
||||
'''
|
||||
nret = None
|
||||
if (
|
||||
state_id and state_result and
|
||||
highstate and isinstance(highstate, dict)
|
||||
):
|
||||
onfails = search_onfail_requisites(state_id, highstate)
|
||||
if onfails:
|
||||
for handler in onfails:
|
||||
fstate, mod_, fchunk = handler
|
||||
ofresult = True
|
||||
for rstateid, rstate in six.iteritems(running):
|
||||
if '_|-' in rstateid:
|
||||
st = salt.state.split_low_tag(rstateid)
|
||||
# in case of simple state, try to guess
|
||||
else:
|
||||
id_ = rstate.get('__id__', rstateid)
|
||||
if not id_:
|
||||
raise ValueError('no state id')
|
||||
st = {'__id__': id_, 'state': mod_}
|
||||
if mod_ == st['state'] and fstate == st['__id__']:
|
||||
ofresult = rstate.get('result', _empty)
|
||||
if ofresult in [False, True]:
|
||||
nret = ofresult
|
||||
if ofresult is False:
|
||||
# as soon as we find an errored onfail, we stop
|
||||
break
|
||||
# consider that if we parsed onfailes without changing
|
||||
# the ret, that we have failed
|
||||
if nret is None:
|
||||
nret = False
|
||||
return nret
|
||||
|
||||
|
||||
def check_state_result(running, recurse=False, highstate=None):
|
||||
'''
|
||||
Check the total return value of the run and determine if the running
|
||||
dict has any issues
|
||||
'''
|
||||
if not isinstance(running, dict):
|
||||
return False
|
||||
|
||||
if not running:
|
||||
return False
|
||||
|
||||
ret = True
|
||||
for state_id, state_result in six.iteritems(running):
|
||||
if not recurse and not isinstance(state_result, dict):
|
||||
ret = False
|
||||
if ret and isinstance(state_result, dict):
|
||||
result = state_result.get('result', _empty)
|
||||
if result is False:
|
||||
ret = False
|
||||
# only override return value if we are not already failed
|
||||
elif result is _empty and isinstance(state_result, dict) and ret:
|
||||
ret = check_state_result(
|
||||
state_result, recurse=True, highstate=highstate)
|
||||
# if we detect a fail, check for onfail requisites
|
||||
if not ret:
|
||||
# ret can be None in case of no onfail reqs, recast it to bool
|
||||
ret = bool(check_onfail_requisites(state_id, state_result,
|
||||
running, highstate))
|
||||
# return as soon as we got a failure
|
||||
if not ret:
|
||||
break
|
||||
return ret
|
||||
|
||||
|
||||
def st_mode_to_octal(mode):
|
||||
'''
|
||||
Convert the st_mode value from a stat(2) call (as returned from os.stat())
|
||||
@ -3430,3 +3209,135 @@ def kwargs_warn_until(kwargs,
|
||||
stacklevel=stacklevel,
|
||||
_version_info_=_version_info_,
|
||||
_dont_call_warnings=_dont_call_warnings)
|
||||
|
||||
|
||||
def get_color_theme(theme):
|
||||
'''
|
||||
Return the color theme to use
|
||||
|
||||
.. deprecated:: Oxygen
|
||||
'''
|
||||
# Late import to avoid circular import.
|
||||
import salt.utils.color
|
||||
import salt.utils.versions
|
||||
|
||||
salt.utils.versions.warn_until(
|
||||
'Neon',
|
||||
'Use of \'salt.utils.get_color_theme\' detected. This function has '
|
||||
'been moved to \'salt.utils.color.get_color_theme\' as of Salt '
|
||||
'Oxygen. This warning will be removed in Salt Neon.'
|
||||
)
|
||||
return salt.utils.color.get_color_theme(theme)
|
||||
|
||||
|
||||
def get_colors(use=True, theme=None):
    '''
    Return the colors as an easy to use dict. Pass `False` to deactivate all
    colors by setting them to empty strings. Pass a string containing only the
    name of a single color to be used in place of all colors. Examples:

    .. code-block:: python

        colors = get_colors()  # enable all colors
        no_colors = get_colors(False)  # disable all colors
        red_colors = get_colors('RED')  # set all colors to red

    .. deprecated:: Oxygen
    '''
    # Late import to avoid circular import.
    import salt.utils.color
    import salt.utils.versions

    salt.utils.versions.warn_until(
        'Neon',
        'Use of \'salt.utils.get_colors\' detected. This function has '
        'been moved to \'salt.utils.color.get_colors\' as of Salt '
        'Oxygen. This warning will be removed in Salt Neon.'
    )
    return salt.utils.color.get_colors(use=use, theme=theme)


def gen_state_tag(low):
    '''
    Generate the running dict tag string from the low data structure

    .. deprecated:: Oxygen
    '''
    # Late import to avoid circular import.
    import salt.utils.versions
    import salt.utils.state
    salt.utils.versions.warn_until(
        'Neon',
        'Use of \'salt.utils.gen_state_tag\' detected. This function has been '
        'moved to \'salt.utils.state.gen_tag\' as of Salt Oxygen. This warning '
        'will be removed in Salt Neon.'
    )
    return salt.utils.state.gen_tag(low)


def search_onfail_requisites(sid, highstate):
    '''
    For a particular low chunk, search relevant onfail related states

    .. deprecated:: Oxygen
    '''
    # Late import to avoid circular import.
    import salt.utils.versions
    import salt.utils.state
    salt.utils.versions.warn_until(
        'Neon',
        'Use of \'salt.utils.search_onfail_requisites\' detected. This function '
        'has been moved to \'salt.utils.state.search_onfail_requisites\' as of '
        'Salt Oxygen. This warning will be removed in Salt Neon.'
    )
    return salt.utils.state.search_onfail_requisites(sid, highstate)


def check_onfail_requisites(state_id, state_result, running, highstate):
    '''
    When a state fails and is part of a highstate, check
    whether it has onfail requisites.
    When we find onfail requisites, we consider the state failed
    only if at least one of those onfail requisites also failed.

    Returns:

        True: if the onfail handlers succeeded
        False: if one of those handlers failed
        None: if the state does not have onfail requisites

    .. deprecated:: Oxygen
    '''
    # Late import to avoid circular import.
    import salt.utils.versions
    import salt.utils.state
    salt.utils.versions.warn_until(
        'Neon',
        'Use of \'salt.utils.check_onfail_requisites\' detected. This function '
        'has been moved to \'salt.utils.state.check_onfail_requisites\' as of '
        'Salt Oxygen. This warning will be removed in Salt Neon.'
    )
    return salt.utils.state.check_onfail_requisites(
        state_id, state_result, running, highstate
    )


def check_state_result(running, recurse=False, highstate=None):
    '''
    Check the total return value of the run and determine if the running
    dict has any issues

    .. deprecated:: Oxygen
    '''
    # Late import to avoid circular import.
    import salt.utils.versions
    import salt.utils.state
    salt.utils.versions.warn_until(
        'Neon',
        'Use of \'salt.utils.check_state_result\' detected. This function '
        'has been moved to \'salt.utils.state.check_result\' as of '
        'Salt Oxygen. This warning will be removed in Salt Neon.'
    )
    return salt.utils.state.check_result(
        running, recurse=recurse, highstate=highstate
    )

@ -392,7 +392,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
    service_url = prov_dict.get('service_url', 'amazonaws.com')

    if not location:
        location = get_location(opts, provider)
        location = get_location(opts, prov_dict)

    if endpoint is None:
        if not requesturl:

@ -292,12 +292,14 @@ def salt_config_to_yaml(configuration, line_break='\n'):
        Dumper=SafeOrderedDumper)


def bootstrap(vm_, opts):
def bootstrap(vm_, opts=None):
    '''
    This is the primary entry point for logging into any system (POSIX or
    Windows) to install Salt. It will make the decision on its own as to which
    deploy function to call.
    '''
    if opts is None:
        opts = __opts__
    deploy_config = salt.config.get_cloud_config_value(
        'deploy',
        vm_, opts, default=False)

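# A hedged sketch of the new default (the vm_ definition below is
# hypothetical): inside a salt-cloud context, where the loader injects
# __opts__, the second argument can now be omitted entirely.
vm_ = {'name': 'web01', 'driver': 'ec2', 'deploy': False}
ret = bootstrap(vm_)  # equivalent to bootstrap(vm_, opts=__opts__)
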
salt/utils/color.py (new file, 92 lines)
@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
'''
Functions used for CLI color themes.
'''

# Import Python libs
from __future__ import absolute_import
import logging
import os

# Import Salt libs
from salt.ext import six
from salt.textformat import TextFormat
import salt.utils.files

log = logging.getLogger(__name__)


def get_color_theme(theme):
    '''
    Return the color theme to use
    '''
    # Keep the heavy lifting out of the module space
    import yaml
    if not os.path.isfile(theme):
        log.warning('The named theme {0} is not available'.format(theme))

    try:
        with salt.utils.files.fopen(theme, 'rb') as fp_:
            colors = yaml.safe_load(fp_.read())
            # Validate the shape before iterating, so a non-dict theme
            # cannot raise part-way through the loop
            if not isinstance(colors, dict):
                log.warning('The theme file {0} is not a dict'.format(theme))
                return {}
            ret = {}
            for color in colors:
                ret[color] = '\033[{0}m'.format(colors[color])
            return ret
    except Exception:
        log.warning('Failed to read the color theme {0}'.format(theme))
        return {}


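# A minimal sketch of the theme format this function expects (the path and
# SGR values below are hypothetical): a flat YAML mapping of color names to
# ANSI SGR parameter strings, each wrapped into an escape sequence.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as fp:
    fp.write('RED: "0;31"\nYELLOW: "1;33"\n')
print(get_color_theme(fp.name))
# -> {'RED': '\x1b[0;31m', 'YELLOW': '\x1b[1;33m'}
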
def get_colors(use=True, theme=None):
    '''
    Return the colors as an easy to use dict. Pass `False` to deactivate all
    colors by setting them to empty strings. Pass a string containing only the
    name of a single color to be used in place of all colors. Examples:

    .. code-block:: python

        colors = get_colors()  # enable all colors
        no_colors = get_colors(False)  # disable all colors
        red_colors = get_colors('RED')  # set all colors to red

    '''

    colors = {
        'BLACK': TextFormat('black'),
        'DARK_GRAY': TextFormat('bold', 'black'),
        'RED': TextFormat('red'),
        'LIGHT_RED': TextFormat('bold', 'red'),
        'GREEN': TextFormat('green'),
        'LIGHT_GREEN': TextFormat('bold', 'green'),
        'YELLOW': TextFormat('yellow'),
        'LIGHT_YELLOW': TextFormat('bold', 'yellow'),
        'BLUE': TextFormat('blue'),
        'LIGHT_BLUE': TextFormat('bold', 'blue'),
        'MAGENTA': TextFormat('magenta'),
        'LIGHT_MAGENTA': TextFormat('bold', 'magenta'),
        'CYAN': TextFormat('cyan'),
        'LIGHT_CYAN': TextFormat('bold', 'cyan'),
        'LIGHT_GRAY': TextFormat('white'),
        'WHITE': TextFormat('bold', 'white'),
        'DEFAULT_COLOR': TextFormat('default'),
        'ENDC': TextFormat('reset'),
    }
    if theme:
        colors.update(get_color_theme(theme))

    if not use:
        for color in colors:
            colors[color] = ''
    if isinstance(use, six.string_types):
        # Try to set all of the colors to the passed color
        if use in colors:
            for color in colors:
                # except for color reset
                if color == 'ENDC':
                    continue
                colors[color] = colors[use]

    return colors
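# A brief usage sketch: TextFormat instances render to their escape
# sequence under str.format(), which is how the dict is typically consumed.
colors = get_colors()
print('{RED}failed{ENDC} / {GREEN}ok{ENDC}'.format(**colors))
plain = get_colors(use=False)
assert plain['RED'] == ''  # every value is emptied when use=False
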
@ -7,11 +7,13 @@ import contextlib
import errno
import logging
import os
import re
import shutil
import stat
import subprocess
import tempfile
import time
import urllib

# Import Salt libs
import salt.utils  # Can be removed when backup_minion is moved
@ -460,3 +462,39 @@ def is_fcntl_available(check_sunos=False):
    if check_sunos and salt.utils.platform.is_sunos():
        return False
    return HAS_FCNTL


def safe_filename_leaf(file_basename):
    '''
    Accept the basename of a file (without the directory tree) and return a
    safe name to use, i.e. only the reserved characters are converted by
    urllib.quote. If the input is a PY2 String, output a PY2 String; if the
    input is Unicode, output Unicode. For consistency, all platforms are
    treated the same. Hard coded to utf-8 as it is ASCII compatible.
    Reserved characters: on Windows \\ / : * ? " < > | and on POSIX /

    .. versionadded:: 2017.7.2
    '''
    def _replace(re_obj):
        return urllib.quote(re_obj.group(0), safe=u'')
    if not isinstance(file_basename, six.text_type):
        # the following string is not prefixed with u
        return re.sub('[\\\\:/*?"<>|]',
                      _replace,
                      six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace'))
    # the following string is prefixed with u
    return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE)


def safe_filepath(file_path_name):
    '''
    Input the full path and filename, split on the directory separator and
    call safe_filename_leaf for each part of the path.

    .. versionadded:: 2017.7.2
    '''
    (drive, path) = os.path.splitdrive(file_path_name)
    # sanitise each component of the drive-less path
    path = os.sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(os.sep)])
    if drive:
        return os.sep.join([drive, path])
    else:
        return path

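# A short sketch on POSIX under PY2 semantics (hypothetical names): reserved
# characters are percent-quoted per path component, separators survive.
assert safe_filename_leaf(u'rc?.d') == u'rc%3F.d'
assert safe_filepath(u'/etc/salt?/minion') == u'/etc/salt%3F/minion'
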
@ -2894,18 +2894,6 @@ class GitPillar(GitBase):
            return False
        return True

    def update(self):
        '''
        Execute a git fetch on all of the repos. In this case, simply execute
        self.fetch_remotes() from the parent class.

        This function only exists to make the git_pillar update code in
        master.py (salt.master.Maintenance.handle_git_pillar) less complicated;
        once the legacy git_pillar code is purged we can remove this function
        and just run pillar.fetch_remotes() there.
        '''
        return self.fetch_remotes()


class WinRepo(GitBase):
    '''

@ -723,6 +723,13 @@ class CkMinions(object):
        '''
        Read in the form and determine which auth check routine to execute
        '''
        # This function is only called from salt.auth.Authorize(), which is
        # also deprecated and will be removed in Neon.
        salt.utils.versions.warn_until(
            'Neon',
            'The \'any_auth\' function has been deprecated. Support for this '
            'function will be removed in Salt {version}.'
        )
        if form == 'publish':
            return self.auth_check(
                auth_list,

@ -22,6 +22,7 @@ import importlib
from functools import wraps

# Import Salt libs
import salt.output
import salt.utils.platform

# Import 3rd-party libs
@ -432,58 +433,58 @@ def default_ret(name):
    return ret


def loaded_ret(ret, loaded, test, debug):
def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):
    '''
    Return the final state output.

    ret
        The initial state output structure.

    loaded
        The loaded dictionary.
    '''
    # Always get the comment
    ret.update({
        'comment': loaded.get('comment', '')
    })
    changes = {}
    pchanges = {}
    ret['comment'] = loaded['comment']
    if 'diff' in loaded:
        changes['diff'] = loaded['diff']
        pchanges['diff'] = loaded['diff']
    if 'compliance_report' in loaded:
        if compliance_report:
            changes['compliance_report'] = loaded['compliance_report']
            pchanges['compliance_report'] = loaded['compliance_report']
    if debug and 'loaded_config' in loaded:
        changes['loaded_config'] = loaded['loaded_config']
        pchanges['loaded_config'] = loaded['loaded_config']
    ret['pchanges'] = pchanges
    if changes.get('diff'):
        ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'],
                                                                                  diff=changes['diff'])
    if changes.get('loaded_config'):
        ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format(
            comment_base=ret['comment'],
            loaded_cfg=changes['loaded_config'])
    if changes.get('compliance_report'):
        ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format(
            comment_base=ret['comment'],
            compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts))
    if not loaded.get('result', False):
        # Failure of some sort
        return ret
    if debug:
        # Always check for debug
        pchanges.update({
            'loaded_config': loaded.get('loaded_config', '')
        })
        ret.update({
            "pchanges": pchanges
        })
    if not loaded.get('already_configured', True):
        # We're making changes
        pchanges.update({
            "diff": loaded.get('diff', '')
        })
        ret.update({
            'pchanges': pchanges
        })
        if test:
            for k, v in pchanges.items():
                ret.update({
                    "comment": "{}:\n{}\n\n{}".format(k, v, ret.get("comment", ''))
                })
            ret.update({
                'result': None,
            })
            ret['result'] = None
            return ret
        # Not test, changes were applied
        ret.update({
            'result': True,
            'changes': pchanges,
            'comment': "Configuration changed!\n{}".format(ret.get('comment', ''))
            'changes': changes,
            'comment': "Configuration changed!\n{}".format(loaded['comment'])
        })
        return ret
    # No changes
    ret.update({
        'result': True
        'result': True,
        'changes': {}
    })
    return ret

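# A hedged sketch of the reworked signature (the loaded dict below is
# hypothetical, shaped like a NAPALM-style module return):
ret = default_ret('router_acl')
loaded = {
    'result': True,
    'already_configured': False,
    'comment': '',
    'diff': '- permit tcp any\n+ deny tcp any',
}
out = loaded_ret(ret, loaded, test=False, debug=False)
# out['result'] is True, out['changes']['diff'] carries the diff, and the
# comment is prefixed with 'Configuration changed!'
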
@ -290,8 +290,9 @@ class SaltNova(object):
        self.session = keystoneauth1.session.Session(auth=options, verify=verify)
        conn = client.Client(version=self.version, session=self.session, **self.client_kwargs)
        self.kwargs['auth_token'] = conn.client.session.get_token()
        self.catalog = conn.client.session.get('/auth/catalog', endpoint_filter={'service_type': 'identity'}).json().get('catalog', [])
        if conn.client.get_endpoint(service_type='identity').endswith('v3'):
        identity_service_type = kwargs.get('identity_service_type', 'identity')
        self.catalog = conn.client.session.get('/auth/catalog', endpoint_filter={'service_type': identity_service_type}).json().get('catalog', [])
        if conn.client.get_endpoint(service_type=identity_service_type).endswith('v3'):
            self._v3_setup(region_name)
        else:
            self._v2_setup(region_name)

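# A hedged sketch (constructor parameters assumed, not verified against the
# full class): a cloud whose Keystone catalog registers its identity service
# under a non-default service type can now pass that through the kwargs
# consulted above.
nova = SaltNova(
    username='admin',
    project_id='tenant',
    auth_url='https://keystone.example.com:5000/v3',
    password='secret',
    identity_service_type='identityv3',  # hypothetical value
)
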
@ -104,8 +104,12 @@ def parse_pkginfo(line, osarch=None):
    if epoch not in ('(none)', '0'):
        version = ':'.join((epoch, version))

    install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z"
    install_date_time_t = int(install_time)
    if install_time not in ('(none)', '0'):
        install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z"
        install_date_time_t = int(install_time)
    else:
        install_date = None
        install_date_time_t = None

    return pkginfo(name, version, arch, repoid, install_date, install_date_time_t)

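# A short sketch of the guard above: rpm reports '(none)' or '0' when the
# install time is unknown, which previously made int() raise ValueError.
import datetime

for install_time in ('1500000000', '(none)'):
    if install_time not in ('(none)', '0'):
        install_date = datetime.datetime.utcfromtimestamp(
            int(install_time)).isoformat() + 'Z'
    else:
        install_date = None
    print(install_time, '->', install_date)
# 1500000000 -> 2017-07-14T02:40:00Z
# (none) -> None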