Merge branch 'develop' of github.com:saltstack/salt into develop

This commit is contained in:
C. R. Oldham 2014-11-04 15:44:16 -07:00
commit 3505ca2cc8
168 changed files with 3913 additions and 1626 deletions

1
.gitignore vendored
View File

@ -40,6 +40,7 @@ htmlcov/
/.idea /.idea
/.ropeproject /.ropeproject
.ropeproject .ropeproject
*_flymake.py
/*.iml /*.iml
*.sublime-project *.sublime-project
*.sublime-workspace *.sublime-workspace

View File

@ -4844,3 +4844,27 @@ source_file = _build/locale/topics/releases/2014.1.11.pot
source_lang = en source_lang = en
source_name = topics/releases/2014.1.11.rst source_name = topics/releases/2014.1.11.rst
[salt.ref--cli--salt-unity]
file_filter = locale/<lang>/LC_MESSAGES/ref/cli/salt-unity.po
source_file = _build/locale/ref/cli/salt-unity.pot
source_lang = en
source_name = ref/cli/salt-unity.rst
[salt.topics--development--architecture]
file_filter = locale/<lang>/LC_MESSAGES/topics/development/architecture.po
source_file = _build/locale/topics/development/architecture.pot
source_lang = en
source_name = topics/development/architecture.rst
[salt.topics--releases--2014_1_12]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/2014.1.12.po
source_file = _build/locale/topics/releases/2014.1.12.pot
source_lang = en
source_name = topics/releases/2014.1.12.rst
[salt.topics--releases--2014_1_13]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/2014.1.13.po
source_file = _build/locale/topics/releases/2014.1.13.pot
source_lang = en
source_name = topics/releases/2014.1.13.rst

View File

@ -10,5 +10,5 @@
<p>Latest Salt release: <a href="{{ pathto('topics/releases/{0}'.format(release)) }}">{{ release }}</a></p> <p>Latest Salt release: <a href="{{ pathto('topics/releases/{0}'.format(release)) }}">{{ release }}</a></p>
<p>Try the shiny new release candidate of Salt, <p>Try the shiny new release candidate of Salt,
<a href="{{ pathto('topics/releases/2014.7.0') }}">v2014.7.0rc6</a>! More info <a href="{{ pathto('topics/releases/2014.7.0') }}">v2014.7.0rc7</a>! More info
<a href="{{ pathto('topics/releases/releasecandidate') }}">here</a>.</p> <a href="{{ pathto('topics/releases/releasecandidate') }}">here</a>.</p>

View File

@ -240,6 +240,9 @@ distro the minion is running, in case they differ from the example below.
Windows Windows
******* *******
For Windows machines, restarting the minion at can be accomplished by
adding the following state:
.. code-block:: yaml .. code-block:: yaml
schedule-start: schedule-start:

View File

@ -39,15 +39,15 @@ Options
.. option:: --async .. option:: --async
Instead of waiting for the job to run on minions only print the jod id of Instead of waiting for the job to run on minions only print the job id of
the started execution and complete. the started execution and complete.
.. option:: --state-output=STATE_OUTPUT .. option:: --state-output=STATE_OUTPUT
.. versionadded:: 0.17 .. versionadded:: 0.17
Override the configured state_output value for minion output. Default: Choose the format of the state output. The options are `full`,
full `terse`, `mixed`, `changes` and `filter`. Default: full
.. option:: --subset=SUBSET .. option:: --subset=SUBSET
@ -75,7 +75,9 @@ Options
.. option:: -a EAUTH, --auth=EAUTH .. option:: -a EAUTH, --auth=EAUTH
Pass in an external authentication medium to validate against. The Pass in an external authentication medium to validate against. The
credentials will be prompted for. Can be used with the -T option. credentials will be prompted for. The options are `auto`,
`keystone`, `ldap`, `pam` and `stormpath`. Can be used with the -T
option.
.. option:: -T, --make-token .. option:: -T, --make-token
@ -85,9 +87,13 @@ Options
.. option:: --return=RETURNER .. option:: --return=RETURNER
Chose an alternative returner to call on the minion, if an alternative Choose an alternative returner to call on the minion, if an
returner is used then the return will not come back to the command line alternative returner is used then the return will not come back to
but will be sent to the specified return system. the command line but will be sent to the specified return system.
The options are `carbon`, `cassandra`, `couchbase`, `couchdb`,
`elasticsearch`, `etcd`, `hipchat`, `local`, `local_cache`,
`memcache`, `mongo`, `mysql`, `odbc`, `postgres`, `redis`,
`sentry`, `slack`, `sms`, `smtp`, `sqlite3`, `syslog` and `xmpp`.
.. option:: -d, --doc, --documentation .. option:: -d, --doc, --documentation

View File

@ -202,11 +202,11 @@ a state that has not yet been executed. The state containing the ``prereq``
requisite is defined as the pre-requiring state. The state specified in the requisite is defined as the pre-requiring state. The state specified in the
``prereq`` statement is defined as the pre-required state. ``prereq`` statement is defined as the pre-required state.
When ``prereq`` is called, the pre-required state reports if it expects to When a ``prereq`` requisite is evaluated, the pre-required state reports if it
have any changes. It does this by running the pre-required single state as a expects to have any changes. It does this by running the pre-required single
test-run by enabling ``test=True``. This test-run will return a dictionary state as a test-run by enabling ``test=True``. This test-run will return a
containing a key named "changes". (See the ``watch`` section above for dictionary containing a key named "changes". (See the ``watch`` section above
examples of "changes" dictionaries.) for examples of "changes" dictionaries.)
If the "changes" key contains a populated dictionary, it means that the If the "changes" key contains a populated dictionary, it means that the
pre-required state expects changes to occur when the state is actually pre-required state expects changes to occur when the state is actually

View File

@ -315,8 +315,7 @@ different from the base must be specified of the alternates:
'python': 'dev-python/mysql-python', 'python': 'dev-python/mysql-python',
}, },
}, },
merge=salt['pillar.get']('mysql:lookup'), merge=salt['pillar.get']('mysql:lookup'), base=default) %}
base=default) %}
Overriding values in the lookup table Overriding values in the lookup table
@ -338,6 +337,26 @@ Pillar would replace the ``config`` value from the call above.
lookup: lookup:
config: /usr/local/etc/mysql/my.cnf config: /usr/local/etc/mysql/my.cnf
.. note:: Protecting Expansion of Content with Special Characters
When templating keep in mind that YAML does have special characters
for quoting, flows and other special structure and content. When a
Jinja substitution may have special characters that will be
incorrectly parsed by YAML the expansion must be protected by quoting.
It is a good policy to quote all Jinja expansions especially when
values may originate from Pillar. Salt provides a Jinja filter for
doing just this: ``yaml_dquote``
.. code-block:: jinja
{%- set baz = '"The quick brown fox . . ."' %}
{%- set zap = 'The word of the day is "salty".' %}
{%- load_yaml as foo %}
bar: {{ baz|yaml_dquote }}
zip: {{ zap|yaml_dquote }}
{%- endload %}
Single-purpose SLS files Single-purpose SLS files
------------------------ ------------------------

Binary file not shown.

View File

@ -107,6 +107,10 @@ try:
PAM_AUTHENTICATE = LIBPAM.pam_authenticate PAM_AUTHENTICATE = LIBPAM.pam_authenticate
PAM_AUTHENTICATE.restype = c_int PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int] PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
PAM_END = LIBPAM.pam_end
PAM_END.restype = c_int
PAM_END.argtypes = [PamHandle, c_int]
except Exception: except Exception:
HAS_PAM = False HAS_PAM = False
else: else:
@ -155,9 +159,11 @@ def authenticate(username, password, service='login'):
if retval != 0: if retval != 0:
# TODO: This is not an authentication error, something # TODO: This is not an authentication error, something
# has gone wrong starting up PAM # has gone wrong starting up PAM
PAM_END(handle, retval)
return False return False
retval = PAM_AUTHENTICATE(handle, 0) retval = PAM_AUTHENTICATE(handle, 0)
PAM_END(handle, 0)
return retval == 0 return retval == 0

91
salt/auth/yubico.py Normal file
View File

@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
'''
Provide authentication using YubiKey
To get your YubiKey API key you will need to visit the website below.
https://upgrade.yubico.com/getapikey/
The resulting page will show the generated Client ID (aka AuthID or API ID)
and the generated API key (Secret Key). Make a note of both and use these
two values in your /etc/salt/master configuration.
/etc/salt/master
.. code-block:: yaml
yubico_users:
damian:
id: 12345
key: ABCDEFGHIJKLMNOPQRSTUVWXYZ
.. code-block:: yaml
external_auth:
yubico:
damian:
- test.*
Please wait five to ten minutes after generating the key before testing so that
the API key will be updated on all the YubiCloud servers.
:depends: - yubico-client Python module
'''
import logging
log = logging.getLogger(__name__)
try:
from yubico_client import Yubico, yubico_exceptions
HAS_YUBICO = True
except ImportError:
HAS_YUBICO = False
def __get_yubico_users(username):
'''
Grab the YubiKey Client ID & Secret Key
'''
user = {}
try:
if __opts__['yubico_users'].get(username, None):
(user['id'], user['key']) = __opts__['yubico_users'][username].values()
else:
return None
except KeyError:
return None
return user
def auth(username, password):
'''
Authentcate against yubico server
'''
_cred = __get_yubico_users(username)
client = Yubico(_cred['id'], _cred['key'])
try:
if client.verify(password):
return True
else:
return False
except yubico_exceptions.StatusCodeError, e:
log.info('Unable to verify YubiKey `{0}`'.format(e))
return False
return False
if __name__ == '__main__':
__opts__ = {'yubico_users': {'damian': {'id': '12345', 'key': 'ABC123'}}}
if auth('damian', 'OPT'):
print "Authenticated"
else:
print "Failed to authenticate"

View File

@ -8,6 +8,7 @@ from __future__ import print_function
import logging import logging
import os import os
import sys import sys
from glob import glob
# Import salt libs # Import salt libs
import salt.cli.caller import salt.cli.caller
@ -20,6 +21,7 @@ import salt.output
import salt.runner import salt.runner
import salt.auth import salt.auth
import salt.key import salt.key
from salt.config import _expand_glob_path
from salt.utils import parsers, print_cli from salt.utils import parsers, print_cli
from salt.utils.verify import check_user, verify_env, verify_files from salt.utils.verify import check_user, verify_env, verify_files
@ -167,7 +169,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
retcodes = [] retcodes = []
try: try:
# local will be None when there was an error # local will be None when there was an error
errors = []
if local: if local:
if self.options.subset: if self.options.subset:
cmd_func = local.cmd_subset cmd_func = local.cmd_subset
@ -193,20 +194,16 @@ class SaltCMD(parsers.SaltCMDOptionParser):
kwargs['verbose'] = True kwargs['verbose'] = True
ret = {} ret = {}
for full_ret in cmd_func(**kwargs): for full_ret in cmd_func(**kwargs):
try:
ret_, out, retcode = self._format_ret(full_ret) ret_, out, retcode = self._format_ret(full_ret)
retcodes.append(retcode) retcodes.append(retcode)
self._output_ret(ret_, out) self._output_ret(ret_, out)
ret.update(ret_) ret.update(ret_)
except KeyError:
errors.append(full_ret)
# Returns summary # Returns summary
if self.config['cli_summary'] is True: if self.config['cli_summary'] is True:
if self.config['fun'] != 'sys.doc': if self.config['fun'] != 'sys.doc':
if self.options.output is None: if self.options.output is None:
self._print_returns_summary(ret) self._print_returns_summary(ret)
self._print_errors_summary(errors)
# NOTE: Return code is set here based on if all minions # NOTE: Return code is set here based on if all minions
# returned 'ok' with a retcode of 0. # returned 'ok' with a retcode of 0.
@ -220,15 +217,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
out = '' out = ''
self._output_ret(ret, out) self._output_ret(ret, out)
def _print_errors_summary(self, errors):
if errors:
print_cli('\n')
print_cli('---------------------------')
print_cli('Errors')
print_cli('---------------------------')
for minion in errors:
print_cli(self._format_error(minion))
def _print_returns_summary(self, ret): def _print_returns_summary(self, ret):
''' '''
Display returns summary Display returns summary
@ -281,12 +269,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
retcode = data['retcode'] retcode = data['retcode']
return ret, out, retcode return ret, out, retcode
def _format_error(self, minion_error):
for minion, error_doc in minion_error.items():
error = 'Minion [{0}] encountered exception \'{1}\''.format(minion, error_doc['exception']['message'])
return error
def _print_docs(self, ret): def _print_docs(self, ret):
''' '''
Print out the docstrings for all of the functions on the minions Print out the docstrings for all of the functions on the minions
@ -418,12 +400,12 @@ class SaltCall(parsers.SaltCallOptionParser):
if self.options.file_root: if self.options.file_root:
# check if the argument is pointing to a file on disk # check if the argument is pointing to a file on disk
file_root = os.path.abspath(self.options.file_root) file_root = os.path.abspath(self.options.file_root)
self.config['file_roots'] = {'base': [file_root]} self.config['file_roots'] = {'base': _expand_glob_path([file_root])}
if self.options.pillar_root: if self.options.pillar_root:
# check if the argument is pointing to a file on disk # check if the argument is pointing to a file on disk
pillar_root = os.path.abspath(self.options.pillar_root) pillar_root = os.path.abspath(self.options.pillar_root)
self.config['pillar_roots'] = {'base': [pillar_root]} self.config['pillar_roots'] = {'base': _expand_glob_path([pillar_root])}
if self.options.local: if self.options.local:
self.config['file_client'] = 'local' self.config['file_client'] = 'local'

View File

@ -156,7 +156,7 @@ class Batch(object):
# add all minions that belong to this iterator and # add all minions that belong to this iterator and
# that have not responded to parts{} with an empty response # that have not responded to parts{} with an empty response
for minion in minion_tracker[queue]['minions']: for minion in minion_tracker[queue]['minions']:
if minion not in parts.keys(): if minion not in parts:
parts[minion] = {} parts[minion] = {}
parts[minion]['ret'] = {} parts[minion]['ret'] = {}
@ -180,7 +180,7 @@ class Batch(object):
self.opts) self.opts)
# remove inactive iterators from the iters list # remove inactive iterators from the iters list
for queue in minion_tracker.keys(): for queue in minion_tracker:
# only remove inactive queues # only remove inactive queues
if not minion_tracker[queue]['active'] and queue in iters: if not minion_tracker[queue]['active'] and queue in iters:
iters.remove(queue) iters.remove(queue)

View File

@ -255,8 +255,11 @@ class RAETCaller(ZeroMQCaller):
''' '''
Pass in the command line options Pass in the command line options
''' '''
self.stack = self._setup_caller_stack(opts) stack, estatename, yardname = self._setup_caller_stack(opts)
self.stack = stack
salt.transport.jobber_stack = self.stack salt.transport.jobber_stack = self.stack
#salt.transport.jobber_estate_name = estatename
#salt.transport.jobber_yard_name = yardname
super(RAETCaller, self).__init__(opts) super(RAETCaller, self).__init__(opts)
@ -307,8 +310,8 @@ class RAETCaller(ZeroMQCaller):
raise ValueError(emsg) raise ValueError(emsg)
sockdirpath = opts['sock_dir'] sockdirpath = opts['sock_dir']
name = 'caller' + nacling.uuid(size=18) stackname = 'caller' + nacling.uuid(size=18)
stack = LaneStack(name=name, stack = LaneStack(name=stackname,
lanename=lanename, lanename=lanename,
sockdirpath=sockdirpath) sockdirpath=sockdirpath)
@ -318,4 +321,11 @@ class RAETCaller(ZeroMQCaller):
lanename=lanename, lanename=lanename,
dirpath=sockdirpath)) dirpath=sockdirpath))
log.debug("Created Caller Jobber Stack {0}\n".format(stack.name)) log.debug("Created Caller Jobber Stack {0}\n".format(stack.name))
return stack
# name of Road Estate for this caller
estatename = "{0}_{1}".format(role, kind)
# name of Yard for this caller
yardname = stack.local.name
# return identifiers needed to route back to this callers master
return (stack, estatename, yardname)

View File

@ -768,7 +768,7 @@ class LocalClient(object):
# get the info from the cache # get the info from the cache
ret = self.get_cache_returns(jid) ret = self.get_cache_returns(jid)
if ret != {}: if ret != {}:
found.update(set(ret.keys())) found.update(set(ret))
yield ret yield ret
# if you have all the returns, stop # if you have all the returns, stop
@ -778,7 +778,7 @@ class LocalClient(object):
# otherwise, get them from the event system # otherwise, get them from the event system
for event in event_iter: for event in event_iter:
if event != {}: if event != {}:
found.update(set(event.keys())) found.update(set(event))
yield event yield event
if len(found.intersection(minions)) >= len(minions): if len(found.intersection(minions)) >= len(minions):
raise StopIteration() raise StopIteration()
@ -787,8 +787,7 @@ class LocalClient(object):
def get_returns_no_block( def get_returns_no_block(
self, self,
jid, jid,
event=None, event=None):
gather_errors=False):
''' '''
Raw function to just return events of jid excluding timeout logic Raw function to just return events of jid excluding timeout logic
@ -798,7 +797,6 @@ class LocalClient(object):
event = self.event event = self.event
while True: while True:
if HAS_ZMQ: if HAS_ZMQ:
if not gather_errors:
try: try:
raw = event.get_event_noblock() raw = event.get_event_noblock()
if raw and raw.get('tag', '').startswith(jid): if raw and raw.get('tag', '').startswith(jid):
@ -810,18 +808,6 @@ class LocalClient(object):
yield None yield None
else: else:
raise raise
else:
try:
raw = event.get_event_noblock()
if raw and (raw.get('tag', '').startswith(jid) or raw.get('tag', '').startswith('_salt_error')):
yield raw
else:
yield None
except zmq.ZMQError as ex:
if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
yield None
else:
raise
else: else:
raw = event.get_event_noblock() raw = event.get_event_noblock()
if raw and raw.get('tag', '').startswith(jid): if raw and raw.get('tag', '').startswith(jid):
@ -837,7 +823,6 @@ class LocalClient(object):
tgt='*', tgt='*',
tgt_type='glob', tgt_type='glob',
expect_minions=False, expect_minions=False,
gather_errors=True,
**kwargs): **kwargs):
''' '''
Watch the event system and return job data as it comes in Watch the event system and return job data as it comes in
@ -868,7 +853,7 @@ class LocalClient(object):
syndic_wait = 0 syndic_wait = 0
last_time = False last_time = False
# iterator for this job's return # iterator for this job's return
ret_iter = self.get_returns_no_block(jid, gather_errors=gather_errors) ret_iter = self.get_returns_no_block(jid)
# iterator for the info of this job # iterator for the info of this job
jinfo_iter = [] jinfo_iter = []
timeout_at = time.time() + timeout timeout_at = time.time() + timeout
@ -886,10 +871,7 @@ class LocalClient(object):
# if we got None, then there were no events # if we got None, then there were no events
if raw is None: if raw is None:
break break
if gather_errors:
if raw['tag'] == '_salt_error':
ret = {raw['data']['id']: raw['data']['data']}
yield ret
if 'minions' in raw.get('data', {}): if 'minions' in raw.get('data', {}):
minions.update(raw['data']['minions']) minions.update(raw['data']['minions'])
continue continue
@ -1078,7 +1060,7 @@ class LocalClient(object):
ret[minion] = m_data ret[minion] = m_data
# if we have all the minion returns, lets just return # if we have all the minion returns, lets just return
if len(set(ret.keys()).intersection(minions)) >= len(minions): if len(set(ret).intersection(minions)) >= len(minions):
return ret return ret
# otherwise lets use the listener we created above to get the rest # otherwise lets use the listener we created above to get the rest
@ -1094,7 +1076,7 @@ class LocalClient(object):
ret[minion] = m_data ret[minion] = m_data
# are we done yet? # are we done yet?
if len(set(ret.keys()).intersection(minions)) >= len(minions): if len(set(ret).intersection(minions)) >= len(minions):
return ret return ret
# otherwise we hit the timeout, return what we have # otherwise we hit the timeout, return what we have

View File

@ -441,7 +441,7 @@ class SSH(object):
sret = {} sret = {}
outputter = self.opts.get('output', 'nested') outputter = self.opts.get('output', 'nested')
for ret in self.handle_ssh(): for ret in self.handle_ssh():
host = ret.keys()[0] host = ret.iterkeys().next()
self.cache_job(jid, host, ret[host]) self.cache_job(jid, host, ret[host])
ret = self.key_deploy(host, ret) ret = self.key_deploy(host, ret)
if not isinstance(ret[host], dict): if not isinstance(ret[host], dict):

View File

@ -177,7 +177,7 @@ class CloudClient(object):
self.opts['providers'].update({name: {driver: provider}}) self.opts['providers'].update({name: {driver: provider}})
for name, profile in pillars.pop('profiles', {}).items(): for name, profile in pillars.pop('profiles', {}).items():
provider = profile['provider'].split(':')[0] provider = profile['provider'].split(':')[0]
driver = self.opts['providers'][provider].keys()[0] driver = self.opts['providers'][provider].iterkeys().next()
profile['provider'] = '{0}:{1}'.format(provider, driver) profile['provider'] = '{0}:{1}'.format(provider, driver)
profile['profile'] = name profile['profile'] = name
self.opts['profiles'].update({name: profile}) self.opts['profiles'].update({name: profile})
@ -214,7 +214,7 @@ class CloudClient(object):
# also filter them to speedup methods like # also filter them to speedup methods like
# __filter_non_working_providers # __filter_non_working_providers
providers = [a.get('provider', '').split(':')[0] providers = [a.get('provider', '').split(':')[0]
for a in opts['profiles'].values() for a in opts['profiles'].itervalues()
if a.get('provider', '')] if a.get('provider', '')]
if providers: if providers:
_providers = opts.get('providers', {}) _providers = opts.get('providers', {})
@ -367,7 +367,7 @@ class CloudClient(object):
mapper = salt.cloud.Map(self._opts_defaults()) mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts['providers'] providers = self.opts['providers']
if provider in providers: if provider in providers:
provider += ':{0}'.format(providers[provider].keys()[0]) provider += ':{0}'.format(providers[provider].iterkeys().next())
else: else:
return False return False
if isinstance(names, str): if isinstance(names, str):
@ -400,7 +400,7 @@ class CloudClient(object):
mapper = salt.cloud.Map(self._opts_defaults()) mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel() providers = mapper.map_providers_parallel()
if provider in providers: if provider in providers:
provider += ':{0}'.format(providers[provider].keys()[0]) provider += ':{0}'.format(providers[provider].iterkeys().next())
else: else:
return False return False
if isinstance(names, str): if isinstance(names, str):
@ -1484,7 +1484,7 @@ class Cloud(object):
Remove any mis-configured cloud providers from the available listing Remove any mis-configured cloud providers from the available listing
''' '''
for alias, drivers in self.opts['providers'].copy().iteritems(): for alias, drivers in self.opts['providers'].copy().iteritems():
for driver in drivers.copy().keys(): for driver in drivers.copy():
fun = '{0}.get_configured_provider'.format(driver) fun = '{0}.get_configured_provider'.format(driver)
if fun not in self.clouds: if fun not in self.clouds:
# Mis-configured provider that got removed? # Mis-configured provider that got removed?
@ -1538,7 +1538,7 @@ class Map(Cloud):
interpolated_map = {} interpolated_map = {}
for profile, mapped_vms in rendered_map.items(): for profile, mapped_vms in rendered_map.items():
names = set(mapped_vms.keys()) names = set(mapped_vms)
if profile not in self.opts['profiles']: if profile not in self.opts['profiles']:
if 'Errors' not in interpolated_map: if 'Errors' not in interpolated_map:
interpolated_map['Errors'] = {} interpolated_map['Errors'] = {}
@ -1694,7 +1694,7 @@ class Map(Cloud):
def _has_loop(self, dmap, seen=None, val=None): def _has_loop(self, dmap, seen=None, val=None):
if seen is None: if seen is None:
for values in dmap['create'].values(): for values in dmap['create'].itervalues():
seen = [] seen = []
try: try:
machines = values['requires'] machines = values['requires']
@ -2097,7 +2097,7 @@ class Map(Cloud):
if self.opts['start_action']: if self.opts['start_action']:
actionlist = [] actionlist = []
grp = -1 grp = -1
for key, val in groupby(dmap['create'].values(), for key, val in groupby(dmap['create'].itervalues(),
lambda x: x['level']): lambda x: x['level']):
actionlist.append([]) actionlist.append([])
grp += 1 grp += 1
@ -2117,7 +2117,7 @@ class Map(Cloud):
timeout=self.opts['timeout'] * 60, expr_form='list' timeout=self.opts['timeout'] * 60, expr_form='list'
)) ))
for obj in output_multip: for obj in output_multip:
obj.values()[0]['ret'] = out[obj.keys()[0]] obj.itervalues().next()['ret'] = out[obj.iterkeys().next()]
output.update(obj) output.update(obj)
else: else:
for obj in output_multip: for obj in output_multip:

View File

@ -304,7 +304,7 @@ class SaltCloud(parsers.SaltCloudParser):
log.info('Complete') log.info('Complete')
if dmap.get('existing', None): if dmap.get('existing', None):
for name in dmap['existing'].keys(): for name in dmap['existing']:
ret[name] = {'Message': 'Already running'} ret[name] = {'Message': 'Already running'}
except (SaltCloudException, Exception) as exc: except (SaltCloudException, Exception) as exc:

View File

@ -102,7 +102,7 @@ def avail_locations(call=None):
ret = {} ret = {}
for region in items['Regions']['Region']: for region in items['Regions']['Region']:
ret[region['RegionId']] = {} ret[region['RegionId']] = {}
for item in region.keys(): for item in region:
ret[region['RegionId']][item] = str(region[item]) ret[region['RegionId']][item] = str(region[item])
return ret return ret
@ -133,7 +133,7 @@ def avail_images(kwargs=None, call=None):
ret = {} ret = {}
for image in items['Images']['Image']: for image in items['Images']['Image']:
ret[image['ImageId']] = {} ret[image['ImageId']] = {}
for item in image.keys(): for item in image:
ret[image['ImageId']][item] = str(image[item]) ret[image['ImageId']][item] = str(image[item])
return ret return ret
@ -155,7 +155,7 @@ def avail_sizes(call=None):
ret = {} ret = {}
for image in items['InstanceTypes']['InstanceType']: for image in items['InstanceTypes']['InstanceType']:
ret[image['InstanceTypeId']] = {} ret[image['InstanceTypeId']] = {}
for item in image.keys(): for item in image:
ret[image['InstanceTypeId']][item] = str(image[item]) ret[image['InstanceTypeId']][item] = str(image[item])
return ret return ret
@ -192,7 +192,7 @@ def list_availability_zones(call=None):
for zone in items['Zones']['Zone']: for zone in items['Zones']['Zone']:
ret[zone['ZoneId']] = {} ret[zone['ZoneId']] = {}
for item in zone.keys(): for item in zone:
ret[zone['ZoneId']][item] = str(zone[item]) ret[zone['ZoneId']][item] = str(zone[item])
return ret return ret
@ -225,7 +225,7 @@ def list_nodes_min(call=None):
for node in nodes['InstanceStatuses']['InstanceStatus']: for node in nodes['InstanceStatuses']['InstanceStatus']:
ret[node['InstanceId']] = {} ret[node['InstanceId']] = {}
for item in node.keys(): for item in node:
ret[node['InstanceId']][item] = node[item] ret[node['InstanceId']][item] = node[item]
return ret return ret
@ -299,7 +299,7 @@ def list_nodes_full(call=None):
'size': 'TODO', 'size': 'TODO',
'state': items['Status'] 'state': items['Status']
} }
for item in items.keys(): for item in items:
value = items[item] value = items[item]
if value is not None: if value is not None:
value = str(value) value = str(value)
@ -350,7 +350,7 @@ def list_securitygroup(call=None):
ret = {} ret = {}
for sg in result['SecurityGroups']['SecurityGroup']: for sg in result['SecurityGroups']['SecurityGroup']:
ret[sg['SecurityGroupId']] = {} ret[sg['SecurityGroupId']] = {}
for item in sg.keys(): for item in sg:
ret[sg['SecurityGroupId']][item] = sg[item] ret[sg['SecurityGroupId']][item] = sg[item]
return ret return ret
@ -368,7 +368,7 @@ def get_image(vm_):
if not vm_image: if not vm_image:
raise SaltCloudNotFound('No image specified for this VM.') raise SaltCloudNotFound('No image specified for this VM.')
if vm_image and str(vm_image) in images.keys(): if vm_image and str(vm_image) in images:
return images[vm_image]['ImageId'] return images[vm_image]['ImageId']
raise SaltCloudNotFound( raise SaltCloudNotFound(
'The specified image, {0!r}, could not be found.'.format(vm_image) 'The specified image, {0!r}, could not be found.'.format(vm_image)
@ -387,7 +387,7 @@ def get_securitygroup(vm_):
if not securitygroup: if not securitygroup:
raise SaltCloudNotFound('No securitygroup ID specified for this VM.') raise SaltCloudNotFound('No securitygroup ID specified for this VM.')
if securitygroup and str(securitygroup) in sgs.keys(): if securitygroup and str(securitygroup) in sgs:
return sgs[securitygroup]['SecurityGroupId'] return sgs[securitygroup]['SecurityGroupId']
raise SaltCloudNotFound( raise SaltCloudNotFound(
'The specified security group, {0!r}, could not be found.'.format( 'The specified security group, {0!r}, could not be found.'.format(
@ -407,7 +407,7 @@ def get_size(vm_):
if not vm_size: if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.') raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and str(vm_size) in sizes.keys(): if vm_size and str(vm_size) in sizes:
return sizes[vm_size]['InstanceTypeId'] return sizes[vm_size]['InstanceTypeId']
raise SaltCloudNotFound( raise SaltCloudNotFound(
@ -427,7 +427,7 @@ def __get_location(vm_):
if not vm_location: if not vm_location:
raise SaltCloudNotFound('No location specified for this VM.') raise SaltCloudNotFound('No location specified for this VM.')
if vm_location and str(vm_location) in locations.keys(): if vm_location and str(vm_location) in locations:
return locations[vm_location]['RegionId'] return locations[vm_location]['RegionId']
raise SaltCloudNotFound( raise SaltCloudNotFound(
'The specified location, {0!r}, could not be found.'.format( 'The specified location, {0!r}, could not be found.'.format(
@ -779,7 +779,7 @@ def show_disk(name, call=None):
for disk in items['Disks']['Disk']: for disk in items['Disks']['Disk']:
ret[disk['DiskId']] = {} ret[disk['DiskId']] = {}
for item in disk.keys(): for item in disk:
ret[disk['DiskId']][item] = str(disk[item]) ret[disk['DiskId']][item] = str(disk[item])
return ret return ret
@ -817,7 +817,7 @@ def list_monitor_data(kwargs=None, call=None):
for data in monitorData['InstanceMonitorData']: for data in monitorData['InstanceMonitorData']:
ret[data['InstanceId']] = {} ret[data['InstanceId']] = {}
for item in data.keys(): for item in data:
ret[data['InstanceId']][item] = str(data[item]) ret[data['InstanceId']][item] = str(data[item])
return ret return ret
@ -892,7 +892,7 @@ def show_image(kwargs, call=None):
for image in items['Images']['Image']: for image in items['Images']['Image']:
ret[image['ImageId']] = {} ret[image['ImageId']] = {}
for item in image.keys(): for item in image:
ret[image['ImageId']][item] = str(image[item]) ret[image['ImageId']][item] = str(image[item])
return ret return ret

View File

@ -111,7 +111,7 @@ def __virtual__():
# Let's bring the functions imported from libcloud_aws to the current # Let's bring the functions imported from libcloud_aws to the current
# namespace. # namespace.
keysdiff = set(POST_IMPORT_LOCALS_KEYS.keys()).difference( keysdiff = set(POST_IMPORT_LOCALS_KEYS).difference(
PRE_IMPORT_LOCALS_KEYS PRE_IMPORT_LOCALS_KEYS
) )
for key in keysdiff: for key in keysdiff:

View File

@ -82,7 +82,7 @@ def avail_locations(call=None):
ret = {} ret = {}
for region in items['regions']: for region in items['regions']:
ret[region['name']] = {} ret[region['name']] = {}
for item in region.keys(): for item in region:
ret[region['name']][item] = str(region[item]) ret[region['name']][item] = str(region[item])
return ret return ret
@ -102,7 +102,7 @@ def avail_images(call=None):
ret = {} ret = {}
for image in items['images']: for image in items['images']:
ret[image['id']] = {} ret[image['id']] = {}
for item in image.keys(): for item in image:
ret[image['id']][item] = str(image[item]) ret[image['id']][item] = str(image[item])
return ret return ret
@ -122,7 +122,7 @@ def avail_sizes(call=None):
ret = {} ret = {}
for size in items['sizes']: for size in items['sizes']:
ret[size['name']] = {} ret[size['name']] = {}
for item in size.keys(): for item in size:
ret[size['name']][item] = str(size[item]) ret[size['name']][item] = str(size[item])
return ret return ret
@ -165,7 +165,7 @@ def list_nodes_full(call=None):
ret = {} ret = {}
for node in items['droplets']: for node in items['droplets']:
ret[node['name']] = {} ret[node['name']] = {}
for item in node.keys(): for item in node:
value = node[item] value = node[item]
if value is not None: if value is not None:
value = str(value) value = str(value)
@ -598,7 +598,7 @@ def list_keypairs(call=None):
ret = {} ret = {}
for keypair in items['ssh_keys']: for keypair in items['ssh_keys']:
ret[keypair['name']] = {} ret[keypair['name']] = {}
for item in keypair.keys(): for item in keypair:
ret[keypair['name']][item] = str(keypair[item]) ret[keypair['name']][item] = str(keypair[item])
return ret return ret

View File

@ -226,7 +226,7 @@ def _xml_to_dict(xmltree):
if '}' in name: if '}' in name:
comps = name.split('}') comps = name.split('}')
name = comps[1] name = comps[1]
if name not in xmldict.keys(): if name not in xmldict:
if sys.version_info < (2, 7): if sys.version_info < (2, 7):
children_len = len(item.getchildren()) children_len = len(item.getchildren())
else: else:
@ -329,7 +329,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
params_with_headers['SignatureMethod'] = 'HmacSHA256' params_with_headers['SignatureMethod'] = 'HmacSHA256'
params_with_headers['Timestamp'] = '{0}'.format(timestamp) params_with_headers['Timestamp'] = '{0}'.format(timestamp)
params_with_headers['Version'] = ec2_api_version params_with_headers['Version'] = ec2_api_version
keys = sorted(params_with_headers.keys()) keys = sorted(params_with_headers)
values = map(params_with_headers.get, keys) values = map(params_with_headers.get, keys)
querystring = urllib.urlencode(list(zip(keys, values))) querystring = urllib.urlencode(list(zip(keys, values)))
@ -928,7 +928,7 @@ def get_availability_zone(vm_):
zones = _list_availability_zones() zones = _list_availability_zones()
# Validate user-specified AZ # Validate user-specified AZ
if avz not in zones.keys(): if avz not in zones:
raise SaltCloudException( raise SaltCloudException(
'The specified availability zone isn\'t valid in this region: ' 'The specified availability zone isn\'t valid in this region: '
'{0}\n'.format( '{0}\n'.format(
@ -1987,7 +1987,7 @@ def create(vm_=None, call=None):
'\'tag\' should be a dict.' '\'tag\' should be a dict.'
) )
for value in tags.values(): for value in tags.itervalues():
if not isinstance(value, str): if not isinstance(value, str):
raise SaltCloudConfigError( raise SaltCloudConfigError(
'\'tag\' values must be strings. Try quoting the values. ' '\'tag\' values must be strings. Try quoting the values. '
@ -2663,7 +2663,7 @@ def list_nodes_full(location=None, call=None):
if not location: if not location:
ret = {} ret = {}
locations = set( locations = set(
get_location(vm_) for vm_ in __opts__['profiles'].values() get_location(vm_) for vm_ in __opts__['profiles'].itervalues()
if _vm_provider_driver(vm_) if _vm_provider_driver(vm_)
) )
for loc in locations: for loc in locations:
@ -3524,10 +3524,10 @@ def get_console_output(
ret = {} ret = {}
data = query(params, return_root=True) data = query(params, return_root=True)
for item in data: for item in data:
if item.keys()[0] == 'output': if item.iterkeys().next() == 'output':
ret['output_decoded'] = binascii.a2b_base64(item.values()[0]) ret['output_decoded'] = binascii.a2b_base64(item.itervalues().next())
else: else:
ret[item.keys()[0]] = item.values()[0] ret[item.iterkeys().next()] = item.itervalues().next()
return ret return ret

View File

@ -42,66 +42,6 @@ Setting up Service Account Authentication:
/etc/salt/cloud file as 'service_account_private_key' setting. /etc/salt/cloud file as 'service_account_private_key' setting.
- Consider using a more secure location for your private key. - Consider using a more secure location for your private key.
Supported commands:
# Create a few instances fro profile_name in /etc/salt/cloud.profiles
- salt-cloud -p profile_name inst1 inst2 inst3
# Delete an instance
- salt-cloud -d inst1
# Look up data on an instance
- salt-cloud -a show_instance inst2
# List available locations (aka 'zones') for provider 'gce'
- salt-cloud --list-locations gce
# List available instance sizes (aka 'machine types') for provider 'gce'
- salt-cloud --list-sizes gce
# List available images for provider 'gce'
- salt-cloud --list-images gce
# Create a persistent disk
- salt-cloud -f create_disk gce disk_name=pd location=us-central1-b ima...
# Permanently delete a persistent disk
- salt-cloud -f delete_disk gce disk_name=pd
# Attach an existing disk to an existing instance
- salt-cloud -a attach_disk myinstance disk_name=mydisk mode=READ_ONLY
# Detach a disk from an instance
- salt-cloud -a detach_disk myinstance disk_name=mydisk
# Show information about the named disk
- salt-cloud -a show_disk myinstance disk_name=pd
- salt-cloud -f show_disk gce disk_name=pd
# Create a snapshot of a persistent disk
- salt-cloud -f create_snapshot gce name=snap-1 disk_name=pd
# Permanently delete a disk snapshot
- salt-cloud -f delete_snapshot gce name=snap-1
# Show information about the named snapshot
- salt-cloud -f show_snapshot gce name=snap-1
# Create a network
- salt-cloud -f create_network gce name=mynet cidr=10.10.10.0/24
# Delete a network
- salt-cloud -f delete_network gce name=mynet
# Show info for a network
- salt-cloud -f show_network gce name=mynet
# Create a firewall rule
- salt-cloud -f create_fwrule gce name=fw1 network=mynet allow=tcp:80
# Delete a firewall rule
- salt-cloud -f delete_fwrule gce name=fw1
# Show info for a firewall rule
-salt-cloud -f show_fwrule gce name=fw1
# Create a load-balancer HTTP health check
- salt-cloud -f create_hc gce name=hc path=/ port=80
# Delete a load-balancer HTTP health check
- salt-cloud -f delete_hc gce name=hc
# Show info about an HTTP health check
- salt-cloud -f show_hc gce name=hc
# Create a load-balancer configuration
- salt-cloud -f create_lb gce name=lb region=us-central1 ports=80 ...
# Delete a load-balancer configuration
- salt-cloud -f delete_lb gce name=lb
# Show details about load-balancer
- salt-cloud -f show_lb gce name=lb
# Add member to load-balancer
- salt-cloud -f attach_lb gce name=lb member=www1
# Remove member from load-balancer
- salt-cloud -f detach_lb gce name=lb member=www1
.. code-block:: yaml .. code-block:: yaml
my-gce-config: my-gce-config:

View File

@ -126,7 +126,7 @@ def get_image(vm_):
vm_image = config.get_cloud_config_value('image', vm_, __opts__) vm_image = config.get_cloud_config_value('image', vm_, __opts__)
if vm_image and str(vm_image) in images.keys(): if vm_image and str(vm_image) in images:
return images[vm_image] return images[vm_image]
raise SaltCloudNotFound( raise SaltCloudNotFound(
@ -143,7 +143,7 @@ def get_size(vm_):
if not vm_size: if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.') raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and str(vm_size) in sizes.keys(): if vm_size and str(vm_size) in sizes:
return sizes[vm_size] return sizes[vm_size]
raise SaltCloudNotFound( raise SaltCloudNotFound(
@ -697,7 +697,7 @@ def get_node(name):
:return: node object :return: node object
''' '''
nodes = list_nodes() nodes = list_nodes()
if name in nodes.keys(): if name in nodes:
return nodes[name] return nodes[name]
return None return None
@ -717,7 +717,7 @@ def joyent_node_state(id_):
'deleted': 2, 'deleted': 2,
'unknown': 4} 'unknown': 4}
if id_ not in states.keys(): if id_ not in states:
id_ = 'unknown' id_ = 'unknown'
return node_state(states[id_]) return node_state(states[id_])
@ -747,16 +747,16 @@ def reformat_node(item=None, full=False):
# add any undefined desired keys # add any undefined desired keys
for key in desired_keys: for key in desired_keys:
if key not in item.keys(): if key not in item:
item[key] = None item[key] = None
# remove all the extra key value pairs to provide a brief listing # remove all the extra key value pairs to provide a brief listing
if not full: if not full:
for key in item.keys(): for key in item:
if key not in desired_keys: if key not in desired_keys:
del item[key] del item[key]
if 'state' in item.keys(): if 'state' in item:
item['state'] = joyent_node_state(item['state']) item['state'] = joyent_node_state(item['state'])
return item return item
@ -779,7 +779,7 @@ def list_nodes(full=False, call=None):
ret = {} ret = {}
if POLL_ALL_LOCATIONS: if POLL_ALL_LOCATIONS:
for location in JOYENT_LOCATIONS.keys(): for location in JOYENT_LOCATIONS:
result = query(command='my/machines', location=location, result = query(command='my/machines', location=location,
method='GET') method='GET')
nodes = result[1] nodes = result[1]

View File

@ -225,7 +225,7 @@ def get_image(conn, vm_):
'ascii', 'salt-cloud-force-ascii' 'ascii', 'salt-cloud-force-ascii'
) )
for img in image_list.keys(): for img in image_list:
if vm_image in (image_list[img]['id'], img): if vm_image in (image_list[img]['id'], img):
return image_list[img]['id'] return image_list[img]['id']
@ -783,7 +783,7 @@ def list_nodes(call=None, **kwargs):
if not server_list: if not server_list:
return {} return {}
for server in server_list.keys(): for server in server_list:
server_tmp = conn.server_show(server_list[server]['id'])[server] server_tmp = conn.server_show(server_list[server]['id'])[server]
ret[server] = { ret[server] = {
'id': server_tmp['id'], 'id': server_tmp['id'],
@ -815,7 +815,7 @@ def list_nodes_full(call=None, **kwargs):
if not server_list: if not server_list:
return {} return {}
for server in server_list.keys(): for server in server_list:
try: try:
ret[server] = conn.server_show_libcloud( ret[server] = conn.server_show_libcloud(
server_list[server]['id'] server_list[server]['id']

View File

@ -721,7 +721,7 @@ def list_nodes(call=None):
nodes['error']['Errors']['Error']['Message'] nodes['error']['Errors']['Error']['Message']
) )
) )
for node in nodes.keys(): for node in nodes:
ret[node] = { ret[node] = {
'id': nodes[node]['hostname'], 'id': nodes[node]['hostname'],
'ram': nodes[node]['memoryCount'], 'ram': nodes[node]['memoryCount'],

View File

@ -305,7 +305,7 @@ def _deploy(vm_):
''' '''
# TODO: review salt.utils.cloud.bootstrap(vm_, __opts__) # TODO: review salt.utils.cloud.bootstrap(vm_, __opts__)
# TODO: review salt.utils.cloud.wait_for_ip # TODO: review salt.utils.cloud.wait_for_ip
ip_address = wait_for_ip(vm_['name']) ip_address = wait_for_ip(vm_)
template_user = config.get_cloud_config_value( template_user = config.get_cloud_config_value(
'template_user', vm_, __opts__ 'template_user', vm_, __opts__

View File

@ -456,7 +456,7 @@ def list_nodes_full(conn=None, call=None):
ret = {} ret = {}
for node in nodes: for node in nodes:
pairs = {} pairs = {}
for key, value in zip(node.__dict__.keys(), node.__dict__.values()): for key, value in zip(node.__dict__, node.__dict__.itervalues()):
pairs[key] = value pairs[key] = value
ret[node.name] = pairs ret[node.name] = pairs
del ret[node.name]['driver'] del ret[node.name]['driver']

View File

@ -639,13 +639,25 @@ def _validate_file_roots(opts):
if not isinstance(opts['file_roots'], dict): if not isinstance(opts['file_roots'], dict):
log.warning('The file_roots parameter is not properly formatted,' log.warning('The file_roots parameter is not properly formatted,'
' using defaults') ' using defaults')
return {'base': [salt.syspaths.BASE_FILE_ROOTS_DIR]} return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
for saltenv, dirs in list(opts['file_roots'].items()): for saltenv, dirs in list(opts['file_roots'].items()):
if not isinstance(dirs, list) and not isinstance(dirs, tuple): if not isinstance(dirs, list) and not isinstance(dirs, tuple):
opts['file_roots'][saltenv] = [] opts['file_roots'][saltenv] = []
opts['file_roots'][saltenv] = _expand_glob_path(opts['file_roots'][saltenv])
return opts['file_roots'] return opts['file_roots']
def _expand_glob_path(file_roots):
'''
Applies shell globbing to a set of directories and returns
the expanded paths
'''
unglobbed_path = []
for path in file_roots:
unglobbed_path.extend(glob.glob(path))
return unglobbed_path
def _validate_opts(opts): def _validate_opts(opts):
''' '''
Check that all of the types of values passed into the config are Check that all of the types of values passed into the config are
@ -1309,7 +1321,7 @@ def old_to_new(opts):
for provider in providers: for provider in providers:
provider_config = {} provider_config = {}
for opt in opts.keys(): for opt in opts:
if not opt.startswith(provider): if not opt.startswith(provider):
continue continue
value = opts.pop(opt) value = opts.pop(opt)
@ -1405,7 +1417,7 @@ def apply_vm_profiles_config(providers, overrides, defaults=None):
vms.pop(profile) vms.pop(profile)
continue continue
driver = providers[details['provider']].keys()[0] driver = providers[details['provider']].iterkeys().next()
providers[details['provider']][driver].setdefault( providers[details['provider']][driver].setdefault(
'profiles', {}).update({profile: details}) 'profiles', {}).update({profile: details})
details['provider'] = '{0[provider]}:{1}'.format(details, driver) details['provider'] = '{0[provider]}:{1}'.format(details, driver)
@ -1440,7 +1452,7 @@ def apply_vm_profiles_config(providers, overrides, defaults=None):
vms.pop(profile) vms.pop(profile)
continue continue
driver = providers[extended['provider']].keys()[0] driver = providers[extended['provider']].iterkeys().next()
providers[extended['provider']][driver].setdefault( providers[extended['provider']][driver].setdefault(
'profiles', {}).update({profile: extended}) 'profiles', {}).update({profile: extended})
@ -1757,7 +1769,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
if vm_['provider'] in opts['providers']: if vm_['provider'] in opts['providers']:
# There's only one driver defined for this provider. This is safe. # There's only one driver defined for this provider. This is safe.
alias_defs = opts['providers'].get(vm_['provider']) alias_defs = opts['providers'].get(vm_['provider'])
provider_driver_defs = alias_defs[alias_defs.keys()[0]] provider_driver_defs = alias_defs[alias_defs.iterkeys().next()]
if name in provider_driver_defs: if name in provider_driver_defs:
# The setting name exists in the VM's provider configuration. # The setting name exists in the VM's provider configuration.
# Return it! # Return it!

View File

@ -14,6 +14,7 @@ import multiprocessing
import traceback import traceback
import itertools import itertools
from collections import deque from collections import deque
import random
# Import salt libs # Import salt libs
import salt.daemons.masterapi import salt.daemons.masterapi
@ -275,7 +276,8 @@ class SaltRaetRoadStackJoined(ioflo.base.deeding.Deed):
joined = False joined = False
if stack and isinstance(stack, RoadStack): if stack and isinstance(stack, RoadStack):
if stack.remotes: if stack.remotes:
joined = stack.remotes.values()[0].joined for remote in stack.remotes.values():
joined = any([remote.joined for remote in stack.remotes.values()])
self.status.update(joined=joined) self.status.update(joined=joined)
@ -305,7 +307,7 @@ class SaltRaetRoadStackRejected(ioflo.base.deeding.Deed):
rejected = False rejected = False
if stack and isinstance(stack, RoadStack): if stack and isinstance(stack, RoadStack):
if stack.remotes: if stack.remotes:
rejected = (stack.remotes.values()[0].acceptance rejected = (stack.remotes.itervalues().next().acceptance
== raeting.acceptances.rejected) == raeting.acceptances.rejected)
else: # no remotes so assume rejected else: # no remotes so assume rejected
rejected = True rejected = True
@ -361,7 +363,8 @@ class SaltRaetRoadStackAllowed(ioflo.base.deeding.Deed):
allowed = False allowed = False
if stack and isinstance(stack, RoadStack): if stack and isinstance(stack, RoadStack):
if stack.remotes: if stack.remotes:
allowed = stack.remotes.values()[0].allowed for remote in stack.remotes.values():
allowed = any([remote.allowed for remote in stack.remotes.values()])
self.status.update(allowed=allowed) self.status.update(allowed=allowed)
@ -521,10 +524,22 @@ class SaltLoadPillar(ioflo.base.deeding.Deed):
''' '''
Initial pillar Initial pillar
''' '''
# default master is the first remote # default master is the first remote that is allowed
# this default destination will not work with multiple masters available_masters = [remote for remote in self.road_stack.value.remotes.values()
if remote.allowed]
while not available_masters:
available_masters = [remote for remote in self.road_stack.value.remotes.values()
if remote.allowed]
time.sleep(0.1)
random_master = self.opts.value.get('random_master')
if random_master:
master = available_masters[random.randint(0, len(available_masters) - 1)]
else:
master = available_masters[0]
route = {'src': (self.road_stack.value.local.name, None, None), route = {'src': (self.road_stack.value.local.name, None, None),
'dst': (self.road_stack.value.remotes.values()[0].name, None, 'remote_cmd')} 'dst': (self.road_stack.value.remotes.itervalues().next().name, None, 'remote_cmd')}
load = {'id': self.opts.value['id'], load = {'id': self.opts.value['id'],
'grains': self.grains.value, 'grains': self.grains.value,
'saltenv': self.opts.value['environment'], 'saltenv': self.opts.value['environment'],
@ -882,17 +897,30 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
if not self.road_stack.value.remotes: if not self.road_stack.value.remotes:
log.error("Missing joined master. Unable to route " log.error("Missing joined master. Unable to route "
"remote_cmd '{0}'.".format(msg)) "remote_cmd '{0}'.".format(msg))
d_estate = self.road_stack.value.remotes.values()[0].name return
#log.error("**** Missing destination estate for 'remote_cmd'. Unable to route "
#"remote_cmd '{0}'.".format(msg))
#return
d_estate = self.road_stack.value.remotes.itervalues().next().name
msg['route']['dst'] = (d_estate, d_yard, d_share) msg['route']['dst'] = (d_estate, d_yard, d_share)
log.error("**** Missing destination estate for 'remote_cmd'. "
"Using default route={0}.".format(msg['route']['dst']))
self.road_stack.value.message(msg, self.road_stack.value.message(msg,
self.road_stack.value.nameRemotes[d_estate].uid) self.road_stack.value.nameRemotes[d_estate].uid)
elif d_share == 'call_cmd': # salt call minion to master elif d_share == 'call_cmd': # salt call return pub to master
if not self.road_stack.value.remotes: if not self.road_stack.value.remotes:
log.error("Missing joined master. Unable to route " log.error("Missing joined master. Unable to route "
"call_cmd '{0}'.".format(msg)) "call_cmd '{0}'.".format(msg))
d_estate = self.road_stack.value.remotes.values()[0].name return
#log.error("**** Missing destination estate for 'call_cmd'. Unable to route "
#"call_cmd '{0}'.".format(msg))
#return
d_estate = self.road_stack.value.remotes.itervalues().next().name
d_share = 'remote_cmd' d_share = 'remote_cmd'
msg['route']['dst'] = (d_estate, d_yard, d_share) msg['route']['dst'] = (d_estate, d_yard, d_share)
log.error("**** Missing destination estate for 'call_cmd'. "
"Using default route={0}.".format(msg['route']['dst']))
self.road_stack.value.message(msg, self.road_stack.value.message(msg,
self.road_stack.value.nameRemotes[d_estate].uid) self.road_stack.value.nameRemotes[d_estate].uid)
@ -986,7 +1014,7 @@ class SaltRaetPublisher(ioflo.base.deeding.Deed):
''' '''
pub_data = pub_msg['return'] pub_data = pub_msg['return']
# only publish to available minions by intersecting sets # only publish to available minions by intersecting sets
minions = self.availables.value & set(self.stack.value.nameRemotes.keys()) minions = self.availables.value & set(self.stack.value.nameRemotes)
for minion in minions: for minion in minions:
uid = self.stack.value.fetchUidByName(minion) uid = self.stack.value.fetchUidByName(minion)
if uid: if uid:

View File

@ -55,7 +55,7 @@ framer bootstrap be inactive first join
frame joined frame joined
print Joined print Joined
go next go next if elapsed >= 2
frame allow frame allow
print Allowing... print Allowing...
@ -69,14 +69,16 @@ framer bootstrap be inactive first join
frame allowed frame allowed
print Allowed print Allowed
go next go next if elapsed >= 2
frame pillar frame pillar
print Pillaring
enter enter
do salt load pillar do salt load pillar
go loading go loading
frame loading frame loading
print Loading
enter enter
do salt load modules do salt load modules
go router go router

View File

@ -20,6 +20,13 @@ from raet.keeping import Keep
from salt.key import RaetKey from salt.key import RaetKey
# Python equivalent of an enum
APPL_KINDS = OrderedDict([('master', 0), ('minion', 1), ('syndic', 2), ('call', 3)])
APPL_KIND_NAMES = odict((v, k) for k, v in APPL_KINDS.iteritems()) # inverse map
ApplKind = namedtuple('ApplKind', APPL_KINDS)
applKinds = ApplKind(**APPL_KINDS)
class SaltKeep(Keep): class SaltKeep(Keep):
''' '''
RAET protocol estate on road data persistence for a given estate RAET protocol estate on road data persistence for a given estate
@ -105,8 +112,7 @@ class SaltKeep(Keep):
return None return None
mid = data['role'] mid = data['role']
statae = raeting.ACCEPTANCES.keys() for status in raeting.ACCEPTANCES:
for status in statae:
keydata = self.saltRaetKey.read_remote(mid, status) keydata = self.saltRaetKey.read_remote(mid, status)
if keydata: if keydata:
break break

View File

@ -326,7 +326,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
# other stack # other stack
opts = self.createOpts(role='other', opts = self.createOpts(role='other',
@ -564,7 +564,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
# other stack # other stack
opts = self.createOpts(role='other', opts = self.createOpts(role='other',
@ -804,7 +804,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
# other stack # other stack
opts = self.createOpts(role='other', opts = self.createOpts(role='other',
@ -1057,7 +1057,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
for data in [data1, data2]: for data in [data1, data2]:
remote = main.nameRemotes[data['name']] remote = main.nameRemotes[data['name']]
self.assertEqual(remote.name, data['name']) self.assertEqual(remote.name, data['name'])
@ -1200,7 +1200,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
for data in [data1, data2]: for data in [data1, data2]:
remote = main.nameRemotes[data['name']] remote = main.nameRemotes[data['name']]
self.assertEqual(remote.name, data['name']) self.assertEqual(remote.name, data['name'])
@ -1351,7 +1351,7 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(main.local.priver.keyhex, mainData['prihex']) self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
self.assertEqual(main.local.signer.keyhex, mainData['sighex']) self.assertEqual(main.local.signer.keyhex, mainData['sighex'])
self.assertEqual(len(main.remotes.values()), 2) self.assertEqual(len(main.remotes), 2)
for data in [data1, data2]: for data in [data1, data2]:
remote = main.nameRemotes[data['name']] remote = main.nameRemotes[data['name']]
self.assertEqual(remote.name, data['name']) self.assertEqual(remote.name, data['name'])
@ -1458,21 +1458,21 @@ class BasicTestCase(unittest.TestCase):
self.service(main, other, duration=1.0) self.service(main, other, duration=1.0)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other, main) self.allow(other, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -1580,21 +1580,21 @@ class BasicTestCase(unittest.TestCase):
self.join(other, main) self.join(other, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other, main) self.allow(other, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -1702,21 +1702,21 @@ class BasicTestCase(unittest.TestCase):
self.join(other, main) self.join(other, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other, main) self.allow(other, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other.transactions), 0) self.assertEqual(len(other.transactions), 0)
remote = other.remotes.values()[0] remote = other.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -1828,21 +1828,21 @@ class BasicTestCase(unittest.TestCase):
self.service(main, other1, duration=1.0) self.service(main, other1, duration=1.0)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other1.transactions), 0) self.assertEqual(len(other1.transactions), 0)
remote = other1.remotes.values()[0] remote = other1.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other1, main) self.allow(other1, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other1.transactions), 0) self.assertEqual(len(other1.transactions), 0)
remote = other1.remotes.values()[0] remote = other1.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -1954,21 +1954,21 @@ class BasicTestCase(unittest.TestCase):
self.join(other2, main) self.join(other2, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other2.transactions), 0) self.assertEqual(len(other2.transactions), 0)
remote = other2.remotes.values()[0] remote = other2.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other2, main) self.allow(other2, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other2.transactions), 0) self.assertEqual(len(other2.transactions), 0)
remote = other2.remotes.values()[0] remote = other2.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -2079,21 +2079,21 @@ class BasicTestCase(unittest.TestCase):
self.join(other1, main) self.join(other1, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other1.transactions), 0) self.assertEqual(len(other1.transactions), 0)
remote = other1.remotes.values()[0] remote = other1.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other1, main) self.allow(other1, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other1.transactions), 0) self.assertEqual(len(other1.transactions), 0)
remote = other1.remotes.values()[0] remote = other1.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))
@ -2148,21 +2148,21 @@ class BasicTestCase(unittest.TestCase):
self.join(other2, main) self.join(other2, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.assertEqual(len(other2.transactions), 0) self.assertEqual(len(other2.transactions), 0)
remote = other2.remotes.values()[0] remote = other2.remotes.itervalues().next()
self.assertTrue(remote.joined) self.assertTrue(remote.joined)
self.allow(other2, main) self.allow(other2, main)
self.assertEqual(len(main.transactions), 0) self.assertEqual(len(main.transactions), 0)
remote = main.remotes.values()[0] remote = main.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
self.assertEqual(len(other2.transactions), 0) self.assertEqual(len(other2.transactions), 0)
remote = other2.remotes.values()[0] remote = other2.remotes.itervalues().next()
self.assertTrue(remote.allowed) self.assertTrue(remote.allowed)
for remote in main.remotes.values(): for remote in main.remotes.itervalues():
path = os.path.join(main.keep.remotedirpath, path = os.path.join(main.keep.remotedirpath,
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext)) "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(path))

View File

@ -13,13 +13,6 @@ class SaltException(Exception):
Base exception class; all Salt-specific exceptions should subclass this Base exception class; all Salt-specific exceptions should subclass this
''' '''
def pack(self):
'''
Pack this exception into a serializable dictionary that is safe for
transport via msgpack
'''
return dict(message=self.__unicode__(), args=self.args)
class SaltClientError(SaltException): class SaltClientError(SaltException):
''' '''

View File

@ -14,7 +14,7 @@ import requests
# Import salt libs # Import salt libs
from salt.exceptions import ( from salt.exceptions import (
CommandExecutionError, MinionError, SaltReqTimeoutError CommandExecutionError, MinionError
) )
import salt.client import salt.client
import salt.crypt import salt.crypt
@ -985,11 +985,8 @@ class RemoteClient(Client):
load['loc'] = 0 load['loc'] = 0
else: else:
load['loc'] = fn_.tell() load['loc'] = fn_.tell()
try:
channel = self._get_channel() channel = self._get_channel()
data = channel.send(load) data = channel.send(load)
except SaltReqTimeoutError:
return ''
if not data: if not data:
if init_retries: if init_retries:
init_retries -= 1 init_retries -= 1
@ -1053,11 +1050,9 @@ class RemoteClient(Client):
load = {'saltenv': saltenv, load = {'saltenv': saltenv,
'prefix': prefix, 'prefix': prefix,
'cmd': '_file_list'} 'cmd': '_file_list'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def file_list_emptydirs(self, saltenv='base', prefix='', env=None): def file_list_emptydirs(self, saltenv='base', prefix='', env=None):
''' '''
@ -1076,11 +1071,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv, load = {'saltenv': saltenv,
'prefix': prefix, 'prefix': prefix,
'cmd': '_file_list_emptydirs'} 'cmd': '_file_list_emptydirs'}
try:
channel = self._get_channel() channel = self._get_channel()
channel.send(load) channel.send(load)
except SaltReqTimeoutError:
return ''
def dir_list(self, saltenv='base', prefix='', env=None): def dir_list(self, saltenv='base', prefix='', env=None):
''' '''
@ -1099,11 +1091,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv, load = {'saltenv': saltenv,
'prefix': prefix, 'prefix': prefix,
'cmd': '_dir_list'} 'cmd': '_dir_list'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def symlink_list(self, saltenv='base', prefix='', env=None): def symlink_list(self, saltenv='base', prefix='', env=None):
''' '''
@ -1112,11 +1101,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv, load = {'saltenv': saltenv,
'prefix': prefix, 'prefix': prefix,
'cmd': '_symlink_list'} 'cmd': '_symlink_list'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def hash_file(self, path, saltenv='base', env=None): def hash_file(self, path, saltenv='base', env=None):
''' '''
@ -1151,11 +1137,8 @@ class RemoteClient(Client):
load = {'path': path, load = {'path': path,
'saltenv': saltenv, 'saltenv': saltenv,
'cmd': '_file_hash'} 'cmd': '_file_hash'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def list_env(self, saltenv='base', env=None): def list_env(self, saltenv='base', env=None):
''' '''
@ -1173,33 +1156,24 @@ class RemoteClient(Client):
load = {'saltenv': saltenv, load = {'saltenv': saltenv,
'cmd': '_file_list'} 'cmd': '_file_list'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def envs(self): def envs(self):
''' '''
Return a list of available environments Return a list of available environments
''' '''
load = {'cmd': '_file_envs'} load = {'cmd': '_file_envs'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def master_opts(self): def master_opts(self):
''' '''
Return the master opts data Return the master opts data
''' '''
load = {'cmd': '_master_opts'} load = {'cmd': '_master_opts'}
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
def ext_nodes(self): def ext_nodes(self):
''' '''
@ -1211,11 +1185,8 @@ class RemoteClient(Client):
'opts': self.opts} 'opts': self.opts}
if self.auth: if self.auth:
load['tok'] = self.auth.gen_token('salt') load['tok'] = self.auth.gen_token('salt')
try:
channel = self._get_channel() channel = self._get_channel()
return channel.send(load) return channel.send(load)
except SaltReqTimeoutError:
return ''
class FSClient(RemoteClient): class FSClient(RemoteClient):

View File

@ -190,11 +190,6 @@ def diff_mtime_map(map1, map2):
''' '''
Is there a change to the mtime map? return a boolean Is there a change to the mtime map? return a boolean
''' '''
# check if the file lists are different
if cmp(sorted(map1.keys()), sorted(map2.keys())) != 0:
#log.debug('diff_mtime_map: the keys are different')
return True
# check if the mtimes are the same # check if the mtimes are the same
if cmp(sorted(map1), sorted(map2)) != 0: if cmp(sorted(map1), sorted(map2)) != 0:
#log.debug('diff_mtime_map: the maps are different') #log.debug('diff_mtime_map: the maps are different')

View File

@ -52,6 +52,7 @@ import re
import shutil import shutil
import subprocess import subprocess
from datetime import datetime from datetime import datetime
from salt._compat import text_type as _text_type
VALID_PROVIDERS = ('gitpython', 'pygit2', 'dulwich') VALID_PROVIDERS = ('gitpython', 'pygit2', 'dulwich')
PER_REMOTE_PARAMS = ('base', 'mountpoint', 'root') PER_REMOTE_PARAMS = ('base', 'mountpoint', 'root')
@ -614,14 +615,18 @@ def init():
per_remote_defaults = {} per_remote_defaults = {}
for param in override_params: for param in override_params:
per_remote_defaults[param] = __opts__['gitfs_{0}'.format(param)] per_remote_defaults[param] = \
_text_type(__opts__['gitfs_{0}'.format(param)])
for remote in __opts__['gitfs_remotes']: for remote in __opts__['gitfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults) repo_conf = copy.deepcopy(per_remote_defaults)
bad_per_remote_conf = False bad_per_remote_conf = False
if isinstance(remote, dict): if isinstance(remote, dict):
repo_url = next(iter(remote)) repo_url = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_url]) per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf: if not per_remote_conf:
log.error( log.error(
'Invalid per-remote configuration for remote {0}. If no ' 'Invalid per-remote configuration for remote {0}. If no '
@ -1253,10 +1258,9 @@ def serve_file(load, fnd):
required_load_keys = set(['path', 'loc', 'saltenv']) required_load_keys = set(['path', 'loc', 'saltenv'])
if not all(x in load for x in required_load_keys): if not all(x in load for x in required_load_keys):
log.debug( log.debug(
'Not all of the required key in load are present. Missing: {0}'.format( 'Not all of the required keys present in payload. '
', '.join( 'Missing: {0}'.format(
required_load_keys.difference(load.keys()) ', '.join(required_load_keys.difference(load))
)
) )
) )
return ret return ret

View File

@ -35,6 +35,7 @@ import logging
import os import os
import shutil import shutil
from datetime import datetime from datetime import datetime
from salt._compat import text_type as _text_type
VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed')
PER_REMOTE_PARAMS = ('base', 'branch_method', 'mountpoint', 'root') PER_REMOTE_PARAMS = ('base', 'branch_method', 'mountpoint', 'root')
@ -170,19 +171,23 @@ def init():
per_remote_defaults = {} per_remote_defaults = {}
for param in PER_REMOTE_PARAMS: for param in PER_REMOTE_PARAMS:
per_remote_defaults[param] = __opts__['hgfs_{0}'.format(param)] per_remote_defaults[param] = \
_text_type(__opts__['hgfs_{0}'.format(param)])
for remote in __opts__['hgfs_remotes']: for remote in __opts__['hgfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults) repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict): if isinstance(remote, dict):
repo_uri = next(iter(remote)) repo_url = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_uri]) per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf: if not per_remote_conf:
log.error( log.error(
'Invalid per-remote configuration for remote {0}. If no ' 'Invalid per-remote configuration for remote {0}. If no '
'per-remote parameters are being specified, there may be ' 'per-remote parameters are being specified, there may be '
'a trailing colon after the URI, which should be removed. ' 'a trailing colon after the URI, which should be removed. '
'Check the master configuration file.'.format(repo_uri) 'Check the master configuration file.'.format(repo_url)
) )
branch_method = \ branch_method = \
@ -192,7 +197,7 @@ def init():
log.error( log.error(
'Invalid branch_method {0!r} for remote {1}. Valid ' 'Invalid branch_method {0!r} for remote {1}. Valid '
'branch methods are: {2}. This remote will be ignored.' 'branch methods are: {2}. This remote will be ignored.'
.format(branch_method, repo_uri, .format(branch_method, repo_url,
', '.join(VALID_BRANCH_METHODS)) ', '.join(VALID_BRANCH_METHODS))
) )
continue continue
@ -203,18 +208,18 @@ def init():
'Invalid configuration parameter {0!r} for remote {1}. ' 'Invalid configuration parameter {0!r} for remote {1}. '
'Valid parameters are: {2}. See the documentation for ' 'Valid parameters are: {2}. See the documentation for '
'further information.'.format( 'further information.'.format(
param, repo_uri, ', '.join(PER_REMOTE_PARAMS) param, repo_url, ', '.join(PER_REMOTE_PARAMS)
) )
) )
per_remote_conf.pop(param) per_remote_conf.pop(param)
repo_conf.update(per_remote_conf) repo_conf.update(per_remote_conf)
else: else:
repo_uri = remote repo_url = remote
if not isinstance(repo_uri, string_types): if not isinstance(repo_url, string_types):
log.error( log.error(
'Invalid gitfs remote {0}. Remotes must be strings, you may ' 'Invalid gitfs remote {0}. Remotes must be strings, you may '
'need to enclose the URI in quotes'.format(repo_uri) 'need to enclose the URI in quotes'.format(repo_url)
) )
continue continue
@ -227,7 +232,7 @@ def init():
pass pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_uri).hexdigest() repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash) rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_): if not os.path.isdir(rp_):
os.makedirs(rp_) os.makedirs(rp_)
@ -243,7 +248,7 @@ def init():
'Cache path {0} (corresponding remote: {1}) exists but is not ' 'Cache path {0} (corresponding remote: {1}) exists but is not '
'a valid mercurial repository. You will need to manually ' 'a valid mercurial repository. You will need to manually '
'delete this directory on the master to continue to use this ' 'delete this directory on the master to continue to use this '
'hgfs remote.'.format(rp_, repo_uri) 'hgfs remote.'.format(rp_, repo_url)
) )
continue continue
@ -253,11 +258,11 @@ def init():
hgconfpath = os.path.join(rp_, '.hg', 'hgrc') hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
with salt.utils.fopen(hgconfpath, 'w+') as hgconfig: with salt.utils.fopen(hgconfpath, 'w+') as hgconfig:
hgconfig.write('[paths]\n') hgconfig.write('[paths]\n')
hgconfig.write('default = {0}\n'.format(repo_uri)) hgconfig.write('default = {0}\n'.format(repo_url))
repo_conf.update({ repo_conf.update({
'repo': repo, 'repo': repo,
'uri': repo_uri, 'url': repo_url,
'hash': repo_hash, 'hash': repo_hash,
'cachedir': rp_ 'cachedir': rp_
}) })
@ -271,7 +276,7 @@ def init():
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
for repo in repos: for repo in repos:
fp_.write('{0} = {1}\n'.format(repo['hash'], repo['uri'])) fp_.write('{0} = {1}\n'.format(repo['hash'], repo['url']))
except OSError: except OSError:
pass pass
else: else:
@ -323,7 +328,7 @@ def update():
except Exception as exc: except Exception as exc:
log.error( log.error(
'Exception {0} caught while updating hgfs remote {1}' 'Exception {0} caught while updating hgfs remote {1}'
.format(exc, repo['uri']), .format(exc, repo['url']),
exc_info_on_loglevel=logging.DEBUG exc_info_on_loglevel=logging.DEBUG
) )
else: else:

View File

@ -256,7 +256,7 @@ def file_list(load):
if not metadata or saltenv not in metadata: if not metadata or saltenv not in metadata:
return ret return ret
for buckets in _find_files(metadata[saltenv]).values(): for buckets in _find_files(metadata[saltenv]).itervalues():
files = filter(lambda f: not fs.is_file_ignored(__opts__, f), buckets) files = filter(lambda f: not fs.is_file_ignored(__opts__, f), buckets)
ret += _trim_env_off_path(files, saltenv) ret += _trim_env_off_path(files, saltenv)
@ -297,7 +297,7 @@ def dir_list(load):
return ret return ret
# grab all the dirs from the buckets cache file # grab all the dirs from the buckets cache file
for dirs in _find_dirs(metadata[saltenv]).values(): for dirs in _find_dirs(metadata[saltenv]).itervalues():
# trim env and trailing slash # trim env and trailing slash
dirs = _trim_env_off_path(dirs, saltenv, trim_slash=True) dirs = _trim_env_off_path(dirs, saltenv, trim_slash=True)
# remove empty string left by the base env dir in single bucket mode # remove empty string left by the base env dir in single bucket mode

View File

@ -30,6 +30,7 @@ import logging
import os import os
import shutil import shutil
from datetime import datetime from datetime import datetime
from salt._compat import text_type as _text_type
PER_REMOTE_PARAMS = ('mountpoint', 'root', 'trunk', 'branches', 'tags') PER_REMOTE_PARAMS = ('mountpoint', 'root', 'trunk', 'branches', 'tags')
@ -90,7 +91,7 @@ def _rev(repo):
log.error( log.error(
'Error retrieving revision ID for svnfs remote {0} ' 'Error retrieving revision ID for svnfs remote {0} '
'(cachedir: {1}): {2}' '(cachedir: {1}): {2}'
.format(repo['uri'], repo['repo'], exc) .format(repo['url'], repo['repo'], exc)
) )
else: else:
return repo_info['revision'].number return repo_info['revision'].number
@ -107,19 +108,23 @@ def init():
per_remote_defaults = {} per_remote_defaults = {}
for param in PER_REMOTE_PARAMS: for param in PER_REMOTE_PARAMS:
per_remote_defaults[param] = __opts__['svnfs_{0}'.format(param)] per_remote_defaults[param] = \
_text_type(__opts__['svnfs_{0}'.format(param)])
for remote in __opts__['svnfs_remotes']: for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults) repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict): if isinstance(remote, dict):
repo_uri = next(iter(remote)) repo_url = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_uri]) per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf: if not per_remote_conf:
log.error( log.error(
'Invalid per-remote configuration for remote {0}. If no ' 'Invalid per-remote configuration for remote {0}. If no '
'per-remote parameters are being specified, there may be ' 'per-remote parameters are being specified, there may be '
'a trailing colon after the URI, which should be removed. ' 'a trailing colon after the URI, which should be removed. '
'Check the master configuration file.'.format(repo_uri) 'Check the master configuration file.'.format(repo_url)
) )
for param in (x for x in per_remote_conf for param in (x for x in per_remote_conf
@ -128,18 +133,18 @@ def init():
'Invalid configuration parameter {0!r} for remote {1}. ' 'Invalid configuration parameter {0!r} for remote {1}. '
'Valid parameters are: {2}. See the documentation for ' 'Valid parameters are: {2}. See the documentation for '
'further information.'.format( 'further information.'.format(
param, repo_uri, ', '.join(PER_REMOTE_PARAMS) param, repo_url, ', '.join(PER_REMOTE_PARAMS)
) )
) )
per_remote_conf.pop(param) per_remote_conf.pop(param)
repo_conf.update(per_remote_conf) repo_conf.update(per_remote_conf)
else: else:
repo_uri = remote repo_url = remote
if not isinstance(repo_uri, string_types): if not isinstance(repo_url, string_types):
log.error( log.error(
'Invalid gitfs remote {0}. Remotes must be strings, you may ' 'Invalid gitfs remote {0}. Remotes must be strings, you may '
'need to enclose the URI in quotes'.format(repo_uri) 'need to enclose the URI in quotes'.format(repo_url)
) )
continue continue
@ -152,7 +157,7 @@ def init():
pass pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_uri).hexdigest() repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash) rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_): if not os.path.isdir(rp_):
os.makedirs(rp_) os.makedirs(rp_)
@ -160,13 +165,13 @@ def init():
if not os.listdir(rp_): if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty. # Only attempt a new checkout if the directory is empty.
try: try:
CLIENT.checkout(repo_uri, rp_) CLIENT.checkout(repo_url, rp_)
repos.append(rp_) repos.append(rp_)
new_remote = True new_remote = True
except pysvn._pysvn.ClientError as exc: except pysvn._pysvn.ClientError as exc:
log.error( log.error(
'Failed to initialize svnfs remote {0!r}: {1}' 'Failed to initialize svnfs remote {0!r}: {1}'
.format(repo_uri, exc) .format(repo_url, exc)
) )
continue continue
else: else:
@ -179,13 +184,13 @@ def init():
'Cache path {0} (corresponding remote: {1}) exists but is ' 'Cache path {0} (corresponding remote: {1}) exists but is '
'not a valid subversion checkout. You will need to ' 'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue ' 'manually delete this directory on the master to continue '
'to use this svnfs remote.'.format(rp_, repo_uri) 'to use this svnfs remote.'.format(rp_, repo_url)
) )
continue continue
repo_conf.update({ repo_conf.update({
'repo': rp_, 'repo': rp_,
'uri': repo_uri, 'url': repo_url,
'hash': repo_hash, 'hash': repo_hash,
'cachedir': rp_ 'cachedir': rp_
}) })
@ -200,7 +205,7 @@ def init():
for repo_conf in repos: for repo_conf in repos:
fp_.write( fp_.write(
'{0} = {1}\n'.format( '{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['uri'] repo_conf['hash'], repo_conf['url']
) )
) )
except OSError: except OSError:
@ -253,7 +258,7 @@ def update():
except pysvn._pysvn.ClientError as exc: except pysvn._pysvn.ClientError as exc:
log.error( log.error(
'Error updating svnfs remote {0} (cachedir: {1}): {2}' 'Error updating svnfs remote {0} (cachedir: {1}): {2}'
.format(repo['uri'], repo['cachedir'], exc) .format(repo['url'], repo['cachedir'], exc)
) )
try: try:
os.remove(lk_fn) os.remove(lk_fn)
@ -328,7 +333,7 @@ def envs(ignore_cache=False):
log.error( log.error(
'svnfs trunk path {0!r} does not exist in repo {1}, no base ' 'svnfs trunk path {0!r} does not exist in repo {1}, no base '
'environment will be provided by this remote' 'environment will be provided by this remote'
.format(repo['trunk'], repo['uri']) .format(repo['trunk'], repo['url'])
) )
branches = os.path.join(repo['repo'], repo['branches']) branches = os.path.join(repo['repo'], repo['branches'])
@ -337,7 +342,7 @@ def envs(ignore_cache=False):
else: else:
log.error( log.error(
'svnfs branches path {0!r} does not exist in repo {1}' 'svnfs branches path {0!r} does not exist in repo {1}'
.format(repo['branches'], repo['uri']) .format(repo['branches'], repo['url'])
) )
tags = os.path.join(repo['repo'], repo['tags']) tags = os.path.join(repo['repo'], repo['tags'])
@ -346,7 +351,7 @@ def envs(ignore_cache=False):
else: else:
log.error( log.error(
'svnfs tags path {0!r} does not exist in repo {1}' 'svnfs tags path {0!r} does not exist in repo {1}'
.format(repo['tags'], repo['uri']) .format(repo['tags'], repo['url'])
) )
return [x for x in sorted(ret) if _env_is_exposed(x)] return [x for x in sorted(ret) if _env_is_exposed(x)]

View File

@ -187,6 +187,15 @@ def returners(opts, functions, whitelist=None):
) )
def utils(opts, whitelist=None):
'''
Returns the utility modules
'''
load = _create_loader(opts, 'utils', 'utils',
ext_type_dirs='utils_dirs')
return LazyLoader(load, whitelist=whitelist)
def pillars(opts, functions): def pillars(opts, functions):
''' '''
Returns the pillars modules Returns the pillars modules
@ -716,9 +725,9 @@ class Loader(object):
mod.__salt__ = functions mod.__salt__ = functions
try: try:
context = sys.modules[ context = sys.modules[
functions[functions.keys()[0]].__module__ functions[functions.iterkeys().next()].__module__
].__context__ ].__context__
except (AttributeError, IndexError): except (AttributeError, StopIteration):
context = {} context = {}
mod.__context__ = context mod.__context__ = context
return funcs return funcs

View File

@ -677,6 +677,11 @@ def __remove_temp_logging_handler():
logging.captureWarnings(True) logging.captureWarnings(True)
# Let's setup a global exception hook handler which will log all exceptions
# Store a reference to the original handler
__GLOBAL_EXCEPTION_HANDLER = sys.excepthook
def __global_logging_exception_handler(exc_type, exc_value, exc_traceback): def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
''' '''
This function will log all python exceptions. This function will log all python exceptions.
@ -693,7 +698,7 @@ def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
) )
) )
# Call the original sys.excepthook # Call the original sys.excepthook
sys.__excepthook__(exc_type, exc_value, exc_traceback) __GLOBAL_EXCEPTION_HANDLER(exc_type, exc_value, exc_traceback)
# Set our own exception handler as the one to use # Set our own exception handler as the one to use

View File

@ -89,6 +89,42 @@ class SMaster(object):
return salt.daemons.masterapi.access_keys(self.opts) return salt.daemons.masterapi.access_keys(self.opts)
class Scheduler(multiprocessing.Process):
'''
The master scheduler process.
This runs in its own process so that it can have a fully
independent loop from the Maintenance process.
'''
def __init__(self, opts):
super(Scheduler, self).__init__()
self.opts = opts
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
salt.loader.runner(self.opts),
returners=salt.loader.returners(self.opts, {}))
def run(self):
salt.utils.appendproctitle('Scheduler')
while True:
self.handle_schedule()
try:
time.sleep(self.schedule.loop_interval)
except KeyboardInterrupt:
break
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
class Maintenance(multiprocessing.Process): class Maintenance(multiprocessing.Process):
''' '''
A generalized maintenence process which performances maintenence A generalized maintenence process which performances maintenence
@ -104,14 +140,7 @@ class Maintenance(multiprocessing.Process):
self.opts = opts self.opts = opts
# Init fileserver manager # Init fileserver manager
self.fileserver = salt.fileserver.Fileserver(self.opts) self.fileserver = salt.fileserver.Fileserver(self.opts)
# Load Runners # Matcher
self.runners = salt.loader.runner(self.opts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
self.runners,
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts) self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing # Make Event bus for firing
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir']) self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
@ -147,7 +176,6 @@ class Maintenance(multiprocessing.Process):
salt.daemons.masterapi.clean_expired_tokens(self.opts) salt.daemons.masterapi.clean_expired_tokens(self.opts)
self.handle_search(now, last) self.handle_search(now, last)
self.handle_pillargit() self.handle_pillargit()
self.handle_schedule()
self.handle_presence(old_present) self.handle_presence(old_present)
self.handle_key_rotate(now) self.handle_key_rotate(now)
salt.daemons.masterapi.fileserver_update(self.fileserver) salt.daemons.masterapi.fileserver_update(self.fileserver)
@ -194,21 +222,6 @@ class Maintenance(multiprocessing.Process):
log.error('Exception {0} occurred in file server update ' log.error('Exception {0} occurred in file server update '
'for git_pillar module.'.format(exc)) 'for git_pillar module.'.format(exc))
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
def handle_presence(self, old_present): def handle_presence(self, old_present):
''' '''
Fire presence events if enabled Fire presence events if enabled
@ -302,17 +315,6 @@ class Master(SMaster):
) )
) )
def __handle_error_react(self, event):
log.error('Received minion error from [{minion}]: {data}'.format(minion=event['id'], data=event['data']['exception']))
def __register_reactions(self):
'''
Register any reactions the master will need
'''
log.info('Registering master reactions')
log.info('Registering master error handling')
self.opts['reactor'].append({'_salt_error': self.__handle_error_react})
def _pre_flight(self): def _pre_flight(self):
''' '''
Run pre flight checks. If anything in this method fails then the master Run pre flight checks. If anything in this method fails then the master
@ -320,7 +322,6 @@ class Master(SMaster):
''' '''
errors = [] errors = []
fileserver = salt.fileserver.Fileserver(self.opts) fileserver = salt.fileserver.Fileserver(self.opts)
self.__register_reactions()
if not fileserver.servers: if not fileserver.servers:
errors.append( errors.append(
'Failed to load fileserver backends, the configured backends ' 'Failed to load fileserver backends, the configured backends '
@ -351,6 +352,8 @@ class Master(SMaster):
process_manager = salt.utils.process.ProcessManager() process_manager = salt.utils.process.ProcessManager()
log.info('Creating master maintenance process') log.info('Creating master maintenance process')
process_manager.add_process(Maintenance, args=(self.opts,)) process_manager.add_process(Maintenance, args=(self.opts,))
log.info('Creating master scheduler process')
process_manager.add_process(Scheduler, args=(self.opts,))
log.info('Creating master publisher process') log.info('Creating master publisher process')
process_manager.add_process(Publisher, args=(self.opts,)) process_manager.add_process(Publisher, args=(self.opts,))
log.info('Creating master event publisher process') log.info('Creating master event publisher process')
@ -1138,7 +1141,7 @@ class AESFuncs(object):
return False return False
load['grains']['id'] = load['id'] load['grains']['id'] = load['id']
mods = set() mods = set()
for func in self.mminion.functions.values(): for func in self.mminion.functions.itervalues():
mods.add(func.__module__) mods.add(func.__module__)
for mod in mods: for mod in mods:
sys.modules[mod].__grains__ = load['grains'] sys.modules[mod].__grains__ = load['grains']

View File

@ -66,7 +66,6 @@ import salt.utils.args
import salt.utils.event import salt.utils.event
import salt.utils.minion import salt.utils.minion
import salt.utils.schedule import salt.utils.schedule
import salt.utils.error
import salt.exitcodes import salt.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM from salt.defaults import DEFAULT_TARGET_DELIM
@ -190,7 +189,7 @@ def load_args_and_kwargs(func, args, data=None):
'by salt.utils.args.parse_input() before calling ' 'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().' 'salt.minion.load_args_and_kwargs().'
) )
if argspec.keywords or string_kwarg.keys()[0] in argspec.args: if argspec.keywords or string_kwarg.iterkeys().next() in argspec.args:
# Function supports **kwargs or is a positional argument to # Function supports **kwargs or is a positional argument to
# the function. # the function.
_kwargs.update(string_kwarg) _kwargs.update(string_kwarg)
@ -513,7 +512,12 @@ class MultiMinion(MinionBase):
while True: while True:
package = None package = None
for minion in minions.itervalues():
if isinstance(minion, dict):
minion = minion['minion']
if not hasattr(minion, 'schedule'):
continue
loop_interval = self.process_schedule(minion, loop_interval)
socks = dict(self.poller.poll(1)) socks = dict(self.poller.poll(1))
if socks.get(self.epull_sock) == zmq.POLLIN: if socks.get(self.epull_sock) == zmq.POLLIN:
try: try:
@ -618,7 +622,7 @@ class Minion(MinionBase):
# add default scheduling jobs to the minions scheduler # add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions: if 'mine.update' in self.functions:
log.info('Added mine.update to schedular') log.info('Added mine.update to scheduler')
self.schedule.add_job({ self.schedule.add_job({
'__mine_interval': '__mine_interval':
{ {
@ -1091,7 +1095,6 @@ class Minion(MinionBase):
except Exception: except Exception:
msg = 'The minion function caused an exception' msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG) log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested' ret['out'] = 'nested'
else: else:
@ -1625,10 +1628,6 @@ class Minion(MinionBase):
self.schedule.modify_job(name='__master_alive', self.schedule.modify_job(name='__master_alive',
schedule=schedule) schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
# Main Minion Tune In # Main Minion Tune In
def tune_in(self): def tune_in(self):
@ -1807,7 +1806,7 @@ class Minion(MinionBase):
self._running = False self._running = False
if getattr(self, 'poller', None) is not None: if getattr(self, 'poller', None) is not None:
if isinstance(self.poller.sockets, dict): if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys(): for socket in self.poller.sockets:
if socket.closed is False: if socket.closed is False:
socket.close() socket.close()
self.poller.unregister(socket) self.poller.unregister(socket)

View File

@ -436,7 +436,7 @@ def config(name, config, edit=True):
''' '''
for entry in config: for entry in config:
key = entry.keys()[0] key = entry.iterkeys().next()
configs = _parse_config(entry[key], key) configs = _parse_config(entry[key], key)
if edit: if edit:
with salt.utils.fopen(name, 'w') as configfile: with salt.utils.fopen(name, 'w') as configfile:

View File

@ -232,7 +232,7 @@ def latest_version(*names, **kwargs):
virtpkgs = _get_virtual() virtpkgs = _get_virtual()
all_virt = set() all_virt = set()
for provides in virtpkgs.values(): for provides in virtpkgs.itervalues():
all_virt.update(provides) all_virt.update(provides)
for name in names: for name in names:
@ -457,7 +457,7 @@ def install(name=None,
refreshdb = False refreshdb = False
for pkg in pkgs: for pkg in pkgs:
if isinstance(pkg, dict): if isinstance(pkg, dict):
_name = pkg.keys()[0] _name = pkg.iterkeys().next()
_latest_version = latest_version(_name, refresh=False, show_installed=True) _latest_version = latest_version(_name, refresh=False, show_installed=True)
_version = pkg[_name] _version = pkg[_name]
# If the versions don't match, refresh is True, otherwise no need to refresh # If the versions don't match, refresh is True, otherwise no need to refresh
@ -1162,8 +1162,7 @@ def get_repo(repo, **kwargs):
ppa_name, dist) ppa_name, dist)
else: else:
if HAS_SOFTWAREPROPERTIES: if HAS_SOFTWAREPROPERTIES:
repo = softwareproperties.ppa.expand_ppa_line( repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(
repo,
__grains__['lsb_distrib_codename'])[0] __grains__['lsb_distrib_codename'])[0]
else: else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
@ -1186,7 +1185,7 @@ def get_repo(repo, **kwargs):
.format(repo) .format(repo)
) )
for source in repos.values(): for source in repos.itervalues():
for sub in source: for sub in source:
if (sub['type'] == repo_type and if (sub['type'] == repo_type and
# strip trailing '/' from repo_uri, it's valid in definition # strip trailing '/' from repo_uri, it's valid in definition
@ -1235,7 +1234,7 @@ def del_repo(repo, **kwargs):
else: else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else: else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0] repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
sources = sourceslist.SourcesList() sources = sourceslist.SourcesList()
repos = [s for s in sources.list if not s.invalid] repos = [s for s in sources.list if not s.invalid]
@ -1626,8 +1625,7 @@ def expand_repo_def(repokwargs):
dist) dist)
else: else:
if HAS_SOFTWAREPROPERTIES: if HAS_SOFTWAREPROPERTIES:
repo = softwareproperties.ppa.expand_ppa_line( repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
repo, dist)[0]
else: else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
@ -1894,5 +1892,5 @@ def owner(*paths):
if 'no path found' in ret[path].lower(): if 'no path found' in ret[path].lower():
ret[path] = '' ret[path] = ''
if len(ret) == 1: if len(ret) == 1:
return ret.values()[0] return ret.itervalues().next()
return ret return ret

View File

@ -1,13 +1,14 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
''' '''
A module to wrap archive calls A module to wrap (non-Windows) archive calls
.. versionadded:: 2014.1.0 .. versionadded:: 2014.1.0
''' '''
# Import salt libs # Import salt libs
import salt._compat import salt._compat
from salt.utils import which as _which, which_bin as _which_bin from salt.utils import \
which as _which, which_bin as _which_bin, is_windows as _is_windows
import salt.utils.decorators as decorators import salt.utils.decorators as decorators
# TODO: Check that the passed arguments are correct # TODO: Check that the passed arguments are correct
@ -19,6 +20,8 @@ __func_alias__ = {
def __virtual__(): def __virtual__():
if _is_windows():
return False
commands = ('tar', 'gzip', 'gunzip', 'zip', 'unzip', 'rar', 'unrar') commands = ('tar', 'gzip', 'gunzip', 'zip', 'unzip', 'rar', 'unrar')
# If none of the above commands are in $PATH this module is a no-go # If none of the above commands are in $PATH this module is a no-go
if not any(_which(cmd) for cmd in commands): if not any(_which(cmd) for cmd in commands):

View File

@ -1227,18 +1227,16 @@ def describe(vpc_id=None, region=None, key=None, keyid=None, profile=None):
state=None, state=None,
tags=None, tags=None,
dhcp_options_id=None, dhcp_options_id=None,
instance_tenancy=None instance_tenancy=None)
)
if not conn: if not conn:
return False return False
if not vpc_id: if not vpc_id:
raise SaltInvocationError( raise SaltInvocationError('VPC ID needs to be specified.')
'VPC ID needs to be specified.')
try: try:
filter_parameters = {'filters': {'vpc-id': vpc_id}} filter_parameters = {'vpc_ids': vpc_id}
vpcs = conn.get_all_vpcs(**filter_parameters) vpcs = conn.get_all_vpcs(**filter_parameters)

View File

@ -1174,20 +1174,20 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
iface_data['inet']['ethtool'] = ethtool iface_data['inet']['ethtool'] = ethtool
# return a list of sorted keys to ensure consistent order # return a list of sorted keys to ensure consistent order
iface_data['inet']['ethtool_keys'] = sorted(ethtool.keys()) iface_data['inet']['ethtool_keys'] = sorted(ethtool)
if iface_type == 'bridge': if iface_type == 'bridge':
bridging = _parse_bridge_opts(opts, iface) bridging = _parse_bridge_opts(opts, iface)
if bridging: if bridging:
iface_data['inet']['bridging'] = bridging iface_data['inet']['bridging'] = bridging
iface_data['inet']['bridging_keys'] = sorted(bridging.keys()) iface_data['inet']['bridging_keys'] = sorted(bridging)
elif iface_type == 'bond': elif iface_type == 'bond':
bonding = _parse_settings_bond(opts, iface) bonding = _parse_settings_bond(opts, iface)
if bonding: if bonding:
iface_data['inet']['bonding'] = bonding iface_data['inet']['bonding'] = bonding
iface_data['inet']['bonding']['slaves'] = opts['slaves'] iface_data['inet']['bonding']['slaves'] = opts['slaves']
iface_data['inet']['bonding_keys'] = sorted(bonding.keys()) iface_data['inet']['bonding_keys'] = sorted(bonding)
elif iface_type == 'slave': elif iface_type == 'slave':
adapters[iface]['master'] = opts['master'] adapters[iface]['master'] = opts['master']

View File

@ -91,7 +91,7 @@ def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs):
if filename.endswith('#') and filename.endswith('#'): if filename.endswith('#') and filename.endswith('#'):
continue continue
includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename)) includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename))
for key in kwargs.keys(): for key in kwargs:
if key in dnsopts: if key in dnsopts:
if isinstance(dnsopts[key], str): if isinstance(dnsopts[key], str):
for config in includes: for config in includes:

View File

@ -155,7 +155,7 @@ def parse_zone(zonefile=None, zone=None):
line = multi.replace('(', '').replace(')', '') line = multi.replace('(', '').replace(')', '')
else: else:
continue continue
if 'ORIGIN' in zonedict.keys(): if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split() comps = line.replace('@', zonedict['ORIGIN']).split()
else: else:
comps = line.split() comps = line.split()
@ -179,7 +179,7 @@ def parse_zone(zonefile=None, zone=None):
if comps[2] == 'NS': if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3]) zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX': elif comps[2] == 'MX':
if 'MX' not in zonedict.keys(): if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3], zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]}) 'host': comps[4]})
else: else:

View File

@ -1751,7 +1751,7 @@ def _run_wrapper(status, container, func, cmd, *args, **kwargs):
_invalid(status, id_=container, comment='Container is not running') _invalid(status, id_=container, comment='Container is not running')
return status return status
full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid' full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid'
' {cmd}'.format(pid=container_pid, cmd=cmd)) ' -- {cmd}'.format(pid=container_pid, cmd=cmd))
else: else:
raise NotImplementedError( raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command' 'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command'

View File

@ -113,7 +113,7 @@ def file_list(*packages):
'description': ' '.join(comps[3:])} 'description': ' '.join(comps[3:])}
if 'No packages found' in line: if 'No packages found' in line:
errors.append(line) errors.append(line)
for pkg in pkgs.keys(): for pkg in pkgs:
files = [] files = []
cmd = 'dpkg -L {0}'.format(pkg) cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd).splitlines(): for line in __salt__['cmd.run'](cmd).splitlines():
@ -155,7 +155,7 @@ def file_dict(*packages):
'description': ' '.join(comps[3:])} 'description': ' '.join(comps[3:])}
if 'No packages found' in line: if 'No packages found' in line:
errors.append(line) errors.append(line)
for pkg in pkgs.keys(): for pkg in pkgs:
files = [] files = []
cmd = 'dpkg -L {0}'.format(pkg) cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd).splitlines(): for line in __salt__['cmd.run'](cmd).splitlines():

84
salt/modules/drbd.py Normal file
View File

@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
'''
DRBD administration module
'''
import logging
log = logging.getLogger(__name__)
def overview():
    '''
    Show status of the DRBD devices.

    Parses the output of the ``drbd-overview`` command into a dict
    describing the device: minor number, device node, connection state,
    local/partner roles and disk states, plus mount/usage details when
    both sides are ``UpToDate`` or synchronisation progress while a
    resync is running.

    NOTE(review): as in the original implementation, only the status of
    the *last* line emitted by ``drbd-overview`` is returned, so hosts
    with multiple DRBD resources see only the final one.  Kept as-is to
    preserve the existing return contract — TODO confirm whether a
    per-resource dict is wanted upstream.

    CLI Example:

    .. code-block:: bash

        salt '*' drbd.overview
    '''
    cmd = 'drbd-overview'
    ret = {}
    for line in __salt__['cmd.run'](cmd).splitlines():
        fields = line.strip().split()
        # Robustness fix: skip blank/malformed lines instead of raising
        # IndexError (the original indexed fields[0..3] unconditionally).
        if len(fields) < 4:
            continue
        # First column is '<minor>:<resource name>'.
        minnum, device = fields[0].split(':')[0], fields[0].split(':')[1]
        connstate = fields[1]
        # Roles and disk states are 'local/partner' pairs.
        localrole, partnerrole = fields[2].split('/')
        localdiskstate, partnerdiskstate = fields[3].split('/')
        # Base status shared by every branch (the original duplicated
        # these keys in each literal; it also returned {} for the
        # "local UpToDate, partner not UpToDate" case — fixed here).
        ret = {
            'minor number': minnum,
            'device': device,
            'connection state': connstate,
            'local role': localrole,
            'partner role': partnerrole,
            'local disk state': localdiskstate,
            'partner disk state': partnerdiskstate,
        }
        if localdiskstate == 'UpToDate':
            # Fully synced: mount/usage columns are optional.  The
            # original tested 'if fields[4]:' which raised IndexError
            # when the columns were absent; test the length instead.
            if partnerdiskstate == 'UpToDate' and len(fields) > 9:
                ret.update({
                    'mountpoint': fields[4],
                    'fs': fields[5],
                    'total size': fields[6],
                    'used': fields[7],
                    'remains': fields[8],
                    'percent': fields[9],
                })
        elif len(fields) > 7:
            # Resynchronising: fields[4] is the progress bar, fields[6]
            # and fields[7] are the synced percentage and byte counter.
            # Key 'synchronisation: ' (trailing colon-space) preserved
            # byte-for-byte for backward compatibility.
            ret.update({
                'synchronisation: ': fields[4],
                'synched': fields[6] + fields[7],
            })
    return ret

View File

@ -135,7 +135,7 @@ def setenv(environ, false_unsets=False, clear_all=False, update_minion=False):
return False return False
if clear_all is True: if clear_all is True:
# Unset any keys not defined in 'environ' dict supplied by user # Unset any keys not defined in 'environ' dict supplied by user
to_unset = [key for key in os.environ.keys() if key not in environ] to_unset = [key for key in os.environ if key not in environ]
for key in to_unset: for key in to_unset:
ret[key] = setval(key, False, false_unsets) ret[key] = setval(key, False, false_unsets)
for key, val in environ.items(): for key, val in environ.items():

View File

@ -548,7 +548,7 @@ def check_hash(path, file_hash):
return get_hash(path, hash_form) == hash_value return get_hash(path, hash_form) == hash_value
def find(path, **kwargs): def find(path, *args, **kwargs):
''' '''
Approximate the Unix ``find(1)`` command and return a list of paths that Approximate the Unix ``find(1)`` command and return a list of paths that
meet the specified criteria. meet the specified criteria.
@ -662,6 +662,11 @@ def find(path, **kwargs):
salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
''' '''
if 'delete' in args:
kwargs['delete'] = 'f'
elif 'print' in args:
kwargs['print'] = 'path'
try: try:
finder = salt.utils.find.Finder(kwargs) finder = salt.utils.find.Finder(kwargs)
except ValueError as ex: except ValueError as ex:
@ -2148,7 +2153,7 @@ def access(path, mode):
if mode in modes: if mode in modes:
return os.access(path, modes[mode]) return os.access(path, modes[mode])
elif mode in modes.values(): elif mode in modes.itervalues():
return os.access(path, mode) return os.access(path, mode)
else: else:
raise SaltInvocationError('Invalid mode specified.') raise SaltInvocationError('Invalid mode specified.')
@ -4106,7 +4111,7 @@ def open_files(by_pid=False):
# Then we look at the open files for each PID # Then we look at the open files for each PID
files = {} files = {}
for pid in pids.keys(): for pid in pids:
ppath = '/proc/{0}'.format(pid) ppath = '/proc/{0}'.format(pid)
try: try:
tids = os.listdir('{0}/task'.format(ppath)) tids = os.listdir('{0}/task'.format(ppath))

View File

@ -475,7 +475,7 @@ def file_list(*packages):
''' '''
ret = file_dict(*packages) ret = file_dict(*packages)
files = [] files = []
for pkg_files in ret['files'].values(): for pkg_files in ret['files'].itervalues():
files.extend(pkg_files) files.extend(pkg_files)
ret['files'] = files ret['files'] = files
return ret return ret

View File

@ -249,7 +249,7 @@ def avail_platforms():
salt myminion genesis.avail_platforms salt myminion genesis.avail_platforms
''' '''
ret = {} ret = {}
for platform in CMD_MAP.keys(): for platform in CMD_MAP:
ret[platform] = True ret[platform] = True
for cmd in CMD_MAP[platform]: for cmd in CMD_MAP[platform]:
if not salt.utils.which(cmd): if not salt.utils.which(cmd):

View File

@ -27,7 +27,7 @@ def mount_image(location):
mnt = __salt__['qemu_nbd.init'](location) mnt = __salt__['qemu_nbd.init'](location)
if not mnt: if not mnt:
return '' return ''
first = mnt.keys()[0] first = mnt.iterkeys().next()
__context__['img.mnt_{0}'.format(first)] = mnt __context__['img.mnt_{0}'.format(first)] = mnt
return first return first
return '' return ''
@ -95,4 +95,4 @@ def bootstrap(location, size, fmt):
__salt__['partition.probe'](nbd) __salt__['partition.probe'](nbd)
__salt__['partition.mkfs']('{0}p1'.format(nbd), 'ext4') __salt__['partition.mkfs']('{0}p1'.format(nbd), 'ext4')
mnt = __salt__['qemu_nbd.mount'](nbd) mnt = __salt__['qemu_nbd.mount'](nbd)
#return __salt__['pkg.bootstrap'](nbd, mnt.keys()[0]) #return __salt__['pkg.bootstrap'](nbd, mnt.iterkeys().next())

View File

@ -56,7 +56,7 @@ def running_service_owners(
for service in execs: for service in execs:
if path == execs[service]: if path == execs[service]:
pkg = __salt__['pkg.owner'](path) pkg = __salt__['pkg.owner'](path)
ret[service] = pkg.values()[0] ret[service] = pkg.itervalues().next()
return ret return ret
@ -94,7 +94,7 @@ def enabled_service_owners():
continue continue
start_cmd = data['ExecStart']['path'] start_cmd = data['ExecStart']['path']
pkg = __salt__['pkg.owner'](start_cmd) pkg = __salt__['pkg.owner'](start_cmd)
ret[service] = pkg.values()[0] ret[service] = pkg.itervalues().next()
return ret return ret
@ -131,7 +131,7 @@ def service_highstate(requires=True):
if requires: if requires:
exists = False exists = False
for item in ret[service]['service']: for item in ret[service]['service']:
if isinstance(item, dict) and item.keys()[0] == 'require': if isinstance(item, dict) and item.iterkeys().next() == 'require':
exists = True exists = True
if not exists: if not exists:
ret[service]['service'].append( ret[service]['service'].append(

View File

@ -878,7 +878,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[user]['id'] **connection_args)[user]['id']
else: else:
user = user_get(user_id, profile=profile, user = user_get(user_id, profile=profile,
**connection_args).keys()[0]['name'] **connection_args).iterkeys().next()['name']
if not user_id: if not user_id:
return {'Error': 'Unable to resolve user id'} return {'Error': 'Unable to resolve user id'}
@ -887,7 +887,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[tenant]['id'] **connection_args)[tenant]['id']
else: else:
tenant = tenant_get(tenant_id, profile=profile, tenant = tenant_get(tenant_id, profile=profile,
**connection_args).keys()[0]['name'] **connection_args).iterkeys().next()['name']
if not tenant_id: if not tenant_id:
return {'Error': 'Unable to resolve tenant id'} return {'Error': 'Unable to resolve tenant id'}
@ -896,7 +896,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[role]['id'] **connection_args)[role]['id']
else: else:
role = role_get(role_id, profile=profile, role = role_get(role_id, profile=profile,
**connection_args).keys()[0]['name'] **connection_args).iterkeys().next()['name']
if not role_id: if not role_id:
return {'Error': 'Unable to resolve role id'} return {'Error': 'Unable to resolve role id'}
@ -927,7 +927,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[user]['id'] **connection_args)[user]['id']
else: else:
user = user_get(user_id, profile=profile, user = user_get(user_id, profile=profile,
**connection_args).keys()[0]['name'] **connection_args).iterkeys().next()['name']
if not user_id: if not user_id:
return {'Error': 'Unable to resolve user id'} return {'Error': 'Unable to resolve user id'}
@ -936,7 +936,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[tenant]['id'] **connection_args)[tenant]['id']
else: else:
tenant = tenant_get(tenant_id, profile=profile, tenant = tenant_get(tenant_id, profile=profile,
**connection_args).keys()[0]['name'] **connection_args).iterkeys().next()['name']
if not tenant_id: if not tenant_id:
return {'Error': 'Unable to resolve tenant id'} return {'Error': 'Unable to resolve tenant id'}
@ -944,7 +944,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
role_id = role_get(name=role, profile=profile, role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id'] **connection_args)[role]['id']
else: else:
role = role_get(role_id).keys()[0]['name'] role = role_get(role_id).iterkeys().next()['name']
if not role_id: if not role_id:
return {'Error': 'Unable to resolve role id'} return {'Error': 'Unable to resolve role id'}

View File

@ -85,7 +85,7 @@ def _service_by_name(name):
# Match on label # Match on label
return services[name] return services[name]
for service in services.values(): for service in services.itervalues():
if service['file_path'].lower() == name: if service['file_path'].lower() == name:
# Match on full path # Match on full path
return service return service

View File

@ -5,6 +5,7 @@ Support for Linux File Access Control Lists
# Import salt libs # Import salt libs
import salt.utils import salt.utils
from salt.exceptions import CommandExecutionError
# Define the module's virtual name # Define the module's virtual name
__virtualname__ = 'acl' __virtualname__ = 'acl'
@ -35,7 +36,12 @@ def version():
return ret[1].strip() return ret[1].strip()
def getfacl(*args): def _raise_on_no_files(*args):
if len(args) == 0:
raise CommandExecutionError('You need to specify at least one file or directory to work with!')
def getfacl(*args, **kwargs):
''' '''
Return (extremely verbose) map of FACLs on specified file(s) Return (extremely verbose) map of FACLs on specified file(s)
@ -45,9 +51,16 @@ def getfacl(*args):
salt '*' acl.getfacl /tmp/house/kitchen salt '*' acl.getfacl /tmp/house/kitchen
salt '*' acl.getfacl /tmp/house/kitchen /tmp/house/livingroom salt '*' acl.getfacl /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.getfacl /tmp/house/kitchen /tmp/house/livingroom recursive=True
''' '''
recursive = kwargs.pop('recursive', False)
_raise_on_no_files(*args)
ret = {} ret = {}
cmd = 'getfacl -p' cmd = 'getfacl -p'
if recursive:
cmd += ' -R'
for dentry in args: for dentry in args:
cmd += ' {0}'.format(dentry) cmd += ' {0}'.format(dentry)
out = __salt__['cmd.run'](cmd).splitlines() out = __salt__['cmd.run'](cmd).splitlines()
@ -81,24 +94,24 @@ def getfacl(*args):
del vals['type'] del vals['type']
for entity in ('user', 'group'): for entity in ('user', 'group'):
plural = entity + 's' plural = entity + 's'
if entity in vals.keys(): if entity in vals:
usergroup = vals[entity] usergroup = vals[entity]
del vals[entity] del vals[entity]
if acl_type == 'acl': if acl_type == 'acl':
ret[dentry][plural].append({usergroup: vals}) ret[dentry][plural].append({usergroup: vals})
elif acl_type == 'default': elif acl_type == 'default':
if 'defaults' not in ret[dentry].keys(): if 'defaults' not in ret[dentry]:
ret[dentry]['defaults'] = {} ret[dentry]['defaults'] = {}
if plural not in ret[dentry]['defaults'].keys(): if plural not in ret[dentry]['defaults']:
ret[dentry]['defaults'][plural] = [] ret[dentry]['defaults'][plural] = []
ret[dentry]['defaults'][plural].append({usergroup: vals}) ret[dentry]['defaults'][plural].append({usergroup: vals})
for entity in ('other', 'mask'): for entity in ('other', 'mask'):
if entity in vals.keys(): if entity in vals:
del vals[entity] del vals[entity]
if acl_type == 'acl': if acl_type == 'acl':
ret[dentry][entity] = vals ret[dentry][entity] = vals
elif acl_type == 'default': elif acl_type == 'default':
if 'defaults' not in ret[dentry].keys(): if 'defaults' not in ret[dentry]:
ret[dentry]['defaults'] = {} ret[dentry]['defaults'] = {}
ret[dentry]['defaults'][entity] = vals ret[dentry]['defaults'][entity] = vals
return ret return ret
@ -147,7 +160,7 @@ def _parse_acl(acl, user, group):
return vals return vals
def wipefacls(*args): def wipefacls(*args, **kwargs):
''' '''
Remove all FACLs from the specified file(s) Remove all FACLs from the specified file(s)
@ -157,15 +170,21 @@ def wipefacls(*args):
salt '*' acl.wipefacls /tmp/house/kitchen salt '*' acl.wipefacls /tmp/house/kitchen
salt '*' acl.wipefacls /tmp/house/kitchen /tmp/house/livingroom salt '*' acl.wipefacls /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.wipefacls /tmp/house/kitchen /tmp/house/livingroom recursive=True
''' '''
recursive = kwargs.pop('recursive', False)
_raise_on_no_files(*args)
cmd = 'setfacl -b' cmd = 'setfacl -b'
if recursive:
cmd += ' -R'
for dentry in args: for dentry in args:
cmd += ' {0}'.format(dentry) cmd += ' {0}'.format(dentry)
__salt__['cmd.run'](cmd) __salt__['cmd.run'](cmd)
return True return True
def modfacl(acl_type, acl_name, perms, *args): def modfacl(acl_type, acl_name, perms, *args, **kwargs):
''' '''
Add or modify a FACL for the specified file(s) Add or modify a FACL for the specified file(s)
@ -177,8 +196,15 @@ def modfacl(acl_type, acl_name, perms, *args):
salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen
salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen
salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True
''' '''
recursive = kwargs.pop('recursive', False)
_raise_on_no_files(*args)
cmd = 'setfacl -m' cmd = 'setfacl -m'
if recursive:
cmd += ' -R'
prefix = '' prefix = ''
if acl_type.startswith('d'): if acl_type.startswith('d'):
@ -197,7 +223,7 @@ def modfacl(acl_type, acl_name, perms, *args):
return True return True
def delfacl(acl_type, acl_name, *args): def delfacl(acl_type, acl_name, *args, **kwargs):
''' '''
Remove specific FACL from the specified file(s) Remove specific FACL from the specified file(s)
@ -209,8 +235,15 @@ def delfacl(acl_type, acl_name, *args):
salt '*' acl.delfacl default:group mygroup /tmp/house/kitchen salt '*' acl.delfacl default:group mygroup /tmp/house/kitchen
salt '*' acl.delfacl d:u myuser /tmp/house/kitchen salt '*' acl.delfacl d:u myuser /tmp/house/kitchen
salt '*' acl.delfacl g myuser /tmp/house/kitchen /tmp/house/livingroom salt '*' acl.delfacl g myuser /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.delfacl user myuser /tmp/house/kitchen recursive=True
''' '''
recursive = kwargs.pop('recursive', False)
_raise_on_no_files(*args)
cmd = 'setfacl -x' cmd = 'setfacl -x'
if recursive:
cmd += ' -R'
prefix = '' prefix = ''
if acl_type.startswith('d'): if acl_type.startswith('d'):

View File

@ -200,7 +200,7 @@ def pvcreate(devices, **kwargs):
'pvmetadatacopies', 'metadatacopies', 'metadataignore', 'pvmetadatacopies', 'metadatacopies', 'metadataignore',
'restorefile', 'norestorefile', 'labelsector', 'restorefile', 'norestorefile', 'labelsector',
'setphysicalvolumesize') 'setphysicalvolumesize')
for var in kwargs.keys(): for var in kwargs:
if kwargs[var] and var in valid: if kwargs[var] and var in valid:
cmd.append('--{0}'.format(var)) cmd.append('--{0}'.format(var))
cmd.append(kwargs[var]) cmd.append(kwargs[var])
@ -246,7 +246,7 @@ def vgcreate(vgname, devices, **kwargs):
cmd.append(device) cmd.append(device)
valid = ('clustered', 'maxlogicalvolumes', 'maxphysicalvolumes', valid = ('clustered', 'maxlogicalvolumes', 'maxphysicalvolumes',
'vgmetadatacopies', 'metadatacopies', 'physicalextentsize') 'vgmetadatacopies', 'metadatacopies', 'physicalextentsize')
for var in kwargs.keys(): for var in kwargs:
if kwargs[var] and var in valid: if kwargs[var] and var in valid:
cmd.append('--{0}'.format(var)) cmd.append('--{0}'.format(var))
cmd.append(kwargs[var]) cmd.append(kwargs[var])

View File

@ -141,15 +141,16 @@ def assign(name, value):
cmd = 'sysctl -w {0}="{1}"'.format(name, value) cmd = 'sysctl -w {0}="{1}"'.format(name, value)
data = __salt__['cmd.run_all'](cmd) data = __salt__['cmd.run_all'](cmd)
out = data['stdout'] out = data['stdout']
err = data['stderr']
# Example: # Example:
# # sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216" # # sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216"
# net.ipv4.tcp_rmem = 4096 87380 16777216 # net.ipv4.tcp_rmem = 4096 87380 16777216
regex = re.compile(r'^{0}\s+=\s+{1}$'.format(re.escape(name), re.escape(value))) regex = re.compile(r'^{0}\s+=\s+{1}$'.format(re.escape(name), re.escape(value)))
if not regex.match(out): if not regex.match(out) or 'Invalid argument' in str(err):
if data['retcode'] != 0 and data['stderr']: if data['retcode'] != 0 and err:
error = data['stderr'] error = err
else: else:
error = out error = out
raise CommandExecutionError('sysctl -w failed: {0}'.format(error)) raise CommandExecutionError('sysctl -w failed: {0}'.format(error))

View File

@ -466,7 +466,7 @@ def _get_network_conf(conf_tuples=None, **kwargs):
new[iface]['lxc.network.hwaddr'] = omac new[iface]['lxc.network.hwaddr'] = omac
ret = [] ret = []
for v in new.values(): for v in new.itervalues():
for row in v: for row in v:
ret.append({row: v[row]}) ret.append({row: v[row]})
return ret return ret
@ -2332,7 +2332,7 @@ def write_conf(conf_file, conf):
if isinstance(line, str): if isinstance(line, str):
fp_.write(line) fp_.write(line)
elif isinstance(line, dict): elif isinstance(line, dict):
key = line.keys()[0] key = line.iterkeys().next()
out_line = None out_line = None
if isinstance(line[key], str): if isinstance(line[key], str):
out_line = ' = '.join((key, line[key])) out_line = ' = '.join((key, line[key]))
@ -2378,7 +2378,7 @@ def edit_conf(conf_file, out_format='simple', **kwargs):
data.append(line) data.append(line)
continue continue
else: else:
key = line.keys()[0] key = line.iterkeys().next()
if key not in kwargs: if key not in kwargs:
data.append(line) data.append(line)
continue continue

View File

@ -315,7 +315,7 @@ def filter_by(lookup, expr_form='compound', minion_id=None):
expr_funcs = dict(inspect.getmembers(sys.modules[__name__], expr_funcs = dict(inspect.getmembers(sys.modules[__name__],
predicate=inspect.isfunction)) predicate=inspect.isfunction))
for key in lookup.keys(): for key in lookup:
if minion_id and expr_funcs[expr_form](key, minion_id): if minion_id and expr_funcs[expr_form](key, minion_id):
return lookup[key] return lookup[key]
elif expr_funcs[expr_form](key, minion_id): elif expr_funcs[expr_form](key, minion_id):

View File

@ -6,6 +6,8 @@ Module for Management of Memcached Keys
.. versionadded:: 2014.1.0 .. versionadded:: 2014.1.0
''' '''
# TODO: use salt.utils.memcache
# Import python libs # Import python libs
import logging import logging

View File

@ -571,7 +571,7 @@ def query(database, query, **connection_args):
# into Python objects. It leaves them as strings. # into Python objects. It leaves them as strings.
orig_conv = MySQLdb.converters.conversions orig_conv = MySQLdb.converters.conversions
conv_iter = iter(orig_conv) conv_iter = iter(orig_conv)
conv = dict(zip(conv_iter, [str] * len(orig_conv.keys()))) conv = dict(zip(conv_iter, [str] * len(orig_conv)))
# some converters are lists, do not break theses # some converters are lists, do not break theses
conv[FIELD_TYPE.BLOB] = [ conv[FIELD_TYPE.BLOB] = [
(FLAG.BINARY, str), (FLAG.BINARY, str),
@ -1461,7 +1461,7 @@ def __ssl_option_sanitize(ssl_option):
# Like most other "salt dsl" YAML structures, ssl_option is a list of single-element dicts # Like most other "salt dsl" YAML structures, ssl_option is a list of single-element dicts
for opt in ssl_option: for opt in ssl_option:
key = opt.keys()[0] key = opt.iterkeys().next()
value = opt[key] value = opt[key]
normal_key = key.strip().upper() normal_key = key.strip().upper()

View File

@ -63,7 +63,7 @@ def _execute_pillar(pillar_name, run_type):
#Check if is a dict to get the arguments #Check if is a dict to get the arguments
#in command if not set the arguments to empty string #in command if not set the arguments to empty string
if isinstance(command, dict): if isinstance(command, dict):
plugin = command.keys()[0] plugin = command.iterkeys().next()
args = command[plugin] args = command[plugin]
else: else:
plugin = command plugin = command
@ -165,7 +165,7 @@ def retcode_pillar(pillar_name):
#Check if is a dict to get the arguments #Check if is a dict to get the arguments
#in command if not set the arguments to empty string #in command if not set the arguments to empty string
if isinstance(command, dict): if isinstance(command, dict):
plugin = command.keys()[0] plugin = command.iterkeys().next()
args = command[plugin] args = command[plugin]
else: else:
plugin = command plugin = command

View File

@ -262,6 +262,137 @@ def _netstat_bsd():
return ret return ret
def _netstat_route_linux():
    '''
    Return netstat routing information for Linux distros as a list of
    dicts, one per routing-table entry.
    '''
    ret = []
    # IPv4 table: fixed eight-column output; tail strips the two header lines.
    out = __salt__['cmd.run']('netstat -A inet -rn | tail -n+3')
    for entry in out.splitlines():
        fields = entry.split()
        ret.append({
            'addr_family': 'inet',
            'destination': fields[0],
            'gateway': fields[1],
            'netmask': fields[2],
            'flags': fields[3],
            'interface': fields[7]})
    # IPv6 table: depending on the netstat build the rows have six or
    # seven columns; the interface is always the last column and there
    # is no separate netmask column. Anything else is skipped.
    out = __salt__['cmd.run']('netstat -A inet6 -rn | tail -n+3')
    for entry in out.splitlines():
        fields = entry.split()
        if len(fields) not in (6, 7):
            continue
        ret.append({
            'addr_family': 'inet6',
            'destination': fields[0],
            'gateway': fields[1],
            'netmask': '',
            'flags': fields[3],
            'interface': fields[-1]})
    return ret
def _netstat_route_freebsd():
    '''
    Return netstat routing information for FreeBSD and OS X as a list of
    dicts, one per routing-table entry.
    '''
    ret = []
    # IPv4 table: netmask is a real column; interface is column 6.
    out = __salt__['cmd.run']('netstat -f inet -rn | tail -n+5')
    for entry in out.splitlines():
        fields = entry.split()
        ret.append({
            'addr_family': 'inet',
            'destination': fields[0],
            'gateway': fields[1],
            'netmask': fields[2],
            'flags': fields[3],
            'interface': fields[5]})
    # IPv6 table: no netmask column, so flags/interface shift left.
    out = __salt__['cmd.run']('netstat -f inet6 -rn | tail -n+5')
    for entry in out.splitlines():
        fields = entry.split()
        ret.append({
            'addr_family': 'inet6',
            'destination': fields[0],
            'gateway': fields[1],
            'netmask': '',
            'flags': fields[2],
            'interface': fields[3]})
    return ret
def _netstat_route_netbsd():
    '''
    Return netstat routing information for NetBSD as a list of dicts,
    one per routing-table entry.
    '''
    ret = []
    # Both address families share the same column layout on NetBSD
    # (flags in column 4, interface in column 7, no netmask column),
    # so one loop handles inet and inet6 in turn.
    for family in ('inet', 'inet6'):
        out = __salt__['cmd.run'](
            'netstat -f {0} -rn | tail -n+5'.format(family))
        for entry in out.splitlines():
            fields = entry.split()
            ret.append({
                'addr_family': family,
                'destination': fields[0],
                'gateway': fields[1],
                'netmask': '',
                'flags': fields[3],
                'interface': fields[6]})
    return ret
def _netstat_route_openbsd():
    '''
    Return netstat routing information for OpenBSD as a list of dicts,
    one per routing-table entry.
    '''
    ret = []
    # OpenBSD prints the same column layout for both families (flags in
    # column 3, interface in column 8, no netmask column), so a single
    # loop covers inet and inet6.
    for family in ('inet', 'inet6'):
        out = __salt__['cmd.run'](
            'netstat -f {0} -rn | tail -n+5'.format(family))
        for entry in out.splitlines():
            fields = entry.split()
            ret.append({
                'addr_family': family,
                'destination': fields[0],
                'gateway': fields[1],
                'netmask': '',
                'flags': fields[2],
                'interface': fields[7]})
    return ret
def netstat(): def netstat():
''' '''
Return information on open ports and states Return information on open ports and states
@ -854,3 +985,71 @@ def mod_bufsize(iface, *args, **kwargs):
return _mod_bufsize_linux(iface, *args, **kwargs) return _mod_bufsize_linux(iface, *args, **kwargs)
return False return False
def routes(family=None):
    '''
    Return currently configured routes from the routing table.

    family
        Optionally restrict the results to ``inet`` or ``inet6`` routes.
        Raises CommandExecutionError for any other non-None value.

    CLI Example::

        salt '*' network.routes
    '''
    # Membership test replaces the original chained != comparisons.
    if family not in ('inet', 'inet6', None):
        raise CommandExecutionError('Invalid address family {0}'.format(family))

    # Dispatch on platform; each helper normalizes netstat output into a
    # list of route dicts with a common key set. The local is named
    # all_routes so it no longer shadows this function's own name.
    if __grains__['kernel'] == 'Linux':
        all_routes = _netstat_route_linux()
    elif __grains__['os'] in ('FreeBSD', 'MacOS', 'Darwin'):
        all_routes = _netstat_route_freebsd()
    elif __grains__['os'] in ('NetBSD',):
        all_routes = _netstat_route_netbsd()
    elif __grains__['os'] in ('OpenBSD',):
        all_routes = _netstat_route_openbsd()
    else:
        raise CommandExecutionError('Not yet supported on this platform')

    if not family:
        return all_routes
    return [route for route in all_routes if route['addr_family'] == family]
def default_route(family=None):
    '''
    Return default route(s) from the routing table.

    family
        Optionally restrict the results to ``inet`` or ``inet6`` routes.
        Raises CommandExecutionError for any other non-None value.

    CLI Example::

        salt '*' network.default_route
    '''
    # Membership test replaces the original chained != comparisons.
    if family not in ('inet', 'inet6', None):
        raise CommandExecutionError('Invalid address family {0}'.format(family))

    # Destination strings that mark a default route on each platform.
    # Renamed from default_route so the dict no longer shadows this
    # function's own name.
    if __grains__['kernel'] == 'Linux':
        default_dests = {
            'inet': ['0.0.0.0', 'default'],
            'inet6': ['::/0', 'default'],
        }
    elif __grains__['os'] in ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS', 'Darwin'):
        default_dests = {
            'inet': ['default'],
            'inet6': ['default'],
        }
    else:
        raise CommandExecutionError('Not yet supported on this platform')

    ret = []
    for route in routes():
        if family:
            if route['destination'] in default_dests[family]:
                ret.append(route)
        elif (route['destination'] in default_dests['inet'] or
                route['destination'] in default_dests['inet6']):
            ret.append(route)
    return ret

View File

@ -590,5 +590,5 @@ def owner(*paths):
for path in paths: for path in paths:
ret[path] = __salt__['cmd.run_stdout'](cmd.format(path)) ret[path] = __salt__['cmd.run_stdout'](cmd.format(path))
if len(ret) == 1: if len(ret) == 1:
return ret.values()[0] return ret.itervalues().next()
return ret return ret

View File

@ -163,7 +163,7 @@ def version(*names, **kwargs):
for name in names: for name in names:
if '*' in name: if '*' in name:
pkg_glob = True pkg_glob = True
for match in fnmatch.filter(pkgs.keys(), name): for match in fnmatch.filter(pkgs, name):
ret[match] = pkgs.get(match, []) ret[match] = pkgs.get(match, [])
else: else:
ret[name] = pkgs.get(name, []) ret[name] = pkgs.get(name, [])
@ -173,8 +173,8 @@ def version(*names, **kwargs):
# return dict # return dict
if len(ret) == 1 and not pkg_glob: if len(ret) == 1 and not pkg_glob:
try: try:
return ret.values()[0] return ret.itervalues().next()
except IndexError: except StopIteration:
return '' return ''
return ret return ret
@ -210,7 +210,7 @@ def sort_pkglist(pkgs):
# It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'], # It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'],
# so long as the sorting is consistent. # so long as the sorting is consistent.
try: try:
for key in pkgs.keys(): for key in pkgs:
# Passing the pkglist to set() also removes duplicate version # Passing the pkglist to set() also removes duplicate version
# numbers (if present). # numbers (if present).
pkgs[key] = sorted(set(pkgs[key])) pkgs[key] = sorted(set(pkgs[key]))
@ -230,7 +230,7 @@ def stringify(pkgs):
salt '*' pkg_resource.stringify 'vim: 7.127' salt '*' pkg_resource.stringify 'vim: 7.127'
''' '''
try: try:
for key in pkgs.keys(): for key in pkgs:
pkgs[key] = ','.join(pkgs[key]) pkgs[key] = ','.join(pkgs[key])
except AttributeError as e: except AttributeError as e:
log.exception(e) log.exception(e)

View File

@ -508,7 +508,7 @@ def file_list(package):
''' '''
ret = file_dict(package) ret = file_dict(package)
files = [] files = []
for pkg_files in ret['files'].values(): for pkg_files in ret['files'].itervalues():
files.extend(pkg_files) files.extend(pkg_files)
ret['files'] = files ret['files'] = files
return ret return ret

View File

@ -152,7 +152,7 @@ def add_user(name, password=None, runas=None):
# Now, Clear the random password from the account, if necessary # Now, Clear the random password from the account, if necessary
res2 = clear_password(name, runas) res2 = clear_password(name, runas)
if 'Error' in res2.keys(): if 'Error' in res2:
# Clearing the password failed. We should try to cleanup # Clearing the password failed. We should try to cleanup
# and rerun and error. # and rerun and error.
delete_user(name, runas) delete_user(name, runas)

View File

@ -100,7 +100,7 @@ def verify(*package, **kwargs):
fname = line[13:] fname = line[13:]
if line[11:12] in ftypes: if line[11:12] in ftypes:
fdict['type'] = ftypes[line[11:12]] fdict['type'] = ftypes[line[11:12]]
if 'type' not in fdict.keys() or fdict['type'] not in ignore_types: if 'type' not in fdict or fdict['type'] not in ignore_types:
if line[0:1] == 'S': if line[0:1] == 'S':
fdict['mismatch'].append('size') fdict['mismatch'].append('size')
if line[1:2] == 'M': if line[1:2] == 'M':
@ -175,7 +175,7 @@ def file_dict(*packages):
continue continue
comps = line.split() comps = line.split()
pkgs[comps[0]] = {'version': comps[1]} pkgs[comps[0]] = {'version': comps[1]}
for pkg in pkgs.keys(): for pkg in pkgs:
files = [] files = []
cmd = 'rpm -ql {0}'.format(pkg) cmd = 'rpm -ql {0}'.format(pkg)
out = __salt__['cmd.run'](cmd, output_loglevel='trace') out = __salt__['cmd.run'](cmd, output_loglevel='trace')

View File

@ -62,7 +62,7 @@ def list_(show_all=False, return_yaml=True):
if 'schedule' in __pillar__: if 'schedule' in __pillar__:
schedule.update(__pillar__['schedule']) schedule.update(__pillar__['schedule'])
for job in schedule.keys(): for job in schedule:
if job == 'enabled': if job == 'enabled':
continue continue
@ -72,7 +72,7 @@ def list_(show_all=False, return_yaml=True):
del schedule[job] del schedule[job]
continue continue
for item in schedule[job].keys(): for item in schedule[job]:
if item not in SCHEDULE_CONF: if item not in SCHEDULE_CONF:
del schedule[job][item] del schedule[job][item]
continue continue
@ -81,7 +81,7 @@ def list_(show_all=False, return_yaml=True):
if schedule[job][item] == 'false': if schedule[job][item] == 'false':
schedule[job][item] = False schedule[job][item] = False
if '_seconds' in schedule[job].keys(): if '_seconds' in schedule[job]:
schedule[job]['seconds'] = schedule[job]['_seconds'] schedule[job]['seconds'] = schedule[job]['_seconds']
del schedule[job]['_seconds'] del schedule[job]['_seconds']
@ -114,7 +114,7 @@ def purge(**kwargs):
if 'schedule' in __pillar__: if 'schedule' in __pillar__:
schedule.update(__pillar__['schedule']) schedule.update(__pillar__['schedule'])
for name in schedule.keys(): for name in schedule:
if name == 'enabled': if name == 'enabled':
continue continue
if name.startswith('__'): if name.startswith('__'):

42
salt/modules/sdb.py Normal file
View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
'''
Module for Manipulating Data via the Salt DB API
================================================
'''
# Import salt libs
import salt.utils.sdb
__func_alias__ = {
'set_': 'set',
}
def get(uri):
    '''
    Fetch a value through the Salt DB (sdb) layer.

    The uri must look like ``sdb://<profile>/<key>``; any string that does
    not carry the ``sdb://`` prefix is simply handed back unchanged.

    CLI Example:

    .. code-block:: bash

        salt '*' sdb.get sdb://mymemcached/foo
    '''
    # All the real work happens in the shared sdb helper, which needs the
    # minion opts to resolve the profile.
    return salt.utils.sdb.sdb_get(uri, __opts__)
def set_(uri, value):
    '''
    Store a value through the Salt DB (sdb) layer.

    The uri must look like ``sdb://<profile>/<key>``. Returns ``False``
    when the uri does not start with ``sdb://`` or the value could not be
    successfully set.

    CLI Example:

    .. code-block:: bash

        salt '*' sdb.set sdb://mymemcached/foo bar
    '''
    # Delegate to the shared sdb helper with the minion opts so it can
    # resolve the profile configuration.
    return salt.utils.sdb.sdb_set(uri, value, __opts__)

View File

@ -70,14 +70,14 @@ def getenforce():
salt '*' selinux.getenforce salt '*' selinux.getenforce
''' '''
enforce = os.path.join(selinux_fs_path(), 'enforce')
try: try:
enforce = os.path.join(selinux_fs_path(), 'enforce')
with salt.utils.fopen(enforce, 'r') as _fp: with salt.utils.fopen(enforce, 'r') as _fp:
if _fp.readline().strip() == '0': if _fp.readline().strip() == '0':
return 'Permissive' return 'Permissive'
else: else:
return 'Enforcing' return 'Enforcing'
except (IOError, OSError) as exc: except (IOError, OSError, AttributeError) as exc:
msg = 'Could not read SELinux enforce file: {0}' msg = 'Could not read SELinux enforce file: {0}'
raise CommandExecutionError(msg.format(str(exc))) raise CommandExecutionError(msg.format(str(exc)))

View File

@ -36,6 +36,9 @@ __outputter__ = {
'template_str': 'highstate', 'template_str': 'highstate',
} }
__func_alias__ = {
'apply_': 'apply'
}
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -245,6 +248,153 @@ def template_str(tem, queue=False, **kwargs):
return ret return ret
def apply_(mods=None, **kwargs):
    '''
    Apply states! This is intended to be the main gateway for all state
    executions: with ``mods`` it behaves like ``state.sls``, without it
    like ``state.highstate``.

    CLI Example:

    .. code-block:: bash

        salt '*' state.apply
        salt '*' state.apply test
        salt '*' state.apply test,pkgs
    '''
    # Conditional expression instead of the original if/return pair.
    return sls(mods, **kwargs) if mods else highstate(**kwargs)
def request(mods=None,
            **kwargs):
    '''
    Request that the local admin execute a state run via
    `salt-call state.apply_request`.
    All arguments match state.apply

    CLI Example:

    .. code-block:: bash

        salt '*' state.request
        salt '*' state.request test
        salt '*' state.request test,pkgs
    '''
    # Force a dry run so the recorded request carries a preview result.
    kwargs['test'] = True
    ret = apply_(mods, **kwargs)
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    # Merge this request into any already-pending ones, keyed by the
    # optional 'name' kwarg (default key: 'default').
    req = check_request()
    req.update({kwargs.get('name', 'default'): {
        'test_run': ret,
        'mods': mods,
        'kwargs': kwargs
        }
    })
    # Tighten the umask while writing: the request file may contain
    # sensitive data. (077 is Python 2 octal syntax.)
    cumask = os.umask(077)
    try:
        if salt.utils.is_windows():
            # Make sure cache file isn't read-only
            __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
        with salt.utils.fopen(notify_path, 'w+b') as fp_:
            serial.dump(req, fp_)
    except (IOError, OSError):
        # Best-effort: a failed write is logged, not raised, so the
        # caller still receives the dry-run result.
        msg = 'Unable to write state request file {0}. Check permission.'
        log.error(msg.format(notify_path))
    os.umask(cumask)
    return ret
def check_request(name=None):
    '''
    Return the state request information, if any.

    name
        Optionally return only the request stored under this key.
        Raises KeyError if the file exists but holds no such request.

    CLI Example:

    .. code-block:: bash

        salt '*' state.check_request
    '''
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    if os.path.isfile(notify_path):
        # Use salt.utils.fopen for consistency with request() and
        # clear_request(), which write this same file.
        with salt.utils.fopen(notify_path, 'rb') as fp_:
            req = serial.load(fp_)
        if name:
            return req[name]
        return req
    # No pending requests.
    return {}
def clear_request(name=None):
    '''
    Clear out the state execution request without executing it.

    name
        Optionally remove only the request stored under this key; all
        other pending requests are preserved. Returns False if no such
        request exists.

    CLI Example:

    .. code-block:: bash

        salt '*' state.clear_request
    '''
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    if not os.path.isfile(notify_path):
        # Nothing pending; treat as already cleared.
        return True
    if not name:
        # No name given: drop the whole request file.
        try:
            os.remove(notify_path)
        except (IOError, OSError):
            pass
    else:
        # Named clear: pop just that entry and rewrite the file.
        req = check_request()
        if name in req:
            req.pop(name)
        else:
            return False
        # Tighten the umask while writing (077 is Python 2 octal syntax).
        cumask = os.umask(077)
        try:
            if salt.utils.is_windows():
                # Make sure cache file isn't read-only
                __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
            with salt.utils.fopen(notify_path, 'w+b') as fp_:
                serial.dump(req, fp_)
        except (IOError, OSError):
            # Best-effort: log the failure rather than raising.
            msg = 'Unable to write state request file {0}. Check permission.'
            log.error(msg.format(notify_path))
        os.umask(cumask)
    return True
def run_request(name='default', **kwargs):
    '''
    Execute the pending state request stored under ``name``.

    Returns the state run result, or an empty dict when no matching
    (well-formed) request exists.

    CLI Example:

    .. code-block:: bash

        salt '*' state.run_request
    '''
    req = check_request()
    if name not in req:
        return {}
    n_req = req[name]
    if 'mods' not in n_req or 'kwargs' not in n_req:
        return {}
    # BUG FIX: merge runtime overrides into the stored request's kwargs.
    # The original updated req['kwargs'], but 'kwargs' is not a top-level
    # key of the request file (it lives under each named entry), so that
    # raised a KeyError whenever overrides were passed.
    n_req['kwargs'].update(kwargs)
    ret = apply_(n_req['mods'], **n_req['kwargs'])
    # The request has been consumed; remove the file, ignoring races.
    try:
        os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
    except (IOError, OSError):
        pass
    return ret
def highstate(test=None, def highstate(test=None,
queue=False, queue=False,
**kwargs): **kwargs):

View File

@ -235,7 +235,7 @@ def returner_doc(*args):
returners_ = salt.loader.returners(__opts__, []) returners_ = salt.loader.returners(__opts__, [])
docs = {} docs = {}
if not args: if not args:
for fun in returners_.keys(): for fun in returners_:
docs[fun] = returners_[fun].__doc__ docs[fun] = returners_[fun].__doc__
return _strip_rst(docs) return _strip_rst(docs)
@ -251,7 +251,8 @@ def returner_doc(*args):
else: else:
target_mod = '' target_mod = ''
if _use_fnmatch: if _use_fnmatch:
for fun in fnmatch.filter(returners_.keys(), target_mod): for fun in returners_:
if fun == module or fun.startswith(target_mod):
docs[fun] = returners_[fun].__doc__ docs[fun] = returners_[fun].__doc__
else: else:
for fun in returners_.keys(): for fun in returners_.keys():
@ -747,7 +748,7 @@ def list_returner_functions(*args, **kwargs):
returners_ = salt.loader.returners(__opts__, []) returners_ = salt.loader.returners(__opts__, [])
if not args: if not args:
# We're being asked for all functions # We're being asked for all functions
return sorted(returners_.keys()) return sorted(returners_)
names = set() names = set()
for module in args: for module in args:
@ -760,7 +761,8 @@ def list_returner_functions(*args, **kwargs):
# sysctl # sysctl
module = module + '.' if not module.endswith('.') else module module = module + '.' if not module.endswith('.') else module
if _use_fnmatch: if _use_fnmatch:
for func in fnmatch.filter(returners_.keys(), target_mod): for func in returners_:
if func.startswith(module):
names.add(func) names.add(func)
else: else:
for func in returners_.keys(): for func in returners_.keys():

View File

@ -447,7 +447,3 @@ def tty(*args, **kwargs): # pylint: disable=W0613
salt '*' test.tty pts3 'This is a test' salt '*' test.tty pts3 'This is a test'
''' '''
return 'ERROR: This function has been moved to cmd.tty' return 'ERROR: This function has been moved to cmd.tty'
def assertion(assertion_):
    '''
    Assert that the given argument is truthy, raising an AssertionError
    if it is not. Useful for checking arbitrary expressions from the CLI.

    Note: ``assert`` is stripped when Python runs with ``-O``, in which
    case this function becomes a no-op.

    CLI Example:

    .. code-block:: bash

        salt '*' test.assertion 'True'
    '''
    assert assertion_

View File

@ -83,7 +83,7 @@ def _find_utmp():
result[os.stat(utmp).st_mtime] = utmp result[os.stat(utmp).st_mtime] = utmp
except Exception: except Exception:
pass pass
return result[sorted(result.keys()).pop()] return result[sorted(result).pop()]
def _default_runlevel(): def _default_runlevel():

View File

@ -461,7 +461,7 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
elif isinstance(config_data, list): elif isinstance(config_data, list):
for interface in config_data: for interface in config_data:
if isinstance(interface, dict): if isinstance(interface, dict):
if len(interface.keys()) == 1: if len(interface) == 1:
append_dict_profile_to_interface_list(interface) append_dict_profile_to_interface_list(interface)
else: else:
interfaces.append(interface) interfaces.append(interface)
@ -551,7 +551,7 @@ def init(name,
# When using a disk profile extract the sole dict key of the first # When using a disk profile extract the sole dict key of the first
# array element as the filename for disk # array element as the filename for disk
disk_name = diskp[0].keys()[0] disk_name = diskp[0].iterkeys().next()
disk_type = diskp[0][disk_name]['format'] disk_type = diskp[0][disk_name]['format']
disk_file_name = '{0}.{1}'.format(disk_name, disk_type) disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
@ -797,7 +797,7 @@ def get_nics(vm_):
# driver, source, and match can all have optional attributes # driver, source, and match can all have optional attributes
if re.match('(driver|source|address)', v_node.tagName): if re.match('(driver|source|address)', v_node.tagName):
temp = {} temp = {}
for key in v_node.attributes.keys(): for key in v_node.attributes:
temp[key] = v_node.getAttribute(key) temp[key] = v_node.getAttribute(key)
nic[str(v_node.tagName)] = temp nic[str(v_node.tagName)] = temp
# virtualport needs to be handled separately, to pick up the # virtualport needs to be handled separately, to pick up the
@ -805,7 +805,7 @@ def get_nics(vm_):
if v_node.tagName == 'virtualport': if v_node.tagName == 'virtualport':
temp = {} temp = {}
temp['type'] = v_node.getAttribute('type') temp['type'] = v_node.getAttribute('type')
for key in v_node.attributes.keys(): for key in v_node.attributes:
temp[key] = v_node.getAttribute(key) temp[key] = v_node.getAttribute(key)
nic['virtualport'] = temp nic['virtualport'] = temp
if 'mac' not in nic: if 'mac' not in nic:
@ -855,7 +855,7 @@ def get_graphics(vm_):
for node in doc.getElementsByTagName('domain'): for node in doc.getElementsByTagName('domain'):
g_nodes = node.getElementsByTagName('graphics') g_nodes = node.getElementsByTagName('graphics')
for g_node in g_nodes: for g_node in g_nodes:
for key in g_node.attributes.keys(): for key in g_node.attributes:
out[key] = g_node.getAttribute(key) out[key] = g_node.getAttribute(key)
return out return out
@ -1685,7 +1685,7 @@ def vm_netstats(vm_=None):
'tx_errs': 0, 'tx_errs': 0,
'tx_drop': 0 'tx_drop': 0
} }
for attrs in nics.values(): for attrs in nics.itervalues():
if 'target' in attrs: if 'target' in attrs:
dev = attrs['target'] dev = attrs['target']
stats = dom.interfaceStats(dev) stats = dom.interfaceStats(dev)

View File

@ -6,6 +6,15 @@ Manage groups on Windows
# Import salt libs # Import salt libs
import salt.utils import salt.utils
try:
import win32com.client
import pythoncom
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# Define the module's virtual name # Define the module's virtual name
__virtualname__ = 'group' __virtualname__ = 'group'
@ -14,7 +23,10 @@ def __virtual__():
''' '''
Set the group module if the kernel is Windows Set the group module if the kernel is Windows
''' '''
return __virtualname__ if salt.utils.is_windows() else False if salt.utils.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
else:
return False
def add(name, gid=None, system=False): def add(name, gid=None, system=False):
@ -27,11 +39,35 @@ def add(name, gid=None, system=False):
salt '*' group.add foo salt '*' group.add foo
''' '''
cmd = 'net localgroup {0} /add'.format(name) ret = {'name': name,
'result': True,
'changes': [],
'comment': ''}
ret = __salt__['cmd.run_all'](cmd) if not info(name):
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
try:
compObj = nt.GetObject('', 'WinNT://.,computer')
newGroup = compObj.Create('group', name)
newGroup.SetInfo()
ret['changes'].append((
'Successfully created group {0}'
).format(name))
except pywintypes.com_error as com_err:
ret['result'] = False
if len(com_err.excepinfo) >= 2:
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
ret['comment'] = (
'Failed to create group {0}. {1}'
).format(name, friendly_error)
else:
ret['result'] = None
ret['comment'] = (
'The group {0} already exists.'
).format(name)
return not ret['retcode'] return ret
def delete(name): def delete(name):
@ -44,9 +80,32 @@ def delete(name):
salt '*' group.delete foo salt '*' group.delete foo
''' '''
ret = __salt__['cmd.run_all']('net localgroup {0} /delete'.format(name)) ret = {'name': name,
'result': True,
'changes': [],
'comment': ''}
return not ret['retcode'] if info(name):
pythoncom.CoInitialize()
nt = win32com.client.Dispatch('AdsNameSpaces')
try:
compObj = nt.GetObject('', 'WinNT://.,computer')
compObj.Delete('group', name)
ret['changes'].append(('Successfully removed group {0}').format(name))
except pywintypes.com_error as com_err:
ret['result'] = False
if len(com_err.excepinfo) >= 2:
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
ret['comment'] = (
'Failed to remove group {0}. {1}'
).format(name, friendly_error)
else:
ret['result'] = None
ret['comment'] = (
'The group {0} does not exists.'
).format(name)
return ret
def info(name): def info(name):
@ -59,20 +118,20 @@ def info(name):
salt '*' group.info foo salt '*' group.info foo
''' '''
lines = __salt__['cmd.run']('net localgroup {0}'.format(name)).splitlines() pythoncom.CoInitialize()
memberline = False nt = win32com.client.Dispatch('AdsNameSpaces')
try:
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
gr_name = groupObj.Name
gr_mem = [] gr_mem = []
gr_name = '' for member in groupObj.members():
for line in lines: gr_mem.append(
if 'Alias name' in line: member.ADSPath.replace('WinNT://', '').replace(
comps = line.split(' ', 1) '/', '\\').encode('ascii', 'backslashreplace'))
gr_name = comps[1].strip() except pywintypes.com_error:
if 'successfully' in line: return False
memberline = False
if memberline:
gr_mem.append(line.strip())
if '---' in line:
memberline = True
if not gr_name: if not gr_name:
return False return False
@ -96,33 +155,216 @@ def getent(refresh=False):
return __context__['group.getent'] return __context__['group.getent']
ret = [] ret = []
ret2 = []
lines = __salt__['cmd.run']('net localgroup').splitlines()
groupline = False
for line in lines:
if 'successfully' in line:
groupline = False
if groupline:
ret.append(line.strip('*').strip())
if '---' in line:
groupline = True
for item in ret:
members = []
gid = __salt__['file.group_to_gid'](item)
memberlines = __salt__['cmd.run']('net localgroup "{0}"'.format(item)).splitlines()
memberline = False
for line in memberlines:
if 'successfully' in line:
memberline = False
if memberline:
members.append(line.strip('*').strip())
if '---' in line:
memberline = True
group = {'gid': gid,
'members': members,
'name': item,
'passwd': 'x'}
ret2.append(group)
__context__['group.getent'] = ret2 pythoncom.CoInitialize()
return ret2 nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
for result in results:
member_list = []
for member in result.members():
member_list.append(
member.AdsPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace'))
group = {'gid': __salt__['file.group_to_gid'](result.name),
'members': member_list,
'name': result.name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username):
    '''
    Add a user to a group.

    Returns a state-style dict with 'name', 'result', 'changes', and
    'comment' keys; result is None when the user is already a member.

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo username
    '''
    ret = {'name': name,
           'result': True,
           'changes': {'Users Added': []},
           'comment': ''}

    # COM must be initialized on this thread before any ADSI calls.
    pythoncom.CoInitialize()
    nt = win32com.client.Dispatch('AdsNameSpaces')

    groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
    existingMembers = []
    for member in groupObj.members():
        # Normalize each member's ADsPath ('WinNT://DOMAIN/user') into
        # lowercase 'domain\user' form for comparison.
        existingMembers.append(
            member.ADSPath.replace('WinNT://', '').replace(
                '/', '\\').encode('ascii', 'backslashreplace').lower())

    try:
        if __fixlocaluser(username.lower()) not in existingMembers:
            # In test mode report the change without performing it.
            if not __opts__['test']:
                groupObj.Add('WinNT://' + username.replace('\\', '/'))
            ret['changes']['Users Added'].append(username)
        else:
            ret['comment'] = (
                'User {0} is already a member of {1}'
                ).format(username, name)
            ret['result'] = None
    except pywintypes.com_error as com_err:
        # excepinfo[2] holds the human-readable COM error description.
        if len(com_err.excepinfo) >= 2:
            friendly_error = com_err.excepinfo[2].rstrip('\r\n')
            ret['comment'] = (
                'Failed to add {0} to group {1}. {2}'
                ).format(username, name, friendly_error)
        ret['result'] = False
        return ret
    return ret
def deluser(name, username):
    '''
    Remove a user from a group.

    Returns a state-style dict with 'name', 'result', 'changes', and
    'comment' keys; result is None when the user is not a member.

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo username
    '''
    ret = {'name': name,
           'result': True,
           'changes': {'Users Removed': []},
           'comment': ''}

    # COM must be initialized on this thread before any ADSI calls.
    pythoncom.CoInitialize()
    nt = win32com.client.Dispatch('AdsNameSpaces')

    groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
    existingMembers = []
    for member in groupObj.members():
        # Normalize each member's ADsPath ('WinNT://DOMAIN/user') into
        # lowercase 'domain\user' form for comparison.
        existingMembers.append(
            member.ADSPath.replace('WinNT://', '').replace(
                '/', '\\').encode('ascii', 'backslashreplace').lower())

    try:
        if __fixlocaluser(username.lower()) in existingMembers:
            # In test mode report the change without performing it.
            if not __opts__['test']:
                groupObj.Remove('WinNT://' + username.replace('\\', '/'))
            ret['changes']['Users Removed'].append(username)
        else:
            ret['comment'] = (
                'User {0} is not a member of {1}'
                ).format(username, name)
            ret['result'] = None
    except pywintypes.com_error as com_err:
        # excepinfo[2] holds the human-readable COM error description.
        if len(com_err.excepinfo) >= 2:
            friendly_error = com_err.excepinfo[2].rstrip('\r\n')
            ret['comment'] = (
                'Failed to remove {0} from group {1}. {2}'
                ).format(username, name, friendly_error)
        ret['result'] = False
        return ret
    return ret
def members(name, members_list):
    '''
    Ensure a group's membership matches exactly the given comma-separated
    list of users: users in the list but not the group are added, users
    in the group but not the list are removed.

    Returns a state-style dict; result is None when membership already
    matches.

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3'
    '''
    ret = {'name': name,
           'result': True,
           'changes': {'Users Added': [], 'Users Removed': []},
           'comment': []}

    # Normalize each requested member to lowercase 'computername\user'.
    members_list = [__fixlocaluser(thisMember) for thisMember in members_list.lower().split(",")]
    # NOTE(review): this check is dead code — the comprehension above
    # always produces a list. Kept for byte-compatibility.
    if not isinstance(members_list, list):
        ret['result'] = False
        ret['comment'].append('Members is not a list object')
        return ret

    # COM must be initialized on this thread before any ADSI calls.
    pythoncom.CoInitialize()
    nt = win32com.client.Dispatch('AdsNameSpaces')
    try:
        groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
    except pywintypes.com_error as com_err:
        # excepinfo[2] holds the human-readable COM error description.
        if len(com_err.excepinfo) >= 2:
            friendly_error = com_err.excepinfo[2].rstrip('\r\n')
            ret['result'] = False
            ret['comment'].append((
                'Failure accessing group {0}. {1}'
                ).format(name, friendly_error))
        return ret

    existingMembers = []
    for member in groupObj.members():
        # Normalize each member's ADsPath ('WinNT://DOMAIN/user') into
        # lowercase 'domain\user' form for comparison.
        existingMembers.append(
            member.ADSPath.replace('WinNT://', '').replace(
                '/', '\\').encode('ascii', 'backslashreplace').lower())

    # Sort both sides so a simple equality test detects "no change".
    existingMembers.sort()
    members_list.sort()
    if existingMembers == members_list:
        ret['result'] = None
        ret['comment'].append(('{0} membership is correct').format(name))
        return ret

    # add users
    for member in members_list:
        if member not in existingMembers:
            try:
                # In test mode report the change without performing it.
                if not __opts__['test']:
                    groupObj.Add('WinNT://' + member.replace('\\', '/'))
                ret['changes']['Users Added'].append(member)
            except pywintypes.com_error as com_err:
                if len(com_err.excepinfo) >= 2:
                    friendly_error = com_err.excepinfo[2].rstrip('\r\n')
                    ret['result'] = False
                    ret['comment'].append((
                        'Failed to add {0} to {1}. {2}'
                        ).format(member, name, friendly_error))
                # Keep going so remaining members are still processed.
                #return ret
    # remove users not in members_list
    for member in existingMembers:
        if member not in members_list:
            try:
                # In test mode report the change without performing it.
                if not __opts__['test']:
                    groupObj.Remove('WinNT://' + member.replace('\\', '/'))
                ret['changes']['Users Removed'].append(member)
            except pywintypes.com_error as com_err:
                if len(com_err.excepinfo) >= 2:
                    friendly_error = com_err.excepinfo[2].rstrip('\r\n')
                    ret['result'] = False
                    ret['comment'].append((
                        'Failed to remove {0} from {1}. {2}'
                        ).format(member, name, friendly_error))
                # Keep going so remaining members are still processed.
                #return ret
    return ret
def __fixlocaluser(username):
    '''
    Prefix a bare username (one with no backslash) with the local computer
    name and lowercase the result, e.g. __fixlocaluser('Administrator')
    returns 'computername\\administrator'.
    '''
    # Already-qualified names (DOMAIN\user) are only lowercased.
    if '\\' not in username:
        username = '{0}\\{1}'.format(__salt__['grains.get']('host'), username)
    return username.lower()

View File

@ -62,7 +62,10 @@ def get_servers():
cmd = 'w32tm /query /configuration' cmd = 'w32tm /query /configuration'
lines = __salt__['cmd.run'](cmd).splitlines() lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines: for line in lines:
try:
if 'NtpServer' in line: if 'NtpServer' in line:
_, ntpsvrs = line.rstrip(' (Local)').split(':', 1) _, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
return sorted(ntpsvrs.split()) return sorted(ntpsvrs.split())
except ValueError as e:
return False
return False return False

View File

@ -758,6 +758,5 @@ def _reverse_cmp_pkg_versions(pkg1, pkg2):
def _get_latest_pkg_version(pkginfo): def _get_latest_pkg_version(pkginfo):
if len(pkginfo) == 1: if len(pkginfo) == 1:
return pkginfo.keys().pop() return pkginfo.iterkeys().next()
pkgkeys = pkginfo.keys() return sorted(pkginfo, cmp=_reverse_cmp_pkg_versions).pop()
return sorted(pkgkeys, cmp=_reverse_cmp_pkg_versions).pop()

View File

@ -6,11 +6,17 @@ Windows Service module.
# Import python libs # Import python libs
import salt.utils import salt.utils
from subprocess import list2cmdline from subprocess import list2cmdline
import time
import logging
log = logging.getLogger(__name__)
# Define the module's virtual name # Define the module's virtual name
__virtualname__ = 'service' __virtualname__ = 'service'
BUFFSIZE = 5000 BUFFSIZE = 5000
SERVICE_STOP_DELAY_SECONDS = 15
SERVICE_STOP_POLL_MAX_ATTEMPTS = 5
def __virtual__(): def __virtual__():
@ -220,8 +226,25 @@ def stop(name):
salt '*' service.stop <service name> salt '*' service.stop <service name>
''' '''
# net stop issues a stop command and waits briefly (~30s), but will give
# up if the service takes too long to stop with a misleading
# "service could not be stopped" message and RC 0.
cmd = list2cmdline(['net', 'stop', name]) cmd = list2cmdline(['net', 'stop', name])
return not __salt__['cmd.retcode'](cmd) res = __salt__['cmd.run'](cmd)
if 'service was stopped' in res:
return True
# we requested a stop, but the service is still thinking about it.
# poll for the real status
for attempt in range(SERVICE_STOP_POLL_MAX_ATTEMPTS):
if not status(name):
return True
log.debug('Waiting for %s to stop', name)
time.sleep(SERVICE_STOP_DELAY_SECONDS)
log.warning('Giving up on waiting for service `%s` to stop', name)
return False
def restart(name): def restart(name):
@ -237,8 +260,7 @@ def restart(name):
if has_powershell(): if has_powershell():
cmd = 'Restart-Service {0}'.format(name) cmd = 'Restart-Service {0}'.format(name)
return not __salt__['cmd.retcode'](cmd, shell='powershell') return not __salt__['cmd.retcode'](cmd, shell='powershell')
stop(name) return stop(name) and start(name)
return start(name)
def status(name, sig=None): def status(name, sig=None):

View File

@ -269,7 +269,7 @@ class PyWinUpdater(object):
''' '''
updates = self.GetInstallationResults() updates = self.GetInstallationResults()
ret = 'The following are the updates and their return codes.\n' ret = 'The following are the updates and their return codes.\n'
for i in updates.keys(): for i in updates:
ret += '\t{0}\n'.format(updates[i]) ret += '\t{0}\n'.format(updates[i])
return ret return ret
@ -316,8 +316,8 @@ class PyWinUpdater(object):
def SetIncludes(self, includes): def SetIncludes(self, includes):
if includes: if includes:
for i in includes: for i in includes:
value = i[i.keys()[0]] value = i[i.iterkeys().next()]
include = i.keys()[0] include = i.iterkeys().next()
self.SetInclude(include, value) self.SetInclude(include, value)
log.debug('was asked to set {0} to {1}'.format(include, value)) log.debug('was asked to set {0} to {1}'.format(include, value))

538
salt/modules/xfs.py Normal file
View File

@ -0,0 +1,538 @@
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
# Copyright (C) 2014 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
'''
Module for managing XFS file systems.
'''
import os
import re
import time
import logging
import salt.utils
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this module only on POSIX-like systems with a Linux kernel;
    the XFS userspace tools are Linux-specific.
    '''
    kernel_is_linux = __grains__.get('kernel') == 'Linux'
    return (not salt.utils.is_windows()) and kernel_is_linux
def _verify_run(out, cmd=None):
    '''
    Raise ``CommandExecutionError`` (after logging the details) when a
    ``cmd.run_all`` result *out* has both a non-zero retcode and stderr
    output; otherwise do nothing.
    '''
    if not out.get("retcode", 0):
        return
    if not out['stderr']:
        return
    if cmd:
        log.debug('Command: "{0}"'.format(cmd))
    log.debug('Return code: {0}'.format(out.get('retcode')))
    log.debug('Error output:\n{0}'.format(out.get('stderr', "N/A")))
    raise CommandExecutionError(out['stderr'])
def _xfs_info_get_kv(serialized):
    '''
    Split one line of ``xfs_info`` output into ``(key, value)`` tuples.

    Keys never contain spaces, values may: a token without ``=`` is
    glued onto the previous pair's value.  A bare ``key =`` marker is
    normalised to the placeholder value ``***`` so ordering survives.
    '''
    if serialized.startswith("="):
        serialized = serialized[1:].strip()

    # Normalise "key = value" / "key =value" so every pair is "key=value"
    serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")

    pairs = []
    for token in serialized.split(" "):
        if pairs and "=" not in token:
            # Continuation of the previous value (values may hold spaces)
            pairs[-1] = "{0} {1}".format(pairs[-1], token)
        else:
            pairs.append(token)

    # Preserve ordering
    return [tuple(pair.split("=")) for pair in pairs]


def _parse_xfs_info(data):
    '''
    Parse the full output of ``xfs_info`` or ``xfs_growfs -n`` into a
    dict keyed by section name.  Continuation lines (those starting
    with "=") are merged into the most recently opened section.
    '''
    whitespace = re.compile(r'\s+')
    ret = {}
    entry = None
    for raw in data.split("\n"):
        line = whitespace.sub(" ", raw).strip().replace(", ", " ")
        if not line:
            continue
        pairs = _xfs_info_get_kv(line)
        if not line.startswith("="):
            # New section: its first pair names it; '***' means the
            # section marker carried no value of its own.
            entry = pairs.pop(0)
            section = entry[1] if entry[1] != '***' else entry[0]
            ret[entry[0]] = {'section': section}
        ret[entry[0]].update(dict(pairs))

    return ret
def info(device):
    '''
    Get filesystem geometry information (via ``xfs_info``).

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    result = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    error = result.get('stderr')
    if error:
        raise CommandExecutionError(error.replace("xfs_info:", "").strip())
    return _parse_xfs_info(result['stdout'])
def _xfsdump_output(data):
    '''
    Parse CLI output of the xfsdump utility into a dict of the
    interesting fields (session id/label, media size, completion,
    status and the dump summary).
    '''
    out = {}
    summary = []
    in_summary = False

    for line in [l.strip() for l in data.split("\n") if l.strip()]:
        line = re.sub("^xfsdump: ", "", line)

        if line.startswith("session id:"):
            out['Session ID'] = line.split(" ")[-1]
        elif line.startswith("session label:"):
            out['Session label'] = re.sub("^session label: ", "", line)
        elif line.startswith("media file size"):
            out['Media size'] = re.sub(r"^media file size\s+", "", line)
        elif line.startswith("dump complete:"):
            out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
        elif line.startswith("Dump Status:"):
            out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
        elif line.startswith("Dump Summary:"):
            in_summary = True
            continue

        # Summary lines keep their indentation after the "xfsdump: "
        # prefix is removed, which is how they are recognised here.
        if in_summary:
            if line.startswith(" "):
                summary.append(line.strip())
            else:
                in_summary = False

    if summary:
        out['Summary'] = ' '.join(summary)

    return out
def dump(device, destination, level=0, label=None, noerase=None):
    '''
    Dump filesystem device to the media (file, tape etc).

    Required parameters:

    * **device**: XFS device, content of which to be dumped.
    * **destination**: Specifies a dump destination.

    Valid options are:

    * **label**: Label of the dump. Otherwise automatically generated label is used.
    * **level**: Specifies a dump level of 0 to 9.
    * **noerase**: Pre-erase media.

    Other options are not used in order to let ``xfsdump`` use its default
    values, as they are most optimal. See the ``xfsdump(8)`` manpage for
    a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.dump /dev/sda1 /destination/on/the/client
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
        salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
    '''
    if not salt.utils.which("xfsdump"):
        raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")

    if not label:
        # Auto-generate a label; single quotes are swapped out so the
        # shell-quoted -L argument below stays intact.
        stamp = time.strftime('XFS dump for "{0}" of %Y.%m.%d, %H:%M'.format(device),
                              time.localtime())
        label = stamp.replace("'", '"')

    parts = ["xfsdump", "-F"]        # -F: force, do not prompt
    if not noerase:
        parts.append("-E")           # pre-erase media
    parts.append("-L '{0}'".format(label))        # Label
    parts.append("-l {0}".format(level))          # Dump level
    parts.append("-f {0}".format(destination))    # Media destination
    parts.append(device)                          # Device

    cmd = ' '.join(parts)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _xfsdump_output(out['stdout'])
def _xr_to_keyset(line):
    '''
    Convert one line of xfsrestore output into a Python dict-source
    fragment: ``key: value`` becomes ``'key': 'value',`` and a bare
    ``key:`` becomes ``'key': `` (its value is the nested block that
    follows in the output).
    '''
    tokens = [tkn for tkn in line.strip().split(":", 1) if tkn]
    if len(tokens) == 1:
        return "'{0}': ".format(tokens[0])
    key, value = tokens
    return "'{0}': '{1}',".format(key.strip(), value.strip())
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and evaluate it.

    The inventory is a tab-indented listing of ``key:`` (block opener)
    and ``key: value`` lines.  Each line is rewritten via
    ``_xr_to_keyset`` into dict-literal source text, nesting/closing
    braces are inserted according to the tab depth, and the assembled
    text is evaluated.  The final output line is stored verbatim under
    the ``restore_status`` key.
    '''
    data = []
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    ident = 0
    data.append("{")
    for line in out[:-1]:
        # A line with exactly one colon-delimited token is a bare
        # "key:" block opener; its nesting depth is its tab count.
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Depth decreased: close every block we are leaving.
                for step in range(ident):
                    data.append("},")
                ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            # Plain "key: value" entry within the current block.
            data.append(_xr_to_keyset(line))
    # Close all still-open blocks plus the outermost literal.
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # We are evaling into a python dict, a json load
    # would be safer
    # NOTE(review): eval() of constructed source is risky in principle;
    # the input is local xfsrestore output rather than remote data, but
    # a real parser would be preferable — confirm before changing.
    data = eval('\n'.join(data))[0]  # pylint: disable=W0123
    data['restore_status'] = out[-1]

    return data
def inventory():
    '''
    Display XFS dump inventory without restoration.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.inventory
    '''
    result = __salt__['cmd.run_all']("xfsrestore -I")
    _verify_run(result)
    return _xfs_inventory_output(result['stdout'])
def _xfs_prune_output(out, uuid):
    '''
    Parse ``xfsinvutil`` prune output: collect the lines between the
    first and second dashed separator, turn the ``key: value`` entries
    into a dict (keys lower-cased, spaces -> underscores), and return
    it only when its uuid matches *uuid*; otherwise return ``{}``.
    '''
    entry = {}
    body = []
    seen_separator = False

    for line in [l.strip() for l in out.split("\n") if l]:
        if line.startswith("-"):
            if seen_separator:
                break
            seen_separator = True
            continue
        if seen_separator:
            body.append(line)

    # First body line is a header; the rest are "key: value" pairs.
    for pair in [itm for itm in body[1:] if ':' in itm]:
        key, value = [part.strip() for part in pair.split(":", 1)]
        entry[key.lower().replace(" ", "_")] = value

    return entry if entry.get('uuid') == uuid else {}
def prune_dump(sessionid):
    '''
    Prunes the dump session identified by the given session id.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c

    '''
    result = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
    _verify_run(result)

    pruned = _xfs_prune_output(result['stdout'], sessionid)
    if not pruned:
        raise CommandExecutionError(
            "Session UUID \"{0}\" was not found.".format(sessionid))
    return pruned
def _blkid_output(out):
    '''
    Parse ``blkid -o export`` output into a dict of XFS devices keyed
    by device name, enriched with mount information for devices that
    are currently mounted.
    '''
    def _non_empty(items):
        return [itm for itm in items if itm.strip()]

    data = {}
    # blkid separates devices with a blank line
    for device_meta in _non_empty(out.split("\n\n")):
        device = {}
        for attribute in _non_empty(device_meta.strip().split("\n")):
            key, value = attribute.split("=", 1)
            device[key.lower()] = value
        if device.pop("type") == "xfs":
            # Make sure the key exists even when blkid reported no label
            device['label'] = device.get('label')
            data[device.pop("devname")] = device

    mounts = _get_mounts()
    for dev_name in mounts:
        if data.get(dev_name):
            data[dev_name].update(mounts[dev_name])

    return data
def devices():
    '''
    Get known XFS formatted devices on the system.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.devices
    '''
    result = __salt__['cmd.run_all']("blkid -o export")
    _verify_run(result)
    return _blkid_output(result['stdout'])
def _xfs_estimate_output(out):
    '''
    Parse ``xfs_estimate -v`` output into a dict keyed by directory.
    The first line (column header) is skipped; each remaining line
    carries five whitespace-separated fields.
    '''
    whitespace = re.compile(r"\s+")
    data = {}
    for line in [l for l in out.split("\n") if l.strip()][1:]:
        directory, bsize, blocks, megabytes, logsize = whitespace.sub(" ", line).split(" ")
        data[directory] = {
            # NOTE(review): key kept verbatim; 'block _size' looks like
            # a typo for 'block_size' but callers may depend on it —
            # confirm before renaming.
            'block _size': bsize,
            'blocks': blocks,
            'megabytes': megabytes,
            'logsize': logsize,
        }

    return data
def estimate(path):
    '''
    Estimate the space that an XFS filesystem will take.
    For each directory estimate the space that directory would take
    if it were copied to an XFS filesystem.
    Estimation does not cross mount points.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.estimate /path/to/file
        salt '*' xfs.estimate /path/to/dir/*
    '''
    if not os.path.exists(path):
        raise CommandExecutionError("Path \"{0}\" was not found.".format(path))

    result = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
    _verify_run(result)

    return _xfs_estimate_output(result["stdout"])
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default wipes out with force.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force create filesystem, if disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of corresponding options description.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''
    def _to_opts(args):
        # "k1=v1,k2=v2" -> {'k1': 'v1', ...}; None/empty/no "=" -> {};
        # malformed pairs raise, mirroring the original parse behaviour.
        if not (args and "=" in args):
            return {}
        return dict([kw.split("=") for kw in args.split(",")])

    cmd = ["mkfs.xfs"]
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])
    if ssize:
        cmd.extend(["-s", ssize])

    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if _to_opts(opts):
                cmd.extend([switch, opts])
        except Exception:
            raise CommandExecutionError(
                "Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))

    if not noforce:
        cmd.append("-f")
    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
def modify(device, label=None, lazy_counting=None, uuid=None):
    '''
    Modify parameters of an XFS filesystem.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
        salt '*' xfs.modify /dev/sda1 uuid=False
        salt '*' xfs.modify /dev/sda1 uuid=True
    '''
    if not label and lazy_counting is None and uuid is None:
        raise CommandExecutionError(
            "Nothing specified for modification for \"{0}\" device".format(device))

    cmd = ['xfs_admin']
    if label:
        cmd.extend(["-L", "'{0}'".format(label)])

    # Tri-state: None leaves lazy counting unchanged
    if lazy_counting is False:
        cmd.extend(["-c", "0"])
    elif lazy_counting:
        cmd.extend(["-c", "1"])

    # Tri-state: False resets the UUID, truthy generates a new one
    if uuid is False:
        cmd.extend(["-U", "nil"])
    elif uuid:
        cmd.extend(["-U", "generate"])

    cmd.append(device)
    cmd = ' '.join(cmd)
    _verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)

    # Re-read the device so the caller gets the post-modification state
    result = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
    _verify_run(result)

    return _blkid_output(result['stdout'])
def _get_mounts():
    '''
    Return currently mounted XFS filesystems as a dict:
    ``{device: {'mount_point': ..., 'options': [...]}}``.

    Reads ``/proc/mounts`` directly; entries with any other fstype are
    skipped.
    '''
    mounts = {}
    # Use a context manager so the file handle is always closed
    # (the previous implementation opened it without closing).
    with open("/proc/mounts") as proc_mounts:
        for line in proc_mounts:
            device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
            if fstype != 'xfs':
                continue
            mounts[device] = {
                'mount_point': mntpnt,
                'options': options.split(","),
            }

    return mounts
def defragment(device):
    '''
    Defragment mounted XFS filesystem.
    In order to mount a filesystem, device should be properly mounted and writable.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.defragment /dev/sda1
    '''
    if device == '/':
        raise CommandExecutionError("Root is not a device.")

    if not _get_mounts().get(device):
        raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))

    result = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
    _verify_run(result)

    return {'log': result['stdout']}

View File

@ -965,7 +965,7 @@ def install(name=None,
exclude=exclude_arg, exclude=exclude_arg,
branch=branch_arg, branch=branch_arg,
gpgcheck='--nogpgcheck' if skip_verify else '', gpgcheck='--nogpgcheck' if skip_verify else '',
pkg=' '.join(to_reinstall.values()), pkg=' '.join(to_reinstall.itervalues()),
) )
__salt__['cmd.run'](cmd, output_loglevel='trace') __salt__['cmd.run'](cmd, output_loglevel='trace')
@ -1843,5 +1843,5 @@ def owner(*paths):
if 'not owned' in ret[path].lower(): if 'not owned' in ret[path].lower():
ret[path] = '' ret[path] = ''
if len(ret) == 1: if len(ret) == 1:
return ret.values()[0] return ret.itervalues().next()
return ret return ret

View File

@ -140,7 +140,9 @@ def lock(path,
identifier=None, identifier=None,
max_concurrency=1, max_concurrency=1,
timeout=None, timeout=None,
ephemeral_lease=False): ephemeral_lease=False,
force=False, # foricble get the lock regardless of open slots
):
''' '''
Get lock (with optional timeout) Get lock (with optional timeout)
''' '''
@ -151,6 +153,11 @@ def lock(path,
identifier, identifier,
max_leases=max_concurrency, max_leases=max_concurrency,
ephemeral_lease=ephemeral_lease) ephemeral_lease=ephemeral_lease)
# forcibly get the lock regardless of max_concurrency
if force:
SEMAPHORE_MAP[path].assured_path = True
# block waiting for lock acquisition # block waiting for lock acquisition
if timeout: if timeout:
logging.info('Acquiring lock {0} with timeout={1}'.format(path, timeout)) logging.info('Acquiring lock {0} with timeout={1}'.format(path, timeout))

View File

@ -1,6 +1,8 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
''' '''
Package support for openSUSE via the zypper package manager Package support for openSUSE via the zypper package manager
:optdepends: - `zypp` Python module. Install with `zypper install python-zypp`
''' '''
# Import python libs # Import python libs
@ -8,25 +10,21 @@ import copy
import logging import logging
import re import re
import os import os
import ConfigParser
import urlparse
from xml.dom import minidom as dom from xml.dom import minidom as dom
from contextlib import contextmanager as _contextmanager
# Import salt libs # Import salt libs
import salt.utils import salt.utils
from salt.utils.decorators import depends as _depends
from salt.exceptions import ( from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError) CommandExecutionError, MinionError)
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
HAS_ZYPP = False HAS_ZYPP = False
LOCKS = "/etc/zypp/locks" ZYPP_HOME = "/etc/zypp"
LOCKS = "{0}/locks".format(ZYPP_HOME)
try: REPOS = "{0}/repos.d".format(ZYPP_HOME)
import zypp
HAS_ZYPP = True
except ImportError:
pass
# Define the module's virtual name # Define the module's virtual name
__virtualname__ = 'pkg' __virtualname__ = 'pkg'
@ -36,8 +34,6 @@ def __virtual__():
''' '''
Set the virtual pkg module if the os is openSUSE Set the virtual pkg module if the os is openSUSE
''' '''
if not HAS_ZYPP:
return False
if __grains__.get('os_family', '') != 'Suse': if __grains__.get('os_family', '') != 'Suse':
return False return False
# Not all versions of Suse use zypper, check that it is available # Not all versions of Suse use zypper, check that it is available
@ -142,7 +138,7 @@ def latest_version(*names, **kwargs):
pkginfo[key] = val pkginfo[key] = val
# Ignore if the needed keys weren't found in this iteration # Ignore if the needed keys weren't found in this iteration
if not set(('name', 'version', 'status')) <= set(pkginfo.keys()): if not set(('name', 'version', 'status')) <= set(pkginfo):
continue continue
status = pkginfo['status'].lower() status = pkginfo['status'].lower()
@ -229,203 +225,35 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret return ret
class _RepoInfo(object): def _get_configured_repos():
''' '''
Incapsulate all properties that are dumped in zypp._RepoInfo.dumpOn: Get all the info about repositories from the configurations.
http://doc.opensuse.org/projects/libzypp/HEAD/classzypp_1_1RepoInfo.html#a2ba8fdefd586731621435428f0ec6ff1
''' '''
repo_types = {}
if HAS_ZYPP: repos_cfg = ConfigParser.ConfigParser()
repo_types = { repos_cfg.read([REPOS + "/" + fname for fname in os.listdir(REPOS)])
zypp.RepoType.NONE_e: 'NONE',
zypp.RepoType.RPMMD_e: 'rpm-md',
zypp.RepoType.YAST2_e: 'yast2',
zypp.RepoType.RPMPLAINDIR_e: 'plaindir',
}
def __init__(self, zypp_repo_info=None): return repos_cfg
self.zypp = zypp_repo_info if zypp_repo_info else zypp.RepoInfo()
@property
def options(self):
class_items = self.__class__.__dict__.iteritems()
return dict([(k, getattr(self, k)) for k, v in class_items
if isinstance(v, property) and k != 'options'
and getattr(self, k) not in (None, '')])
def _check_only_mirrorlist_or_url(self):
if all(x in self.options for x in ('mirrorlist', 'url')):
raise ValueError(
'Only one of \'mirrorlist\' and \'url\' can be specified')
def _zypp_url(self, url):
return zypp.Url(url) if url else zypp.Url()
@options.setter
def options(self, value):
for k, v in value.iteritems():
setattr(self, k, v)
@property
def alias(self):
return self.zypp.alias()
@alias.setter
def alias(self, value):
if value:
self.zypp.setAlias(value)
else:
raise ValueError('Alias cannot be empty')
@property
def autorefresh(self):
return self.zypp.autorefresh()
@autorefresh.setter
def autorefresh(self, value):
self.zypp.setAutorefresh(value)
@property
def enabled(self):
return self.zypp.enabled()
@enabled.setter
def enabled(self, value):
self.zypp.setEnabled(value)
@property
def gpgcheck(self):
return self.zypp.gpgCheck()
@gpgcheck.setter
def gpgcheck(self, value):
self.zypp.setGpgCheck(value)
@property
def gpgkey(self):
return self.zypp.gpgKeyUrl().asCompleteString()
@gpgkey.setter
def gpgkey(self, value):
self.zypp.setGpgKeyUrl(self._zypp_url(value))
@property
def keeppackages(self):
return self.zypp.keepPackages()
@keeppackages.setter
def keeppackages(self, value):
self.zypp.setKeepPackages(value)
@property
def metadataPath(self):
return self.zypp.metadataPath().c_str()
@metadataPath.setter
def metadataPath(self, value):
self.zypp.setMetadataPath(value)
@property
def mirrorlist(self):
return self.zypp.mirrorListUrl().asCompleteString()
@mirrorlist.setter
def mirrorlist(self, value):
self.zypp.setMirrorListUrl(self._zypp_url(value))
# self._check_only_mirrorlist_or_url()
@property
def name(self):
return self.zypp.name()
@name.setter
def name(self, value):
self.zypp.setName(value)
@property
def packagesPath(self):
return self.zypp.packagesPath().c_str()
@packagesPath.setter
def packagesPath(self, value):
self.zypp.setPackagesPath(self._zypp_url(value))
@property
def path(self):
return self.zypp.path().c_str()
@path.setter
def path(self, value):
self.zypp.setPath(self._zypp_url(value))
@property
def priority(self):
return self.zypp.priority()
@priority.setter
def priority(self, value):
self.zypp.setPriority(value)
@property
def service(self):
return self.zypp.service()
@service.setter
def service(self, value):
self.zypp.setService(value)
@property
def targetdistro(self):
return self.zypp.targetDistribution()
@targetdistro.setter
def targetdistro(self, value):
self.zypp.setTargetDistribution(value)
@property
def type(self):
return self.repo_types[self.zypp.type().toEnum()]
@type.setter
def type(self, value):
self.zypp.setType(next(k for k, v in self.repo_types if v == value))
@property
def url(self):
return self.zypp.url().asCompleteString()
@url.setter
def url(self, value):
self.zypp.setBaseUrl(self._zypp_url(value))
# self._check_only_mirrorlist_or_url()
@_contextmanager def _get_repo_info(alias, repos_cfg=None):
def _try_zypp():
''' '''
Convert errors like: Get one repo meta-data.
'RuntimeError: [|] Repository has no alias defined.'
into
'ERROR: Repository has no alias defined.'.
''' '''
try: try:
yield meta = dict((repos_cfg or _get_configured_repos()).items(alias))
except RuntimeError as e: meta['alias'] = alias
raise CommandExecutionError(re.sub(r'\[.*\] ', '', str(e))) for k, v in meta.items():
if v in ['0', '1']:
meta[k] = int(meta[k]) == 1
elif v == 'NONE':
meta[k] = None
return meta
except Exception:
return {}
@_depends('zypp') def get_repo(repo):
def _get_zypp_repo(repo, **kwargs):
'''
Get zypp._RepoInfo object by repo alias.
'''
with _try_zypp():
return zypp.RepoManager().getRepositoryInfo(repo)
@_depends('zypp')
def get_repo(repo, **kwargs):
''' '''
Display a repo. Display a repo.
@ -435,14 +263,9 @@ def get_repo(repo, **kwargs):
salt '*' pkg.get_repo alias salt '*' pkg.get_repo alias
''' '''
try: return _get_repo_info(repo)
r = _RepoInfo(_get_zypp_repo(repo))
except CommandExecutionError:
return {}
return r.options
@_depends('zypp')
def list_repos(): def list_repos():
''' '''
Lists all repos. Lists all repos.
@ -453,15 +276,15 @@ def list_repos():
salt '*' pkg.list_repos salt '*' pkg.list_repos
''' '''
with _try_zypp(): repos_cfg = _get_configured_repos()
ret = {} all_repos = {}
for r in zypp.RepoManager().knownRepositories(): for alias in repos_cfg.sections():
ret[r.alias()] = get_repo(r.alias()) all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)
return ret
return all_repos
@_depends('zypp') def del_repo(repo):
def del_repo(repo, **kwargs):
''' '''
Delete a repo. Delete a repo.
@ -470,26 +293,46 @@ def del_repo(repo, **kwargs):
.. code-block:: bash .. code-block:: bash
salt '*' pkg.del_repo alias salt '*' pkg.del_repo alias
salt '*' pkg.del_repo alias
''' '''
r = _get_zypp_repo(repo) repos_cfg = _get_configured_repos()
with _try_zypp(): for alias in repos_cfg.sections():
zypp.RepoManager().removeRepository(r) if alias == repo:
return 'File {1} containing repo {0!r} has been removed.\n'.format( cmd = ('zypper -x --non-interactive rr --loose-auth --loose-query {0}'.format(alias))
repo, r.path().c_str()) doc = dom.parseString(__salt__['cmd.run'](cmd, output_loglevel='trace'))
msg = doc.getElementsByTagName("message")
if doc.getElementsByTagName("progress") and msg:
return {
repo: True,
'message': msg[0].childNodes[0].nodeValue,
}
raise CommandExecutionError('Repository "{0}" not found.'.format(repo))
@_depends('zypp')
def mod_repo(repo, **kwargs): def mod_repo(repo, **kwargs):
''' '''
Modify one or more values for a repo. If the repo does not exist, it will Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the following values are specified: be created, so long as the following values are specified:
repo repo or alias
alias by which the zypper refers to the repo alias by which the zypper refers to the repo
url or mirrorlist url or mirrorlist
the URL for zypper to reference the URL for zypper to reference
enabled
enable or disable (True or False) repository,
but do not remove if disabled.
refresh
enable or disable (True or False) auto-refresh of the repository.
cache
Enable or disable (True or False) RPM files caching.
gpgcheck
Enable or disable (True or False) GOG check for this repository.
Key/Value pairs may also be removed from a repo's configuration by setting Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a a key to a blank value. Bear in mind that a name cannot be deleted, and a
url can only be deleted if a mirrorlist is specified (or vice versa). url can only be deleted if a mirrorlist is specified (or vice versa).
@ -499,33 +342,91 @@ def mod_repo(repo, **kwargs):
.. code-block:: bash .. code-block:: bash
salt '*' pkg.mod_repo alias alias=new_alias salt '*' pkg.mod_repo alias alias=new_alias
salt '*' pkg.mod_repo alias enabled=True
salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/ salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
''' '''
# Filter out '__pub' arguments, as well as saltenv
repo_opts = {}
for x in kwargs:
if not x.startswith('__') and x not in ('saltenv',):
repo_opts[x] = kwargs[x]
repo_manager = zypp.RepoManager() repos_cfg = _get_configured_repos()
added = False
# An attempt to add new one?
if repo not in repos_cfg.sections():
url = kwargs.get("url", kwargs.get("mirrorlist"))
if not url:
raise CommandExecutionError(
'Repository "{0}" not found and no URL passed to create one.'.format(repo))
if not urlparse.urlparse(url).scheme:
raise CommandExecutionError(
'Repository "{0}" not found and passed URL looks wrong.'.format(repo))
# Is there already such repo under different alias?
for alias in repos_cfg.sections():
repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)
# Complete user URL, in case it is not
new_url = urlparse.urlparse(url)
if not new_url.path:
new_url = urlparse.ParseResult(scheme=new_url.scheme,
netloc=new_url.netloc,
path='/',
params=new_url.params,
query=new_url.query,
fragment=new_url.fragment)
base_url = urlparse.urlparse(repo_meta["baseurl"])
if new_url == base_url:
raise CommandExecutionError(
'Repository "{0}" already exists as "{1}".'.format(repo, alias))
# Add new repo
doc = None
try: try:
r = _RepoInfo(repo_manager.getRepositoryInfo(repo)) # Try to parse the output and find the error,
new_repo = False # but this not always working (depends on Zypper version)
except RuntimeError: doc = dom.parseString(__salt__['cmd.run'](("zypper -x ar {0} '{1}'".format(url, repo)),
r = _RepoInfo() output_loglevel='trace'))
r.alias = repo except Exception:
new_repo = True # No XML out available, but it is still unknown the state of the result.
try: pass
r.options = repo_opts
except ValueError as e: if doc:
raise SaltInvocationError(str(e)) msg_nodes = doc.getElementsByTagName("message")
with _try_zypp(): if msg_nodes:
if new_repo: msg_node = msg_nodes[0]
repo_manager.addRepository(r.zypp) if msg_node.getAttribute("type") == "error":
else: raise CommandExecutionError(msg_node.childNodes[0].nodeValue)
repo_manager.modifyRepository(repo, r.zypp)
return r.options # Verify the repository has been added
repos_cfg = _get_configured_repos()
if repo not in repos_cfg.sections():
raise CommandExecutionError(
'Failed add new repository "{0}" for unknown reason. Please look into Zypper logs.'.format(repo))
added = True
# Modify added or existing repo according to the options
cmd_opt = []
if "enabled" in kwargs:
cmd_opt.append(kwargs["enabled"] and "--enable" or "--disable")
if "refresh" in kwargs:
cmd_opt.append(kwargs["refresh"] and "--refresh" or "--no-refresh")
if "cache" in kwargs:
cmd_opt.append(kwargs["cache"] and "--keep-packages" or "--no-keep-packages")
if "gpgcheck" in kwargs:
cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck")
if cmd_opt:
__salt__['cmd.run'](("zypper -x mr {0} '{1}'".format(' '.join(cmd_opt), repo)),
output_loglevel='trace')
# If repo nor added neither modified, error should be thrown
if not added and not cmd_opt:
raise CommandExecutionError('Modification of the repository "{0}" was not specified.'.format(repo))
return {}
def refresh_db(): def refresh_db():
@ -1085,3 +986,29 @@ def list_installed_patterns():
salt '*' pkg.list_installed_patterns salt '*' pkg.list_installed_patterns
''' '''
return _get_patterns(installed_only=True) return _get_patterns(installed_only=True)
def search(criteria):
'''
List known packags, available to the system.
CLI Examples:
.. code-block:: bash
salt '*' pkg.search <criteria>
'''
doc = dom.parseString(__salt__['cmd.run'](('zypper --xmlout se {0}'.format(criteria)),
output_loglevel='trace'))
solvables = doc.getElementsByTagName("solvable")
if not solvables:
raise CommandExecutionError("No packages found by criteria \"{0}\".".format(criteria))
out = {}
for solvable in [s for s in solvables
if s.getAttribute("status") == "not-installed" and
s.getAttribute("kind") == "package"]:
out[solvable.getAttribute("name")] = {
'summary': solvable.getAttribute("summary")
}
return out

View File

@ -69,7 +69,7 @@ class SaltInfo(object):
minion = self.minions[mid] minion = self.minions[mid]
minion.update({'grains': event_info['return']}) minion.update({'grains': event_info['return']})
logger.debug("In process minion grains update with minions={0}".format(self.minions.keys())) logger.debug("In process minion grains update with minions={0}".format(self.minions))
self.publish_minions() self.publish_minions()
def process_ret_job_event(self, event_data): def process_ret_job_event(self, event_data):
@ -157,7 +157,7 @@ class SaltInfo(object):
if set(salt_data['data'].get('lost', [])): if set(salt_data['data'].get('lost', [])):
dropped_minions = set(salt_data['data'].get('lost', [])) dropped_minions = set(salt_data['data'].get('lost', []))
else: else:
dropped_minions = set(self.minions.keys()) - set(salt_data['data'].get('present', [])) dropped_minions = set(self.minions) - set(salt_data['data'].get('present', []))
for minion in dropped_minions: for minion in dropped_minions:
changed = True changed = True
@ -169,9 +169,9 @@ class SaltInfo(object):
logger.debug('got new minions') logger.debug('got new minions')
new_minions = set(salt_data['data'].get('new', [])) new_minions = set(salt_data['data'].get('new', []))
changed = True changed = True
elif set(salt_data['data'].get('present', [])) - set(self.minions.keys()): elif set(salt_data['data'].get('present', [])) - set(self.minions):
logger.debug('detected new minions') logger.debug('detected new minions')
new_minions = set(salt_data['data'].get('present', [])) - set(self.minions.keys()) new_minions = set(salt_data['data'].get('present', [])) - set(self.minions)
changed = True changed = True
else: else:
new_minions = [] new_minions = []

View File

@ -18,8 +18,8 @@ def output(data):
Rather basic.... Rather basic....
''' '''
tmp = {} tmp = {}
for min_ in data.keys(): for min_ in data:
for process in data[min_].keys(): for process in data[min_]:
add = False add = False
if data[min_][process]['result'] is False: if data[min_][process]['result'] is False:
add = True add = True

View File

@ -232,8 +232,8 @@ def _format_host(host, data):
# Append result counts to end of output # Append result counts to end of output
colorfmt = u'{0}{1}{2[ENDC]}' colorfmt = u'{0}{1}{2[ENDC]}'
rlabel = {True: u'Succeeded', False: u'Failed', None: u'Not Run'} rlabel = {True: u'Succeeded', False: u'Failed', None: u'Not Run'}
count_max_len = max([len(str(x)) for x in rcounts.values()] or [0]) count_max_len = max([len(str(x)) for x in rcounts.itervalues()] or [0])
label_max_len = max([len(x) for x in rlabel.values()] or [0]) label_max_len = max([len(x) for x in rlabel.itervalues()] or [0])
line_max_len = label_max_len + count_max_len + 2 # +2 for ': ' line_max_len = label_max_len + count_max_len + 2 # +2 for ': '
hstrs.append( hstrs.append(
colorfmt.format( colorfmt.format(
@ -295,7 +295,7 @@ def _format_host(host, data):
) )
totals = u'{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len, totals = u'{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len,
sum(rcounts.values()), sum(rcounts.itervalues()),
line_max_len - 7) line_max_len - 7)
hstrs.append(colorfmt.format(colors['CYAN'], totals, colors)) hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))
@ -349,7 +349,7 @@ def _strip_clean(returns):
''' '''
rm_tags = [] rm_tags = []
for tag in returns: for tag in returns:
if not isinstance(tag, dict): if isinstance(tag, dict):
continue continue
if returns[tag]['result'] and not returns[tag]['changes']: if returns[tag]['result'] and not returns[tag]['changes']:
rm_tags.append(tag) rm_tags.append(tag)

View File

@ -38,6 +38,6 @@ def output(data):
if 'nics' in vm_data: if 'nics' in vm_data:
for mac in vm_data['nics']: for mac in vm_data['nics']:
out += ' Nic - {0}:\n'.format(mac) out += ' Nic - {0}:\n'.format(mac)
out += ' Source: {0}\n'.format(vm_data['nics'][mac]['source'][vm_data['nics'][mac]['source'].keys()[0]]) out += ' Source: {0}\n'.format(vm_data['nics'][mac]['source'][vm_data['nics'][mac]['source'].iterkeys().next()])
out += ' Type: {0}\n'.format(vm_data['nics'][mac]['type']) out += ' Type: {0}\n'.format(vm_data['nics'][mac]['type'])
return out return out

View File

@ -95,7 +95,7 @@ class OverState(object):
''' '''
names = set() names = set()
for comp in self.over: for comp in self.over:
names.add(comp.keys()[0]) names.add(comp.iterkeys().next())
return names return names
def get_stage(self, name): def get_stage(self, name):
@ -177,7 +177,7 @@ class OverState(object):
if isinstance(fun_d, str): if isinstance(fun_d, str):
fun = fun_d fun = fun_d
elif isinstance(fun_d, dict): elif isinstance(fun_d, dict):
fun = fun_d.keys()[0] fun = fun_d.iterkeys().next()
arg = fun_d[fun] arg = fun_d[fun]
else: else:
yield {name: {}} yield {name: {}}
@ -212,7 +212,7 @@ class OverState(object):
else: else:
# Req has not be called # Req has not be called
for comp in self.over: for comp in self.over:
rname = comp.keys()[0] rname = comp.iterkeys().next()
if req == rname: if req == rname:
rstage = comp[rname] rstage = comp[rname]
v_stage = self.verify_stage(rstage) v_stage = self.verify_stage(rstage)
@ -263,7 +263,7 @@ class OverState(object):
self.over_run = {} self.over_run = {}
for comp in self.over: for comp in self.over:
name = comp.keys()[0] name = comp.iterkeys().next()
stage = comp[name] stage = comp[name]
if name not in self.over_run: if name not in self.over_run:
self.call_stage(name, stage) self.call_stage(name, stage)
@ -286,7 +286,7 @@ class OverState(object):
self.over_run = {} self.over_run = {}
yield self.over yield self.over
for comp in self.over: for comp in self.over:
name = comp.keys()[0] name = comp.iterkeys().next()
stage = comp[name] stage = comp[name]
if name not in self.over_run: if name not in self.over_run:
v_stage = self.verify_stage(stage) v_stage = self.verify_stage(stage)
@ -296,7 +296,7 @@ class OverState(object):
else: else:
for sret in self.call_stage(name, stage): for sret in self.call_stage(name, stage):
for yret in yielder(sret): for yret in yielder(sret):
sname = yret.keys()[0] sname = yret.iterkeys().next()
yield [self.get_stage(sname)] yield [self.get_stage(sname)]
final = {} final = {}
for minion in yret[sname]: for minion in yret[sname]:

View File

@ -94,10 +94,9 @@ class Serial(object):
return msgpack.loads(msg, use_list=True) return msgpack.loads(msg, use_list=True)
except Exception as exc: except Exception as exc:
log.critical('Could not deserialize msgpack message: {0}' log.critical('Could not deserialize msgpack message: {0}'
'In an attempt to keep Salt running, returning an empty dict.'
'This often happens when trying to read a file not in binary mode.' 'This often happens when trying to read a file not in binary mode.'
'Please open an issue and include the following error: {1}'.format(msg, exc)) 'Please open an issue and include the following error: {1}'.format(msg, exc))
return {} raise
def load(self, fn_): def load(self, fn_):
''' '''
@ -193,7 +192,7 @@ class SREQ(object):
''' '''
if hasattr(self, '_socket'): if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict): if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys(): for socket in self.poller.sockets:
self.poller.unregister(socket) self.poller.unregister(socket)
else: else:
for socket in self.poller.sockets: for socket in self.poller.sockets:
@ -235,7 +234,7 @@ class SREQ(object):
def destroy(self): def destroy(self):
if isinstance(self.poller.sockets, dict): if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys(): for socket in self.poller.sockets:
if socket.closed is False: if socket.closed is False:
socket.setsockopt(zmq.LINGER, 1) socket.setsockopt(zmq.LINGER, 1)
socket.close() socket.close()

View File

@ -37,7 +37,7 @@ def merge_aggregate(obj_a, obj_b):
def merge_overwrite(obj_a, obj_b): def merge_overwrite(obj_a, obj_b):
for obj in obj_b: for obj in obj_b:
if obj in obj_a.keys(): if obj in obj_a:
obj_a[obj] = obj_b[obj] obj_a[obj] = obj_b[obj]
return obj_a return obj_a
return merge_recurse(obj_a, obj_b) return merge_recurse(obj_a, obj_b)
@ -282,7 +282,7 @@ class Pillar(object):
''' '''
top = collections.defaultdict(OrderedDict) top = collections.defaultdict(OrderedDict)
orders = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict)
for ctops in tops.values(): for ctops in tops.itervalues():
for ctop in ctops: for ctop in ctops:
for saltenv, targets in ctop.items(): for saltenv, targets in ctop.items():
if saltenv == 'include': if saltenv == 'include':
@ -306,7 +306,7 @@ class Pillar(object):
if isinstance(comp, string_types): if isinstance(comp, string_types):
states[comp] = True states[comp] = True
top[saltenv][tgt] = matches top[saltenv][tgt] = matches
top[saltenv][tgt].extend(list(states.keys())) top[saltenv][tgt].extend(states)
return self.sort_top_targets(top, orders) return self.sort_top_targets(top, orders)
def sort_top_targets(self, top, orders): def sort_top_targets(self, top, orders):
@ -316,7 +316,7 @@ class Pillar(object):
sorted_top = collections.defaultdict(OrderedDict) sorted_top = collections.defaultdict(OrderedDict)
# pylint: disable=cell-var-from-loop # pylint: disable=cell-var-from-loop
for saltenv, targets in top.items(): for saltenv, targets in top.items():
sorted_targets = sorted(targets.keys(), sorted_targets = sorted(targets,
key=lambda target: orders[saltenv][target]) key=lambda target: orders[saltenv][target])
for target in sorted_targets: for target in sorted_targets:
sorted_top[saltenv][target] = targets[target] sorted_top[saltenv][target] = targets[target]

View File

@ -210,7 +210,7 @@ def ext_pillar(minion_id, # pylint: disable=W0613
name_field = model_meta['name'] name_field = model_meta['name']
fields = model_meta['fields'] fields = model_meta['fields']
if 'filter' in model_meta.keys(): if 'filter' in model_meta:
qs = (model_orm.objects qs = (model_orm.objects
.filter(**model_meta['filter']) .filter(**model_meta['filter'])
.values(*fields)) .values(*fields))

Some files were not shown because too many files have changed in this diff Show More